diff --git a/backend/proton/.gitignore b/backend/proton/.gitignore
new file mode 100644
index 00000000..f604d92f
--- /dev/null
+++ b/backend/proton/.gitignore
@@ -0,0 +1,3 @@
+# Ignore everything in this directory
+*
+# Except this file !.gitignore
diff --git a/backend/tecs/HA.conf b/backend/tecs/HA.conf
new file mode 100755
index 00000000..e64b5ea8
--- /dev/null
+++ b/backend/tecs/HA.conf
@@ -0,0 +1,166 @@
+## HA configuration (two-node)
+# Each HA system has its own configuration file. The naming convention is: a single HA set uses HA_1.conf; two HA sets use HA_2_1.conf and HA_2_2.conf, and so on.
+# Copy and rename this template before editing. If you edit it with vi, run export LC_ALL="zh_CN.GB2312" first to avoid garbled characters, and run unset LC_ALL after editing.
+
+[DEFAULT]
+# OpenCOS components installed under HA. Any comma-separated combination of loadbalance,database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer,ironic (matching the keys of the service lists below).
+# "all" is shorthand for every component; order does not matter. haproxy means LB is configured.
+# Note that HA is installed through the conf method, which cannot install ironic. If ironic is listed here, install it manually through the custom method before the overall installation flow.
+# This option is mandatory.
+components=database,amqp,keystone,neutron,glance,cinder,nova,horizon,heat,ceilometer
+
+# Component services managed by HA (may be trimmed); separate multiple services with commas.
+# If no services are added or removed, the options below need not be changed. Unused components do not have to be commented out; whether a component is enabled is decided by "components".
+loadbalance = haproxy
+
+database=mariadb
+
+amqp=rabbitmq-server
+
+keystone=openstack-keystone
+
+#neutron-metadata-agent,neutron-lbaas-agent don't use default
+neutron=neutron-server,neutron-l3-agent,neutron-dhcp-agent
+
+#openstack-glance-scrubber don't use default
+glance=openstack-glance-api,openstack-glance-registry
+
+#openstack-cinder-backup don't use default
+cinder=openstack-cinder-api,openstack-cinder-scheduler,openstack-cinder-volume
+
+nova=openstack-nova-api,openstack-nova-conductor,openstack-nova-scheduler,openstack-nova-cert,openstack-nova-consoleauth,openstack-nova-novncproxy
+
+horizon=httpd,opencos-alarmmanager,opencos-alarmagent
+
+heat=openstack-heat-api,openstack-heat-engine,openstack-heat-api-cfn,openstack-heat-api-cloudwatch
+
+ceilometer=openstack-ceilometer-api,openstack-ceilometer-central,openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier,openstack-ceilometer-notification,openstack-ceilometer-collector
+
+ironic=openstack-ironic-api,openstack-ironic-conductor
+
+# Additional clone service resources (run on every node) as the deployment requires. Give the service name without the .service suffix; separate multiple services with commas. Optional.
+#clone_service=
+
+# Name of the guard service
+guard=tfg-guard
+
+# HA cluster heartbeat links: at least one, three recommended. Each link is a pair of IPs separated by a comma.
+# If LB and HA run on the same servers, the heartbeat links need not be filled in again here.
+# First heartbeat link (external-network IPs in this example). Mandatory.
+heartbeat_link1=10.43.179.221,10.43.179.222
+# Second heartbeat link; must not share an IP with any other link. Optional.
+heartbeat_link2=
+# Third heartbeat link; must not share an IP with any other link. Optional.
+heartbeat_link3=
+
+# The node that runs the HA script is the local node; the other nodes are remote nodes. This is the root password used for ssh login to the remote nodes. Mandatory.
+remote_node_password=ossdbg1
+
+
+# haproxy floating IP address. Mandatory when LB is configured.
+#loadbalance_fip=192.160.0.226
+#loadbalance_nic=ens33
+#loadbalance_netmask=23
+#############DB################
+# Database floating IP; may be the same as the LB floating IP. Mandatory.
+# Floating IP address
+#database_fip=192.160.0.225
+# NIC that carries the floating IP
+#database_nic=baseleft
+# Netmask, in CIDR format
+#database_netmask=23
+
+# Full path of the database shared disk. Mandatory if the component is present.
+# Disk name; LVM is recommended. When using LVM, configure the logical volume name.
+#database_device=/dev/mapper/vg_mysql-lv_mysql
+# Filesystem type
+#database_fs_type=ext4
+
+# Full path of the database-backup shared disk; must differ from the other shared disks (feature not yet supported). Optional.
+#backup_database_device=/dev/mapper/vg_mysqlbackup-lv_mysqlbackup
+#backup_database_fs_type=ext4
+
+##############AMQP################
+# AMQP floating IP; may be the same as the LB floating IP. Mandatory.
+#amqp_fip=192.160.0.225
+#amqp_nic=baseleft
+#amqp_netmask=23
+
+##############keystone################
+# keystone floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#keystone_fip=192.160.0.225
+#keystone_nic=baseleft
+#keystone_netmask=23
+
+##############neutron################
+# neutron floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#neutron_fip=192.160.0.225
+#neutron_nic=baseleft
+#neutron_netmask=23
+
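# Illustrative sketch only (addresses are hypothetical): when no LB is configured, each
# selected component gets its own floating-IP triple on its service NIC, for example
#   keystone_fip=192.160.0.228
#   keystone_nic=baseleft
#   keystone_netmask=23
# When the component is placed behind the LB, leave the three entries commented out and
# fill in only the loadbalance_* values above.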
+##############glance################
+# glance floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#glance_fip=192.160.0.225
+#glance_nic=baseleft
+#glance_netmask=23
+
+# Image shared-disk settings; must differ from the other shared disks. Mandatory if the component is present.
+# glance_device_type can be drbd or iscsi
+
+#glance_device_type=drbd
+#glance_device=/dev/mapper/vg_glance-lv_glance
+#glance_fs_type=ext4
+
+##############cinder################
+# cinder floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#cinder_fip=192.160.0.225
+#cinder_nic=baseleft
+#cinder_netmask=23
+
+# Management-port IP(s) of the disk array used for VM block devices; separate multiple IPs with spaces. Optional.
+#cinder_ping_ip=192.160.0.7
+
+##############nova################
+# nova floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#nova_fip=192.160.0.225
+#nova_nic=baseleft
+#nova_netmask=23
+
+##############horizon################
+# Floating IP used to log in to the TECS dashboard. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+# Components with different floating IPs can run on different nodes. If this component must also run
+# on the same node as some other component, configure location_constraint.
+#horizon_fip=10.43.179.230
+#horizon_nic=kmportv1
+#horizon_netmask=23
+
+##############ironic################
+# ironic floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#ironic_fip=192.160.0.225
+#ironic_nic=baseleft
+#ironic_netmask=23
+
+##############heat################
+# heat floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#heat_fip=192.160.0.225
+#heat_nic=baseleft
+#heat_netmask=23
+
+##############ceilometer################
+# ceilometer floating IP. When the component is behind LB, leave the floating IP unset; otherwise mandatory if the component is present.
+#ceilometer_fip=192.160.0.225
+#ceilometer_nic=baseleft
+#ceilometer_netmask=23
+
+# Full path of the mongod database shared disk. Recommended.
+#mongod_device=/dev/mapper/vg_mongodb-lv_mongodb
+# Filesystem type
+#mongod_fs_type=ext4
+
+# Set to local if the mongod database uses a local disk; otherwise leave empty.
+mongod_local=local
+
+# The following two options describe the disk array that holds the shared disks. For now, all shared disks used in this configuration must reside on a single disk array. Optional.
+# Parameter format: (primary controller service-port IP, primary controller iqn),(standby controller service-port IP, standby controller iqn)
+# If both controllers use the same iqn, this can be configured as just (primary controller service-port IP, primary controller iqn)
+#iscsi_storage=(172.32.1.1,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8b),(172.32.1.2,iqn.2099-01.cn.com.zte:usp.spr11-4c:09:b4:b0:56:8c)
diff --git a/backend/tecs/getnodeinfo.sh b/backend/tecs/getnodeinfo.sh
new file mode 100755
index 00000000..214fcc9a
--- /dev/null
+++ b/backend/tecs/getnodeinfo.sh
@@ -0,0 +1,159 @@
+#!/bin/bash
+
+dhcp_ip="127.0.0.1"
+DISCOVERD_URL="http://$dhcp_ip:5050/v1/continue"
+
+function update() {
+    jq "$1" data.json > temp.json || echo "Error: update $1 to json failed"
+    mv temp.json data.json
+}
+
+function get_system_info(){
+    PRODUCT=$(dmidecode -s system-product-name)
+    FAMILY=$(dmidecode -t system|grep "Family"|cut -d ":" -f2)
+    VERSION=$(dmidecode -s system-version)
+    SERIAL=$(dmidecode -s system-serial-number)
+    MANUFACTURER=$(dmidecode -s system-manufacturer)
+    UUID=$(dmidecode -s system-uuid)
+    FQDN=$(hostname -f)
+    echo '{"system":{}}' > data.json
+    update ".system[\"product\"] = \"$PRODUCT\""
+    update ".system[\"family\"] = \"$FAMILY\""
+    update ".system[\"fqdn\"] = \"$FQDN\""
+    update ".system[\"version\"] = \"$VERSION\""
+    update ".system[\"serial\"] = \"$SERIAL\""
+    update ".system[\"manufacturer\"] = \"$MANUFACTURER\""
+    update ".system[\"uuid\"] = \"$UUID\""
+}
+
+function get_cpu_info(){
+    REAL=$(cat /proc/cpuinfo |grep "physical id"|sort |uniq|wc -l)
+    TOTAL=$(cat /proc/cpuinfo |grep "processor"|wc -l)
+    update ".cpu[\"real\"] = $REAL"
+    update ".cpu[\"total\"] = $TOTAL"
+
+    for i in $(seq $TOTAL)
+    do
+        if [ ! 
-z "$i" ]; then + SPEC_MODEL=$(cat /proc/cpuinfo | grep name | cut -f2 -d:|sed -n $i"p") + SPEC_FRE=$(cat /proc/cpuinfo | grep MHz | cut -f2 -d:|sed -n $i"p") + update ".cpu[\"spec_$i\"] = {model:\"$SPEC_MODEL\", frequency:$SPEC_FRE}" + fi + done +} + +function get_memory_info(){ + PHY_NUM=$(dmidecode -t memory|grep "Physical Memory Array"|wc -l) + TOTAL_MEM=$(cat /proc/meminfo |grep MemTotal |cut -d ":" -f2) + update ".memory[\"total\"] = \"$TOTAL_MEM\"" + for num in $(seq $PHY_NUM) + do + SLOTS=$(dmidecode -t memory |grep "Number Of Devices" |cut -d ":" -f2|sed -n $num"p") + MAX_CAP=$(dmidecode -t memory |grep "Maximum Capacity" |cut -d ":" -f2|sed -n $num"p") + update ".memory[\"phy_memory_$num\"] = {slots:\"$SLOTS\", maximum_capacity:\"$MAX_CAP\"}" + + for i in $(seq $SLOTS) + do + if [ ! -z "$i" ]; then + DEVICE_FRE=$(dmidecode -t memory |grep "Speed" |cut -d ":" -f2|sed -n $i"p") + DEVICE_TYPE=$(dmidecode -t memory |grep 'Type:' |grep -v "Error Correction Type"|cut -d ":" -f2|sed -n $i"p") + DEVICE_SIZE=$(dmidecode -t memory |grep Size |cut -d ":" -f2|sed -n $i"p") + update ".memory[\"phy_memory_$num\"][\"devices_$i\"] = {frequency:\"$DEVICE_FRE\", type:\"$DEVICE_TYPE\", size:\"$DEVICE_SIZE\"}" + fi + done + done +} + +function get_net_info(){ + physical_networks=`ls -l /sys/class/net/ | grep -v lo |grep "pci"|awk -F 'net/' '{print $2}'` + if [ -f "/sys/class/net/bonding_masters" ]; then + bond_network=$(cat /sys/class/net/bonding_masters) + if [ ! -z "$bond_network" ];then + physical_networks+=" $bond_network" + fi + fi + for iface in $physical_networks + do + NAME=$iface + MAC=$(ip link show $iface | awk '/ether/ {print $2}') + IP=$(ip addr show $iface | awk '/inet / { sub(/\/.*/, "", $2); print $2 }') + NETMASK=$(ifconfig $iface | grep netmask | awk '{print $4}') + STATE=$(ip link show $iface | awk '/mtu/ {print $3}') + PCI=$(ethtool -i $iface|grep "bus-info"|cut -d " " -f2) + CURRENT_SPEED=$(ethtool $iface |grep Speed |awk -F " " '{print $2}') + LINE=$(ethtool $iface|grep -n "Supported pause frame use"|awk -F ":" '{print $1}') + LINE=$[ LINE - 1 ] + LINE_SPEED=$(ethtool $iface|grep -n "Supported link modes"|awk -F ":" '{print $1}') + BOND=$(ifconfig $iface | grep MASTER) + if [ $LINE -eq $LINE_SPEED ]; then + MAX_SPEED=$(ethtool $iface|grep "Supported link modes"|cut -d ":" -f2) + else + MAX_SPEED=$(ethtool $iface |sed -n $LINE"p"|awk -F " " '{print $1}') + fi + + UP="UP" + if [[ "$STATE" =~ "$UP" ]]; then + STATE="up" + else + STATE="down" + fi + if [ -z "$BOND" ]; then + TYPE="ether" + else + TYPE="bond" + SLAVES=$(find /etc/sysconfig/network-scripts/ -name "ifcfg-*" |xargs grep "MASTER=$iface"|awk -F 'ifcfg-' '{print $2}'|awk -F ':' '{print $1}') + fi + if [ ! 
-z "$MAC" ]; then
+            update ".interfaces[\"$iface\"] = {mac: \"$MAC\", ip: \"$IP\", netmask: \"$NETMASK\", name: \"$iface\", max_speed: \"$MAX_SPEED\", state: \"$STATE\", pci: \"$PCI\", current_speed: \"$CURRENT_SPEED\", type: \"$TYPE\", slaves:\"$SLAVES\"}"
+        fi
+    done
+}
+
+function get_disk_info(){
+    for disk in $(fdisk -l|grep Disk|grep "/dev" |cut -d ":" -f1|awk -F "/" '{print $NF}')
+    do
+        DISK_NAME=$disk
+        DISK_SIZE=$(fdisk -l|grep Disk|grep "/dev" |grep -w $disk|cut -d "," -f2)
+        DISK_DISK=$(ls -l /dev/disk/by-path/|grep $disk"$"|awk '{print $9}')
+        DISK_EXTRA_1=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 1p)
+        DISK_EXTRA_2=$(ls -l /dev/disk/by-id/|grep $disk"$"|awk '{print $9}'|sed -n 2p)
+        # query the disk currently being iterated rather than a hard-coded /dev/sda
+        MODEL=$(hdparm -I /dev/$disk |grep Model | cut -d ":" -f2)
+        REMOVABLE=$(hdparm -I /dev/$disk |grep removable|awk '{print $4}')
+        update ".disk[\"$disk\"] = {name: \"$DISK_NAME\", size: \"$DISK_SIZE\", disk: \"$DISK_DISK\", model: \"$MODEL\", removable: \"$REMOVABLE\",extra: [\"$DISK_EXTRA_1\", \"$DISK_EXTRA_2\"]}"
+    done
+}
+
+function main(){
+    get_system_info
+    get_cpu_info
+    get_memory_info
+    get_net_info
+    get_disk_info
+}
+main
+
+BMC_ADDRESS=$(ipmitool lan print | grep -e "IP Address [^S]" | awk '{ print $4 }')
+if [ -z "$BMC_ADDRESS" ]; then
+    BMC_ADDRESS=$(ipmitool lan print 3| grep -e "IP Address [^S]" | awk '{ print $4 }')
+fi
+update ".ipmi_address = \"$BMC_ADDRESS\""
+
+update ".data_name = \"baremetal_source\""
+
+update ".os_status = \"active\""
+
+echo Collected:
+cat data.json
+
+RESULT=$(eval curl -i -X POST \
+    "-H 'Accept: application/json'" \
+    "-H 'Content-Type: application/json'" \
+    "-d @data.json" \
+    "$DISCOVERD_URL")
+
+if echo $RESULT | grep "HTTP/1.0 4"; then
+    echo "Ironic API returned error: $RESULT"
+fi
+
+echo "Node is now discovered! Halting..."
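# For orientation, an abridged, illustrative sketch of the data.json document assembled
# above and POSTed to $DISCOVERD_URL (field values here are made up, not real output):
#   {
#     "system": {"product": "...", "family": "...", "fqdn": "node-1", "version": "...",
#                "serial": "...", "manufacturer": "...", "uuid": "..."},
#     "cpu": {"real": 2, "total": 32, "spec_1": {"model": "...", "frequency": 2100.000}},
#     "memory": {"total": "131912264 kB", "phy_memory_1": {"slots": "24", "maximum_capacity": "768 GB",
#                "devices_1": {"frequency": "...", "type": "...", "size": "..."}}},
#     "interfaces": {"enp2s0f0": {"mac": "...", "ip": "...", "netmask": "...", "name": "enp2s0f0",
#                    "max_speed": "...", "state": "up", "pci": "...", "current_speed": "...",
#                    "type": "ether", "slaves": ""}},
#     "disk": {"sda": {"name": "sda", "size": "...", "disk": "...", "model": "...",
#              "removable": "...", "extra": ["...", "..."]}},
#     "ipmi_address": "...",
#     "data_name": "baremetal_source",
#     "os_status": "active"
#   }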
+sleep 5 diff --git a/backend/tecs/jq-1.3-2.el7.x86_64.rpm b/backend/tecs/jq-1.3-2.el7.x86_64.rpm new file mode 100755 index 00000000..61008c9c Binary files /dev/null and b/backend/tecs/jq-1.3-2.el7.x86_64.rpm differ diff --git a/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm b/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm new file mode 100755 index 00000000..e45fde00 Binary files /dev/null and b/backend/tecs/network-configuration-1.1.1-25.x86_64.rpm differ diff --git a/backend/tecs/storage_auto_config/base/cinder.json.sample b/backend/tecs/storage_auto_config/base/cinder.json.sample new file mode 100755 index 00000000..c8e5c33d --- /dev/null +++ b/backend/tecs/storage_auto_config/base/cinder.json.sample @@ -0,0 +1,39 @@ +[ + { + "protocol_type": "ISCSI", + "service": "glance", + "lun": "0", + "data_ips": [ + "10.43.177.159" + ], + "lvm_config": { + "size": 100, + "vg_name": "VolGroupHAImage", + "lv_name": "lvHAImage", + "fs_type": "ext4" + } + }, + { + "protocol_type": "ISCSI", + "service": "db", + "lun": "1", + "data_ips": [ + "162.1.1.101" + ], + "lvm_config": { + "size": 100, + "vg_name": "VolGroupHAMysql", + "lv_name": "lvHAMysql", + "fs_type": "ext4" + } + }, + { + "protocol_type": "CEPH", + "rbd_config": { + "size": 100, + "rbd_pool": "mysql", + "rbd_volume": "mysql", + "fs_type": "ext4" # can be none + } + } +] \ No newline at end of file diff --git a/backend/tecs/storage_auto_config/base/control.json.sample b/backend/tecs/storage_auto_config/base/control.json.sample new file mode 100755 index 00000000..c8e5c33d --- /dev/null +++ b/backend/tecs/storage_auto_config/base/control.json.sample @@ -0,0 +1,39 @@ +[ + { + "protocol_type": "ISCSI", + "service": "glance", + "lun": "0", + "data_ips": [ + "10.43.177.159" + ], + "lvm_config": { + "size": 100, + "vg_name": "VolGroupHAImage", + "lv_name": "lvHAImage", + "fs_type": "ext4" + } + }, + { + "protocol_type": "ISCSI", + "service": "db", + "lun": "1", + "data_ips": [ + "162.1.1.101" + ], + "lvm_config": { + "size": 100, + "vg_name": "VolGroupHAMysql", + "lv_name": "lvHAMysql", + "fs_type": "ext4" + } + }, + { + "protocol_type": "CEPH", + "rbd_config": { + "size": 100, + "rbd_pool": "mysql", + "rbd_volume": "mysql", + "fs_type": "ext4" # can be none + } + } +] \ No newline at end of file diff --git a/backend/tecs/storage_auto_config/base/multipath.conf b/backend/tecs/storage_auto_config/base/multipath.conf new file mode 100755 index 00000000..31d49e8b --- /dev/null +++ b/backend/tecs/storage_auto_config/base/multipath.conf @@ -0,0 +1,144 @@ +# This is a basic configuration file with some examples, for device mapper +# mulead of using WWIDs as names. +defaults { + user_friendly_names yes + queue_without_daemon no +# find_multipaths yes +} +## +## Here is an example of how to configure some standard options. +## +# +#defaults { +# udev_dir /dev +# polling_interval 10 +# selector "round-robin 0" +# path_grouping_policy multibus +# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n" +# prio alua +# path_checker readsector0 +# rr_min_io 100 +# max_fds 8192 +# rr_weight priorities +# failback immediate +# no_path_retry fail +# user_friendly_names yes +#} +## +## The wwid line in the following blacklist section is shown as an example +## of how to blacklist devices by wwid. The 2 devnode lines are the +## compiled in default blacklist. If you want to blacklist entire types +## of devices, such as all scsi devices, you should use a devnode line. 
+## However, if you want to blacklist specific devices, you should use +## a wwid line. Since there is no guarantee that a specific device will +## not change names on reboot (from /dev/sda to /dev/sdb for example) +## devnode lines are not recommended for blacklisting specific devices. +## +#blacklist { +# wwid 26353900f02796769 +# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*" +# devnode "^hd[a-z]" +#} +#multipaths { +# multipath { +# wwid 3600508b4000156d700012000000b0000 +# alias yellow +# path_grouping_policy multibus +# path_checker readsector0 +# path_selector "round-robin 0" +# failback manual +# rr_weight priorities +# no_path_retry 5 +# } +# multipath { +# wwid 1DEC_____321816758474 +# alias red +# } +#} +#devices { +# device { +# vendor "COMPAQ " +# product "HSV110 (C)COMPAQ" +# path_grouping_policy multibus +# getuid_callout "/lib/udev/scsi_id --whitelisted --device=/dev/%n" +# path_checker readsector0 +# path_selector "round-robin 0" +# hardware_handler "0" +# failback 15 +# rr_weight priorities +# no_path_retry queue +# } +# device { +# vendor "COMPAQ " +# product "MSA1000 " +# path_grouping_policy multibus +# } +#} +devices { + device { + vendor "FUJITSU" + product "ETERNUS_DXL" + prio alua + path_grouping_policy group_by_prio + path_selector "round-robin 0" + failback immediate + no_path_retry 0 (*1) + path_checker tur + dev_loss_tmo 2097151 (*2) + fast_io_fail_tmo 1 + } + device { + vendor "FUJITSU" + product "ETERNUS_DXM" + prio alua + path_grouping_policy group_by_prio + path_selector "round-robin 0" + failback immediate + no_path_retry 0 (*1) + path_checker tur + dev_loss_tmo 2097151 (*2) + fast_io_fail_tmo 1 + } + device { + vendor "FUJITSU" + product "ETERNUS_DX400" + prio alua + path_grouping_policy group_by_prio + path_selector "round-robin 0" + failback immediate + no_path_retry 0 (*1) + path_checker tur + dev_loss_tmo 2097151 (*2) + fast_io_fail_tmo 1 + } + device { + vendor "FUJITSU" + product "ETERNUS_DX8000" + prio alua + path_grouping_policy group_by_prio + path_selector "round-robin 0" + failback immediate + no_path_retry 0 (*1) + path_checker tur + dev_loss_tmo 2097151 (*2) + fast_io_fail_tmo 1 + } + device { + vendor "ZTE" + product "ZXUSP" + path_grouping_policy group_by_prio + path_checker tur + prio alua + path_selector "round-robin 0" + hardware_handler "1 alua" + failback immediate + rr_weight priorities + no_path_retry 0 (*1) + rr_min_io_rq 1 + flush_on_last_del yes + } +} + + +blacklist { +} diff --git a/backend/tecs/storage_auto_config/common/__init__.py b/backend/tecs/storage_auto_config/common/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/backend/tecs/storage_auto_config/common/cinder_conf.py b/backend/tecs/storage_auto_config/common/cinder_conf.py new file mode 100755 index 00000000..c0932653 --- /dev/null +++ b/backend/tecs/storage_auto_config/common/cinder_conf.py @@ -0,0 +1,281 @@ + +import uuid +from utils import * +from xml.etree.ElementTree import ElementTree, Element + + +class BaseConfig(): + _CINDER_CONF_PATH = "/etc/cinder/cinder.conf" + SET_CONFIG = \ + "openstack-config --set {config_file} {section} {key} {value}" + GET_CONFIG = \ + "openstack-config --get {config_file} {section} {key}" + instance = None + + def __init__(self): + self._BACKEND_MAPPING = { + 'KS3200_IPSAN': ZTEBackendConfig, + 'KS3200_FCSAN': ZTEBackendConfig, + 'FUJISTU_ETERNUS': FUJISTUBackendConfig, + 'LVM': None, + 'CEPH': CEPHBackendConfig, + } + self.instance_mapping = {} + + def __get_backend_instance(self, backend_type): + if not 
backend_type or \ + backend_type not in self._BACKEND_MAPPING.keys(): + print_or_raise("Volume driver type '%s' is not valid." % + backend_type, + ScriptInnerError) + + backend_instance = self.instance_mapping.get(backend_type, BaseConfig) + if isinstance(backend_instance, self._BACKEND_MAPPING[backend_type]): + return backend_instance + else: + self.instance_mapping.update( + {backend_type: self._BACKEND_MAPPING[backend_type]()}) + return self.instance_mapping[backend_type] + + @classmethod + def single_instance(cls): + if not BaseConfig.instance: + BaseConfig.instance = BaseConfig() + return BaseConfig.instance + + def _construct_particular_cinder_data(self, backend, backend_data): + print_or_raise("Backend _construct_particular_cinder_data method no " + "implement!", ScriptInnerError) + + def _write_xml(self, fp_xml, **backend_device_args): + self.backend_instance._write_xml(fp_xml, **backend_device_args) + + def _construct_commonality_cinder_data(self, backend, backend_data): + backend_pools, xml_path = \ + self.backend_instance._construct_particular_cinder_data( + backend, backend_data) + + backend_data['volume_backend_name'] = \ + backend_data.pop('volume_type') + + set_backend = lambda x, y: self.SET_CONFIG.format( + config_file=self._CINDER_CONF_PATH, + section=backend, + key=x, value=y) + + backend_config_list = list() + backend_config_list += map( + set_backend, backend_data.keys(), backend_data.values()) + + get_bakcends = \ + self.GET_CONFIG.format(config_file=self._CINDER_CONF_PATH, + section="DEFAULT", + key="enabled_backends") + out, err = execute(get_bakcends, check_exit_code=[0, 1]) + exist_backends = out.split("\n")[0] if out else "" + enabled_backends = \ + exist_backends if backend in exist_backends else \ + "%s" % backend if not out else "%s,%s" % \ + (exist_backends, backend) + set_bakcends = \ + self.SET_CONFIG.format(config_file=self._CINDER_CONF_PATH, + section="DEFAULT", + key="enabled_backends", + value=enabled_backends) + + # write to cinder.conf + config_set_all = set_bakcends + ";" + ";".join(backend_config_list) + execute(config_set_all) + + return backend_pools, xml_path + + def is_needed_generate_backend_xml(self, backend_driver): + if backend_driver in ['KS3200_IPSAN', 'KS3200_FCSAN', + 'FUJISTU_ETERNUS']: + return True + else: + return False + + def config_backend(self, backend_cinder_args, **backend_device_args): + """ + Config outer interface,for public flow. 
+ :param backend_device_args: device config + :param backend_cinder_args: backend config + :return: + """ + backend_data = backend_cinder_args[1] + backend_driver = backend_data.get('volume_driver', None) + self.backend_instance = self.__get_backend_instance(backend_driver) + + # config cinder.conf + backend_pools, xml_path = \ + self._construct_commonality_cinder_data(backend_cinder_args[0], + backend_data) + + # config xml + if self.is_needed_generate_backend_xml(backend_driver): + backend_device_args.update({'pools': backend_pools}) + with open(xml_path, "w+") as fp_xml: + self._write_xml(fp_xml, **backend_device_args) + execute("chown cinder:cinder %s" % xml_path) + + def update_xml_node(self, element_obj, node_path, content): + node_list = element_obj.findall(node_path) + if node_list: + node_list[0].text = content + else: + new_element = Element(node_path.split('/')[-1]) + new_element.text = content + parent_node = element_obj.findall(node_path.split('/')[0]) + parent_node[0].append(new_element) + + +class ZTEBackendConfig(BaseConfig): + _DEFAULT_USERNAME = "admin" + _DEFAULT_USERPWD = "admin" + _DEFAULT_XML_FILE_PREFIX = "cinder_zte_conf_file" + _DEFAULT_XML_TEMPLATE_PATH = "/etc/cinder/cinder_zte_conf.xml" + _ISCSI_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteISCSIDriver' + _FC_DRIVER = 'cinder.volume.drivers.zte.zte_ks.ZteFCDriver' + + def _construct_particular_cinder_data(self, backend, backend_data): + # construct commonality data in cinder.conf + backend_data['volume_driver'] = \ + self._ISCSI_DRIVER \ + if "KS3200_IPSAN" == backend_data['volume_driver'] \ + else self._FC_DRIVER + backend_data[self._DEFAULT_XML_FILE_PREFIX] = \ + backend_data.pop('backend_config_file') \ + if backend_data.get('backend_config_file', None) \ + else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX, + backend) + backend_data['use_multipath_for_image_xfer'] = \ + backend_data.get('multipath_tool', True) + backend_pools = backend_data.pop('pools') + + return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX] + + def _write_xml(self, fp, **backend_device_args): + if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH): + print_or_raise("XML file template %s not exists,can't load defult " + "params." 
% self._DEFAULT_XML_TEMPLATE_PATH, + ScriptInnerError) + + mgnt_ips = backend_device_args['management_ips'] + user_name = backend_device_args['user_name'] + user_pwd = backend_device_args['user_pwd'] + cinder_host_ip = backend_device_args['cinder_host_ip'] + pools = backend_device_args['pools'] + xml_fp = fp + + tree = ElementTree() + elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH) + for index in range(len(mgnt_ips)): + self.update_xml_node( + elements, + "Storage/ControllerIP" + str(index), mgnt_ips[index]) + + if cinder_host_ip: + self.update_xml_node(elements, "Storage/LocalIP", cinder_host_ip) + self.update_xml_node(elements, "Storage/UserName", user_name) + self.update_xml_node(elements, "Storage/UserPassword", user_pwd) + + # del all StoragePool and StorageVd node + pool_parent_node = elements.findall("LUN") + pool_child_nodes = elements.findall("LUN/StoragePool") + vd_child_nodes = elements.findall("LUN/StorageVd") + map(pool_parent_node[0].remove, pool_child_nodes + vd_child_nodes) + + # add StoragePool node base on pools + for pool in pools: + element = Element("StoragePool") + element.text = pool + element.tail = "\n\t" + pool_parent_node[0].insert(0, element) + + tree.write(xml_fp, encoding="utf-8", xml_declaration=True) + + +class FUJISTUBackendConfig(BaseConfig): + _DEFAULT_USERNAME = "root" + _DEFAULT_USERPWD = "root" + _DEFAULT_XML_FILE_PREFIX = "cinder_eternus_config_file" + _DEFAULT_XML_TEMPLATE_PATH = \ + "/etc/cinder/cinder_fujitsu_eternus_dx.xml" + FUJISTU_DRIVER = \ + "cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver" + + def _construct_particular_cinder_data(self, backend, backend_data): + # construct commonality data in cinder.conf + backend_data['volume_driver'] = self.FUJISTU_DRIVER + backend_data[self._DEFAULT_XML_FILE_PREFIX] = \ + backend_data.pop('backend_config_file') \ + if backend_data.get('backend_config_file', None) \ + else "/etc/cinder/%s_%s.xml" % (self._DEFAULT_XML_FILE_PREFIX, + backend) + backend_data['use_multipath_for_image_xfer'] = \ + backend_data.get('multipath_tool', True) + backend_data['use_fujitsu_image_volume'] = \ + backend_data.get('use_fujitsu_image_volume', True) + backend_data['fujitsu_min_image_volume_per_storage'] = \ + backend_data.get('fujitsu_min_image_volume_per_storage', 1) + backend_data['fujitsu_image_management_dir'] = \ + backend_data.get('fujitsu_image_management_dir', + '/var/lib/glance/conversion') + backend_pools = backend_data.pop('pools') + + return backend_pools, backend_data[self._DEFAULT_XML_FILE_PREFIX] + + def _write_xml(self, fp, **backend_device_args): + if not os.path.exists(self._DEFAULT_XML_TEMPLATE_PATH): + print_or_raise("XML file template %s not exists,can't load defult " + "params." 
% self._DEFAULT_XML_TEMPLATE_PATH, + ScriptInnerError) + + mgnt_ip = backend_device_args['management_ips'][0] + data_ips = backend_device_args['data_ips'] + user_name = backend_device_args['user_name'] + user_pwd = backend_device_args['user_pwd'] + pool = backend_device_args['pools'][0] + xml_fp = fp + + tree = ElementTree() + elements = tree.parse(self._DEFAULT_XML_TEMPLATE_PATH) + self.update_xml_node(elements, "EternusIP", mgnt_ip) + self.update_xml_node(elements, "EternusUser", user_name) + self.update_xml_node(elements, "EternusPassword", user_pwd) + self.update_xml_node(elements, "EternusPool", pool) + self.update_xml_node(elements, "EternusSnapPool", pool) + + root = tree.getroot() + map(root.remove, root.findall("EternusISCSIIP")) + for ip in data_ips: + element = Element("EternusISCSIIP") + element.text = ip + element.tail = "\n" + root.insert(4, element) + # root.append(element) + + tree.write(xml_fp, encoding="utf-8", xml_declaration=True) + + +class CEPHBackendConfig(BaseConfig): + NOVA_CONF_FILE = "/etc/nova/nova.conf" + GLANCE_API_CONF_FILE = "/etc/glance/glance-api.conf" + _RBD_STORE_USER = "cinder" + _RBD_POOL = "volumes" + _RBD_MAX_CLONE_DEPTH = 5 + _RBD_FLATTEN_VOLUME_FROM_SNAPSHOT = "False" + _RBD_CEPH_CONF = "/etc/ceph/ceph.conf" + _RBD_DRIVER = 'cinder.volume.drivers.rbd.RBDDriver' + + def _construct_particular_cinder_data(self, backend, backend_data): + backend_data['volume_driver'] = self._RBD_DRIVER + backend_data['rbd_pool'] = self._RBD_POOL + backend_data['rbd_max_clone_depth'] = self._RBD_MAX_CLONE_DEPTH + backend_data['rbd_flatten_volume_from_snapshot'] = \ + self._RBD_FLATTEN_VOLUME_FROM_SNAPSHOT + backend_data['rbd_ceph_conf'] = self._RBD_CEPH_CONF + uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn") + backend_data['rbd_secret_uuid'] = uuid_instance.urn.split(":")[2] + return [], [] diff --git a/backend/tecs/storage_auto_config/common/share_disk.py b/backend/tecs/storage_auto_config/common/share_disk.py new file mode 100755 index 00000000..0b682c52 --- /dev/null +++ b/backend/tecs/storage_auto_config/common/share_disk.py @@ -0,0 +1,312 @@ + +from utils import * + + +class BaseShareDisk(): + instance = None + + def __init__(self): + self._PROTOCOL_MAPPING = { + 'ISCSI': ISCSIShareDisk, + 'CEPH': CEPHShareDisk + } + self.instance_mapping = {} + + def __get_protocol_instance(self, protocol_type): + if not protocol_type or \ + protocol_type not in self._PROTOCOL_MAPPING.keys(): + print_or_raise("Protocol type '%s' is not valid." 
% protocol_type,
+                           ScriptInnerError)
+
+        protocol_instance = self.instance_mapping.get(protocol_type,
+                                                      BaseShareDisk)
+        if isinstance(protocol_instance,
+                      self._PROTOCOL_MAPPING[protocol_type]):
+            return protocol_instance
+        else:
+            self.instance_mapping.update(
+                {protocol_type: self._PROTOCOL_MAPPING[protocol_type]()})
+            return self.instance_mapping[protocol_type]
+
+    @classmethod
+    def single_instance(cls):
+        if not BaseShareDisk.instance:
+            BaseShareDisk.instance = BaseShareDisk()
+        return BaseShareDisk.instance
+
+    def deploy_share_disk(self, item, host_name):
+        protocol_instance = self.__get_protocol_instance(
+            item.get('protocol_type', 'ISCSI'))
+        protocol_instance.deploy_share_disk(item, host_name)
+
+
+class ISCSIShareDisk(BaseShareDisk):
+    _LV_DEFAULT_NAME = {
+        'glance': ("VolGroupHAImage", "lvHAImage", 254),
+        'db': ("VolGroupHAMysql", "lvHAMysql", 253),
+        'db_backup': ("VolGroupHABakMysql", "lvHABakMysql", 252),
+        'mongodb': ("VolGroupHAMongodb", "lvHAMongodb", 251),
+    }
+
+    def _get_iscsi_configs(self, record_list):
+        raid_config = {}
+        for record in record_list:
+            discovery_media_ip = record.split(" ")[0].split(":")[0]
+            discovery_media_iqn = record.split(" ")[1]
+            try:
+                execute("ping -c 1 -W 2 %s" % discovery_media_ip)
+            except ProcessExecutionError:
+                execute("iscsiadm -m node -T %s -p %s -o delete" %
+                        (discovery_media_iqn, discovery_media_ip),
+                        check_exit_code=[0, 1])
+                continue
+
+            if discovery_media_ip in raid_config.get(discovery_media_iqn, []):
+                execute("iscsiadm -m node -T %s -p %s -R" %
+                        (discovery_media_iqn, discovery_media_ip),
+                        check_exit_code=[0, 1])
+
+            elif discovery_media_iqn in raid_config.keys():
+                raid_config[discovery_media_iqn] += [discovery_media_ip]
+            else:
+                raid_config[discovery_media_iqn] = [discovery_media_ip]
+
+        print_or_raise("Raid config is:\n%s" % str(raid_config))
+        return raid_config
+
+    def _lv_reentrant_check(
+            self, vg_name, lv_name, iscsi_session_setup, lun=None,
+            data_ips=[]):
+        """
+        Check if share disk operation is reentrant.
+        :return: True, continue follow-up actions; False, do nothing.
+        """
+        lv_device_path = "/dev/%s/%s" % (vg_name, lv_name)
+        if not os.path.exists(lv_device_path):
+            return True
+
+        if not iscsi_session_setup:
+            exist_volumes = \
+                [sd for sd in self._ls_sd_path() if "-lun-" + lun in sd
+                 for ip in data_ips if "ip-" + ip in sd]
+            if not exist_volumes:
+                print_or_raise("Lvm %s exists, but no sd device matches!" %
+                               lv_device_path, ScriptInnerError)
+
+        return False
+
+    def _lv_rollback(self, lv, vg, block_device):
+        try:
+            # lvremove expects /dev/<vg>/<lv>, so the volume group comes first
+            execute("lvremove -y -ff /dev/%s/%s" % (vg, lv),
+                    check_exit_code=[0, 1, 5])
+            execute("vgremove -y -ff %s" % vg, check_exit_code=[0, 1, 5])
+            execute("pvremove -y -ff %s" % block_device,
+                    check_exit_code=[0, 1, 5])
+        except Exception as e:
+            print_or_raise("Rollback lvm resource failed!", e)
+
+    def _establish_iscsi_session(self, available_data_ips):
+        # discovery
+        discovery_ret = ""
+        for ip in available_data_ips:
+            out, err = execute(
+                "iscsiadm -m discovery -t st -p %s:3260" % ip)
+            discovery_ret += out
+            # if ('0' != err) or ('0\n' != err) or err:
+            #     print_or_raise("Discovery ip:%s failed,continue.." % ip)
+
+        if not discovery_ret:
+            print_or_raise("No discovery record!", ScriptInnerError)
+
+        record_list = list(set(discovery_ret.split('\n')[:-1]))
+        print_or_raise(
+            "Discovery successful! 
Record:\n%s" % "\n".join(record_list)) + + # get iqn and ip like {iqn1: ip1, iqn2:ip2} + raid_config = self._get_iscsi_configs(record_list) + + # auto config & login + login_cmd = \ + lambda x, y: "iscsiadm -m node -T %s -p %s:3260 -l" % (x, y) + auto_cmd = \ + lambda x, y: "iscsiadm -m node -T %s -p %s -o update -n " \ + "node.startup -v automatic" % (x, y) + login = [] + auto_config = [] + for index in range(len(raid_config.keys())): + k = raid_config.keys()[index] + v = raid_config[k] + login += map(login_cmd, [k] * len(v), v) + auto_config += map(auto_cmd, [k] * len(v), v) + execute(";".join(login)) + execute(";".join(auto_config)) + print_or_raise("Login successful!") + return raid_config + + def _modify_host_iqn(self, host_name): + # modify host IQN + host_iqn, err = execute("cat /etc/iscsi/initiatorname.iscsi") + md5_str, err = execute("echo -n %s | openssl md5" % host_name) + host_iqn = host_iqn.split("=")[1].strip() + wish_iqn = "iqn.opencos.rh:" + md5_str.split("=")[1].strip() + if wish_iqn != host_iqn: + print_or_raise( + "The host iqn is:%s, but wish iqn is %s, it will be modified." + % (host_iqn, wish_iqn)) + with open("/etc/iscsi/initiatorname.iscsi", "w") as fp: + fp.write("InitiatorName=" + wish_iqn + "\n") + execute("systemctl restart iscsid.service") + + def _ls_sd_path(self): + out, err = execute("ls /dev/disk/by-path") + return out.split("\n")[:-1] + + def _find_multipath_by_sd(self, iqns, lun_id): + sd_path = [] + attemps = 0 + while not sd_path: + sd_path = \ + [sd for sd in self._ls_sd_path() + if filter(lambda complex_sd_path: complex_sd_path in sd, + [iqn + "-lun-" + str(lun_id) for iqn in iqns])] + attemps += 1 + + if attemps == 5: + execute("iscsiadm -m node -R") + elif attemps > 10: + print_or_raise( + "After login successful," + "there is no local sd device match with block device.", + ScriptInnerError) + + time.sleep(2) + + sd_path = "/dev/disk/by-path/" + sd_path[0] + sd_real_path = os.path.realpath(sd_path) + + attemps = 0 + multipath_path = "" + while not os.path.exists(multipath_path): + multipath_device, err = execute("multipath -l %s" % sd_real_path) + # if not multipath_device or ('0' != err) or ('0\n' != err) or err: + # continue + + multipath_path = "/dev/mapper/" + \ + multipath_device.split("\n")[0].split(" ")[0] + attemps += 1 + + if attemps > 5: + print_or_raise( + "No multipath match with local sd device:%s." % + sd_real_path, + ScriptInnerError) + time.sleep(2) + + return multipath_path + + def _create_lv_by_multipath_device( + self, multipath, vg_name, lv_name, size, fs_type): + try: + # create lvm base on block device + execute("pvcreate -y -ff %s" % multipath, + check_exit_code=[0, 1, 5]) + execute("vgcreate -y -ff %s %s" % (vg_name, multipath), + check_exit_code=[0, 1, 5]) + + if size == -1: + lvcreate = "lvcreate -W y -l 100%%FREE -n %s %s" % \ + (lv_name, vg_name) + else: + lvcreate = "lvcreate -W y -L %sG -n %s %s" % \ + (round(size * 0.95, 2), lv_name, vg_name) + execute(lvcreate, check_exit_code=[0, 1, 5]) + execute("pvscan --cache --activate ay") + + # make filesystem + execute("mkfs.%s /dev/%s/%s" % (fs_type, vg_name, lv_name)) + except Exception as e: + self._lv_rollback(lv_name, vg_name, multipath) + print_or_raise("LVM create failed, resource has been rollbacked.", + e) + + def deploy_share_disk(self, item, host_name): + config_computer() + self._modify_host_iqn(host_name) + service = item['service'] + if service not in ['glance', 'db', 'db_backup', 'mongodb']: + print_or_raise("Service name '%s' is not valid." 
% service) + + # check ip + available_data_ips, invalid_ips = \ + get_available_data_ip(item['data_ips']) + if not available_data_ips: + print_or_raise("No valid data ips,please check.", ScriptInnerError) + + raid_config = self._establish_iscsi_session(available_data_ips) + + lv_config = item.get('lvm_config', None) + vg_name = lv_config.get('vg_name', self._LV_DEFAULT_NAME[service][0]) + lv_name = lv_config.get('lv_name', self._LV_DEFAULT_NAME[service][1]) + if not self._lv_reentrant_check(vg_name, lv_name, True): + return + + multipath = self._find_multipath_by_sd( + raid_config.keys(), + item.get('lun', self._LV_DEFAULT_NAME[service][2])) + + self._create_lv_by_multipath_device(multipath, + vg_name, + lv_name, + lv_config.get('size', -1), + lv_config.get('fs_type', 'ext4')) + + +class CEPHShareDisk(BaseShareDisk): + def __init__(self): + self.monitor_ip = '' + self.monitor_passwd = '' + + def deploy_share_disk(self, item, host_name): + self.monitor_ip = item.get('monitor_ip', '') + self.monitor_passwd = item.get('monitor_passwd', '') + rbd_pool = item['rbd_config']['rbd_pool'] + rbd_img = item['rbd_config']['rbd_volume'] + img_size = int(item['rbd_config']['size'])*1024 + fs_type = item['rbd_config'].get('fs_type', 'ext4') + cmd_create = 'sshpass -p %s ssh %s rbd create -p %s --size %s %s ' % \ + (self.monitor_passwd, + self.monitor_ip, + rbd_pool, + img_size, + rbd_img) + cmd_query = 'sshpass -p %s ssh %s rbd ls -l %s' % ( + self.monitor_passwd, self.monitor_ip, rbd_pool) + image_in_monitor = [] + print_or_raise("Create image %s in pool %s at monitor %s." % + (rbd_img, rbd_pool, self.monitor_ip)) + try: + out, err = execute(cmd_query) + if out: + for line in out.splitlines(): + image_in_monitor.append(line.split()[0]) + if rbd_img not in image_in_monitor: + execute(cmd_create) + except Exception as e: + print_or_raise("Query pool %s in monitor error or create image %s " + "in pool %s." 
% (rbd_pool, rbd_img, rbd_pool), e) + execute("systemctl stop rbdmap") + rbd_map = '%s/%s id=admin,' \ + 'keyring=/etc/ceph/ceph.client.admin.keyring' % (rbd_pool, + rbd_img) + rbd_map_need_to_write = True + print_or_raise("Write rbdmap.") + with open("/etc/ceph/rbdmap", "a+") as fp: + for line in fp: + if line == rbd_map + "\n": + rbd_map_need_to_write = False + if rbd_map_need_to_write is True: + fp.write(rbd_map + "\n") + execute("chmod 777 /etc/ceph/rbdmap") + execute("systemctl enable rbdmap") + execute("systemctl start rbdmap") + execute("mkfs.%s /dev/rbd/%s/%s" % (fs_type, rbd_pool, rbd_img)) diff --git a/backend/tecs/storage_auto_config/common/utils.py b/backend/tecs/storage_auto_config/common/utils.py new file mode 100755 index 00000000..db810791 --- /dev/null +++ b/backend/tecs/storage_auto_config/common/utils.py @@ -0,0 +1,231 @@ +import subprocess +import random +import shlex +import signal +import time +import os +import logging + + +LOG = logging.getLogger() +formatter = "%(asctime)s %(name)s %(levelname)s %(message)s" +logging.basicConfig(format=formatter, + filename="storage_auto_config.log", + filemode="a", + level=logging.DEBUG) +stream_log = logging.StreamHandler() +stream_log.setLevel(logging.DEBUG) +stream_log.setFormatter(logging.Formatter(formatter)) +LOG.addHandler(stream_log) + + +def print_or_raise(msg, exc=None): + if not exc: + LOG.debug(msg) + else: + if isinstance(exc, Exception): + LOG.error(msg) + raise exc + elif issubclass(exc, Exception): + raise exc(msg) + + +class ScriptInnerError(Exception): + def __init__(self, message=None): + super(ScriptInnerError, self).__init__(message) + + +class UnknownArgumentError(Exception): + def __init__(self, message=None): + super(UnknownArgumentError, self).__init__(message) + + +class NoRootWrapSpecified(Exception): + def __init__(self, message=None): + super(NoRootWrapSpecified, self).__init__(message) + + +class ProcessExecutionError(Exception): + def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, + description=None): + self.exit_code = exit_code + self.stderr = stderr + self.stdout = stdout + self.cmd = cmd + self.description = description + + if description is None: + description = "Unexpected error while running command." + if exit_code is None: + exit_code = '-' + message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" + % (description, cmd, exit_code, stdout, stderr)) + super(ProcessExecutionError, self).__init__(message) + + +def execute(cmd, **kwargs): + """Helper method to shell out and execute a command through subprocess. + + Allows optional retry.s + + :param cmd: Passed to subprocess.Popen. + :type cmd: string + TODO:param process_input: Send to opened process. + :type proces_input: string + TODO:param check_exit_code: Single bool, int, or list of allowed exit + codes. Defaults to [0]. Raise + :class:`ProcessExecutionError` unless + program exits with one of these code. + :type check_exit_code: boolean, int, or [int] + :param delay_on_retry: True | False. Defaults to True. If set to True, + wait a short amount of time before retrying. + :type delay_on_retry: boolean + :param attempts: How many times to retry cmd. + :type attempts: int + TODO:param run_as_root: True | False. Defaults to False. If set to True, + the command is prefixed by the command specified + in the root_helper kwarg. 
+ :type run_as_root: boolean + :param root_helper: command to prefix to commands called with + run_as_root=True + :type root_helper: string + TODO:param shell: whether or not there should be a shell used to + execute this command. Defaults to false. + :type shell: boolean + :param loglevel: log level for execute commands. + :type loglevel: int. (Should be logging.DEBUG or logging.INFO) + :returns: (stdout, stderr) from process execution + :raises: :class:`UnknownArgumentError` on + receiving unknown arguments + :raises: :class:`ProcessExecutionError` + """ + def _subprocess_setup(): + # Python installs a SIGPIPE handler by default. + # This is usually not what non-Python subprocesses expect. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + # stdin + process_input = kwargs.pop('process_input', None) + check_exit_code = kwargs.pop('check_exit_code', [0]) + ignore_exit_code = False + delay_on_retry = kwargs.pop('delay_on_retry', True) + attempts = kwargs.pop('attempts', 1) + run_as_root = kwargs.pop('run_as_root', False) + root_helper = kwargs.pop('root_helper', '') + shell = kwargs.pop('shell', True) + silent = kwargs.pop('silent', False) + # loglevel = kwargs.pop('loglevel', logging.DEBUG) + + if isinstance(check_exit_code, bool): + ignore_exit_code = not check_exit_code + check_exit_code = [0] + elif isinstance(check_exit_code, int): + check_exit_code = [check_exit_code] + + if kwargs: + raise UnknownArgumentError( + 'Got unknown keyword args to utils.execute: %r' % kwargs) + + if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: + if not root_helper: + raise NoRootWrapSpecified( + message=('Command requested root, but did not specify a root ' + 'helper.')) + cmd = shlex.split(root_helper) + list(cmd) + + while attempts > 0: + attempts -= 1 + try: + if not silent: + print_or_raise('Running cmd (subprocess): %s' % cmd) + + # windows + if os.name == 'nt': + preexec_fn = None + close_fds = False + else: + preexec_fn = _subprocess_setup + close_fds = True + + obj = subprocess.Popen(cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=close_fds, + preexec_fn=preexec_fn, + shell=shell) + + if process_input is not None: + result = obj.communicate(process_input) + else: + result = obj.communicate() + obj.stdin.close() + _returncode = obj.returncode + if not silent: + print_or_raise('Result was %s' % _returncode) + if not ignore_exit_code and _returncode not in check_exit_code: + (stdout, stderr) = result + raise ProcessExecutionError(exit_code=_returncode, + stdout=stdout, + stderr=stderr, + cmd=cmd) + # cmd=sanitized_cmd) + return result + except ProcessExecutionError: + if not attempts: + raise + else: + if not silent: + print_or_raise('%r failed. Retrying.' 
% cmd)
+                if delay_on_retry:
+                    time.sleep(random.randint(20, 200) / 100.0)
+        finally:
+            time.sleep(0)
+
+
+def get_available_data_ip(media_ips):
+    unavailable_ip = []
+    for media_ip in media_ips:
+        try:
+            execute("ping -c 1 -W 2 %s" % media_ip)
+        except ProcessExecutionError:
+            unavailable_ip.append(media_ip)
+            continue
+    return list(set(media_ips) - set(unavailable_ip)), unavailable_ip
+
+
+def clear_host_iscsi_resource():
+    out, err = execute("iscsiadm -m node", check_exit_code=[0, 21])
+    if not out:
+        return
+
+    sd_ips_list = map(lambda x: x.split(":3260")[0], out.split("\n")[:-1])
+    if not sd_ips_list:
+        return
+
+    valid_ips, invalid_ips = get_available_data_ip(sd_ips_list)
+    clear_resource = ""
+    for ip in invalid_ips:
+        logout_session = "iscsiadm -m node -p %s -u;" % ip
+        del_node = "iscsiadm -m node -p %s -o delete;" % ip
+        # manual_startup = "iscsiadm -m node -p %s -o update -n node.startup "
+        #                  "-v manual;" % ip
+        clear_resource += (logout_session + del_node)
+    execute(clear_resource, check_exit_code=[0, 21], silent=True)
+    # _execute("multipath -F")
+
+
+def config_computer():
+    # remove existing iscsi resources
+    clear_host_iscsi_resource()
+    config_multipath()
+
+
+def config_multipath():
+    if os.path.exists("/etc/multipath.conf"):
+        execute("echo y|mv /etc/multipath.conf /etc/multipath.conf.bak",
+                check_exit_code=[0, 1])
+
+    execute("cp -p base/multipath.conf /etc/")
+    execute("systemctl enable multipathd.service;"
+            "systemctl restart multipathd.service")
diff --git a/backend/tecs/storage_auto_config/storage_auto_config.py b/backend/tecs/storage_auto_config/storage_auto_config.py
new file mode 100755
index 00000000..afe45281
--- /dev/null
+++ b/backend/tecs/storage_auto_config/storage_auto_config.py
@@ -0,0 +1,168 @@
+###############################################################################
+# Author: CG
+# Description:
+# 1. The script should be copied to the host before running.
+# 2. The script is not thread safe.
+# 3. Examples of script calls:
+#    [config share disk]:
+#      python storage_auto_config share_disk <host_pxe_mac>;
+#      host_pxe_mac is used to generate the host IQN by md5, which is written to
+#      '/etc/iscsi/initiatorname.iscsi'
+#    [config cinder]: python storage_auto_config cinder_conf 10.43.177.129,
+#      where the second parameter of cinder_conf is the cinder host IP.
+#      If the backend is CEPH, you should also call the following commands:
+#      python storage_auto_config glance_rbd_conf on the glance node &
+#      python storage_auto_config nova_rbd_conf on the nova node.
+#    [config multipath]: python storage_auto_config check_multipath.
+# 4. Before running the script, the cinder.json and control.json files
+#    must be configured.
+############################################################################### +import sys +import uuid +import traceback +from common.utils import * +from common.cinder_conf import BaseConfig, CEPHBackendConfig +from common.share_disk import BaseShareDisk + +try: + import simplejson as json +except ImportError: + import json + + +def _set_config_file(file, section, key, value): + set_config = BaseConfig.SET_CONFIG.format( + config_file=file, + section=section, + key=key, + value=value) + execute(set_config) + + +def config_share_disk(config, host_name): + # deploy share_disk + for item in config: + BaseShareDisk.single_instance().deploy_share_disk(item, host_name) + + +def config_cinder(config, cinder_host_ip=""): + # config xml and cinder.conf + for config in config['disk_array']: + # load disk array global config + backends = config['backend'] + for item in backends.items(): + BaseConfig.single_instance().config_backend( + item, + management_ips=config.get('management_ips', []), + data_ips=config.get('data_ips', []), + user_name=config.get('user_name', []), + user_pwd=config.get('user_pwd', []), + cinder_host_ip=cinder_host_ip) + + # config multipath + config_computer() + + # enable config + execute("systemctl restart openstack-cinder-volume.service") + + +def config_nova_with_rbd(config): + # config xml and cinder.conf + for config in config['disk_array']: + # load disk array global config + backends = config['backend'] + for key, value in backends.items(): + if value.get('volume_driver') == 'CEPH': + uuid_instance = uuid.uuid3(uuid.NAMESPACE_DNS, "zte.com.cn") + uuid_str = uuid_instance.urn.split(":")[2] + _set_config_file(CEPHBackendConfig.NOVA_CONF_FILE, + 'libvirt', + 'images_type', + 'rbd') + _set_config_file(CEPHBackendConfig.NOVA_CONF_FILE, + 'libvirt', + 'rbd_secret_uuid', + uuid_str) + return + + # enable config + execute("systemctl restart openstack-nova-compute.service") + + +def config_glance_with_rbd(config): + # config xml and cinder.conf + for config in config['disk_array']: + # load disk array global config + backends = config['backend'] + for key, value in backends.items(): + if value.get('volume_driver') == 'CEPH': + _set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE, + 'DEFAULT', + 'show_image_direct_url', + 'True') + _set_config_file(CEPHBackendConfig.GLANCE_API_CONF_FILE, + 'glance_store', + 'default_store', + 'rbd') + return + + # enable config + execute("systemctl restart openstack-glance-api.service") + + +def _launch_script(): + def subcommand_launcher(args, valid_args_len, json_path, oper_type): + if len(args) < valid_args_len: + print_or_raise("Too few parameter is given,please check.", + ScriptInnerError) + + with open(json_path, "r") as fp_json: + params = json.load(fp_json) + + print_or_raise("-----Begin config %s, params is %s.-----" % + (oper_type, params)) + return params + + oper_type = sys.argv[1] if len(sys.argv) > 1 else "" + try: + if oper_type == "share_disk": + share_disk_config = \ + subcommand_launcher(sys.argv, 3, "base/control.json", + oper_type) + config_share_disk(share_disk_config, sys.argv[2]) + elif oper_type == "cinder_conf": + cinder_backend_config = subcommand_launcher(sys.argv, 3, + "base/cinder.json", + oper_type) + config_cinder(cinder_backend_config, sys.argv[2]) + elif oper_type == "nova_rbd_conf": + nova_rbd_config = subcommand_launcher(sys.argv, 1, + "base/cinder.json", + oper_type) + config_nova_with_rbd(nova_rbd_config) + elif oper_type == "glance_rbd_conf": + glance_rbd_config = subcommand_launcher(sys.argv, 1, + 
"base/cinder.json", + oper_type) + config_glance_with_rbd(glance_rbd_config) + elif oper_type == "check_multipath": + print_or_raise("-----Begin config %s.-----") + config_computer() + elif oper_type == "debug": + pass + else: + print_or_raise("Script operation is not given,such as:share_disk," + "cinder_conf,nova_rbd_conf,glance_rbd_conf," + "check_multipath.", ScriptInnerError) + except Exception as e: + print_or_raise("----------Operation %s is Failed.----------\n" + "Exception call chain as follow,%s" % + (oper_type, traceback.format_exc())) + raise e + else: + print_or_raise("----------Operation %s is done!----------" % + oper_type) + + +if __name__ == "__main__": + _launch_script() \ No newline at end of file diff --git a/backend/tecs/tecs.conf b/backend/tecs/tecs.conf new file mode 100755 index 00000000..681012d8 --- /dev/null +++ b/backend/tecs/tecs.conf @@ -0,0 +1,1447 @@ +[general] + +# Cluster ID for daisy +CLUSTER_ID= + +# Path to a public key to install on servers. If a usable key has not +# been installed on the remote servers, the user is prompted for a +# password and this key is installed so the password will not be +# required again. +CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub + +# Default password to be used ssh login operation system +CONFIG_OS_PASSWORD=ossdbg1 + +# Default password to be used everywhere (overridden by passwords set +# for individual services or users). +CONFIG_DEFAULT_PASSWORD= + +# Specify 'y' to install MariaDB. ['y', 'n'] +CONFIG_MARIADB_INSTALL=y + +# Specify 'y' to install OpenStack Image Service (glance). ['y', 'n'] +CONFIG_GLANCE_INSTALL=y + +# Specify 'y' to install OpenStack Block Storage (cinder). ['y', 'n'] +CONFIG_CINDER_INSTALL=y + +# Specify 'y' to install OpenStack Shared File System (manila). ['y', +# 'n'] +CONFIG_MANILA_INSTALL=n + +# Specify 'y' to install OpenStack Compute (nova). ['y', 'n'] +CONFIG_NOVA_INSTALL=y + +# Specify 'y' to install OpenStack Networking (neutron); otherwise, +# Compute Networking (nova) will be used. ['y', 'n'] +CONFIG_NEUTRON_INSTALL=y + +# Specify 'y' to install OpenStack Dashboard (horizon). ['y', 'n'] +CONFIG_HORIZON_INSTALL=y + +# Specify 'y' to install OpenStack Object Storage (swift). ['y', 'n'] +CONFIG_SWIFT_INSTALL=n + +# Specify 'y' to install OpenStack Metering (ceilometer). ['y', 'n'] +CONFIG_CEILOMETER_INSTALL=y + +# Specify 'y' to install OpenStack Orchestration (heat). ['y', 'n'] +CONFIG_HEAT_INSTALL=y + +# Specify 'y' to install OpenStack Data Processing (sahara). ['y', +# 'n'] +CONFIG_SAHARA_INSTALL=n + +# Specify 'y' to install OpenStack Database (trove) ['y', 'n'] +CONFIG_TROVE_INSTALL=n + +# Specify 'y' to install OpenStack Bare Metal Provisioning (ironic). +# ['y', 'n'] +CONFIG_IRONIC_INSTALL=n + +# Set to 'y' if you would like Packstack to install ha +CONFIG_HA_INSTALL=n + +# Set to 'y' if you would like Packstack to install LB +CONFIG_LB_INSTALL=n + +#IP address of the servers on which to config HA,including HA master host +CONFIG_HA_HOST= + +# IP address of the servers on which to install ha software +CONFIG_HA_HOSTS= + +#Float IP of LB, only one LB system is support now. +CONFIG_LB_HOST= + +# IP address of LB front-end servers on which to install haproxy +CONFIG_LB_FRONTEND_HOSTS= + +# IP address of LB front-end servers on which to install LB services +CONFIG_LB_BACKEND_HOSTS= + +# Specify 'y' to install the OpenStack Client packages (command-line +# tools). An admin "rc" file will also be installed. ['y', 'n'] +CONFIG_CLIENT_INSTALL=y + +# Comma-separated list of NTP servers. 
Leave plain if Packstack +# should not install ntpd on instances. +# Please give float ip if NTP server is HA node. +CONFIG_NTP_SERVERS= + +# Specify 'y' to install Nagios to monitor OpenStack hosts. Nagios +# provides additional tools for monitoring the OpenStack environment. +# ['y', 'n'] +CONFIG_NAGIOS_INSTALL=n + +# Comma-separated list of servers to be excluded from the +# installation. This is helpful if you are running Packstack a second +# time with the same answer file and do not want Packstack to +# overwrite these server's configurations. Leave empty if you do not +# need to exclude any servers. +EXCLUDE_SERVERS= + +# Specify 'y' if you want to run OpenStack services in debug mode; +# otherwise, specify 'n'. ['y', 'n'] +CONFIG_DEBUG_MODE=n + +# IP address of the server on which to install OpenStack services +# specific to the controller role (for example, API servers or +# dashboard). +CONFIG_CONTROLLER_HOST= + +# List of IP addresses of the servers on which to install the Compute +# service. +CONFIG_COMPUTE_HOSTS= + +# public IP to form public url of services +CONFIG_PUBLIC_IP= + +# admin IP to form admin url of services +CONFIG_ADMIN_IP= + +# internal IP to form internal url of services +CONFIG_INTERNAL_IP= + +# List of IP addresses of the servers on which to install the Api cell +# and child cell service. +CONFIG_NOVA_CELLS_HOST= +CONFIG_NOVA_CELLS_HOSTS= +CONFIG_CHILD_CELL_DICT= + +# Specify 'y' if you want to use VMware vCenter as hypervisor and +# storage; otherwise, specify 'n'. ['y', 'n'] +CONFIG_VMWARE_BACKEND=n + +# Specify 'y' if you want to use unsupported parameters. This should +# be used only if you know what you are doing. Issues caused by using +# unsupported options will not be fixed before the next major release. +# ['y', 'n'] +CONFIG_UNSUPPORTED=y + +# IP address of the VMware vCenter server. +CONFIG_VCENTER_HOST= + +# User name for VMware vCenter server authentication. +CONFIG_VCENTER_USER= + +# Password for VMware vCenter server authentication. +CONFIG_VCENTER_PASSWORD= + +# Name of the VMware vCenter cluster. +CONFIG_VCENTER_CLUSTER_NAME= + +# (Unsupported!) IP address of the server on which to install +# OpenStack services specific to storage servers such as Image or +# Block Storage services. +CONFIG_STORAGE_HOST= + +# (Unsupported!) IP address of the server on which to install +# OpenStack services specific to OpenStack Data Processing (sahara). +CONFIG_SAHARA_HOST= + +# Specify 'y' to enable the EPEL repository (Extra Packages for +# Enterprise Linux). ['y', 'n'] +CONFIG_USE_EPEL=n + +# Comma-separated list of URLs for any additional yum repositories, +# to use for installation. +#for example:http://127.0.0.1/tecs_install/ +CONFIG_REPO= + +# To subscribe each server with Red Hat Subscription Manager, include +# this with CONFIG_RH_PW. +CONFIG_RH_USER= + +# To subscribe each server to receive updates from a Satellite +# server, provide the URL of the Satellite server. You must also +# provide a user name (CONFIG_SATELLITE_USERNAME) and password +# (CONFIG_SATELLITE_PASSWORD) or an access key (CONFIG_SATELLITE_AKEY) +# for authentication. +CONFIG_SATELLITE_URL= + +# To subscribe each server with Red Hat Subscription Manager, include +# this with CONFIG_RH_USER. +CONFIG_RH_PW= + +# Specify 'y' to enable RHEL optional repositories. ['y', 'n'] +CONFIG_RH_OPTIONAL=n + +# HTTP proxy to use with Red Hat Subscription Manager. +CONFIG_RH_PROXY= + +# Port to use for Red Hat Subscription Manager's HTTP proxy. 
+CONFIG_RH_PROXY_PORT= + +# User name to use for Red Hat Subscription Manager's HTTP proxy. +CONFIG_RH_PROXY_USER= + +# Password to use for Red Hat Subscription Manager's HTTP proxy. +CONFIG_RH_PROXY_PW= + +# User name to authenticate with the RHN Satellite server; if you +# intend to use an access key for Satellite authentication, leave this +# blank. +CONFIG_SATELLITE_USER= + +# Password to authenticate with the RHN Satellite server; if you +# intend to use an access key for Satellite authentication, leave this +# blank. +CONFIG_SATELLITE_PW= + +# Access key for the Satellite server; if you intend to use a user +# name and password for Satellite authentication, leave this blank. +CONFIG_SATELLITE_AKEY= + +# Certificate path or URL of the certificate authority to verify that +# the connection with the Satellite server is secure. If you are not +# using Satellite in your deployment, leave this blank. +CONFIG_SATELLITE_CACERT= + +# Profile name that should be used as an identifier for the system in +# RHN Satellite (if required). +CONFIG_SATELLITE_PROFILE= + +# Comma-separated list of flags passed to the rhnreg_ks command. +# Valid flags are: novirtinfo, norhnsd, nopackages ['novirtinfo', +# 'norhnsd', 'nopackages'] +CONFIG_SATELLITE_FLAGS= + +# HTTP proxy to use when connecting to the RHN Satellite server (if +# required). +CONFIG_SATELLITE_PROXY= + +# User name to authenticate with the Satellite-server HTTP proxy. +CONFIG_SATELLITE_PROXY_USER= + +# User password to authenticate with the Satellite-server HTTP proxy. +CONFIG_SATELLITE_PROXY_PW= + +# Service to be used as the AMQP broker. Allowed values are: qpid, +# rabbitmq ['qpid', 'rabbitmq'] +CONFIG_AMQP_BACKEND=rabbitmq + +# support LB, HA or None +CONFIG_AMQP_INSTALL_MODE=None + +# IF CONFIG_AMQP_INSTALL_MODE is LB, please set cluster master Node IP +CONFIG_AMQP_CLUSTER_MASTER_NODE_IP= + +# IF CONFIG_AMQP_INSTALL_MODE is LB, please set cluster master Node Name +CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME= + +# float IP address of the server to use the AMQP service. +CONFIG_AMQP_HOST= + +# IP address of the server on which to install the AMQP service. +CONFIG_AMQP_HOSTS= + +# dict of install amqp.such +# as,{'10.43.179.1':'10.43.179.2,10.43.179.3'} +CONFIG_AMQP_DICT= + +# Specify 'y' to enable SSL for the AMQP service. ['y', 'n'] +CONFIG_AMQP_ENABLE_SSL=n + +# Specify 'y' to enable authentication for the AMQP service. ['y', +# 'n'] +CONFIG_AMQP_ENABLE_AUTH=n + +# Password for the NSS certificate database of the AMQP service. +CONFIG_AMQP_NSS_CERTDB_PW=amqp + +# Port on which the AMQP service listens for SSL connections. +CONFIG_AMQP_SSL_PORT=5671 + +# File name of the CAcertificate that the AMQP service will use for +# verification. +CONFIG_AMQP_SSL_CACERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem + +# File name of the certificate that the AMQP service will use for +# verification. +CONFIG_AMQP_SSL_CERT_FILE=/etc/pki/tls/certs/amqp_selfcert.pem + +# File name of the private key that the AMQP service will use for +# verification. +CONFIG_AMQP_SSL_KEY_FILE=/etc/pki/tls/private/amqp_selfkey.pem + +# Specify 'y' to automatically generate a self-signed SSL certificate +# and key. ['y', 'n'] +CONFIG_AMQP_SSL_SELF_SIGNED=y + +# User for amqp authentication +CONFIG_AMQP_AUTH_USER=guest + +# Password for user authentication +CONFIG_AMQP_AUTH_PASSWORD=guest + +# IP address of the server on which to use MariaDB. 
If a MariaDB +# installation was not specified in CONFIG_MARIADB_INSTALL, specify +# the IP address of an existing database server (a MariaDB cluster can +# also be specified). +CONFIG_MARIADB_HOST= + +# The IP address of the servers on which to install Mariadb +CONFIG_MARIADB_HOSTS= + +# IP address of the servers on which to assign Mariadb.such +# as{'10.43.179.1':'10.43.179.2,10.43.179.3'} +CONFIG_MARIADB_DICT= + +# User name for the MariaDB administrative user. +CONFIG_MARIADB_USER=root + +# Password for the MariaDB administrative user. +CONFIG_MARIADB_PW=root + +# support LB, HA or None +CONFIG_KEYSTONE_INSTALL_MODE=None + +# The float IP address of the server on which to install Keystone +CONFIG_KEYSTONE_HOST= + +# The IP address of the server on which to install Keystone +CONFIG_KEYSTONE_HOSTS= + +# Password to use for the Identity service (keystone) to access the +# database. +CONFIG_KEYSTONE_DB_PW=keystone + +# Default region name to use when creating tenants in the Identity +# service. +CONFIG_KEYSTONE_REGION=RegionOne + +# Token to use for the Identity service API. +CONFIG_KEYSTONE_ADMIN_TOKEN=e93e9abf42f84be48e0996e5bd44f096 + +# Email address for the Identity service 'admin' user. Defaults to +CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost + +# User name for the Identity service 'admin' user. Defaults to +# 'admin'. +CONFIG_KEYSTONE_ADMIN_USERNAME=admin + +# Password to use for the Identity service 'admin' user. +CONFIG_KEYSTONE_ADMIN_PW=keystone + +# Password to use for the Identity service 'demo' user. +CONFIG_KEYSTONE_DEMO_PW=keystone + +# Identity service API version string. ['v2.0', 'v3'] +CONFIG_KEYSTONE_API_VERSION=v2.0 + +# Identity service token format (UUID or PKI). The recommended format +# for new deployments is UUID. ['UUID', 'PKI'] +CONFIG_KEYSTONE_TOKEN_FORMAT=UUID + +# Name of service to use to run the Identity service (keystone or +# httpd). ['keystone', 'httpd'] +CONFIG_KEYSTONE_SERVICE_NAME=keystone + +# Type of Identity service backend (sql or ldap). ['sql', 'ldap'] +CONFIG_KEYSTONE_IDENTITY_BACKEND=sql + +# URL for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_URL=ldap://127.0.0.1 + +# User DN for the Identity service LDAP backend. Used to bind to the +# LDAP server if the LDAP server does not allow anonymous +# authentication. +CONFIG_KEYSTONE_LDAP_USER_DN= + +# User DN password for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_PASSWORD= + +# Base suffix for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_SUFFIX= + +# Query scope for the Identity service LDAP backend (base, one, sub). +# ['base', 'one', 'sub'] +CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one + +# Query page size for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1 + +# User subtree for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_SUBTREE= + +# User query filter for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_FILTER= + +# User object class for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS= + +# User ID attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE= + +# User name attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE= + +# User email address attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE= + +# User-enabled attribute for the Identity service LDAP backend. 
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE= + +# Bit mask applied to user-enabled attribute for the Identity service +# LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1 + +# Value of enabled attribute which indicates user is enabled for the +# Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE + +# Specify 'y' if users are disabled (not enabled) in the Identity +# service LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n + +# Comma-separated list of attributes stripped from LDAP user entry +# upon update. +CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE= + +# Identity service LDAP attribute mapped to default_project_id for +# users. +CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE= + +# Specify 'y' if you want to be able to create Identity service users +# through the Identity service interface; specify 'n' if you will +# create directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n + +# Specify 'y' if you want to be able to update Identity service users +# through the Identity service interface; specify 'n' if you will +# update directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n + +# Specify 'y' if you want to be able to delete Identity service users +# through the Identity service interface; specify 'n' if you will +# delete directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n + +# Identity service LDAP attribute mapped to password. +CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE= + +# DN of the group entry to hold enabled LDAP users when using enabled +# emulation. +CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN= + +# List of additional LDAP attributes for mapping additional attribute +# mappings for users. The attribute-mapping format is +# :, where ldap_attr is the attribute in the +# LDAP entry and user_attr is the Identity API attribute. +CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING= + +# Group subtree for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE= + +# Group query filter for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_FILTER= + +# Group object class for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS= + +# Group ID attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE= + +# Group name attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE= + +# Group member attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE= + +# Group description attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE= + +# Comma-separated list of attributes stripped from LDAP group entry +# upon update. +CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE= + +# Specify 'y' if you want to be able to create Identity service +# groups through the Identity service interface; specify 'n' if you +# will create directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n + +# Specify 'y' if you want to be able to update Identity service +# groups through the Identity service interface; specify 'n' if you +# will update directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n + +# Specify 'y' if you want to be able to delete Identity service +# groups through the Identity service interface; specify 'n' if you +# will delete directly in the LDAP backend. 
['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n + +# List of additional LDAP attributes used for mapping additional +# attribute mappings for groups. The attribute=mapping format is +# :, where ldap_attr is the attribute in the +# LDAP entry and group_attr is the Identity API attribute. +CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING= + +# Specify 'y' if the Identity service LDAP backend should use TLS. +# ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USE_TLS=n + +# CA certificate directory for Identity service LDAP backend (if TLS +# is used). +CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR= + +# CA certificate file for Identity service LDAP backend (if TLS is +# used). +CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE= + +# Certificate-checking strictness level for Identity service LDAP +# backend; valid options are: never, allow, demand. ['never', 'allow', +# 'demand'] +CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand + +# support LB, HA or None +CONFIG_GLANCE_API_INSTALL_MODE=None + +# support LB, HA or None +CONFIG_GLANCE_REGISTRY_INSTALL_MODE=None + +# The float IP address of the server on which to install Glance +CONFIG_GLANCE_HOST= + +# The IP address of the server on which to install Glance +CONFIG_GLANCE_HOSTS= + +# Password to use for the Image service (glance) to access the +# database. +CONFIG_GLANCE_DB_PW=glance + +# Password to use for the Image service to authenticate with the +# Identity service. +CONFIG_GLANCE_KS_PW=glance + +# Storage backend for the Image service (controls how the Image +# service stores disk images). Valid options are: file or swift +# (Object Storage). The Object Storage service must be enabled to use +# it as a working backend; otherwise, Packstack falls back to 'file'. +# ['file', 'swift'] +CONFIG_GLANCE_BACKEND=file + +# support LB, HA or None +CONFIG_CINDER_API_INSTALL_MODE=None + +# The float IP address of the server on which to install Cinder Api +CONFIG_CINDER_API_HOST= + +# IP address of the server on which to install Cinder Api +CONFIG_CINDER_API_HOSTS= + +# The float IP address of the server on which to install Cinder Scheduler +CONFIG_CINDER_SCHEDULER_HOST= + +# IP address of the server on which to install Cinder Scheduler +CONFIG_CINDER_SCHEDULER_HOSTS= + +# The float IP address of the server on which to install Cinder Volume +CONFIG_CINDER_VOLUME_HOST= + +# IP address of the server on which to install Cinder Volume +CONFIG_CINDER_VOLUME_HOSTS= + +# The float IP address of the server on which to install Cinder Backup +CONFIG_CINDER_BACKUP_HOST= + +# IP address of the server on which to install Cinder Backup +CONFIG_CINDER_BACKUP_HOSTS= + +# Password to use for the Block Storage service (cinder) to access +# the database. +CONFIG_CINDER_DB_PW=cinder + +# Password to use for the Block Storage service to authenticate with +# the Identity service. +CONFIG_CINDER_KS_PW=cinder + +# Storage backend to use for the Block Storage service; valid options +# are: lvm, gluster, nfs, vmdk, netapp. ['lvm', 'gluster', 'nfs', +# 'vmdk', 'netapp'] +CONFIG_CINDER_BACKEND=lvm + +# Specify 'y' to create the Block Storage volumes group. That is, +# Packstack creates a raw disk image in /var/lib/cinder, and mounts it +# using a loopback device. This should only be used for testing on a +# proof-of-concept installation of the Block Storage service (a file- +# backed volume group is not suitable for production usage). ['y', +# 'n'] +CONFIG_CINDER_VOLUMES_CREATE=y + +# Size of Block Storage volumes group. Actual volume size will be +# extended with 3% more space for VG metadata. 
Remember that the size +# of the volume group will restrict the amount of disk space that you +# can expose to Compute instances, and that the specified amount must +# be available on the device used for /var/lib/cinder. +CONFIG_CINDER_VOLUMES_SIZE=20G + +# A single or comma-separated list of Red Hat Storage (gluster) +# volume shares to mount. Example: 'ip-address:/vol-name', 'domain +# :/vol-name' +CONFIG_CINDER_GLUSTER_MOUNTS= + +# A single or comma-separated list of NFS exports to mount. Example: +# 'ip-address:/export-name' +CONFIG_CINDER_NFS_MOUNTS= + +# Administrative user account name used to access the NetApp storage +# system or proxy server. +CONFIG_CINDER_NETAPP_LOGIN= + +# Password for the NetApp administrative user account specified in +# the CONFIG_CINDER_NETAPP_LOGIN parameter. +CONFIG_CINDER_NETAPP_PASSWORD= + +# Hostname (or IP address) for the NetApp storage system or proxy +# server. +CONFIG_CINDER_NETAPP_HOSTNAME= + +# The TCP port to use for communication with the storage system or +# proxy. If not specified, Data ONTAP drivers will use 80 for HTTP and +# 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. +# Defaults to 80. +CONFIG_CINDER_NETAPP_SERVER_PORT=80 + +# Storage family type used on the NetApp storage system; valid +# options are ontap_7mode for using Data ONTAP operating in 7-Mode, +# ontap_cluster for using clustered Data ONTAP, or E-Series for NetApp +# E-Series. Defaults to ontap_cluster. ['ontap_7mode', +# 'ontap_cluster', 'eseries'] +CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster + +# The transport protocol used when communicating with the NetApp +# storage system or proxy server. Valid values are http or https. +# Defaults to 'http'. ['http', 'https'] +CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http + +# Storage protocol to be used on the data path with the NetApp +# storage system; valid options are iscsi, fc, nfs. Defaults to nfs. +# ['iscsi', 'fc', 'nfs'] +CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs + +# Quantity to be multiplied by the requested volume size to ensure +# enough space is available on the virtual storage server (Vserver) to +# fulfill the volume creation request. Defaults to 1.0. +CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0 + +# Time period (in minutes) that is allowed to elapse after the image +# is last accessed, before it is deleted from the NFS image cache. +# When a cache-cleaning cycle begins, images in the cache that have +# not been accessed in the last M minutes, where M is the value of +# this parameter, are deleted from the cache to create free space on +# the NFS share. Defaults to 720. +CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720 + +# If the percentage of available space for an NFS share has dropped +# below the value specified by this parameter, the NFS image cache is +# cleaned. Defaults to 20. +CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20 + +# When the percentage of available space on an NFS share has reached +# the percentage specified by this parameter, the driver stops +# clearing files from the NFS image cache that have not been accessed +# in the last M minutes, where M is the value of the +# CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES parameter. Defaults to 60. +CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60 + +# Single or comma-separated list of NetApp NFS shares for Block +# Storage to use. Format: ip-address:/export-name. Defaults to ''. +CONFIG_CINDER_NETAPP_NFS_SHARES= + +# File with the list of available NFS shares. Defaults to +# '/etc/cinder/shares.conf'. 
+CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf + +# This parameter is only utilized when the storage protocol is +# configured to use iSCSI or FC. This parameter is used to restrict +# provisioning to the specified controller volumes. Specify the value +# of this parameter to be a comma separated list of NetApp controller +# volume names to be used for provisioning. Defaults to ''. +CONFIG_CINDER_NETAPP_VOLUME_LIST= + +# The vFiler unit on which provisioning of block storage volumes will +# be done. This parameter is only used by the driver when connecting +# to an instance with a storage family of Data ONTAP operating in +# 7-Mode Only use this parameter when utilizing the MultiStore feature +# on the NetApp storage system. Defaults to ''. +CONFIG_CINDER_NETAPP_VFILER= + +# The name of the config.conf stanza for a Data ONTAP (7-mode) HA +# partner. This option is only used by the driver when connecting to +# an instance with a storage family of Data ONTAP operating in 7-Mode, +# and it is required if the storage protocol selected is FC. Defaults +# to ''. +CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME= + +# This option specifies the virtual storage server (Vserver) name on +# the storage cluster on which provisioning of block storage volumes +# should occur. Defaults to ''. +CONFIG_CINDER_NETAPP_VSERVER= + +# Restricts provisioning to the specified controllers. Value must be +# a comma-separated list of controller hostnames or IP addresses to be +# used for provisioning. This option is only utilized when the storage +# family is configured to use E-Series. Defaults to ''. +CONFIG_CINDER_NETAPP_CONTROLLER_IPS= + +# Password for the NetApp E-Series storage array. Defaults to ''. +CONFIG_CINDER_NETAPP_SA_PASSWORD= + +# This option is used to define how the controllers in the E-Series +# storage array will work with the particular operating system on the +# hosts that are connected to it. Defaults to 'linux_dm_mp' +CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp + +# Path to the NetApp E-Series proxy application on a proxy server. +# The value is combined with the value of the +# CONFIG_CINDER_NETAPP_TRANSPORT_TYPE, CONFIG_CINDER_NETAPP_HOSTNAME, +# and CONFIG_CINDER_NETAPP_HOSTNAME options to create the URL used by +# the driver to connect to the proxy application. Defaults to +# '/devmgr/v2'. +CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2 + +# Restricts provisioning to the specified storage pools. Only dynamic +# disk pools are currently supported. The value must be a comma- +# separated list of disk pool names to be used for provisioning. +# Defaults to ''. +CONFIG_CINDER_NETAPP_STORAGE_POOLS= + +# Password to use for the OpenStack File Share service (manila) to +# access the database. +CONFIG_MANILA_DB_PW=manila + +# Password to use for the OpenStack File Share service (manila) to +# authenticate with the Identity service. +CONFIG_MANILA_KS_PW=manila + +# Backend for the OpenStack File Share service (manila); valid +# options are: generic or netapp. ['generic', 'netapp'] +CONFIG_MANILA_BACKEND=generic + +# Denotes whether the driver should handle the responsibility of +# managing share servers. This must be set to false if the driver is +# to operate without managing share servers. Defaults to 'false' +# ['true', 'false'] +CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS=false + +# The transport protocol used when communicating with the storage +# system or proxy server. Valid values are 'http' and 'https'. +# Defaults to 'https'. 
['https', 'http'] +CONFIG_MANILA_NETAPP_TRANSPORT_TYPE=https + +# Administrative user account name used to access the NetApp storage +# system. Defaults to ''. +CONFIG_MANILA_NETAPP_LOGIN=admin + +# Password for the NetApp administrative user account specified in +# the CONFIG_MANILA_NETAPP_LOGIN parameter. Defaults to ''. +CONFIG_MANILA_NETAPP_PASSWORD= + +# Hostname (or IP address) for the NetApp storage system or proxy +# server. Defaults to ''. +CONFIG_MANILA_NETAPP_SERVER_HOSTNAME= + +# The storage family type used on the storage system; valid values +# are ontap_cluster for clustered Data ONTAP. Defaults to +# 'ontap_cluster'. ['ontap_cluster'] +CONFIG_MANILA_NETAPP_STORAGE_FAMILY=ontap_cluster + +# The TCP port to use for communication with the storage system or +# proxy server. If not specified, Data ONTAP drivers will use 80 for +# HTTP and 443 for HTTPS. Defaults to '443'. +CONFIG_MANILA_NETAPP_SERVER_PORT=443 + +# Pattern for searching available aggregates for NetApp provisioning. +# Defaults to '(.*)'. +CONFIG_MANILA_NETAPP_AGGREGATE_NAME_SEARCH_PATTERN=(.*) + +# Name of aggregate on which to create the NetApp root volume. This +# option only applies when the option +# CONFIG_MANILA_NETAPP_DRV_HANDLES_SHARE_SERVERS is set to True. +CONFIG_MANILA_NETAPP_ROOT_VOLUME_AGGREGATE= + +# NetApp root volume name. Defaults to 'root'. +CONFIG_MANILA_NETAPP_ROOT_VOLUME_NAME=root + +# This option specifies the storage virtual machine (previously +# called a Vserver) name on the storage cluster on which provisioning +# of shared file systems should occur. This option only applies when +# the option driver_handles_share_servers is set to False. Defaults to +# ''. +CONFIG_MANILA_NETAPP_VSERVER= + +# Denotes whether the driver should handle the responsibility of +# managing share servers. This must be set to false if the driver is +# to operate without managing share servers. Defaults to 'true'. +# ['true', 'false'] +CONFIG_MANILA_GENERIC_DRV_HANDLES_SHARE_SERVERS=true + +# Volume name template for Manila service. Defaults to 'manila- +# share-%s'. +CONFIG_MANILA_GENERIC_VOLUME_NAME_TEMPLATE=manila-share-%s + +# Share mount path for Manila service. Defaults to '/shares'. +CONFIG_MANILA_GENERIC_SHARE_MOUNT_PATH=/shares + +# Location of disk image for Manila service instance. Defaults to ' +CONFIG_MANILA_SERVICE_IMAGE_LOCATION=https://www.dropbox.com/s/vi5oeh10q1qkckh/ubuntu_1204_nfs_cifs.qcow2 + +# User in Manila service instance. +CONFIG_MANILA_SERVICE_INSTANCE_USER=ubuntu + +# Password to service instance user. +CONFIG_MANILA_SERVICE_INSTANCE_PASSWORD=ubuntu + +# Type of networking that the backend will use. A more detailed +# description of each option is available in the Manila docs. Defaults +# to 'neutron'. ['neutron', 'nova-network', 'standalone'] +CONFIG_MANILA_NETWORK_TYPE=neutron + +# Gateway IPv4 address that should be used. Required. Defaults to ''. +CONFIG_MANILA_NETWORK_STANDALONE_GATEWAY= + +# Network mask that will be used. Can be either decimal like '24' or +# binary like '255.255.255.0'. Required. Defaults to ''. +CONFIG_MANILA_NETWORK_STANDALONE_NETMASK= + +# Set it if network has segmentation (VLAN, VXLAN, etc). It will be +# assigned to share-network and share drivers will be able to use this +# for network interfaces within provisioned share servers. Optional. +# Example: 1001. Defaults to ''. +CONFIG_MANILA_NETWORK_STANDALONE_SEG_ID= + +# Can be IP address, range of IP addresses or list of addresses or +# ranges. 
Contains addresses from IP network that are allowed to be +# used. If empty, then will be assumed that all host addresses from +# network can be used. Optional. Examples: 10.0.0.10 or +# 10.0.0.10-10.0.0.20 or +# 10.0.0.10-10.0.0.20,10.0.0.30-10.0.0.40,10.0.0.50. Defaults to ''. +CONFIG_MANILA_NETWORK_STANDALONE_IP_RANGE= + +# IP version of network. Optional. Defaults to '4'. ['4', '6'] +CONFIG_MANILA_NETWORK_STANDALONE_IP_VERSION=4 + +# support LB, HA or None +CONFIG_IRONIC_API_INSTALL_MODE=None + +# The float IP address of the server on which to install the Ironic +# service +CONFIG_IRONIC_HOST= + +# The IP address of the servers on which to install the Ironic +# service +CONFIG_IRONIC_HOSTS= + +# Password to use for OpenStack Bare Metal Provisioning (ironic) to +# access the database. +CONFIG_IRONIC_DB_PW=ironic + +# Password to use for OpenStack Bare Metal Provisioning to +# authenticate with the Identity service. +CONFIG_IRONIC_KS_PW=ironic + +# support LB, HA or None +CONFIG_NOVA_API_INSTALL_MODE=None + +# The float IP address of the server on which to install the Nova API +# service +CONFIG_NOVA_API_HOST= + +# The IP address of the servers on which to install the Nova API +# service +CONFIG_NOVA_API_HOSTS= + +# The float IP address of the server on which to install the Nova +# Cert service +CONFIG_NOVA_CERT_HOST= + +# The IP address of the servers on which to install the Nova Cert +# service +CONFIG_NOVA_CERT_HOSTS= + +# support LB, HA or None +CONFIG_NOVA_VNCPROXY_INSTALL_MODE=None + +# The float IP address of the server on which to install the Nova VNC +# proxy +CONFIG_NOVA_VNCPROXY_HOST= + +# The IP address of the servers on which to install the Nova VNC +# proxy +CONFIG_NOVA_VNCPROXY_HOSTS= + +# The IP address of the server on which to install the Nova Conductor +# service +CONFIG_NOVA_CONDUCTOR_HOST= + +# The IP address of the server on which to install the Nova Conductor +# service +CONFIG_NOVA_CONDUCTOR_HOSTS= + +# The IP address of the server on which to install the Nova Scheduler +# service +CONFIG_NOVA_SCHED_HOST= + +# The IP address of the servers on which to install the Nova +# Scheduler service +CONFIG_NOVA_SCHED_HOSTS= + +# Password to use for the Compute service (nova) to access the +# database. +CONFIG_NOVA_DB_PW=nova + +# Password to use for the Compute service to authenticate with the +# Identity service. +CONFIG_NOVA_KS_PW=nova + +# Overcommitment ratio for virtual to physical CPUs. Specify 1.0 to +# disable CPU overcommitment. +CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=1.0 + +# Overcommitment ratio for virtual to physical RAM. Specify 1.0 to +# disable RAM overcommitment. +CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5 + +# Protocol used for instance migration. Valid options are: tcp and +# ssh. Note that by default, the Compute user is created with the +# /sbin/nologin shell so that the SSH protocol will not work. To make +# the SSH protocol work, you must configure the Compute user on +# compute hosts manually. ['tcp', 'ssh'] +CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp + +# Manager that runs the Compute service. +CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager + +# Private interface for flat DHCP on the Compute servers. +CONFIG_NOVA_COMPUTE_PRIVIF=eth1 + +# The list of IP addresses of the server on which to install the Nova +# Network service +CONFIG_NOVA_NETWORK_HOSTS= + +# Compute Network Manager. ['^nova\.network\.manager\.\w+Manager$'] +CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager + +# Public interface on the Compute network server. 
+CONFIG_NOVA_NETWORK_PUBIF=eth0 + +# Private interface for flat DHCP on the Compute network server. +CONFIG_NOVA_NETWORK_PRIVIF=eth1 + +# IP Range for flat DHCP. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$'] +CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22 + +# IP Range for floating IP addresses. ['^[\:\.\da- +# fA-f]+(\/\d+){0,1}$'] +CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22 + +# Specify 'y' to automatically assign a floating IP to new instances. +# ['y', 'n'] +CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n + +# First VLAN for private networks (Compute networking). +CONFIG_NOVA_NETWORK_VLAN_START=100 + +# Number of networks to support (Compute networking). +CONFIG_NOVA_NETWORK_NUMBER=1 + +# Number of addresses in each private subnet (Compute networking). +CONFIG_NOVA_NETWORK_SIZE=255 + +# support LB, HA or None +CONFIG_NEUTRON_SERVER_INSTALL_MODE=None + +# The float IP addresses of the server on which to install the +# Neutron server +CONFIG_NEUTRON_SERVER_HOST= + +# IP addresses of the servers on which to install the Neutron server +CONFIG_NEUTRON_SERVER_HOSTS= + +# Password to use for OpenStack Networking (neutron) to authenticate +# with the Identity service. +CONFIG_NEUTRON_KS_PW=neutron + +# The password to use for OpenStack Networking to access the +# database. +CONFIG_NEUTRON_DB_PW=neutron + +# A comma separated list of IP addresses on which to install Neutron +# L3 agent +CONFIG_NEUTRON_L3_HOSTS= + +# The name of the Open vSwitch bridge (or empty for linuxbridge) for +# the OpenStack Networking L3 agent to use for external traffic. +# Specify 'provider' if you intend to use a provider network to handle +# external traffic. +CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex + +# A comma separated list of IP addresses on which to install Neutron +# DHCP agent +CONFIG_NEUTRON_DHCP_HOSTS= + +# A comma separated list of IP addresses on which to install Neutron +# LBaaS agent +CONFIG_NEUTRON_LBAAS_HOSTS= + +# A comma separated list of IP addresses on which to install Neutron +# metadata agent +CONFIG_NEUTRON_METADATA_HOSTS= + +# Password for the OpenStack Networking metadata agent. +CONFIG_NEUTRON_METADATA_PW=neutron + +# Specify 'y' to install OpenStack Networking's Load-Balancing- +# as-a-Service (LBaaS). ['y', 'n'], discard +#CONFIG_LBAAS_INSTALL=n + +# Specify 'y' to install OpenStack Networking's L3 Metering agent +# ['y', 'n'] +CONFIG_NEUTRON_METERING_AGENT_INSTALL=n + +# Specify 'y' to configure OpenStack Networking's Firewall- +# as-a-Service (FWaaS). ['y', 'n'] +CONFIG_NEUTRON_FWAAS=n + +#The MAC address pattern to use. +CONFIG_NEUTRON_BASE_MAC=fa:16:3e:00:00:00 + +# Comma-separated list of network-type driver entry points to be +# loaded from the neutron.ml2.type_drivers namespace. ['local', +# 'flat', 'vlan', 'gre', 'vxlan'] +CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vlan,vxlan + +# Comma-separated, ordered list of network types to allocate as +# tenant networks. The 'local' value is only useful for single-box +# testing and provides no connectivity between hosts. ['local', +# 'vlan', 'gre', 'vxlan'] +CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vlan,vxlan + +# Comma-separated ordered list of networking mechanism driver entry +# points to be loaded from the neutron.ml2.mechanism_drivers +# namespace. ['logger', 'test', 'linuxbridge', 'openvswitch', +# 'hyperv', 'ncs', 'arista', 'cisco_nexus', 'mlnx', 'l2population' +# 'sriovnicswitch', 'proxydriver'] +CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch,sriovnicswitch + +# Comma-separated list of physical_network names with which flat +# networks can be created. 
Use * to allow flat networks with arbitrary +# physical_network names. +CONFIG_NEUTRON_ML2_FLAT_NETWORKS=* + +# Comma-separated list of :: or +# specifying physical_network names usable for VLAN +# provider and tenant networks, as well as ranges of VLAN tags on each +# available for allocation to tenant networks. +CONFIG_NEUTRON_ML2_VLAN_RANGES=physnet1:2:2999,physnet2:2:2999 + +# Comma-separated list of : tuples enumerating +# ranges of GRE tunnel IDs that are available for tenant-network +# allocation. A tuple must be an array with tun_max +1 - tun_min > +# 1000000. +CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES= + +# Comma-separated list of addresses for VXLAN multicast group. If +# left empty, disables VXLAN from sending allocate broadcast traffic +# (disables multicast VXLAN mode). Should be a Multicast IP (v4 or v6) +# address. +CONFIG_NEUTRON_ML2_VXLAN_GROUP= + +# Comma-separated list of : tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network +# allocation. Minimum value is 0 and maximum value is 16777215. +CONFIG_NEUTRON_ML2_VNI_RANGES=10:100 + +# Comma-separated list of [['192.168.0.2','192.168.0.200'],["192.168.2.2","192.168.2.200"]] +# list enumerating ranges of VXLAN local ip that available for +# neutron-openvswitch-agent or neutron-ovdk-agent. +CONFIG_NEUTRON_ML2_VTEP_IP_RANGES=[['172.43.166.2','172.43.166.20']] + +# The IP address and port of zenic northbound interface.(eg. 1.1.1.1:8181 ) +CONFIG_ZENIC_API_NODE= + +# The user name and password of zenic northbound interface. +CONFIG_ZENIC_USER_AND_PW=restconf:LkfhRDGIPyGzbWGM2uAaNQ== + +#Custom l2 json files include sriov agent and ovs agent ether configuration +#configuration just for daisy +CONFIG_NEUTRON_ML2_JSON_PATH= + +# Name of the L2 agent to be used with OpenStack Networking. +# ['linuxbridge', 'openvswitch'] +CONFIG_NEUTRON_L2_AGENT=openvswitch + +# Comma-separated list of interface mappings for the OpenStack +# Networking linuxbridge plugin. Each tuple in the list must be in the +# format :. Example: +# physnet1:eth1,physnet2:eth2,physnet3:eth3. +CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS= + +# Comma-separated list of bridge mappings for the OpenStack +# Networking Open vSwitch plugin. Each tuple in the list must be in +# the format :. Example: physnet1:br- +# eth1,physnet2:br-eth2,physnet3:br-eth3 +CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS=physnet1:br-data1 + +# A comma separated list of colon-separated OVS physnet:interface +# pairs. The interface will be added to the associated physnet. +CONFIG_NEUTRON_OVS_PHYSNET_IFACES= + +# Comma-separated list of colon-separated Open vSwitch +# : pairs. The interface will be added to the +# associated bridge. +CONFIG_NEUTRON_OVS_BRIDGE_IFACES=br-data1:eth0 + +#config compute hosts for the Neutron sriov agent type +CONFIG_NEUTRON_SRIOV_AGENT_TYPE= + +#Enter a comma separated list of bridge mappings for +#the Neutron sriov plugin +CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS= + +#A comma separated list of colon-separated SRIOV +#physnet:interface pairs. The interface will be added to the associated physnet. +CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES= + +# Interface for the Open vSwitch tunnel. Packstack overrides the IP +# address used for tunnels on this hypervisor to the IP found on the +# specified interface (for example, eth1). +CONFIG_NEUTRON_OVS_TUNNEL_IF= + +# VXLAN UDP port. +CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789 + +# support LB, HA or None. when select LB, TECS BIN must be run in another node +# which doesn't install any openstack service. 
+# HORIZON LB is not support now for some technical matter. +CONFIG_HORIZON_INSTALL_MODE=None + +# The float IP address of the server on which to install Horizon +CONFIG_HORIZON_HOST= + +# IP address of the servers on which to install Horizon +CONFIG_HORIZON_HOSTS= + +# Specify 'y' to set up Horizon communication over https. ['y', 'n'] +CONFIG_HORIZON_SSL=n + +# PEM-encoded certificate to be used for SSL connections on the https +# server (the certificate should not require a passphrase). To +# generate a certificate, leave blank. +CONFIG_SSL_CERT= + +# SSL keyfile corresponding to the certificate if one was specified. +CONFIG_SSL_KEY= + +# PEM-encoded CA certificates from which the certificate chain of the +# server certificate can be assembled. +CONFIG_SSL_CACHAIN= + +# Password to use for the Object Storage service to authenticate with +# the Identity service. +CONFIG_SWIFT_KS_PW=swift + +# Comma-separated list of devices to use as storage device for Object +# Storage. Each entry must take the format /path/to/dev (for example, +# specifying /dev/vdb installs /dev/vdb as the Object Storage storage +# device; Packstack does not create the filesystem, you must do this +# first). If left empty, Packstack creates a loopback device for test +# setup. +CONFIG_SWIFT_STORAGES= + +# Number of Object Storage storage zones; this number MUST be no +# larger than the number of configured storage devices. +CONFIG_SWIFT_STORAGE_ZONES=1 + +# Number of Object Storage storage replicas; this number MUST be no +# larger than the number of configured storage zones. +CONFIG_SWIFT_STORAGE_REPLICAS=1 + +# File system type for storage nodes. ['xfs', 'ext4'] +CONFIG_SWIFT_STORAGE_FSTYPE=ext4 + +# Custom seed number to use for swift_hash_path_suffix in +# /etc/swift/swift.conf. If you do not provide a value, a seed number +# is automatically generated. +CONFIG_SWIFT_HASH=4348fdf97ba34767 + +# Size of the Object Storage loopback file storage device. +CONFIG_SWIFT_STORAGE_SIZE=2G + +# support LB, HA or None +CONFIG_HEAT_API_INSTALL_MODE=None + +# support LB, HA or None +CONFIG_HEAT_API_CFN_INSTALL_MODE=None + +# The float IP address of the server on which to install Heat service +CONFIG_HEAT_HOST= + +# IP address of the servers on which to install Heat service +CONFIG_HEAT_HOSTS= +# The float IP address of the server on which to install heat-api service +CONFIG_HEAT_API_HOST= + +# IP address of the servers on which to install heat-api service +CONFIG_HEAT_API_HOSTS= + +# The float IP address of the server on which to install heat-api-cfn service +CONFIG_HEAT_API_CFN_HOST= + +# IP address of the servers on which to install heat-api-cfn service +CONFIG_HEAT_API_CFN_HOSTS= + +# The float IP address of the server on which to install heat-api-cloudwatch service +CONFIG_HEAT_API_CLOUDWATCH_HOST= + +# IP address of the servers on which to install heat-api-cloudwatch service +CONFIG_HEAT_API_CLOUDWATCH_HOSTS= + +# The float IP address of the server on which to install heat-engine service +CONFIG_HEAT_ENGINE_HOST= + +# IP address of the servers on which to install heat-engine service +CONFIG_HEAT_ENGINE_HOSTS= + +# Password used by Orchestration service user to authenticate against +# the database. +CONFIG_HEAT_DB_PW=heat + +# Encryption key to use for authentication in the Orchestration +# database (16, 24, or 32 chars). +CONFIG_HEAT_AUTH_ENC_KEY=d344d3167eb34b07 + +# Password to use for the Orchestration service to authenticate with +# the Identity service. 
+CONFIG_HEAT_KS_PW=heat + +# Specify 'y' to install the Orchestration CloudWatch API. ['y', 'n'] +CONFIG_HEAT_CLOUDWATCH_INSTALL=n + +# Specify 'y' to install the Orchestration CloudFormation API. ['y', +# 'n'] +CONFIG_HEAT_CFN_INSTALL=y + +# Name of the Identity domain for Orchestration. +CONFIG_HEAT_DOMAIN=heat + +# Name of the Identity domain administrative user for Orchestration. +CONFIG_HEAT_DOMAIN_ADMIN=heat_admin + +# Password for the Identity domain administrative user for Orchestration. +CONFIG_HEAT_DOMAIN_PASSWORD=heat + +# Specify 'y' to provision for demo usage and testing. ['y', 'n'] +CONFIG_PROVISION_DEMO=n + +# Specify 'y' to configure the OpenStack Integration Test Suite +# (tempest) for testing. The test suite requires OpenStack Networking +# to be installed. ['y', 'n'] +CONFIG_PROVISION_TEMPEST=n + +# CIDR network address for the floating IP subnet. +CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28 + +# The name to be assigned to the demo image in Glance (default +# "cirros"). +CONFIG_PROVISION_IMAGE_NAME=cirros + +# A URL or local file location for an image to download and provision +# in Glance (defaults to a URL for a recent "cirros" image). +CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img + +# Format for the demo image (default "qcow2"). +CONFIG_PROVISION_IMAGE_FORMAT=qcow2 + +# User to use when connecting to instances booted from the demo +# image. +CONFIG_PROVISION_IMAGE_SSH_USER=cirros + +# Name of the Integration Test Suite provisioning user. If you do not +# provide a user name, Tempest is configured in a standalone mode. +CONFIG_PROVISION_TEMPEST_USER= + +# Password to use for the Integration Test Suite provisioning user. +CONFIG_PROVISION_TEMPEST_USER_PW=tempest + +# CIDR network address for the floating IP subnet. +CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28 + +# URI of the Integration Test Suite git repository. +CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git + +# Revision (branch) of the Integration Test Suite git repository. +CONFIG_PROVISION_TEMPEST_REPO_REVISION=master + +# Specify 'y' to configure the Open vSwitch external bridge for an +# all-in-one deployment (the L3 external bridge acts as the gateway +# for virtual machines). ['y', 'n'] +CONFIG_PROVISION_ALL_IN_ONE_OVS_BRIDGE=n + +# Password to use for OpenStack Data Processing (sahara) to access +# the database. +CONFIG_SAHARA_DB_PW=sahara + +# Password to use for OpenStack Data Processing to authenticate with +# the Identity service. +CONFIG_SAHARA_KS_PW=sahara + +# Secret key for signing Telemetry service (ceilometer) messages. +CONFIG_CEILOMETER_SECRET=d8c381820a444a6e + +# Password to use for Telemetry to authenticate with the Identity +# service. +CONFIG_CEILOMETER_KS_PW=ceilometer + +# Backend driver for Telemetry's group membership coordination. 
+# ['redis', 'none'] +CONFIG_CEILOMETER_COORDINATION_BACKEND=none + +# support LB, HA or None +CONFIG_CEILOMETER_API_INSTALL_MODE=None + +#float ip address of ceilometer-api +CONFIG_CEILOMETER_API_HOST= + +#IP address of the server on which to install ceilometer-api +CONFIG_CEILOMETER_API_HOSTS= + +#float ip address of ceilometer-collector +CONFIG_CEILOMETER_COLLECTOR_HOST= + +#IP address of the server on which to install ceilometer-collector +CONFIG_CEILOMETER_COLLECTOR_HOSTS= + +#float ip address of ceilometer-notification +CONFIG_CEILOMETER_NOTIFICATION_HOST= + +#IP address of the server on which to install ceilometer-notification +CONFIG_CEILOMETER_NOTIFICATION_HOSTS= + +#float ip address of ceilometer-central +CONFIG_CEILOMETER_CENTRAL_HOST= + +#IP address of the server on which to install ceilometer-central +CONFIG_CEILOMETER_CENTRAL_HOSTS= + +#float ip address of ceilometer-alarm-evaluator & ceilometer-alarm-notifier. +CONFIG_CEILOMETER_ALARM_HOST= + +#IP address of the server on which to install ceilometer-alarm-evaluator & ceilometer-alarm-notifier. +CONFIG_CEILOMETER_ALARM_HOSTS= + +# float IP address of the MongoDB. +CONFIG_MONGODB_HOST= + +#IP address of the server on which to install MongoDB. +CONFIG_MONGODB_HOSTS= + +# IP address of the server on which to install the Redis master +# server. +CONFIG_REDIS_MASTER_HOST= + +# Port on which the Redis server(s) listens. +CONFIG_REDIS_PORT=6379 + +# Specify 'y' to have Redis try to use HA. ['y', 'n'] +CONFIG_REDIS_HA=n + +# Hosts on which to install Redis slaves. +CONFIG_REDIS_SLAVE_HOSTS= + +# Hosts on which to install Redis sentinel servers. +CONFIG_REDIS_SENTINEL_HOSTS= + +# Host to configure as the Redis coordination sentinel. +CONFIG_REDIS_SENTINEL_CONTACT_HOST= + +# Port on which Redis sentinel servers listen. +CONFIG_REDIS_SENTINEL_PORT=26379 + +# Quorum value for Redis sentinel servers. +CONFIG_REDIS_SENTINEL_QUORUM=2 + +# Name of the master server watched by the Redis sentinel. ['[a-z]+'] +CONFIG_REDIS_MASTER_NAME=mymaster + +# Password to use for OpenStack Database-as-a-Service (trove) to +# access the database. +CONFIG_TROVE_DB_PW=trove + +# Password to use for OpenStack Database-as-a-Service to authenticate +# with the Identity service. +CONFIG_TROVE_KS_PW=trove + +# User name to use when OpenStack Database-as-a-Service connects to +# the Compute service. +CONFIG_TROVE_NOVA_USER=admin + +# Tenant to use when OpenStack Database-as-a-Service connects to the +# Compute service. +CONFIG_TROVE_NOVA_TENANT=services + +# Password to use when OpenStack Database-as-a-Service connects to +# the Compute service. +CONFIG_TROVE_NOVA_PW=trove + +# Password of the nagiosadmin user on the Nagios server. +CONFIG_NAGIOS_PW=nagios + +# This option decides whether install ovdk and ovdk agents or +# ovs agent patch which can support sdn vxlan, the format such as +# {'ovdk':['10.43.211.2','10.43.211.12','10.43.211.15'], +# 'ovs_agent_patch':['10.43.211.105','10.43.211.106']}", +CONFIG_DVS_TYPE= + +# Comma-separated list of physical nics used by ovdk +# Example: eth0,eth1 +CONFIG_DVS_PHYSICAL_NICS= + +# Set dvs vxlan info, when use "vxlan bond" mode, the format +# as:bondname(bond mode; lacp mode; bond nics) +# Example: bond1(active-backup;off;eth0-eth1) or eth0 +CONFIG_DVS_VXLAN_INFO= + +# This option decide whether let ovdk agents to enable dvs support outside vtep endpoint. 
+CONFIG_DVS_VTEP=n
+
+# The ID of the DCI domain, so that agents in different DCI centers do not create VXLAN endpoints to each other
+
+# Dict such as {0:['10.43.211.2','10.43.211.12','10.43.211.15'],1:['10.43.211.105','10.43.211.105']}
+# mapping each DCI domain ID to its hosts, so that neutron-ovdk-agents in
+# different DCI centers do not create VXLAN endpoints to each other.
+CONFIG_DVS_NODE_DOMAIN_ID=0
+
+# Type of network to allocate for tenant networks (e.g. vlan, local,
+# gre, vxlan)
+CONFIG_NEUTRON_OVS_TENANT_NETWORK_TYPE=vxlan
+
+# Specify 'y' to install the log server. ['y', 'n']
+CONFIG_LOG_INSTALL=n
+
+# Hosts on which to install the log server.
+CONFIG_LOG_SERVER_HOSTS=
+
+# Specify 'y' if you would like Packstack to install MongoDB locally for HA.
+CONFIG_HA_INSTALL_MONGODB_LOCAL=n
\ No newline at end of file
diff --git a/backend/tecs/tfg_upgrade.sh b/backend/tecs/tfg_upgrade.sh
new file mode 100755
index 00000000..39825a78
--- /dev/null
+++ b/backend/tecs/tfg_upgrade.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+scriptsdir=$(cd $(dirname $0) && pwd)
+ISODIR=`mktemp -d /mnt/TFG_ISOXXXXXX`
+mount -o loop $scriptsdir/*CGSL_VPLAT*.iso ${ISODIR}
+cp ${ISODIR}/*CGSL_VPLAT*.bin $scriptsdir
+umount ${ISODIR}
+[ -e ${ISODIR} ] && rm -rf ${ISODIR}
+$scriptsdir/*CGSL_VPLAT*.bin upgrade reboot
diff --git a/backend/tecs/trustme.sh b/backend/tecs/trustme.sh
new file mode 100755
index 00000000..77303894
--- /dev/null
+++ b/backend/tecs/trustme.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+# Make the target host fully trust this one, so that later ssh logins to it need no password
+
+# Check that the arguments are valid
+logfile=/var/log/trustme.log
+function print_log
+{
+  local promt="$1"
+  echo -e "$promt"
+  echo -e "`date -d today +"%Y-%m-%d %H:%M:%S"` $promt" >> $logfile
+}
+
+ip=$1
+if [ -z $ip ]; then
+  print_log "Usage: `basename $0` ipaddr passwd"
+  exit 1
+fi
+
+passwd=$2
+if [ -z $passwd ]; then
+  print_log "Usage: `basename $0` ipaddr passwd"
+  exit 1
+fi
+
+rpm -qi sshpass >/dev/null
+if [ $? != 0 ]; then
+  print_log "Please install sshpass first"
+  exit 1
+fi
+
+# Check whether the peer host can be pinged
+unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
+if [ $unreachable -eq 1 ]; then
+  print_log "host $ip is unreachable"
+  exit 1
+fi
+
+# Generate an ssh public key if this host does not have one yet
+if [ ! -e ~/.ssh/id_dsa.pub ]; then
+  print_log "generating ssh public key ..."
+  ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
+  if [ $? != 0 ]; then
+    print_log "ssh-keygen failed"
+    exit 1
+  fi
+fi
+
+# First remove any previously stored trusted public key of ours on the peer
+user=`whoami`
+host=`hostname`
+keyend="$user@$host"
+print_log "my keyend = $keyend"
+cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
+#echo cmd:$cmd
+print_log "clear my old pub key on $ip ..."
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm -rf /root/.ssh/known_hosts"
+if [ $? != 0 ]; then
+  print_log "ssh $ip to delete known_hosts failed"
+  exit 1
+fi
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "touch ~/.ssh/authorized_keys"
+if [ $? != 0 ]; then
+  print_log "ssh $ip to create file authorized_keys failed"
+  exit 1
+fi
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "$cmd"
+if [ $? != 0 ]; then
+  print_log "ssh $ip to edit authorized_keys failed"
+  exit 1
+fi
+# Copy the newly generated key over
+print_log "copy my public key to $ip ..."
+tmpfile=/tmp/`hostname`.key.pub
+sshpass -p $passwd scp -o StrictHostKeyChecking=no ~/.ssh/id_dsa.pub $ip:$tmpfile
+if [ $? != 0 ]; then
+  print_log "scp file to $ip failed"
+  exit 1
+fi
+# Append it to authorized_keys on the peer
+print_log "on $ip, append my public key to ~/.ssh/authorized_keys ..."
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
+if [ $? != 0 ]; then
+  print_log "ssh $ip to add public key for authorized_keys failed"
+  exit 1
+fi
+print_log "rm tmp file $ip:$tmpfile"
+sshpass -p $passwd ssh -o StrictHostKeyChecking=no $ip "rm $tmpfile"
+if [ $? != 0 ]; then
+  print_log "ssh $ip to delete tmp file failed"
+  exit 1
+fi
+print_log "trustme ok!"
+
diff --git a/backend/zenic/trustme.sh b/backend/zenic/trustme.sh
new file mode 100755
index 00000000..54bd7cb3
--- /dev/null
+++ b/backend/zenic/trustme.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+# Make the target host fully trust this one, so that later ssh logins to it need no password
+
+# Check that the arguments are valid
+ip=$1
+if [ -z $ip ]; then
+  echo "Usage: `basename $0` ipaddr passwd" >&2
+  exit 1
+fi
+
+passwd=$2
+if [ -z $passwd ]; then
+  echo "Usage: `basename $0` ipaddr passwd" >&2
+  exit 1
+fi
+
+rpm -qi sshpass >/dev/null
+if [ $? != 0 ]; then
+  echo "Please install sshpass first!" >&2
+  exit 1
+fi
+
+# Check whether the peer host can be pinged
+unreachable=`ping $ip -c 1 -W 3 | grep -c "100% packet loss"`
+if [ $unreachable -eq 1 ]; then
+  echo "host $ip is unreachable!!!"
+  exit 1
+fi
+
+# Generate an ssh public key if this host does not have one yet
+if [ ! -e ~/.ssh/id_dsa.pub ]; then
+  echo "generating ssh public key ..."
+  ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
+fi
+
+# First remove any previously stored trusted public key of ours on the peer
+user=`whoami`
+host=`hostname`
+keyend="$user@$host"
+echo "my keyend = $keyend"
+cmd="sed '/$keyend$/d' -i ~/.ssh/authorized_keys"
+#echo cmd:$cmd
+echo "clear my old pub key on $ip ..."
+sshpass -p $passwd ssh $ip "rm -rf /root/.ssh/known_hosts"
+sshpass -p $passwd ssh $ip "touch ~/.ssh/authorized_keys"
+sshpass -p $passwd ssh $ip "$cmd"
+
+# Copy the newly generated key over
+echo "copy my public key to $ip ..."
+tmpfile=/tmp/`hostname`.key.pub
+sshpass -p $passwd scp ~/.ssh/id_dsa.pub $ip:$tmpfile
+
+# Append it to authorized_keys on the peer
+echo "on $ip, append my public key to ~/.ssh/authorized_keys ..."
+sshpass -p $passwd ssh $ip "cat $tmpfile >> ~/.ssh/authorized_keys"
+echo "rm tmp file $ip:$tmpfile"
+sshpass -p $passwd ssh $ip "rm $tmpfile"
+echo "trustme ok!"
+
+
+
+
diff --git a/backend/zenic/zenic.conf b/backend/zenic/zenic.conf
new file mode 100755
index 00000000..ca243272
--- /dev/null
+++ b/backend/zenic/zenic.conf
@@ -0,0 +1,17 @@
+[general]
+nodeip=192.168.3.1
+nodeid=1
+hostname=sdn59
+needzamp=y
+zbpips=192.168.3.1
+zbp_node_num=1
+zbpnodelist=1,256
+zampips=192.168.3.1
+zamp_node_num=1
+mongodbips=192.168.3.1
+mongodb_node_num=1
+zamp_vip=
+mongodb_vip=
+MacName=eth1
+netid=1234
+memmode=tiny
diff --git a/code/daisy/AUTHORS b/code/daisy/AUTHORS
new file mode 100755
index 00000000..f4425b40
--- /dev/null
+++ b/code/daisy/AUTHORS
@@ -0,0 +1,348 @@
+Aaron Rosen
+Abhijeet Malawade
+Abhishek Kekane
+Adam Gandelman
+Adam Gandelman
+Alberto Planas
+Alessandro Pilotti
+Alessio Ababilov
+Alessio Ababilov
+Alex Gaynor
+Alex Meade
+Alexander Gordeev
+Alexander Tivelkov
+Amala Basha
+AmalaBasha
+AmalaBasha
+Anastasia Vlaskina
+Andreas Jaeger
+Andrew Hutchings
+Andrew Melton
+Andrew Tranquada
+Andrey Brindeyev
+Andy McCrae
+Anita Kuno
+Arnaud Legendre
+Artur Svechnikov
+Ashish Jain
+Ashwini Shukla
+Aswad Rangnekar
+Attila Fazekas
+Avinash Prasad
+Balazs Gibizer
+Bartosz Fic
+Ben Nemec
+Ben Roble
+Bernhard M. Wiedemann
+Bhuvan Arumugam
+Boris Pavlovic
+Brant Knudson
+Brian Cline
+Brian D.
Elliott +Brian Elliott +Brian Elliott +Brian Lamar +Brian Rosmaita +Brian Waldon +Cerberus +Chang Bo Guo +ChangBo Guo(gcb) +Chmouel Boudjnah +Chris Allnutt +Chris Behrens +Chris Buccella +Chris Buccella +Chris Fattarsi +Christian Berendt +Christopher MacGown +Chuck Short +Cindy Pallares +Clark Boylan +Cory Wright +Dan Prince +Danny Al-Gaaf +Davanum Srinivas +Davanum Srinivas +Dave Chen +Dave Walker (Daviey) +David Koo +David Peraza +David Ripton +Dean Troyer +DennyZhang +Derek Higgins +Dirk Mueller +Dmitry Kulishenko +Dolph Mathews +Donal Lafferty +Doron Chen +Doug Hellmann +Doug Hellmann +Duncan McGreggor +Eddie Sheffield +Edward Hope-Morley +Eldar Nugaev +Elena Ezhova +Eoghan Glynn +Eric Brown +Eric Windisch +Erno Kuvaja +Eugeniya Kudryashova +Ewan Mellor +Fabio M. Di Nitto +Fei Long Wang +Fei Long Wang +Fengqian Gao +Flaper Fesp +Flavio Percoco +Florent Flament +Gabriel Hurley +Gauvain Pocentek +Geetika Batra +George Peristerakis +Georgy Okrokvertskhov +Gerardo Porras +Gorka Eguileor +Grant Murphy +Haiwei Xu +He Yongli +Hemanth Makkapati +Hemanth Makkapati +Hengqing Hu +Hirofumi Ichihara +Hui Xiang +Ian Cordasco +Iccha Sethi +Igor A. Lukyanenkov +Ihar Hrachyshka +Ildiko Vancsa +Ilya Pekelny +Inessa Vasilevskaya +Ionu葲 Ar葲膬ri葯i +Isaku Yamahata +J. Daniel Schmidt +Jakub Ruzicka +James Carey +James E. Blair +James Li +James Morgan +James Polley +Jamie Lennox +Jared Culp +Jasakov Artem +Jason Koelker +Jason K枚lker +Jay Pipes +Jeremy Stanley +Jesse Andrews +Jesse J. Cook +Jia Dong +Jinwoo 'Joseph' Suh +Joe Gordon +Joe Gordon +Johannes Erdfelt +John Bresnahan +John Lenihan +John Warren +Jon Bernard +Joseph Suh +Josh Durgin +Josh Durgin +Josh Kearney +Joshua Harlow +Juan Manuel Olle +Juerg Haefliger +Julia Varlamova +Julien Danjou +Jun Hong Li +Justin Santa Barbara +Justin Shepherd +KIYOHIRO ADACHI +Kamil Rykowski +Kasey Alusi +Ken Pepple +Ken Thomas +Kent Wang +Keshava Bharadwaj +Kevin L. Mitchell +Kui Shi +Kun Huang +Lakshmi N Sampath +Lars Gellrich +Leam +Leandro I. Costantino +Liu Yuan +Lorin Hochstein +Louis Taylor +Louis Taylor +Luis A. Garcia +Major Hayden +Mark J. Washenberger +Mark J. Washenberger +Mark McLoughlin +Mark Washenberger +Martin Kletzander +Maru Newby +Masashi Ozawa +Matt Dietz +Matt Fischer +Matt Riedemann +Matthew Booth +Matthew Treinish +Matthias Schmitz +Maurice Leeflang +Mauro S. M. 
Rodrigues +Michael J Fork +Michael Still +Michal Dulko +Mike Fedosin +Mike Lundy +Monty Taylor +Nassim Babaci +Nicholas Kuechler +Nicolas Simonds +Nikhil Komawar +Nikhil Komawar +Nikolaj Starodubtsev +Noboru Arai +Noboru arai +Oleksii Chuprykov +Olena Logvinova +Pamela-Rose Virtucio +Patrick Mezard +Paul Bourke +Paul Bourke +Paul McMillan +Pavan Kumar Sunkara +Pawel Koniszewski +Pawel Skowron +Peng Yong +Pete Zaitcev +Pranali Deore +PranaliDeore +P谩draig Brady +P谩draig Brady +Radu +Rainya Mosher +Rajesh Tailor +Ray Chen +Reynolds Chin +Rick Clark +Rick Harris +Robert Collins +Rohan Kanade +Roman Bogorodskiy +Roman Bogorodskiy +Roman Vasilets +Rongze Zhu +RongzeZhu +Russell Bryant +Russell Sim +Sabari Kumar Murugesan +Sam Morrison +Sam Stavinoha +Samuel Merritt +Sascha Peilicke +Sascha Peilicke +Sathish Nagappan +Sean Dague +Sean Dague +Sergey Nikitin +Sergey Skripnick +Sergey Vilgelm +Sergio Cazzolato +Shane Wang +Soren Hansen +Stan Lagun +Steve Kowalik +Steve Lewis +Stuart McLaren +Sulochan Acharya +Svetlana Shturm +Taku Fukushima +Tatyana Leontovich +Therese McHale +Thierry Carrez +Thomas Bechtold +Thomas Bechtold +Thomas Leaman +Tim Daly, Jr +Toan Nguyen +Tom Hancock +Tom Leaman +Tomas Hancock +Travis Tripp +Unmesh Gurjar +Unmesh Gurjar +Vaibhav Bhatkar +Venkatesh Sampath +Venkatesh Sampath +Victor Morales +Victor Sergeyev +Vincent Untz +Vishvananda Ishaya +Vitaliy Kolosov +Vyacheslav Vakhlyuev +Wayne A. Walls +Wayne Okuma +Wen Cheng Ma +Wu Wenxiang +YAMAMOTO Takashi +Yaguang Tang +Yanis Guenane +Yufang Zhang +Yuriy Taraday +Yusuke Ide +ZHANG Hua +Zhenguo Niu +Zhi Yan Liu +ZhiQiang Fan +ZhiQiang Fan +Zhiteng Huang +Zhongyue Luo +abhishek-kekane +abhishekkekane +amalaba +ankitagrawal +ankur +annegentle +daisy-ycguo +eddie-sheffield +eos2102 +gengjh +henriquetruta +huangtianhua +hzrandd <82433422@qq.com> +iccha +iccha-sethi +iccha.sethi +isethi +jakedahn +jare6412 +jaypipes@gmail.com <> +jola-mirecka +lawrancejing +leseb +ling-yun +liuqing +liyingjun +liyingjun +lizheming +llg8212 +ls1175 +marianitadn +mathrock +nanhai liao +pran1990 +ravikumar-venkatesan +sai krishna sripada +sarvesh-ranjan +shreeduth-awasthi +shrutiranade38 +shu,xinxin +sridevik +sridevik +tanlin +tmcpeak +wanghong +yangxurong diff --git a/code/daisy/CONTRIBUTING.rst b/code/daisy/CONTRIBUTING.rst new file mode 100755 index 00000000..8fe8d547 --- /dev/null +++ b/code/daisy/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps in documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. 
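For illustration only, the Gerrit workflow referenced above usually comes down to a few commands; this is a minimal sketch assuming the git-review tool, with the repository URL and branch name as placeholders:

    pip install git-review                                       # Gerrit helper used by the OpenStack workflow
    git clone https://git.openstack.org/openstack/glance && cd glance
    git review -s                                                # one-time setup of the Gerrit remote
    git checkout -b fix-example                                  # work on a topic branch
    git commit -a                                                # commit the change locally
    git review                                                   # push the change to Gerrit for review, not GitHub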
+ +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/glance diff --git a/code/daisy/ChangeLog b/code/daisy/ChangeLog new file mode 100755 index 00000000..63933ed3 --- /dev/null +++ b/code/daisy/ChangeLog @@ -0,0 +1,2869 @@ +CHANGES +======= + +2015.1.0 +-------- + +* Metadef JSON files need to be updated +* Plugin types are not exposed to the client +* v1 API should be in SUPPORTED status +* Read tag name instead of ID +* Updated from global requirements +* Release Import of Translations from Transifex +* update .gitreview for stable/kilo + +2015.1.0rc1 +----------- + +* Fixes glance-manage exporting meta definitions issue +* Catch UnknownScheme exception +* Refactor API function test class +* Move elasticsearch dep to test-requirements.txt +* Update openstack-common reference in openstack/common/README +* Zero downtime config reload (glance-control) +* Imported Translations from Transifex +* Glance cache to not prune newly cached images +* glance-manage db load_metadefs does not load all resource_type_associations +* Fix intermittent unit test failures +* Fix intermittent test case failure due to dict order +* Imported Translations from Transifex +* A mixin for jsonpatch requests validation +* Artifact Plugins Loader +* Declarative definitions of Artifact Types +* Creating metadef object without any properties +* Zero downtime config reload (log handling) +* Database layer for Artifact Repository +* Catalog Index Service - Index Update +* Catalog Index Service +* Zero downtime config reload (socket handling) +* Typo in pylintrc file +* Fix metadef tags migrations +* Update documentation for glance-manage +* Fix common misspellings + +2015.1.0b3 +---------- + +* Replace assert statements with proper control-flow +* Remove use of contextlib.nested +* Use graduated oslo.policy +* oslo: migrate namespace-less import paths +* Fix typo in rpc controller +* Fixes typo in doc-string +* wsgi: clean JSON serializer +* use is_valid_port from oslo.utils +* Add ability to deactivate an image +* Remove deprecated option db_enforce_mysql_charset +* Raise exception if store location URL not found +* Fix missing translations for error and info +* Basic support for image conversion +* Extend images api v2 with new sorting syntax +* Add the ability to specify the sort dir for each key +* Move to graduated oslo.log module +* Provide a way to upgrade metadata definitions +* Pass a real image target to the policy enforcer +* Glance basic architecture section +* Fix typo in configuration file +* Updated from global requirements +* Add sync check for models_metadef +* Notifications for metadefinition resources +* Update config and docs for multiple datastores support +* Avoid usability regression when generating config +* Glance Image Introspection +* Add capabilities to storage driver +* Updated from global requirements +* Zero downtime configuration reload +* Add operators to provide multivalue support +* Remove the eventlet executor +* SemVer utility to store object versions in DB +* Switch to latest oslo-incubator +* Use oslo_config choices support +* Fix the wrong format in the example +* Remove en_US translation +* db_export_metadefs generates inappropriate json files +* Synchronising oslo-incubator service module +* Unify using six.moves.range rename everywhere +* Updated from global requirements +* Glance returns HTTP 500 for image download +* Remove boto from requirements.txt +* Unbreak python-swiftclient gate +* Eventlet green threads not released back to pool +* Imported 
Translations from Transifex +* Removes unnecessary assert +* Prevents swap files from being found by Git +* Add BadStoreConfiguration handling to glance-api +* Remove redundant parentheses in conditional statements +* Make sure the parameter has the consistent meaning +* Image data remains in backend for deleted image +* Remove is_public from reserved attribute in v2 +* unify some messages +* Typos fixed in the comments +* The metadef tags create api does not match blue-print +* Clarified doc of public_endpoint config option +* Add detail description of image_cache_max_size +* Updated from global requirements + +2015.1.0b2 +---------- + +* Add Support for TaskFlow Executor +* Include readonly flag in metadef API +* Fix for CooperativeReader to process read length +* Software Metadata Definitions +* Updated from global requirements +* Rewrite SSL tests +* Replace snet config with endpoint config +* Simplify context by using oslo.context +* Handle empty request body with chunked encoding +* Update vmware_adaptertype metadef values +* Typos fixed in the comments +* Updated from global requirements +* Redundant __init__ def in api.authorization.MetadefTagProxy +* Make digest algorithm configurable +* Switch to mox3 +* Remove argparse from requirement +* Remove optparse from glance-replicator +* Eliminate shell param from subprocesses in tests +* Remove test dependency on curl +* Cleanup chunks for deleted image that was 'saving' +* remove need for netaddr +* Fix copy-from when user_storage_quota is enabled +* remove extraneous --concurrency line in tox +* SQL scripts should not manage transactions +* Fixes line continuations +* Upgrade to hacking 0.10 +* Removed python-cinderclient from requirements.txt +* Move from oslo.db to oslo_db +* Move from oslo.config to oslo_config +* Improve documentation for glance_stores +* Fix reference to "stores" from deprecated name +* Move from oslo.utils to oslo_utils +* Updated from global requirements +* Updated from global requirements +* Prevent file, swift+config and filesystem schemes +* Simplify usage of str.startswith +* Adding filesystem schema check in async task +* Fix spelling typo +* Fix rendering of readme document +* Imported Translations from Transifex +* Add swift_store_cacert to config files and docs +* Add latest swift options in glance-cache.conf +* Fix document issue of image recover status +* rename oslo.concurrency to oslo_concurrency +* Provide a quick way to run flake8 +* Fix 3 intermittently failing tests +* Removed obsolete db_auto_create configuration option +* Fix client side i18n support for v1 api +* Move default_store option in glance-api.conf +* Removes http-requests to glance/example.com in glance test +* Remove _i18n from openstack-common +* Adds the ability to sort images with multiple keys +* Add sort key validation in v2 api +* Fixes typo: glance exception additional dot +* Allow $OS_AUTH_URL environment variable to override config file value +* Bump API version to 2.3 +* Replace '_' with '_LI', '_LE', '_LW', '_LC' + +2015.1.0b1 +---------- + +* Removes unused modules: timeutils and importutils +* Generate glance-manage.conf +* Imported Translations from Transifex +* Adding Metadef Tag support +* Removed unnecessary dot(.) 
from log message +* Using oslo.concurrency lib +* Update config and docs for Multiple Containers +* To prevent client use v2 patch api to handle file and swift location +* Updated from global requirements +* Use testr directly from tox +* Remove reliance on import order of oslo.db mods +* Remove openstack.common.gettextutils module +* Fix typo in common module +* Fix and add a test case for IPv6 +* Start server message changed +* Fix getaddrinfo if dnspython is installed +* Workflow documentation is now in infra-manual +* Allow None values to be returned from the API +* Expose nullable fields properties +* Allow some fields to be None +* Update glance.openstack.common.policy and cleanup +* A small refactoring of the domain +* Updated from global requirements +* Disable osprofiler by default +* Work toward Python 3.4 support and testing +* Correct GlanceStoreException to provide valid message - Glance +* Remove Python 2.6 classifier +* Add ModelSMigrationSync classes +* Alter models and add migration +* No 4 byte unicode allowed in image parameters +* Update rally-jobs files +* Move from using _ builtin to using glance.i18n _ +* Change Glance to use i18n instead of gettextutils +* Raising glance logging levels +* Imported Translations from Transifex +* Do not use LazyPluggable +* metadef modules should only use - from wsme.rest import json +* Wrong order of assertEquals args(Glance) +* Removal of unnecessary sample file from repository +* Upgrade tests' mocks to match glance_store +* Remove exception declarations from replicator.py +* Typo correction of the prefix value in compute-host-capabilities +* Replace custom lazy loading by stevedore +* vim ropeproject directories added to gitignore +* Initiate deletion of image files if the import was interrupted +* Raise an exception when quota config parameter is broken +* Fix context storage bug +* Ignore Eric IDE files and folders in git +* Make RequestContext use auth_token (not auth_tok) +* Swift Multi-tenant store: Pass context on upload +* Use unicode for error message +* change default value for s3_store_host +* remove url-path from the default value of s3_store_host +* Complete the change of adding public_endpoint option +* Update the vmware_disktype metadefs values +* Add config option to override url for versions +* Separate glance and eventlet wsgi logging +* Remove openstack.common.test +* Remove modules from openstack-common.conf +* Improve error log for expired image location url +* Handle some exceptions of image_create v2 api +* Remove eventlet_hub option +* Adds openSUSE in the installing documentation +* Glance scrubber should page thru images from registry +* Add logging to image_members and image_tags +* Update glance.openstack.common + +2014.2 +------ + +* Fix options and their groups - etc/glance-api.conf +* Fix options and their groups - etc/glance-api.conf +* Adjust authentication.rst doc to reference "identity_uri" +* Can not delete images if db deadlock occurs +* Reduce extraneous test output +* Isolate test from environment variables +* Fix for adopt glance.store library in Glance +* Adjust authentication.rst doc to reference "identity_uri" + +2014.2.rc2 +---------- + +* Use identity_uri instead of older fragments +* Prevent setting swift+config locations +* Metadef schema column name is a reserved word in MySQL +* Remove stale chunks when failed to update image to registry +* GET property which name includes resource type prefix +* g-api raises 500 error while uploading image +* Fix for Adopt glance.store library in 
Glance +* Update Metadefs associated with ImagePropertiesFilter +* updated translations +* Use ID for namespace generated by DB +* Metadef Property and Object schema columns should use JSONEncodedDict +* Add missing metadefs for shutdown behavior +* Update driver metadata definitions to Juno +* Mark custom properties in image schema as non-base +* Specify the MetadefNamespace.namespace column is not nullable +* Make compute-trust.json compatible with TrustFilter +* Include Metadata Defs Concepts in Dev Docs +* Nova instance config drive Metadata Definition +* Add missing metadefs for Aggregate Filters +* Updated from global requirements + +2014.2.rc1 +---------- + +* Imported Translations from Transifex +* Add specific docs build option to tox +* Add documentation for a new storage file permissions option +* Updated from global requirements +* Remove db_enforce_mysql_charset option for db_sync of glance-manage +* Fix assertEqual arguments order +* Prevent setting swift+config locations +* Remove stale chunks when failed to update image to registry +* Use specific exceptions instead of the general MetadefRecordNotFound +* Metadef schema column name is a reserved word in MySQL +* Fix for Adopt glance.store library in Glance +* GET property which name includes resource type prefix +* Incorrect parameters passed +* g-api raises 500 error while uploading image +* Minor style tidy up in metadata code +* Metadef Property and Object schema columns should use JSONEncodedDict +* Updated from global requirements +* Use ID for namespace generated by DB +* Switch to oslo.serialization +* Switch to oslo.utils +* Imported Translations from Transifex +* Add missing metadefs for shutdown behavior +* hacking: upgrade to 0.9.x serie +* Fix bad header bug in glance-replicator +* Run tests with default concurrency 0 +* Refactor test_migrations module +* Include Metadata Defs Concepts in Dev Docs +* Open Kilo development +* Mark custom properties in image schema as non-base +* Fix missing space in user_storage_quota help message +* Fix glance V2 incorrectly implements JSON Patch'add' +* Make compute-trust.json compatible with TrustFilter +* replace dict.iteritems() with six.iteritems(dict) +* Enforce using six.text_type() over unicode() +* Update driver metadata definitions to Juno +* Remove uses of unicode() builtin +* Fixes Error Calling GET on V1 Registry +* Enabling separated sample config file generation +* Update Metadefs associated with ImagePropertiesFilter +* Fixes logging in image_import's main module +* Refactor metadef ORM classes to use to_dict instead of as_dict +* Stop using intersphinx +* Just call register_opts in tests +* Replaces assertEqual with assertTrue and assertFalse +* Block sqlalchemy-migrate 0.9.2 +* Specify the MetadefNamespace.namespace column is not nullable +* Add missing metadefs for Aggregate Filters +* Nova instance config drive Metadata Definition +* Improve OS::Compute::HostCapabilities description +* Sync glance docs with metadefs api changes +* Change open(file) to with block +* Fix CommonImageProperties missing ":" +* Fix VMware Namespace capitalization & description +* Imported Translations from Transifex +* Duplicated image id return 409 instead of 500 in API v2 +* Glance API V2 can't recognize parameter 'id' +* API support for random access to images +* Adopt glance.store library in Glance +* Adds missing db registry api tests for Tasks +* warn against sorting requirements +* Introduces eventlet executor for Glance Tasks + +2014.2.b3 +--------- + +* Glance Metadata 
Definitions Catalog - API +* ignore .idea folder in glance +* Glance Metadata Definitions Catalog - Seed +* Glance Metadata Definitions Catalog - DB +* Restrict users from downloading protected image +* Syncing changes from oslo-incubator policy engine +* Use identity_uri instead of older fragments +* Fix legacy tests using system policy.json file +* Improve Glance profiling +* Fix collection order issues and unit test failures +* Check on schemes not stores +* Replacement mox by mock +* Imported Translations from Transifex +* Log task ID when the task status changes +* Changes HTTP response code for unsupported methods +* Enforce image_size_cap on v2 upload +* Do not assume order of images +* Ensure constant order when setting all image tags +* Fix bad indentation in glance +* Use @mock.patch.object instead of mock.MagicMock +* Adding status field to image location -- scrubber queue switching +* Bump osprofiler requirement to 0.3.0 +* Fix migration on older postgres +* Fix rally performance job in glance +* Integrate OSprofiler and Glance +* Fix image killed after deletion +* VMware store: Use the Content-Length if available +* Fix RBD store to use READ_CHUNKSIZE +* Trivial fix typo: Unavilable to Unavailable +* Quota column name 'key' in downgrade script +* Do not log password in swift URLs in g-registry +* Updated from global requirements +* Use `_LW` where appropriate in db/sqla/api +* Log upload failed exception trace rather than debug +* Decouple read chunk size from write chunk size +* Enable F821 check: undefined name 'name' + +2014.2.b2 +--------- + +* Security hardening: fix possible shell injection vulnerability +* Move to oslo.db +* Catch exception.InUseByStore at API layer +* Fixes the failure of updating or deleting image empty property +* Adding status field to image location -- scrubber changes +* Also run v2 functional tests with registry +* Refactoring Glance logging lowering levels +* Set defaults for amqp in glance-registry.conf +* Fix typo in swift store message +* Add a `_retry_on_deadlock` decorator +* Use auth_token from keystonemiddleware +* Allow some property operations when quota exceeded +* Raising 400 Bad Request when using "changes-since" filter on v2 +* Moving eventlet.hubs.use_hub call up +* Adding status field to image location -- domain and APIs changes +* Add task functions to v2 registry +* Changing replicator to use openstack.common.log +* Fix unsaved exception in v1 API controller +* Pass Message object to webob exception +* Some exceptions raise UnicodeError +* Handle session timeout in the VMware store +* Some v2 exceptions raise unicodeError +* Resolving the performance issue for image listing of v2 API on server +* Switch over oslo.i18n +* Fix typo in comment +* Updated from global requirements +* Imported Translations from Transifex +* Updated from global requirements +* Raise NotImplementedError instead of NotImplemented +* Fix unsaved exception in store.rbd.Store.add() +* Fix docstrings in enforce() and check() policy methods +* Added an extra parameter to the df command +* Add CONTRIBUTING.rst +* Imported Translations from Transifex +* Use (# of CPUs) glance workers by default +* Sync processutils and lockutils from oslo with deps +* Document registry 'workers' option +* Removing translation from debug messages +* Unifies how BadStoreUri gets raised and logged +* Fix lazy translation UnicodeErrors +* Changing Sheepdog driver to use correct configuration function +* Implemented S3 multi-part upload functionality +* Log swift container creation 
+Synced jsonutils and its dependencies from oslo-incubator +* Remove user and key from location in swift +* Updated from global requirements +* Changed psutil dep. to match global requirements +* Add plugin sample for glance gate +* Fixes v2 return status on unauthorized download +* Update documentation surrounding the api and registry servers +* Do not call configure several times at startup +* Move `location`'s domain code out of glance.store +* sync oslo incubator code +* notifier: remove notifier_strategy compat support +* notifier: simplify notifier_strategy compat support +* colorizer: use staticmethod rather than classmethod +* Improved coverage for glance.api.* +* Assign local variable in api.v2.image_data + +2014.2.b1 +--------- + +* Use df(1) in a portable way +* Add test for no_translate_debug_logs hacking check +* Add hacking checks +* replace dict.iteritems() with six.iteritems(dict) +* make uploading an image as public admin only by default +* remove default=None for config options +* Bump python-swiftclient version +* TaskTest:test_fail() should use assertIsNone +* debug level logs should not be translated +* use /usr/bin/env python instead of /usr/bin/python +* Remove all mostly untranslated PO files +* Remove duplicated is_uuid_like() function +* fixed typos found by RETF rules in RST files +* Use safe way through "with" statement to work with files +* Clean up openstack-common.conf +* Removing duplicate entry from base_conf +* Use safe way through "with" statement to work with files +* Use Chunked transfer encoding in the VMware store +* Ensures that task.message is of type unicode +* Replace unicode() for six.text_type +* Prevent creation of http images with invalid URIs +* Fixed a handful of typos +* Fixes installation of test-requirements +* Add rally performance gate job for glance +* Fixes import error for run_tests.sh +* Replace assert* with more suitable asserts in unit tests +* Get rid of TaskDetails in favor of TaskStub +* Fixes "bad format" in replicator for valid hosts +* Sync latest network_utils module from Oslo +* Fixes spelling error in test name +* Uses None instead of mutables for function param defaults +* Fix various Pep8 1.5.4 errors +* Fixes Glance Registry V2 client +* Update Glance configuration sample files for database options +* To prevent remote code injection on Sheepdog store +* Added underscore function to some log messages +* Adds TaskStub class +* Updated from global requirements +* user_storage_quota now accepts units with value +* Do not allow HEAD images/detail +* Configuration doc for VMware storage backend +* Catch loading failures if transport_url is not set +* Fix Jenkins translation jobs +* Fixed the pydev error message + +2014.1.rc1 +---------- + +* Open Juno development +* Making DB sanity checking be optional for DB migration +* Fix deprecation warning in test_multiprocessing +* Do not set Location header on HTTP/OK (200) responses +* Fix swift functional test "test_create_store" +* Sanitize set passed to jsonutils.dumps() +* When re-raising exceptions, use save_and_reraise +* Imported Translations from Transifex +* Sync common db code from Oslo +* Return 405 when attempting DELETE on /tasks +* Remove openstack.common.fixture +* Enable H304 check +* VMware store.add to return the image size uploaded +* registry: log errors on failure +* Removes use of timeutils.set_time_override +* Provide explicit image create value for test_image_paginate case +* Make the VMware datastore backend more robust +* Pass Message object to webob
exception +* Detect MultiDict when generating json body +* Makes possible to enable Registry API v1 and v2 +* Do not use __builtin__ in python3 +* Updated from global requirements +* Fix swift functional test +* Provide an upgrade period for enabling stores +* API v2: Allow GET on unowned images with show_image_direct_url +* Add copyright text to glance/openstack/common/__init__.py +* Don't enable all stores by default +* Remove unused methods +* Fix glance db migration failed on 031 +* Document for API message localization + +2014.1.b3 +--------- + +* Add support for API message localization +* Add the OVA container format +* Store URI must start with the expected URI scheme +* Documentation for Glance tasks +* Remove import specific validation from tasks resource +* Remove dependency of test_v1_api on other tests +* Include Location header in POST /tasks response +* Catch exception when image cache pruning +* VMware storage backend should use oslo.vmware +* Sync common db code from Oslo +* Refactor UUID test +* Replaced calls of get(foo, None) -> get(foo) +* Use six.StringIO/BytesIO instead of StringIO.StringIO +* Replaced "...\'%s\'..." with "...'%s'..." +* Updated from global requirements +* Fix logging context to include user_identity +* Log 'image_id' with all BadStoreURI error messages +* Added underscore function to some strings +* Use 0-based indices for location entries +* Glance all: Replace basestring by six for python3 compatibility +* Delete image metadata after image is deleted +* Modify assert statement when comparing with None +* Enable hacking H301 and disable H304, H302 +* Replacement mox by mock +* Keep py3.X compatibility for urllib +* Use uuid instead of uuidutils +* Use six.moves.urllib.parse instead of urlparse +* Switch over to oslosphinx +* Fix parsing of AMQP configuration +* Add `virtual_size` to Glance's API v2 +* Add a virtual_size attribute to the Image model +* Enable F841 check +* Add support for PartialTask list +* Rename Openstack to OpenStack +* Add a mailmap entry for myself +* Sync log.py from oslo +* Add unit tests around glance-manage +* Remove tox locale overrides +* Improve help strings +* Provide explicit image create value in Registry v2 API test +* VMware Datastore storage backend +* Adding status field to image location -- DB migration +* Apply image location selection strategy +* Switch to testrepository for running tests +* Clean up DatabaseMigrationError +* Enable H302 check +* Fix misspellings in glance +* Expose image property 'owner' in v2 API +* Removes logging of location uri +* Updated from global requirements +* Remove duplicate type definition of v2 images schema +* Enable H202 check +* Modify my mailmap +* glance-manage won't take version into consideration +* Move scrubber outside the store package +* Depending on python-swiftclient>=1.6 +* Now psutil>=1.1.0 is actually on PyPI +* Fix indentation errors found by Pep8 1.4.6+ +* Add VMware storage backend to location strategy +* Log a warning when a create fails due to quota +* glance requires pyOpenSSL>=0.11 +* Imported Translations from Transifex +* Restore image status to 'queued' if upload failed +* Don't override transport_url with old configs +* Provide explicit image create value in Registry v2 Client test +* Provide explicit task create and update value in controller tests +* Enable hacking H703 check +* Sync with global requirements +* Sync oslo.messaging version with global-requirements +* Don't rewrite the NotFound error message +* Update all the glance manpages +* Use
common db migrations module from Oslo +* Check --store parameter validity before _reserve +* Sync gettextutils from Oslo +* Enable gating on H501 +* Add multifilesystem store to support NFS servers as backend +* Check first matching rule for protected properties +* Retry failed image download from Swift +* Restore image status on duplicate image upload + +2014.1.b2 +--------- + +* Tests added for glance/cmd/cache_pruner.py +* Prevent E500 when delayed delete is enabled +* Sync unhandled exception logging change from Oslo +* Check image id format before executing operations +* fix bug:range() is not same in py3.x and py2.x +* Fix the incorrect log message when creating images +* Adding image location selection strategies +* Fix inconsistent doc string and code of db_sync +* fixing typo in rst file +* Fix tmp DB path calculation for test_migrations.py +* Change assertTrue(isinstance()) by optimal assert +* add log for _get_images method +* Makes 'expires_at' not appear if not set on task +* Remove vim header +* Update the glance-api manpage +* Remove 'openstack/common/context.py' +* Allow users to customize max header size +* Decouple the config dependence on glance domain +* Fix typo in doc string +* Prevent min_disk and min_ram from being negative +* Set image size to None after removing all locations +* Update README to the valid Oslo-incubator doc +* Cleans up imports in models.py +* Sync Log levels from OSLO +* Align glance-api.conf rbd option defaults with config +* Bump hacking to 0.8 and get python 3.x compatibility +* Add config option to limit image locations +* replace type calls with isinstance +* Adding logs to tasks +* Skip unconfigurable drivers for store initialization +* Fix typo in gridfs store +* Oslo sync to recover from db2 server disconnects +* fix comments and docstrings misspelled words +* Fix call to store.safe_delete_from_backend +* Switch to Hacking 0.8.x +* assertEquals is deprecated, use assertEqual (H234) +* Consider @,! 
in properties protection rule as a configuration error +* Remove unused imports in glance +* Remove return stmt of add,save and remove method +* Migrate json to glance.openstack.common.jsonutils +* Use common Oslo database session +* Define sheepdog_port as an integer value +* Sync with oslo-incubator (git 6827012) +* Enable gating on F811 (duplicate function definition) +* Set image size after updating/adding locations +* Disallow negative image sizes +* Fix and enable gating on H306 +* Make code base E125 and E126 compliant +* Fix 031 migration failed on DB2 +* Remove the redundant code +* Correct URL in v1 test_get_images_unauthorized +* Refactor tests.unit.utils:FakeDB.reset +* Fixed wrong string format in glance.api.v2.image_data +* Empty files shouldn't contain copyright nor license +* Use uuid instead of uuidutils +* Enable H233/H301/H302 tests that are ignored at the moment +* Remove duplicate method implementations in ImageLocationsProxy +* Make Glance code base H102 compliant +* Make Glance code base H201 compliant +* Cleanup: remove unused code from store_utils +* Filter out deleted images from storage usage +* Add db2 communication error code when check the db connection +* Refine output of glance service management +* Adds guard against upload contention +* Fixes HTTP 500 when updating image with locations for V2 +* Increase test coverage for glance.common.wsgi +* Return 204 when image data does not exist +* V2: disallow image format update for active status +* Enable tasks REST API for async worker +* Cleanly fail when location URI is malformed +* Rename duplicate test_add_copy_from_upload_image_unauthorized +* Adding missing copy_from policy from policy.json +* Fix simple-db image filtering on extra properties +* Pin sphinx to <1.2 +* assertEquals is deprecated, use assertEqual instead +* Fix and enable gating on H702 +* Replace startswith by more precise store matching +* Remove unused exceptions +* Remove duplicate method __getitem__ in quota/__init__.py +* Enforce copy_from policy during image-update +* Refactor StorageQuotaFull test cases in test_quota +* remove hardcode of usage +* Added error logging for http store +* Forbidden update message diffs images/tasks/member +* Unittests added for glance/cmd/cache_manage.py +* Makes tasks owner not nullable in models.py +* Move is_image_sharable to registry api +* Remove TestRegistryDB dependency on TestRegistryAPI +* Introduce Task Info Table + +2014.1.b1 +--------- + +* Migrate to oslo.messaging +* Add config option to limit image members +* Add config option to limit image tags +* Glance image-list failed when image number exceed DEFAULT_PAGE_SIZE +* DB migration changes to support DB2 as sqlalchemy backend +* Add documentation for some API parameters +* RBD add() now returns correct size if given zero +* Set upload_image policy to control data upload +* Replace deprecated method assertEquals +* Clean up duplicate code in v2.image_data.py +* Fix docstring on detail in glance/api/v1/images.py +* Use assertEqual instead of assertEquals in unit tests +* Remove unused package in requirement.txt +* Enable F40X checking +* Verify for duplicate location+metadata instances +* Adds domain level support for tasks +* Add eclipse project files to .gitignore +* Added unit tests for api/middleware/cache_manage.py +* Fixed quotes in _assert_tables() method +* Use common db model class from Oslo +* Add upload policy for glance v2 api +* Adding an image status transition diagram for dev doc +* Add config option to limit image properties +*
Explicit listing of Glance policies in json file +* Imported Translations from Transifex +* Sync openstack.common.local from oslo +* Clean up numeric expressions with oslo constants +* Don't use deprecated module commands +* Add tests for glance/notifier/notify_kombu +* Fixes image delete and upload contention +* Log unhandled exceptions +* Add tests for glance/image_cache/client.py +* Remove lxml requirement +* Sync common db and db.sqlalchemy code from Oslo +* Update glance/opensatck/common from oslo Part 3 +* Tests added for glance/cmd/cache_cleaner.py +* glance-manage should work like nova-manage +* Adds tasks to db api +* Sync lockutils from oslo +* sync log from oslo +* Add policy style '@'/'!' rules to prop protections +* Enable H501: do not use locals() for formatting +* Remove use of locals() when creating messages +* Remove "image_cache_invalid_entry_grace_period" option +* Add unit test cases for get func of db member repo +* assertEquals is deprecated, use assertEqual +* Document default log location in config files +* Remove unused method setup_logging +* Start using PyFlakes and Hacking +* Sync units module from olso +* Fixes error message encoding issue when using qpid +* Use mock in test_policy +* Use packaged version of ordereddict +* Imported Translations from Transifex +* Glance v2: Include image/member id in 404 Response +* Replace qpid_host with qpid_hostname +* Fix Pep8 1.4.6 warnings +* Fixes content-type checking for image uploading in API v1 and v2 +* Update my mailmap +* Addition of third example for Property Protections +* Sync iso8601 requirement and fixes test case failures +* Fixes wrong Qpid protocol configuration +* Use HTTP storage to test copy file functionality +* Remove redundant dependencies in test-requirements +* Documentation for using policies for protected properties +* checking length of argument list in "glance-cache-image" command +* optimize queries for image-list +* Using policies for protected properties +* Cleanup and make HACKING.rst DRYer +* Enable tasks data model and table for async worker +* Updated from global requirements +* Add call to get specific image member +* Put formatting operation outside localisation call +* Remove unused import +* The V2 Api should delete a non existent image +* Avoid printing URIs which can contain credentials +* Remove whitespace from cfg options +* Use Unix style LF instead of DOS style CRLF +* Adding 'download_image' policy enforcement to image cache middleware +* Glance manage should parse glance-api.conf +* Fixes rbd _delete_image snapshot with missing image +* Correct documentation related to protected properties +* Update functional tests for swift changes +* Removed unsued import, HTTPError in v1/images.py +* Allow tests to run with both provenances of mox +* Glance GET /v2/images fails with 500 due to erroneous policy check +* Do not allow the same member to be added twice + +2013.2.rc1 +---------- + +* V2 RpcApi should register when db pool is enabled +* Imported Translations from Transifex +* Open Icehouse development +* Convert Windows to Unix style line endings +* Add documentation for property protections +* Adding checking to prevent conflict image size +* Fixes V2 member-create allows adding an empty tenantId as member +* Fixing glance-api hangs in the qpid notifier +* Change response code for successful delete image member to 204 +* Cache cleaner wrongly deletes cache for non invalid images +* Require oslo.config 1.2.0 final +* Use built-in print() instead of print statement +* Swift store 
add should not use wildcard raise +* Corrected v2 image sharing documentation +* Add swift_store_ssl_compression param +* Log a message when image object not found in swift +* Ensure prop protections are read/enforced in order +* Functional Tests should call glance.db.get_api +* Enclose command args in with_venv.sh +* Fix typo in config string +* Adding encryption support for image multiple locations +* Fixes typos of v1 meta data in glanceapi.rst +* Respond with 410 after upload if image was deleted +* Fix misused assertTrue in unit tests +* Convert location meta data from pickle to string +* Disallow access/modify members of deleted image +* Fix typo in protected property message +* Remove the unused mapper of image member create +* Changed header from LLC to Foundation based on trademark policies +* Implement protected properties for API v1 +* Add rbd store support for zero size image +* Remove start index 0 in range() +* Convert non-English exception message when a store loading error +* add missing index for 'owner' column on images table +* Publish recent api changes as v2.2 +* Update schema descriptions to indicate readonly +* Enable protected properties in gateway +* Property Protection Layer +* Rule parser for property protections +* Scrubber refactoring +* Fix typo in IMAGE_META_HEADERS +* Fix localisation string usage +* Notify error not called on upload errors in V2 +* Fixes files with wrong bitmode +* Remove unused local vars +* Clean up data when store receiving image occurs error +* Show traceback info if a functional test fails +* Add a storage quota +* Avoid redefinition of test +* Fix useless assertTrue +* emit warning while running flake8 without virtual env +* Fix up trivial License mismatches +* Introduced DB pooling for non blocking DB calls +* Use latest Oslo's version +* Improve the error msg of v2 image_data.py +* Fix Sphinx warning +* Remove unused import +* test failure induced by reading system config file +* Prefetcher should perform data integrity check +* Make size/checksum immutable for active images +* Remove unused var DEFAULT_MAX_CACHE_SIZE +* Implement image query by tag +* Remove unused import of oslo.config +* Code dedup in glance/tests/unit/v1/test_registry_api.py +* Add unit test for migration 012 +* Call _post_downgrade_### after downgrade migration is run +* Use _pre_upgrade_### instead of _prerun_### +* Perform database migration snake walk test correctly +* redundant conditions in paginate-query +* Refactor glance/tests/unit/v2/test_registry_client.py +* Refactor glance/tests/unit/v1/test_registry_client.py +* Improve test/utils.py +* Make sure owner column doesn't get dropped during downgrade +* image-delete fires multiple queries to delete its child entries +* glance-replicator: enable logging exceptions into log file +* Make disk and container formats configurable +* Add space in etc/glance-cache.conf +* Removes duplicate options registration in registry clients +* remove flake8 option in run_tests.sh +* Allow tests to run without installation +* Remove glance CLI man page +* Fix some logic in get_caching_iter +* Adding metadata checking to image location proxy layer +* Update .mailmap +* Migrate to PBR for setup and version code +* Interpolate strings after calling _() +* BaseException.message is deprecated since Python 2.6 +* Raise jsonschema requirement +* Text formatting changes +* Using unicode() convert non-English exception message +* ambiguous column 'checksum' error when querying image-list(v2) +* Handle None value properties in
glance-replicator +* Fixes Opt types in glance/notifier/notify_kombu.py +* Add unit test for migration 010 +* Sync models with migrations +* Rename requirements files to standard names +* Include pipeline option for using identity headers +* Adding arguments pre-check for glance-replicator +* Add v1 API x-image-meta- header whitelist +* Stub out dependency on subprocess in unit tests +* Allow insecure=True to be set in swiftclient +* Verify if the RPC result is an instance of dict +* Adds help messages to mongodb_store_db and mongodb_store_uri +* Remove support for sqlalchemy-migrate < 0.7 +* Don't rely on prog.Name for paste app +* Simulate image_locations table in simple/api.py +* Turn off debug logging in sqlalchemy by default +* Glance api to pass identity headers to registry v1 +* add doc/source/api in gitignore +* Use cross-platform 'ps' for test_multiprocessing +* Fix stubs setup and exception message formatting +* Handle client disconnect during image upload +* improving error handling in chunked upload + +2013.2.b2 +--------- + +* Adding Cinder backend storage driver to Glance +* File system store can send metadata back with the location +* index checksum image property +* removed unused variable 'registry_port' +* DB Driver for the Registry Service +* Unit tests for scrubber +* Remove references to clean arg from cache-manage +* Deleting image that is uploading leaves data +* Adding a policy layer for locations APIs +* Add/remove/replace locations from an image +* Adding multiple locations support to image downloading +* Make db properties functions consistent with the DB API +* Adds missing error msg for HTTPNotFound exception +* Allow storage drivers to add metadata to locations +* Fixes image-download error of v2 +* On deleting an image, its image_tags are not deleted +* Sync gettextutils from oslo +* Adding store location proxy to domain +* Notify does not occur on all image upload fails +* Add location specific information to image locations db +* Add custom RPC(Des|S)erializer to common/rpc.py +* use tenant:* as swift r/w acl +* Add image id to the logging message for upload +* Fix cache delete-all-queued-images for xattr +* Fix stale process after unit tests complete +* Sync install_venv_common from oslo +* Fix list formatting in docs +* Fix doc formatting issue +* Ignore files created by Sphinx build +* Use oslo.sphinx and remove local copy of doc theme +* Refactor unsupported default store testing +* Add Sheepdog store +* Fix 'glance-cache-manage -h' default interpolation +* Fix 'glance-cache-manage list-cached' for xattr +* Don't raise NotFound in simple db image_tag_get_all +* Use python module loading to run glance-manage +* Removed unused variables to clean the code +* Fixes exposing trace during calling image create API +* Pin kombu and anyjson versions +* Do not raise NEW exceptions +* Port slow, overly assertive v1 functional tests to integration tests +* Add a bit of description +* Updated documentation to include notifications introduced in Grizzly +* Make eventlet hub choice configurable +* Don't run store tests without a store!
+* Import sql_connection option before using it +* Fix for unencrypted uris in scrubber queue files +* Fix incorrect assertion in test_create_pool +* Do not send traceback to clients by default +* Use Python 3.x compatible octal literals +* Remove explicit distribute depend +* Add missing Keystone settings to scrubber conf +* Sql query optimization for image detail +* Prevent '500' error when admin uses private marker +* Replace openstack-common with oslo in HACKING.rst +* Patch changes Fedora 16 to 18 on install page +* Pass configure_via_auth down to auth plugin +* Move sql_connection option into sqlalchemy package +* Remove unused dictionary from test_registry_api.py +* Remove routes collection mappings +* updated content_type in the exception where it is missing +* python3: Introduce py33 to tox.ini +* Don't make functional tests inherit from IsolatedUnitTest +* Add a policy layer for membership APIs +* Prevent E500 when listing with null values +* Encode headers and params +* Fix pydevd module import error +* Add documentation on reserving a Glance image +* Import strutils from oslo, and convert to it +* Sync oslo imports to the latest version + +2013.2.b1 +--------- + +* Fix undefined variable in cache +* Make passing user token to registry configurable +* Respond with 412 after upload if image was deleted +* Add unittests for image upload functionality in v1 +* Remove glance-control from the test suite +* Prevent '500' error when using forbidden marker +* Improve unit tests for glance.common package +* Improve unit tests for glance.api.v1 module +* rbd: remove extra str() conversions and test with unicode +* rbd: return image size when asked +* Add qpid-python to test-requires +* tests: remove unused methods from test_s3 and test_swift +* Implement Registry's Client V2 +* RBD store uses common utils for reading file chunks +* Redirects requests from /v# to /v#/ with correct Location header +* Add documentation for query parameters +* Small change to 'is_public' documentation +* Fix test_mismatched_X test data deletion check +* Add GLANCE_LOCALEDIR env variable +* Remove gettext.install() from glance/__init__.py +* Implement registry API v2 +* Add RBD support with the location option +* Use flake8/hacking instead of pep8 +* Use RBAC policy to determine if context is admin +* Create package for registry's client +* Compress response's content according to client's accepted encoding +* Call os.kill for each child instead of the process group +* Improve unit tests for glance.common.auth module +* Convert scripts to entry points +* Fix functional test 'test_copy_from_swift' +* Remove unused configure_db function +* Don't raise HTTPForbidden on a multitenant environment +* Expand HACKING with commit message guidelines +* Redirects requests from /v# to /v#/ +* Functional tests use a clean cached db that is only created once +* Fixes for mis-use of various exceptions +* scrubber: dont print URI of image to be deleted +* Eliminate the race when selecting a port for tests +* Raise 404 while deleting a deleted image +* Fix test redifinitions +* Sync with oslo-incubator copy of setup.py and version.py +* Gracefully handle qpid errors +* Fix Qpid test cases +* Imported Translations from Transifex +* Fix the deletion of a pending_delete image +* Imported Translations from Transifex +* Imported Translations from Transifex +* Fix functional test 'test_scrubber_with_metadata_enc' +* Make "private" functions that shouldn't be exported +* Call monkey_patch before other modules are loaded +* Adding 
help text to the options that did not have it +* Improve unit tests for glance.api.middleware.cache module +* Add placeholder migrations to allow backports +* Add GridFS store +* glance-manage should not require glance-registry.conf +* Verify SSL certificates at boot time +* Invalid reference to self in functional test test_scrubber.py +* Make is_public an argument rather than a filter +* remove deprecated assert_unicode sqlalchemy attribute +* Functional tests display the logs of the services they started +* Add 'set_image_location' policy option +* Add a policy handler to control copy-from functionality +* Fallback to inferring image_members unique constraint name +* Standardize on newer except syntax +* Directly verifying that time and socket are monkey patched +* Reformat openstack-common.conf +* Fix domain database initialization +* Add tests for image visibility filter in db +* Add image_size_cap documentation +* Return 413 when image_size_cap exceeded +* Small change to exception handling in swift store +* Remove internal store references from migration 017 +* Check if creds are present and not None + +2013.1.rc1 +---------- + +* Delete swift segments when image_size_cap exceeded +* bump version to 2013.2 +* Don't print sql password in debug messages +* fixes use the fact that empty sequences are false +* Handle Swift 404 in scrubber +* Remove internal store references from migration 015 +* Pin SQLAlchemy to 0.7.x +* Add unit tests for glance.api.cached_images module +* Document the os options config for swift store +* Segmented images not deleted cleanly from swift +* Do not return location in headers +* Fix uniqueness constraint on image_members table +* Declare index on ImageMember model +* Log when image_size_cap has been exceeded +* Publish API version 2.1 +* Fix scrubber and other utils to use log.setup() +* Switch to final 1.1.0 oslo.config release +* Mark password options secret +* Fix circular import in glance/db/sqlalchemy +* Fix up publicize_image unit test +* Fix rabbit_max_retry +* Fix visibility on db image_member_find +* Fix calls to image_member_find in tests +* Characterize image_member_find +* Retain migration 12 indexes for table image_properties with sqlite +* Insure that migration 6 retains deleted image property index +* Fix check_003 method +* Ensure disk_ and container_format during upload +* Honor metadata_encryption_key in glance domain +* Fix v2 data upload to swift +* Switch to oslo.config +* Update acls in the domain model +* Refactor leaky abstractions +* Remove unused variable 'image_member_factory' +* Generate notification for cached v2 download +* A test for concurrency when glance uses sleep +* Update documentation to reflect API v2 image sharing +* v1 api image-list does not return shared images +* Cannot change locations on immutable images +* Update db layer to expose multiple image locations +* Test date with UTC instead of local timezone +* Added better schemas for image members, revised tests +* Add pre and check phases to test migration 006 +* Fix response code for successful image upload +* Remove unused imports +* Add pre and check phases to test migration 005 +* Add pre and check phases to test migration 004 +* Add PostgreSQL support to test migrations +* Enable support for MySQL with test migrations +* Set status to 'active' after image is uploaded +* Removed controversial common image property 'os_libosinfo_shortid' +* Parse JSON Schema Draft 10 in v2 Image update +* Redact location from notifications +* Fix broken JSON schemas in v2 
tests +* Add migration 021 set_engine_mysql_innodb +* Refactor data migration tests +* Fix migration 016 for sqlite +* Pin jsonschema version below 1.0.0 +* Add check for image_locations table +* Avoid using logging in signal handlers +* monkey_patch the time module for eventlet +* Remove compat cfg wrapper +* Remove unnecessary logging from migration 019 +* Fix migration 015 downgrade with sqlite +* Document db_auto_create in default config files +* Update openstack.common +* Extend the domain model to v2 image data + +2013.1.g3 +--------- + +* Add migration 20 - drop images.location +* Add migration 19 - move image location data +* Filter images by status and add visibility shared +* Update oslo-config version +* Sync latest install_venv_common.py +* Adding new common image properties +* Use oslo-config-2013.1b3 +* Add migration 18 - create the image_locations table +* Create connection for each qpid notification +* Add migration to quote encrypted image location urls +* Updates OpenStack LLC with OpenStack Foundation +* Allowing member to set status of image membership +* Add an update option to run_tests.sh +* Use install_venv_common.py from oslo +* Add status column to image_members +* Adding image members in glance v2 api +* Fix issues with migration 012 +* Add migration.py based on the one in nova +* Updated_at not being passed to db in image create +* Fix moker typo in test_notifier +* Clean dangling image fragments in filesystem store +* Sample config and doc for the show_image_direct_url option +* Avoid dangling partial image on size/checksum mismatch +* Fix version issue during nosetests run +* Adding database layer for image members domain model +* Image Member Domain Model +* Additional image member information +* Adding finer notifications +* Add LazyPluggable utility from nova +* Update .coveragerc +* Removed unnecessary code +* Use more-specific value for X-Object-Manifest header +* Allow description fields to be translated in schema +* Mark password config options with secret +* Update HACKING.rst per recent changes +* Encrypt scrubber marker files +* Quote action strings before passing to registry +* Fixes 'not in' operator usage +* Add to multi-tenant swift store documentation +* Replace nose plugin with testtools details +* Convert some prints to addDetails calls +* Rearrange db tests in prep for testr +* Stop using detailed-errors plugin for nose +* Add _FATAL_EXCEPTION_FORMAT_ERRORS global +* Fix kwargs in xattr BadDriverConfiguration exc +* Prints list-cached dates in isoformat +* Fail sensibly if swiftclient absent in test +* Initialize CONF properly in store func tests +* Ensure swift_store_admin_tenants ACLs are set +* Remove Swift location/password from messages +* Removed unnecessary code +* Removed unncessary code +* Pull in tarball version fix from oslo +* Updated image loop to not use an enumerator +* Log exception details +* Update version code from oslo +* Revert "Avoid testtools 0.9.25" +* Avoid testtools 0.9.25 +* Update glance config files with log defaults +* Sync latest cfg and log from oslo-incubator +* Make v2 image tags test not load system policy +* Replace custom tearDown with fixtures and cleanup +* Update version code from oslo +* Use testtools for unittest base class +* Stub out find_file... 
fix policy.json test issue +* Remove unused declaration in images.py +* Add import for filesystem_store_datadir config +* Update v1/images DELETE so it returns empty body +* Relax version constraint on Webob-1.0.8 +* Set content-length despite webob +* Update common openstack code from oslo-incubator +* Modify the v2 image tags to use domain model + +grizzly-2 +--------- + +* Fix broken link in docs to controllingservers +* Adding a means for a glance worker to connect back to a pydevd debugger +* Use imported exception for update_store_acls +* Fix import order nits +* Verify size in addition to checksum of uploaded image +* Use one wsgi app, one dbengine worker +* Set Content-MD5 after calling webob.Response._app_iter__set +* Modify the v2 image controller to use domain model +* Log error on failure to load paste deploy app +* Configure endpoint_type and service_type for swift +* Refactor multi-tenant swift store +* Add registry_client_timeout parameter +* Use io.BufferedIOBase.read() instead of io.BytesIO.getvalue() +* Port to argparse based cfg +* wsgi.Middleware forward-compatibility with webob 1.2b1 or later +* Allow running testsuite as root user +* Allow newer boto library versions +* Fixed image not getting deleted from cache +* Updates keystone middleware classname in docs +* v2 API image upload set image status to active +* Use auth_token middleware from python-keystoneclient +* Add domain proxies that stop unauthorized actions +* Add domain proxies that do policy.enforce checks +* Use 'notifications' as default notification queue name +* Unused variables removed +* Fixed deleted image being downloadable by admin +* Rewrite S3 functional tests +* Add store test coverage for the get_size method +* Implement get_size filesystem store method +* Add an image repo proxy that handles notifications +* Fixed Typo +* Return size as int from store get call +* Wrap log messages with _() +* Add pep8 ignore options to run_tests.sh +* Fix typo uudiutils -> uuidutils +* Make cooperative reader always support read() +* Add an image proxy to handle stored image data + +grizzly-1 +--------- + +* Allow for not running pep8 +* Refactor where store drivers are initialized +* Audit error logging +* Stop logging all registry client exceptions +* Remove unused imports +* Add note about urlencoding the sql_connection config opt +* Add an image repo to encapsulate db api access +* Add an image domain model and related helpers +* Fix simple db image_get to look like sqlalchemy +* Return 403 on images you can see but can't modify +* Fixes is_image_visible to not use deleted key +* Ensure strings passed to librbd are not unicode +* Use generate_uuid from openstack common +* Update uuidutils from openstack common +* Code cleanup: remove ImageAddResult class +* Lowering certain log lines from error to info +* Prevent infinite respawn of child processes +* Make run_tests.sh run pep8 checks on bin +* Make tox.ini run pep8 checks on bin +* Pep8 fixes to bin/glance* scripts +* Ensure authorization before deleting from store +* Port uuidutils to Glance +* Delete from store after registry delete +* Unit test remaining glance-replicator methods +* Use openstack common timeutils in simple db api +* Unit test replication_dump +* pin sqlalchemy to the 0.7 series +* DRY up image fetch code in v2 API +* Return 403 when admin deletes a deleted image +* Pull in a versioning fix from openstack-common +* Fixes deletion of invalid image member +* Return HTTP 404 for deleted images in v2 +* Update common to 18 October 2012 +* 
implements selecting version in db sync +* add command "status" to "glance-control" +* Disallow admin updating deleted images in v2 api +* Clean up is_public filtering in image_get_all +* SSL functional tests always omitted +* Fix scrubber not scrubbing with swift backend +* Add OpenStack trove classifier for PyPI +* Disallow updating deleted images +* Unit test replication_size +* Add noseopts and replace noseargs where needed to run_test.sh +* Setup the pep8 config to check bin/glance-control +* Change useexisting to extend_existing to fix deprecation warnings +* Fix fragile respawn storm test +* Fix glance filesystem store race condition +* Add support for multiple db test classes +* Don't parse commandline in filesystem tests +* Improve test coverage for replicator's REST client +* Correct conversion of properties in headers +* Add test for v2 image visibility +* change the default sql connection timeout to 60s +* Add test for v1 image visibility +* FakeAuth not always admin +* Add GLANCE_TEST_TMP_DIR environment var for tests +* Call setup_s3 before checking for disabled state +* Add insecure option to registry https client +* Clean up pep8 E128 violations +* Rename non-public method in sqlalchemy db driver +* Add image_member_update to simple db api +* Multiprocess respawn functional test fix +* Remove unnecessary set_acl calls +* Clean up pep8 E127 violations +* Remove notifications on error +* Change type of rabbit_durable_queues to boolean +* Pass empty args to test config parser +* Document api deployment configuration +* Clean up pep8 E125 violations +* Clean up pep8 E124 violations +* Ensure workers set to 0 for all functional tests +* image_member_* db functions return dicts +* Alter image_member_[update|delete] to use member id +* Add test for db api method image_member_create +* Add test for image_tag_set_all +* Add rabbit_durable_queues config option +* Remove extraneous db method image_property_update +* Update docs with modified workers default value +* Replace README with links to better docs +* Remove unused animation module +* Drop Glance Client +* Enable multi-processing by default +* Ensure glance-api application is "greened" +* Clean up pep8 E122, E123 violations +* Clean up pep8 E121 violations +* Fix scrubber start & not scrubbing when not daemon +* Clean up pep8 E502, E711 violations +* Expand cache middleware unit tests +* Change qpid_heartbeat default +* Don't WARN if trying to add a scheme which exists +* Add unit tests for size_checked_iter +* Add functional tests for the HTTP store +* Generalize remote image functional test +* Add filesystem store driver to new func testing +* Add region configuration for swift +* Update openstack-common log and setup code +* Update v2.0 API version to CURRENT +* Set new version to open Grizzly development +* Add s3_store_bucket_url_format config option +* Ensure status of 'queued' image updated on delete +* Fallback to a temp pid file in glance-control +* Separate glance cache client from main client +* Rewrite Swift store functional tests +* Raise bad request early if image metadata is invalid +* Return actual unicode instead of escape sequences in v2 +* Handle multi-process SIGHUP correctly +* Remove extraneous whitespace in config files +* Remove db auto-creation magic from glance-manage +* Makes deployed APIs configurable +* Asynchronously copy from external image source +* Sort UUID lists in test_image_get_all_owned +* Call do_start correctly in glance-control reload +* Sync some misc changes from openstack-common +* 
Sync latest cfg changes from openstack-common +* Exception Handling for image upload in v2 +* Fix cache not handling backend failures +* Instantiate wsgi app for each worker +* Require 'status' in simple db image_create +* Drop glance client + keystone config docs +* Use PATCH instead of PUT for v2 image modification +* Delete image from backend store on delete +* Document how to deploy cachemanage middleware +* Clean up comments in paste files +* WARN and use defaults when no policy file is found +* Encode headers in v1 API to utf-8 +* Fix LP bug #1044462 cfg items need secret=True +* Always call stop_servers() after having started them in tests +* Adds registry logging +* Filter out deleted image properties in v2 api +* Limit simple db image_create to known image attrs +* Raise Duplicate on image_create with duplicate id +* Expand image_create db test +* Add test for nonexistent image in db layer +* Catch pruner exception when no images are cached +* Remove bad error message in glance-cache-manage +* Add missing columns to migration 14 +* Adds notifications for images v2 +* Move authtoken config out of paste +* Add kernel/ramdisk_id, instance_uuid to v2 schema +* Tweak doc page titles +* Drop architecture doc page +* Add link to notifications docs on index +* Remove repeated image-sharing docs +* Tidy up API docs +* Log level for BaseContextMiddleware should be warn +* Raise Forbidden exception in image_get +* Activation notification for glance v1 api +* Add glance/versioninfo to MANIFEST.in +* HTTPBadRequest in v2 on malformed JSON request body +* PEP8 fix in conf.py +* Typo fix in glance: existant => existent +* Rename glance api docs to something more concise +* Drop deprecated client docs +* Clean up policies docs page +* Remove autodoc and useless index docs +* Add nosehtmloutput as a test dependency +* Remove partial image data when filesystem is full +* Add 'bytes' to image size rejection message +* Add policy check for downloading image +* Convert limiting_iter to LimitingReader +* Add back necessary import +* Adds glance registry req id to glance api logging +* Make max image size upload configurable +* Correctly re-raise exception on bad v1 checksum +* Return httplib.HTTPResponse from fake reg conn +* Add DB Management docs +* Fix auth cred opts for glance-cache-manage +* Remove unused imports +* Set proper auth middleware option for anon. 
access +* multi_tenant: Fix 'context' is not defined error +* Validate uuid-ness in v2 image entity +* v2 Images API returns 201 on image data upload +* Fixes issue with non string header values in glance client +* Fix build_sphinx setup.py command +* Updates Image attribute updated_at +* Add policy enforcment for v2 api +* Raise 400 error on POST/PUTs missing request bodies + +folsom-3 +-------- + +* Mark bin/glance as deprecated +* Return 201 on v2 image create +* Ignore duplicate tags in v2 API +* Expose 'protected' image attribute in v2 API +* Move to tag-based versioning +* Update restrictions on allowed v2 image properties +* Reveal v2 API as v2.0 in versions response +* Add min_ram and min_disk to v2 images schema +* Filter out None values from v2 API image entity +* Refactor v2 images resource unit tests +* Use container_format and disk_format as-is in v2 +* Make swift_store_admin_tenants a ListOpt +* Update rbd store to allow copy-on-write clones +* Call stop_servers() in direct_url func tests +* Drop unfinshed parts of v2 API +* Fix a couple i18n issues in glance/common/auth.py +* Sync with latest version of openstack.common.notifier +* Sync with latest version of openstack.common.log +* Sync with latest version of openstack.common.timeutils +* Sync with latest version of openstack.common.importutils +* Sync with latest version of openstack.common.cfg +* Allows exposing image location based on config +* Do not cache images that fail checksum verfication +* Omit deleted properties on image-list by property +* Allow server-side validation of client ssl certs +* Handle images which exist but can't be seen +* Adds proper response checking to HTTP Store +* Use function registration for policy checks +* fix the qpid_heartbeat option so that it's effective +* Add links to image access schema +* ^c shouldn't leave incomplete images in cache +* uuid is a silly name for a var +* Support master and slave having different tokens +* Add a missing header strip opportunity +* URLs to glance need to be absolute +* Use with for file IO +* Add swift_store_admin_tenants option +* Update v1/v2 images APIs to set store ACLs +* Use event.listen() instead of deprecated listeners kwarg +* Store context in local thread store for logging +* Process umask shouldn't allow world-readable files +* Make TCP_KEEPIDLE configurable +* Reject rather than ignore forbidden updates +* Raise HTTPBadRequest when schema validation fails +* Expose 'status' on v2 image entities +* Simplify image and access_record responses +* Move optional dependencies from pip-requires to test-requires +* Fix dead link to image access collection schema +* Add in missing image collection schema link +* Drop static API v2 responses +* Include dates in detailed image output +* Update image caching middleware for v2 URIs +* Ensure Content-Type is JSON-like where necessary +* Have non-empty image properties in image.delete payload +* Add Content-MD5 header to V2 API image download +* Adds set_acls function for swift store +* Store swift images in separate containers +* Include chunk_name in swift debug message +* Set deleted_at field when image members and properties are deleted +* Use size_checked_iter in v2 API +* Honor '--insecure' commandline flag also for keystone authentication +* Make functional tests listen on 127.0.0.1 +* Adds multi tenant support for swift backend +* Provide stores access to the request context +* Increase wait time for test_unsupported_default_store +* Match path_info in image cache middleware +* Dont show stack 
trace on command line for service error +* Replace example.com with localhost for some tests +* Fix registry error message and exception contents +* Move checked_iter from v1 API glance.api.common +* Support zero-size image creation via the v1 API +* Prevent client from overriding important headers +* Updates run_tests.sh to exclude openstack-common +* Use openstack.common.log to log request id +* Update 'logging' imports to openstack-common +* Make get_endpoint a generic reusable function +* Adds service_catalog to the context +* Add openstack-common's local and notifier modules +* Making docs pretty! +* Removing 'Indices and tables' heading from docs +* Remove microseconds before time format conversion +* Add bin/glance-replicator to scripts in setup.py +* Initial implementation of glance replication +* Generate request id and return in header to client +* Reorganize context module +* Add openstack.common.log +* Ignore openstack-common in pep8 check +* Keystone dep is not actually needed +* Report size of image file in v2 API +* Expose owner on v2 image entities +* Add function tests for image members +* Allow admin's to modify image members +* Allow admins to share images regardless of owner +* Improve eventlet concurrency when uploading/downloading +* Simplify v2 API functional tests + +folsom-2 +-------- + +* Fix IndexError when adding/updating image members +* Report image checksum in v2 API +* Store properties dict as list in simple db driver +* Use PyPI for swiftclient +* Refactor pagination db functional tests +* Combine same-time tests with main db test case +* Add retry to server launch in respawn test +* Reorder imports by full import path +* Adds /v2/schemas/images +* Implement image filtering in v2 +* Include all tests in generated tarballs +* Allow CONF.notifier_strategy to be a full path +* Add image access records schema for image resources +* Remove image members joinedload +* Clean up image member db api methods +* Retry test server launch on failure to listen +* Make image.upload notification send up2date metadata +* Added schema links logic to image resources +* Simplify sqlalchemy imports in driver +* Reduce 'global' usage in sqlalchemy db driver +* Standardize logger instantiation +* Add link descriptor objects to schemas +* Fix exception if glance fails to load schema +* Move the particulars of v2 schemas under v2 +* Remove listing of image tags +* Set up Simple DB driver tests +* Trace glance service on launch failure +* Revert "Funnel debug logging through nose properly." 
+* Capture logs of failing services in assertion msg +* Remove some more glance-cache PasteDeploy remnants +* Fix typo of conf variable in config.py +* Remove unused imports in db migrations +* Increase timeout to avoid spurious test failures +* adds missing import and removes empty docstring +* Convert db testing to use inheritance +* Clean up .pyc files before running tests +* make roles case-insensitive +* Funnel debug logging through nose properly +* Fix typo of swift_client/swiftclient in store_utils +* Stop revealing sensitive store info +* Avoid thread creation prior to service launch +* Don't use PasteDeploy for scrubber and cache daemons +* Remove some unused glance-cache-queue-image code +* Implement pagination and sorting in v2 +* Turn off SQL query logging at log level INFO +* Default db_auto_create to False +* Use zipballs instead of git urls +* Add metadata_encryption_key to glance-cache.conf +* Fix help messages for --debug +* Use python-swiftclient for swift store +* Fix to not use deprecated response.environ any more +* Import db driver through configuration +* Move RequestContext.is_image_* methods to db layer +* Begin replacement of sqlalchemy driver imports +* webob exception incorrectly used in v1 images.py +* Add tests and simplify GlanceExceptions +* Update default values for known_stores config +* Remove the conf passing PasteDeploy factories +* Port remaining code to global conf object +* Made changes to adhere to HACKING.rst specifications +* Use openstack-common's policy module +* Re-add migrate.cfg to tarball +* Implements cleaner fake_request +* Create 'simple' db driver +* Glance should use openstack.common.timeutils +* Clean up a few ugly bits from the testing patch +* Fix typo in doc +* Add cfg's new global CONF object +* fix side effects from seekability test on input file +* Just use pure nosetests +* Fix coverage jobs. Also, clean up the tox.ini +* Move glance.registry.db to glance.db +* Glance should use openstack.common.importutils +* Add read-only enforcement to v2 API +* Add a base class for tests +* Expose tags on image entities in v2 API +* Add additional info. 
to image.delete notification +* Expose timestamps on image entities in v2 API +* Sync with latest version of openstack.common.cfg +* Enable anonymous access through context middleware +* Add allow_additional_image_properties +* Fix integration of image properties in v2 API +* Lock pep8 at v1.1 +* Lock pep8 to version 0.6.1 in tox.ini +* Fail gracefully if paste config file is missing +* Add missing files to tarball +* Remove unused imports in setup.py +* Adds sql_ config settings to glance-api.conf +* Correct format of schema-image.json +* Fix paste to correctly deploy v2 API +* Add connection timeout to glance client +* Leave behind sqlite DB for red functional tests +* Support DB auto-create suppression +* Fix glance-api process leak in respawn storm test +* Stubout httplib to avoid actual http calls +* Backslash continuation removal (Glance folsom-1) +* Implement image visibility in v2 API +* Add min_ram and min_disk to bin/glance help +* Implements blueprint import-dynamic-stores +* Add credential quoting to Swift's StoreLocation +* Combine v2 functional image tests +* Simplify JSON Schema validation in v2 API +* Expose deployer-specific properties in v2 API +* Test that v2 deserializers use custom schemas +* Load schema properties when v2 API starts +* Support custom properties in schemas for v2 API +* Fix tiny format string nit in log message +* Fixes bug 997565 +* Allow chunked image upload in v2 API +* wsgi: do not respawn on missing eventlet hub +* Implement v2 API access resource +* Disallow image uploads in v2 API when data exists +* Implement v2 API image tags +* Use ConfigOpts.find_file() for policy and paste +* Implement image data upload/download for v2 API +* Use sdist cmdclass from openstack-common +* glance-api: separate exit status from message +* Update noauth caching pipeline to use unauth-ctx +* Return 204 from DELETE /v2/images/ +* Add localization catalog and initial po files to Glance. 
Fix bug 706449 +* Add /v2 to sample glance-api-paste.ini +* Basic functionality of v2 /images resource +* Split noauth context middleware into new class +* Add -c|--coverage option to run_tests.sh +* Convert glance to glance/openstack/common/setup.py +* Update glance to pass properly tenant_name +* Cleanup authtoken examples +* Support for directory source of config files +* Support conf from URL's with versions +* Auto generate AUTHORS file for glance +* Integrate openstack-common using update.py +* Fixes LP #992096 - Ensure version in URL +* Begin functional testing of v2 API +* Fixes LP #978119 - cachemanagement w/o keystone +* Omit Content-Length on chunked transfer +* Fix content type for qpid notifier +* Remove __init__.py from locale dir +* Fix i18n in glance.notifier.notify_kombu +* Override OS_AUTH_URL when running functional tests +* remove superfluous 'pass' +* fix bug lp:980892,update glance doc +* Add a space to fix minor typo in glance help +* Suppress pagination on non-tty glance index +* Kill glance-api child workers on SIGINT +* Ensure swift auth URL includes trailing slash +* add postgresql support to test_migrations +* 012_id_to_uuid: Also convert ramdisk + kernel ids +* API v2 controller/serialization separation +* search for logger in PATH +* Set install_requires in setup.py +* Minor grammar corrections +* Bootstrapping v2 Image API implementation +* Fix db migration 12 +* Remove unused imports +* Reorganize pipelines for multiple api versions +* Skip test depending on sqlite3 if unavailable +* Defaulted amazon disk & container formats +* Compile BigInteger to INTEGER for sqlite +* Updated RST docs on containers, fewer references to OVF format +* rename the right index +* Reject excessively long image names +* Test coverage for update of image ownership +* Add MySQLPingListener() back +* Add support for auth version 2 +* Run version_control after auto-creating the DB +* Allow specifying the current version in 'glance-manage version_control' +* Publish v2 in versions responses +* Allow yes-like values to be interpreted as bool +* Support owner paramater to glance add +* Adding versioned namespaces in test dir +* Typo +* Ensure functional db connection in configure_db() +* Set content_type for messages in Qpid notifier +* Avoid leaking secrets into config logging +* Fixes lp959670 +* Send output of stty test cmd to stderr +* Use unique per-test S3 bucket name +* Specify location when creating s3 bucket +* Open Folsom +* Update 'bin/glance add' docstring *_format options +* Ensure all unauthorized reponses return 403 +* Avoid leaking s3 credentials into logs +* Avoid glance-logcapture displaying empty logs +* Add 'publicize_image' policy +* Fixed db conn recovery issue. Fixes bug 954971 +* tox tests with run_tests.sh instead of nosetests +* Don't use auth url to determine service protocol +* Use tenant/user ids rather than names +* Update context middleware with supported headers +* Fixes LP #957401 - Remove stray output on stderr +* check connection in Listener. refer to Bug #943031 +* Avoid tests leaking empty tmp dirs +* Remove keystone.middleware.glance_auth_token +* Updating version of Keystone +* Add policy checks for cache manage middleware +* nose plugin to capture glance service logs +* Add new UnexpectedStatus exception +* Do not error when service does not have 'type' +* Disambiguates HTTP 401 and HTTP 403 in Glance. 
Fixes bug 956513 +* Add admin_role option +* Remove references to admin_token +* Remove glance-cache-queue-image +* Remove dependency on apiv1app from cachemanage +* Return 403 when policy engine denies action +* Add error checking to get_terminal_size +* Well-formed exception types for 413 & 503 +* Ensure copy and original image IDs differ +* Include babel.cfg and glance.pot in tarballs +* Updating authentication docs +* General cleanup +* General docs cleanup +* Remove todolist from docs +* Add note about cache config options +* Change CLIAuth arg names +* Retry sendfile on EAGAIN or EBUSY +* Add module name to ClientException +* Update cli docs +* Remove 'community' doc page +* Removing registry spec from docs +* Fixes LP#934492 - Allow Null Name +* Refresh SSL cfg after parsing service catalog entry +* Fix typo in tox.ini +* Glance cache updates to support Keystone Essex +* updates man page for glance-scrubber. this time with extra pep8 scrubbing powers. Fixes bug 908803 +* Update tox.ini for jenkins +* Replaced use of webob.Request.str_param +* Update paste file to use service tenant +* Update bin/glance to allow for specifying image id +* Fix deprecated warnings +* Remove trailing whitespaces in regular file +* add git commit date / sha1 to sphinx html docs +* Glance skip prompting if stdin isn't a tty +* Allow region selection when using V2 keystone +* Disallow file:// sources on location or copy-from +* Progress bar causes intermittent test failures +* Added first step of babel-based translations +* Complete fix for modification of unowned image +* Fix update of queued image with location set +* Support copy-from for queued images +* Add checksum to an external image during add +* Align to jenkins tox patterns +* Fix MANIFEST.in to include missing files +* Fix exception name +* Correct kernel/ramdisk example in docs +* Create sorting/pagination helper function +* Support new image copied from external storage +* blueprint progressbar-upload-image +* Avoid TestClient error on missing '__mro__' attr +* disk/container_format required on image activate +* Require container & disk formats on image create +* Support non-UTC timestamps in changes-since filter +* Return 503 if insufficient permission on filestore +* Adds README.rst to the tarball +* Ensure StorageFull only raised on space starvation +* Require auth URL if keystone strategy is enabled +* 003_add_disk_format.py: Avoid deadlock in upgrade +* Function uses 'msg' not 'message' +* Fix paging ties +* Ensure sane chunk size when pysendfile unavailable +* New -k/--insecure command line option +* Add a generic tox build environment +* Fix pep8 error +* Update Authors file +* Implement blueprint add-qpid-support +* Include glance/tests/etc +* Don't fail response if caching failed +* Force auth_strategy=keystone if --auth_url or OS_AUTH_URL is set +* Make Glance work with SQLAlchemy 0.7 +* Use sendfile() for zero-copy of uploaded images +* Respawn glance services on unexpected death +* Blueprint cli-auth: common cli args +* Prep tox config for jenkins builds +* Get rid of DeprecationWarning during db migration +* Add --capture-output option to glance-control +* Add filter validation to glance API +* Fixes LP 922723 +* Typofix is_publi -> is_public +* Add --await-child option to glance-control +* Fix Bug #919255 +* Cap boto version at 2.1.1 +* Simplify pep8 output to one line per violation +* Handle access restriction to public unowned image +* Check service catalogue type rather than name +* Restore inadvertantly dropped lines +* 
Include the LICENSE file in the tarball +* Change xattr usage to be more broadly compatible +* Fix mixed usage of 's' and 'self' +* Don't force client to supply SSL cert/key +* Few small cleanups to align with Nova + +essex-3 +------- + +* Adds documentation for policy files +* Client.add_image() accepts image data as iterable +* More flexible specification of auth credentials +* glance-api fails fast if default store unsupported +* Bug #909574: Glance does not sanity-check given image size on upload +* glance-control need not locate a server's config file (lp#919520) +* Bug#911599 - Location field wiped on update +* Return 400 if registry returns 400 +* Set url's on AuthBadRequest exceptions +* Add policy checking for basic image operations +* Swallow exception on unsupported image deletion +* Ensure we only send a single content-type header +* Multi-process Glance API server support +* Set size metadata correctly for remote images +* Make paste.ini file location configurable +* Avoid the need for users to manually edit PasteDeploy config in order to switch pipelines +* Split out paste deployment config from the core glance *.conf files into corresponding *-paste.ini files +* Fixes LP Bug#913608 - tests should be isolated +* Set correct Content-Length on cached remote images +* Implement retries in notify_kombu +* Return correct href if bind_host is 0.0.0.0 +* Remove assertDictEqual for python 2.6 compatibility +* Add optional revision field to version number +* LP Bug#912800 - Delete image remain in cache +* Add notifications for sending an image +* Bug #909533: Swift uploads through Glance using ridiculously small chunks +* Add Fedora clauses to the installing document +* Remove doc/Makefile +* Fixes incorrect URI scheme for s3 backend +* Add comments for swift options in glance-api.conf +* Split notification strategies out into modules +* fix bug 911681 +* Fix help output for inverse of BoolOpt +* PEP8 glance cleanup +* Add more man pages +* Set execute permissions on glance-cache-queue-image +* Add a LICENSE file +* Add ability to specify syslog facility +* Install an actual good version of pip +* Bug #909538: Swift upload via Glance logs the password it's using +* Add tox.ini file +* Synchronize notification queue setup between nova and glance +* Fixes keystone auth test failures in python 2.6 +* Removed bin/glance's TTY detection +* Fixes request with a deleted image as marker +* Adds support for protecting images from accidental deletion +* Fix for bug 901609, when using v2 auth should use /v2.0/tokens path +* Updated glance.registry.db for bug 904863 +* Removing caching cruft from bin/glance +* Fixes LP Bug#901534 - Lost properties in upload +* Update glance caching middleware so doesn't try to process calls to subresources. 
Fixes LP bug #889209 +* Ensure functional tests clean up their images +* Remove extra swift delete_object call +* Add missing files to tarball +* Allow glance keystone unit tests to run with essex keystone +* Convert glance to use the new cfg module +* Add new cfg module +* Lock keystone to specific commit in pip-requires +* Add the missing column header to list-cached +* Rename 'options' variables to 'conf' +* Add generic PasteDeploy app and filter factories +* Secondary iteration of fix for bug 891738 +* Rename .glance-venv to .venv +* Fix for bug 900258 -- add documentation for '--url' glance cli option +* Add --url option to glance cli +* Fixes LP Bug#850377 +* Fixes LP Bug#861650 - Glance client deps +* Added some examples for "glance add" +* Bug#894027: use correct module when building docs +* Adds option to set custom data buffer dir +* Fix bug 891738 +* Added missing depend on nosexcover +* Removed some cruft +* Fixes LP Bug#837817 - bin/glance cache disabled +* Separating add vs general store configuration +* Fixes LP Bug#885341 - Test failure in TestImageCacheManageXattr +* Making prefetcher call create_stores +* Fix handle get_from_backend returning a tuple +* Casting foreign_keys to a list in order to index into it +* Using Keystone's new port number 35357 +* Adding admin_token to image-cache config +* Removing assertGreaterEqual +* Correcting image cleanup in cache drivers +* Adding tests to check 'glance show ' format +* Update 'glance show' to print a valid URI. Fixes bug #888370 +* Gracefully handle image_cache_dir being undefined +* Remove unused versions pipeline from PasteDeploy config +* Allow glance-cache-* find their config files +* Add some test cases for glance.common.config +* Fix name error in cache middleware +* Check to make sure the incomplete file exists before moving it during rollback. Fixes bug #888241 +* Fix global name 'sleep' is not defined in wsgi.py. 
Fixes bug #888215 +* Fixes LP Bug#878411 - No docs for image cache +* Fix typo in the cached images controller + +essex-1 +------- + +* load gettext in __init__ to fix '_ is not defined' +* Adds option to encrypt 'location' metadata +* Fix LP Bug#885696 two issues with checked_iter +* Fix Keystone API skew issue with Glance client +* Fixed test failure in Python 2.6 +* Glance redirect support for clients +* Fixes LP Bug#882185 - Document Swift HTTPS default +* Fixes LP Bug#884297 - Install docs should have git +* Add "import errno" to a couple of files +* Consolidate glance.utils into glance.common.utils +* Correcting exception handling in glance-manage +* More cache refactoring - Management Middleware +* Fixes LP Bug#882585 - Backend storage disconnect +* Convert image id value to a uuid +* Remove 'location' from POST/PUT image responses +* Removing glance-upload +* Adds Driver Layer to Image Cache +* Removed 'mox==0.5.0' and replaced with just 'mox' in tools/pip-requires +* Removing duplicate mox install in pip-requires +* Add .gitreview config file for gerrit +* Making TCP_KEEPIDLE socket option optional +* Overhauls the image cache to be truly optional +* Fixing functional tests that require keystone +* Fixes LP Bug#844618 - SQLAlchemy errors not logged +* Additions to .gitignore +* Better document using Glance with Keystone +* Fixes LP Bug#872276 - small typo in error message +* Adds SSL configuration params to the client +* Increases test coverage for the common utils +* Refactoring/cleanup around our exception handling +* Port Authors test to git +* Add RBD store backend +* Fixes LP Bug#860862 - Security creds still shown +* Extract image members into new Glance API controller +* Refactoring registry api controllers +* Returning functionality of s3 backend to stream remote images +* Make remote swift image streaming functional +* Improving swfit store uri construction +* Fixes LP Bug #850685 +* Do not allow min_ram or min_disk properties to be NULL and if they are None, make sure to default to 0. Fixes bug 857711 +* Implementing changes-since param in api & registry +* Documenting nova_to_os_env.sh tool +* Added min_disk and min_ram properties to images Fixes LP Bug#849368 +* Fixing bug 794582 - Now able to stream http(s) images +* Fixes LP Bug#755916 - Location field shows creds +* Fixes LP Bug #804429 +* Fixes Bug #851216 +* Fixes LP Bug #833285 +* Fixes bug 851016 +* Fix keystone paste config for functional tests +* Updating image status docs +* * Scrubber now uses registry client to communicate with registry * glance-api writes out to a scrubber "queue" dir on delete * Scrubber determines images to deleted from "queue" dir not db +* Fixes LP Bug#845788 +* Open Essex +* Remove PWD from possible config_file_dirs +* Update paste config files with keystone examples. 
see ticket: lp839559 +* Adding Keystone support for Glance client +* Fix cached-images API endpoint +* Bug fix lp:726864 +* Fixes Bug: lp825024 +* Add functional tests +* Switch file based logging to WatchedFileHandler for logrotate +* Fixes LP Bug #827660 - Swift driver fail 5G upload +* Bug lp:829064 +* Bug lp:829654 +* Update rfc.sh to use 'true' +* Addresses glance/+spec/i18n +* Addresses glance/+spec/i18n +* Add rfc.sh for git review +* Add support for shared images +* Add notifications for uploads, updates and deletes +* Bug Fix lp:825493 +* Bug fix lp:824706 +* Adds syslog support +* Fixes image cache enabled config +* Improves logging by including traceback +* Addresses glance/+spec/i18n +* casting image_id to int in db api to prevent false matching in database lookups +* Addresses Bug lp:781410 +* Removes faked out datastore entirely, allowing the DB API to be unit tested +* Consolidates the functional API test cases into /glance/tests/functional/test_api.py, adds a new Swift functional test case, verified that it works on Cloud Files with a test account +* breaking up MAX_ITEM_LIMIT and making the new values configurable +* Add @skip_if_disabled decorator to test.utils and integrate it into the base functional API test case. The S3 functional test case now uses test_api.TestApi as its base class and the setUp() method sets the disabled and disabled_message attributes that the @skip_if_disabled decorator uses +* Adds swift_enable_snet config +* Fixes bug lp:821296 +* Detect python version in install_venv +* Implemented @utils.skip_test, @utils.skip_unless and @utils.skip_if functionality in glance/test/utils.py. Added glance/tests/unit/test_skip_examples.py which contains example skip case usages +* Changed setup.py to pull version info from git +* Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3 +* Make sure we're passing the temporary file in a read-mode file descriptor to S3 +* Removes the call to webob.Request.make_body_seekable() in the general images controller to prevent the image from being copied into memory. 
In the S3 controller, which needs a seekable file-like object when calling boto.s3.Key.set_contents_from_file(), we work around this by writing chunks of the request body to a tempfile on the API node, then stream this tempfile to S3 +* - removed curl api functional tests - moved httplib2 api functional tests to tests/functional/test_api.py +* merging trunk +* Make tests a package under glance +* removing curl tests and moving httplib2 tests +* Move tests under the glance namespace +* Add filter support to bin/glance index and details calls +* merging trunk +* Update registry db api to properly handle pagination through sorted results +* Our code doesn't work with python-xattr 0.5.0, and that's the version installed in RH/Centos :( Andrey has updated the RPM config to specify 0.6.0, and this does the same to pip-requires +* Replaced occurances of |str(e)| with |"%s" % e| +* First round of refactoring on stores +* Remove expected_size stuff +* Make calling delete on a store that doesn't support it raise an exception, clean up stubout of HTTP store and testing of http store +* adding sort_key/sort_dir to details +* merging lp:~rackspace-titan/glance/registry-marker-lp819551 +* adding sort_key/sort_dir params +* adding --fixes +* adding complex test cases to recreate bug; updating db api to respect marker +* Add configuration check for Filesystem store on configure(), not every call to add() +* Refactor S3 store to make configuration one-time at init versus every method call invocation +* Refactor Swift store to make configuration one-time at init versus every method call invocation +* Forgot to add a new file.. +* Refactors stores to be stateful: +* Make sure xattr>=0.6.0 in pip-requires +* updating documentation +* making limit option an integer +* updating broken tests +* adding limit/marker to bin/glance details call +* adding limit/marker params to bin/glance index +* merging trunk +* Use of "%default" in help string does not work, have to use "%(default)s". Per the 4th example http://docs.python.org/dev/library/argparse.html#prog +* Added nose-exclude to pip-requires +* Installed nose-exclude, ./run_tests.sh --unittests-only add '--exclude-dir=tests/functional' to NOSEARGS +* This one has been bugging me for a while, finally found out how to use the local default variable in the help string +* adding --fixes to commit +* Replaced occurances of |str(e)| with |"%s" % e| +* Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact +* Fix for boto1.9b issue 540 (http://code.google.com/p/boto/issues/detail?id=540) +* Remove unnecessary hashlib entry in pip-requires +* Add myself to Authors (again) +* hashlib exists all of the way back to python 2.5, there's no need to install an additional copy +* Adds image_cache_enabled config needed to enable/disable the image-cache in the glance-api +* Add more unit tests for URI parsing and get_backend_class() (which is going away in refactor-stores branch, but oh well..) +* Added unit tests for swift_auth_url @property. It was broken. startwith('swift+http') matches swift+https first +* Don't tee into the cache if that image is already being written +* Re-add else: raise +* Final fixes merging Rick's swift_auth_url @property with previous URI parsing fixes that were in the S3 bug branch.. 
+* merge trunk +* This updates the pep8 version in pip-requires and updates run_tests.sh to provide a '-p' option that allows for just pep8 to be run +* Adding back image_cache_enabled config option for glance-api +* Don't tee same image into cache multiple times +* Fixes two things: +* adding run_tests.sh -p +* PEP8 whitespace fix +* Swift client library needs scheme +* Add tests for bad schemes passed to get_backend_class() +* Add tests for bad URI parsing and get_backend_class() +* Include missing bin/glance-scrubber in tarball +* Include bin/glance-scrubber in tarball binaries +* One more auth_tok-related change, to make it easier for nova to use the client without violating any abstraction boundaries +* Add fix for Bug #816386. Wait up to 5 min for the image to be deleted, but at least 15 seconds +* remove superfluous if statement +* Loop up to 5 min checking for when the scrubber deletes +* Typo in error condition for create_bucket_on_put, make body seekable in req object, and remove +glance from docs and configs +* Add functional test case for checking delete and get of non-existing image +* New local filesystem image cache with REST managment API +* PEP8 Fixes +* Using DELETE instead of POST reap_invalid, reap_stalled +* Forgot to put back fix for the get_backend_class problem.. +* Adding logging if unable to delete image cache file +* Add test case for S3 s3_store_host variations and fixes for URL bug +* Ensure image is active before trying to fetch it +* Boy, I'm an idiot...put this in the wrong branch directory.. +* Handling ZeroDivision Error +* Using alternate logging syntax +* Missing import of common.config in S3 driver +* Tighten up file-mode handling for cache entry +* Adding request context handling +* Merging trunk +* Fixed review stuff from Brian +* Allow delaying the actual deletion of an image +* have the scrubber init a real context instead of a dict +* merge trunk +* Adds authentication middleware support in glance (integration to keystone will be performed as a piece of middleware extending this and committed to the keystone repository). Also implements private images. No limited-visibility shared image support is provided yet +* Take out extraneous comments; tune up doc string; rename image_visible() to is_image_visible(); log authorization failures +* use runs_sql instead of hackery +* Updating setup.py per bin/image_cache removal +* Removing bin/image_cache directory +* Removing cache enabled flag from most confs +* Removing imagecache from default WSGI pipeline +* Allow plugging in alternate context classes so the owner property and the image_visible() method can be overridden +* Make a context property 'owner' that returns the tenant; this makes it possible to change the concept of ownership by using a different context object +* Unit tests for the context's image_visible() routine +* We don't really need elevate().. 
+* Merging in adding_image_caching +* Importing module rather than function +* PEP 8 fixes +* Adding reap stalled images +* Returning number of files deleted by cache-clear +* Returning num_reaped from reap_invalid +* Moving bin to image_cache/ +* Fixing comment +* Adding reaper script +* Adding percent done to incomplete and invalid image listing +* Renaming tmp_path to incomplete_path +* Renaming tmp_path to incomplete_path +* Renaming purge_all clear, less elegant variation +* Refactor to use lookup_command, so command map is used in one place +* Refactoring to use same command map between functions +* Renaming to cache-prefetching +* Renaming to cache-prefetch +* Renaming to cache-purge-all +* Renaming to cache-purge +* Renaming to cache-invalid +* Beginning to normalize names +* Refactoring out common code +* Refactoring prefetch +* Refactoring purge +* Refactoring purge_all +* Refactoring listing of prefetching images +* Using querystring params for invalid images +* Link incoming context with image owner for authorization decisions +* How in the world did I manage to forget this? *sigh* +* Make tests work again +* merge trunk +* pull-up from trunk +* This patch: +* PEP8 nit +* Added fix for Bug #813291: POST to /images setting x-image-meta-id to an already existing image id causes a 500 error +* One more try.. +* Yet another attempt to fix URIs +* Add in security context information +* Moving cached image list to middleware +* Initial work on moving cached_images to WSGI middleware +* API is now returning a 409 error on duplicate POST. I also modified the testcase to expect a 409 response +* Add owner to database schema +* Fix URI parsing on MacOSX - Python 2.6.1 urlparse bugs +* Namespacing xattr keys +* PEP8 fixes +* Added 3 tests in tests/functional/test_httplib2_api.py to validate is_public filtering works +* left in 2 fixes.. removing redundant fix +* If meta-data contains an id field, pass it to _image_update() +* Adding functional test to show bug #813291 +* fixed an inline comment +* removed pprint import, and added check for other 3 images to make sure is_public=True +* Added 3 tests to validate is_public filtering works +* Completed rewrite of tests/functional/test_curl_api.py using httplib2 +* Changes the default filtering of images to only show is_public to actually use a default filter instead of hard coding. This allows us to override the default behavior by passing in a new filter +* removing pprint import +* completed rewrite of test_ordered_images().. this completes rewrite of test_curl_api using httplib2 +* test_ordered_images() missing closing self.stop_servers() +* finished rewrite of test_filtered_images() +* add tests and make None filters work +* Change default is_public = True to just set a default filter instead of hard coding so it can be overridden +* make the tests work with new trunk +* merge trunk +* Refactoring PrettyTable so it doesn't print the lines itself +* Adding pruner and prefetcher to setup.py +* Removing extraneous text +* PEP 8 fixes +* Adding prefetching list to bin/glance +* More cleanups +* Adding prefetching of images +* Overhaul the way that the store URI works. 
We can now support specifying the authurls for Swift and S3 with either an http://, an https:// or no prefix at all +* Typo fix +* Removing test exception +* PEP 8 fixes +* Adding Error to invalid cache images +* Show invalid images from bin/glance +* Improving comments +* Cleaning up cache write +* Moving xattrs out to utils +* Clip and justify columns for display +* Including last accessed time in cached list +* Adding more comments +* Adding hit counter +* Pruning invalid cache entries after grace period +* Clear invalid images when purging all cached images +* Rollback by moving images to invalid_path +* Improving comments +* PEP8 fixes +* Adding cached image purge to bin/glance +* Adding purge all to bin/glance +* Adding catch_error decorator to bin/glance +* Adding 'cached' command to bin/glance +* Write incomplete files to tmp path +* Adding purge_all, skip if set if xattrs arent supported +* Adding purge cache API call +* Adding API call to query for cache entries +* Create bin/glance-pruner +* Adding image_caching +* rewrote test_traceback_not_consumed(), working on test_filtered_images() +* Only changes is reverting the patch that added migration to configure_db() and resets the in-memory SQLite database as the one used in functional testing. Yamahata's commits were unmodified.. +* Reverts commit that did db migration during configure_db() and makes functional tests use in-memory database again. The issues we were seeing had to do with the timeout not being long enough when starting servers with disk-based registry databases and migrate taking too long when spinning up the registry server... this was shown in almost random failures of tests saying failure to start servers. Rather than increase the timeout from 3 seconds, I reverted the change that runs migrate on every startup and cut the total test duration down about 15 seconds +* merged glance trunk +* updated Authors +* Resolves bug lp:803260, by adding a check to ensure req.headers['Accept'] exists before it gets assigned to a variable +* run_tests.py: make test runner accepts plugins +* run_tests.py: make run_tests.py work +* Fix the poor error handling uncovered through bug in nova +* Added stop_servers() to the end of the test cases +* adding testing & error handling for invalid markers +* removed pprint import +* removed extra space on test_queued_process_flow method definition +* removing commented out line +* merged in lp:~jshepher/glance/functional_tests_using_httplib2_part2 +* applied requested fix in merge-prop +* Removing ordering numbers from the test cases, per jay pipes +* cleaning up the 'no accept headers' test cases. this should fail until Bug lp:803260 is resolved +* Cleaning up docstring spacing +* rewrite of test_size_greater_2G_mysql from test_curl_api.py using httplib2. All tests currently pass +* completed rewrite of test_003_version_variations. bug lp:803260 filed about step #0, and noted as a comment in code +* Fix for bug 803188. This branch also proposed for merging into trunk +* miss-numbering of steps +* fixing pep8 violation +* Added a check to ensure req.headers['Accept'] exists before it gets assigned to a variable. All unit/functional tests pass with this patch +* half way done with rewrite of test_003_version_variations.. 
step #0 causes a 500 error unless we supply an Accept header +* Prevent query params from being set to None instead of a dict +* removing rogue print +* fixing issue where filters are set to None +* Backport for bug 803055 +* rewrote test_002_queued_process_flow from test_curl_api.py, all 6 steps pass against trunk revno:146 +* Backport for bug 803055 +* Prevent clients from adding query parameters set to None +* ignores None param values passed to do_request +* cleaning up docstrings +* merging trunk +* docstring +* Added sort_key and sort_dir query params to apis and clients +* fixing one last docstring +* docstrings\! +* unit/test_config.py: make it independent on sys.argv +* run_tests.py: make test runner accepts plugins +* reverting one import change; another docstring fix +* docstring +* Switch image_data to be a file-like object instead of bare string in image creating and updating Without this Glance loads all image into memory, then copies it one time, then writes it to temp file, and only after all this copies image to target repository +* Add myself to Authors file +* cleaning up None values being passed into images_get_all_public db call +* adding base client module +* restructuring client code +* merging trunk +* Explicitly set headers rather than add them +* fixing httplib2 functional test that was expecting wrong content-type value +* merging trunk +* rewrite of test_get_head_simple_post from tests/functional/test_curl_api.py using httplib2 +* adding assert to check content_type in GET /images/ test +* Explicitly setting Content-Type, Content-Length, ETag, Location headers to prevent duplication +* Bug #801703: No logging is configured for unit tests +* Bug #801703: No logging is configured for unit tests +* Change image_data to body_file instead of body +* reset _MAKER every test and make sure to stop the servers +* Trunk merge, changed returned content-type header from 'application/octet-stream' to 'text/html; charset=UTF-8, application/octet-stream' +* yea python strings +* updated main docstring, as it was directly coppied from test_curl_api.py +* merged trunk +* refactoring for Jay +* make image data a constant +* Fixes build failures due to webob upgrade. 
Updated pop-requires as well +* upgrading webob and fixing tests +* - refactoring wsgi code to divide deserialization, controller, serialization among different objects - Resource object acts as coordinator +* updating client docs +* fixing bad request error messages +* making SUPPORTED_* lists into tuples +* slight refactoring +* updating docs +* adding ordering support to glance api +* adding support to registry server and client for sort_key and sort_dir params +* re-ordered imports, using alpha-ordering +* removing unnecessary unittest import +* moved httplib2 tests to their own test case file, and uncommented md5 match +* updating docs; adding support for status filter +* adding query filters to bin/glance details +* adding query filters to bin/glance index +* forgot to remove pprint import +* adding hashlib as a dependency to pip-requires (not 100% sure it is not part of the base install though) +* fixed pep8 violation +* rewote the test #7 - #11 for testcase (test_get_head_simple_post) +* refactoring for Brian +* refactoring from Rick's comments +* Added httplib2 dependency to tools/pip-requires +* rewriting functional tests to utilize httplib2 instead of curl +* make sure it runs as a daemon for the tests +* default to no daemon +* also allow for daemon in the config file so that we can test it easier +* default to non-daemon mode +* change order of paramaters and make event optional +* initial refactoring from Jay's comments +* remove eventlet import and leftover function from previous refactoring +* remove file that got resurrected by accident +* fixed test case +* add functional tests of the scrubber and delayed_delete +* start the scrubber in addition to the api and registry +* add glance-scrubber to glance-control +* call it a Daemon, cuz it is +* Update Authors +* add the function to the stubs +* cleanup +* adding tests for wsgi module +* removing rogue print +* further refactoring +* adding refactored wsgi code from nova; moving registry api to new wsgi +* delayed scrubbing now works +* add the scrubber startup script +* remove unnecessary option +* add pending_delete to stub api +* pep8 fixed +* pep8 fixes +* pass in the type we want so it gets converted properly +* self leaked ;( +* only return the results that we need to act on +* allow passing of time to get only results earlier than the time' +* server and scrubber work +* update the docstring to reflect current +* pass in a wakeup_time for the default time between database hits +* start making the server that will periodicly scrub +* Config file for the scrubber. 
We make our own connection to the db here and bypass using the registry client so we don't have to expose non-public images over the http connection +* make the commits +* Add webob>=1.0.7 requirement to tools/pip-requires +* all delayed deletes will be going through a new service, if delayed_delete is False, then delete it right away, otherwise set it to pending_delete +* add scrub file +* set the image to pending delete prior to scheduling the delete +* refactor a bit so the db gets updated as needed and we only trigger the delay if the config option is set +* add scheduled_delete_from_backend which delays the deletion of images for at least 1 second +* don't delete directly but schedule deletion +* add the api function to get the images that are pending deleteion +* add in delayed delete options +* Add workaround for Webob bug issue #12 and fix DELETE operation in S3 where URL parsing was broken +* Add ability to create missing s3 bucket on first post, similar to Swift driver +* Adding support for marker/limit query params from api, through registry client/api, and implementing at registry db api layer +* Bug #787296: test_walk_versions fails with SQLalchemy 0.7 +* OK, fixes the issue where older versions of webob.Request did not have the body_file_seekable attribute. After investigation, turned out that webob.Request.make_body_seekable() method was available in all versions of webob, so we use that instead +* Added new disk_format type of 'iso'. Nova can use this information to identify images that have to be booted from a CDROM +* adding marker & limit params to glance client +* Auto-migrate if the tables don't exist yet +* Fix up unit tests for S3 after note from Chris. Also fix bug when S3 test was skipped, was returning error by accident +* * Adds functional test that works with Amazon S3 * Fixes parsing of "S3 URLs" which urlparse utterly barfs on because Amazon stupidly allows forward slashes in their secret keys * Update /etc/glance-api.conf for S3 settings +* merging trunk, resolving conflicts +* fixing sql query +* completing marker functionality +* Call stop_servers() for those 2 test cases missing it +* Correct documentation +* Add missing stop_servers() calls to two functional test cases +* Remove changes to stub database +* Auto-migrate if tables don't exist +* Fix accidental delete +* Remove additions to FIXTURES in test/stubs.py, which requried changes elsewhere +* Sync with trunk +* Documentation for new results filtering in the API and client +* Fix tiny typo +* Documentation for new results filtering in the API and client +* Adding support for query filtering from the glance client library +* renaming query_params to params +* abstracting out filters query param serialization into BaseClient.do_request +* renaming tests to resolve conflict +* adding filters param to get_images and get_images_detailed in glance client +* Bug #787296: test_walk_versions fails with SQLalchemy 0.7 +* Updated doc with 'iso' disk_format +* Update documentation +* Adding support for api query filtering - equality testing on select attributes: name, status, container_format, disk_format - relative comparison of size attribute with size_min, size_max - equality testing on user-defined properties (preface property name with "property-" in query) +* updating stubs with new sorting logic; updating tests +* fixing some copy/paste errors +* fixing some webob exceptions +* slight modification to registry db api to ensure marker works correctly +* slight refactoring per jaypipes' suggestions; sort on 
get images calls is now created_at desc +* Add tests for 'iso' image type. Remove hard coding of next available image id in tests. This prevents new test images from being added to the set generated by tests.unit.stubs.FakeDatastore +* pulling from parent branch +* docstring fix +* pushing marker/limit logic down into registry db api +* adding support for marker & limit query params +* removing some unnecessary imports +* making registry db api filters more structured; adding in a bit of sqlalchemy code to filter image properties more efficiently +* consolidating image_get_all_public and image_get_filtered in registry db api +* adding test case for multiple parameters from command line +* adding custom property api filtering +* adding size_min and size_max api query filters +* implemented api filtering on name, status, disk_format, and container_format +* Adds versioning to the Glance API +* Add test and fix for /v1.2/images not properly returning version choices +* Add more tests for version URIs and accept headers and fix up some of Brian's review comments +* Fix merge conflict.. +* Changes versioned URIs to be /v1/ instead of /v1.0/ +* Improve logging configuration docs.. +* Doc and docstring fixes from Dan's review +* Removed some test config files that slipped in.. +* Fix up find_config_file() to accept an app_name arg. Update all documentation referencing config files +* Fix pep8 complaint +* Add DISK_FORMAT for 'iso' type images +* Adds versioning to Glance's API +* Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state +* Fix numbering in comment.. +* Fixed doh. Updates test case to test for condition that should have failed with status!='active' +* Changes glance index to return all public images in any status other than 'killed'. This should allow tools like euca-describe-images to show images while they are in a saving/untarring/decrypting state +* Adding prefilled Authors, mailmap files Adding test to validate Authors file is properly set up +* Documentation updates to make glance add command clearer, hopefully :) +* adding Authors functionality; fixing one rogue pep8 violation +* Improve logging configuration docs.. +* Prevent users from uploading images with a bad or missing store. Allow deletion from registry when backend cannot be used +* bcwaldon review fixups +* adding comment +* Fix for bug #768969: glance index shows non-active images; glance show does not show status +* Completes the S3 storage backend. The original code did not actually fit the API from boto it turned out, and the stubs that were in the unit test were hiding this fact +* catching NotFound to prevent failure on bad location +* Prevent requests with invalid store in location param +* Allow registry deletion to succeed if store deletion fails +* Documentation updates to make glance add command clearer, hopefully :) +* Fix for LP Bug #768969 +* Expanding user confirmation default behavior +* removing excessive exception handling +* pep8 fixes +* docstring and exception handling +* Expanding user_confirm default behavior +* I modified documentation to show more first-time user friendly examples on using glance. With the previous examples, I followed it as a first-time user and had to spend more than necessary time to figure out how to use it. 
With this modification, other first-time users would make it work on their systems more quickly +* - Require user confirmation for "bin/glance clear" and "bin/glance delete " - Allow for override with -f/--force command-line option +* adding --force option to test_add_clear +* Adds a test case for updating an image's Name attribute. glance update was not regarding 'name' as a top-level modifiable attribute.. +* Name is an attribute that is modifiable in glance update, too. +* Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command +* Update tests and .bzrignore to use tests.sqlite instead of glance.sqlite +* Only modify the connection URL in runs_sql if the original connection string starts with 'sqlite' +* Create a decorator that handles setting the SQL store to a disk-based SQLite database when arbitrary SQL statements need to be run against the registry database during a test case +* Docstring update on the run_sql_command function +* Mark image properties as deleted when deleting images. Added a unit test to verify public images and their properties get deleted when running a 'glance clear' command +* Add log_file to example glance.conf +* fixing spacing in help text +* adding confirmation on image delete/clear; adding user_confirm functionality +* Add log_file to example glance.conf +* Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :( +* Fixing tests. Sorry for late response +* Make sure we use get_option() when dealing with boolean values read from configuration files...otherwise "False" is True :( +* resolve merge conflicts +* chnaged output +* Open Diablo release +* Diablo versioning +* Fake merge with ancient trunk. This is only so that people who "accidentally" have been following lp:~hudson-openstack/glance/trunk will not have problems updating to this +* Final versioning for Cactus +* fixing after review +* Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly +* RickH fixups from review +* Add catch-all except: block in _upload() +* change output from glance-registry +* get latest from lp:glance +* Ensures that configuration values for debug and verbose are used if command-line options are not set +* Removes capture of exception from eventlet in _upload_and_activate(), which catches the exceptions that come from the _safe_kill() method properly +* Fix logging in swift +* Fix Thierry's notice about switched debug and verbose +* Change parsing of headers to accept 'True', 'on', 1 for boolean truth values +* Final cactus versioning +* OK, fix docs to make it clear that only the string 'true' is allowed for boolean headers. Add False-hood unit tests as well +* Logging was not being setup with configuration file values for debug/verbose +* Fix up the way the exception is raised from _safe_kill()... When I "fixed" bug 729726, I mistakenly used the traceback as the message. doh +* Change parsing of headers to accept 'True', 'on', 1 for boolean truth values +* Add the migration sql scripts to MANIFEST.in. The gets them included in not only the tarball, but also by setup.py install +* Add the migration sql scripts to MANIFEST.in. 
The gets them included in not only the tarball, but also by setup.py install +* Changed raise of exception to avoid displaying incorrect error message in _safe_kill() +* fix logging in swift +* Changes "key" column in image_properties to "name" +* Updated properties should be marked as deleted=0. This allows previously deleted properties to be reactivated on an update +* Adds --config-file option to common options processing +* Update the docs in bin/glance so that help for the 'update' command states that metadata not specified will be deleted +* Fix config test fixtures and pep8 error in bin/glance-manage +* Provide revised schema and migration scripts for turning 'size' column in 'images' table to BIGINT. This overcomes a 2 gig limit on images sizes that can be downloaded from Glance +* Updated properties should be marked as deleted=0. Add unit tests +* Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435 +* Change order of setting debug/verbose logging. Thanks for spotting this, Elgar +* Use logging module, not echo, for logging SQLAlchemy. Fixes bug 746435 +* Ensure we don't ask the backend store to delete an image if the image is in a queued or saving state, since clearly the backend state has yet to completely store the image +* Changes "key" column in image_properties to "name" +* Use logging module, not echo for logging SQLAlchemy +* Updates glance-manage to use configuration files as well as command line options +* Ensure we don't ask a backend store to delete an image if the image is queued or saving +* Moved migration into Python script, otherwise PostgreSQL was not migrated. Added changes to the functional test base class to reset the data store between tests. GLANCE_SQL_CONNECTION env variable is now GLANCE_TEST_SQL_CONNECTION +* changed to more typical examples +* Add migration scripts for revising the datatype of the 'size' column in the images table +* Changes to database schema required to support images larger than 2Gig on MySQL. Does not update the migration scripts +* Updates to the Registry API such that only external requests to update image properties purge existing properties. The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally +* Updates to the Registry API such that only external requests to update image properties purge existing properties. The update_image call now contains an extra flag to purge_props which is set to True for external requests but False internally +* Update the glance registry so that it marks properties as deleted if they are no longer exist when images are updated +* Simple one.. just add back the Changelog I removed by accident in r94. Fixes bug #742353 +* Adds checksumming to Glance +* Uhhhm, stop_servers() should stop servers, not start them! Thanks to Cory for uncovering this copy/paste fail +* Fix up test case after merging in bug fixes from trunk... 
expected results were incorrect in curl test +* Add ChangeLog back to MANIFEST.in +* Add migration testing and migration for disk_format/container_format +* tests.unit.test_misc.execute -> tests.utils.execute after merge +* Allow someone to set the GLANCE_TEST_MIGRATIONS_CONF environment variable to override the config file to run for the migrations unit test: +* Update the glance registry so that it marks properties as deleted if they are no longer in the update list +* Start eventlet WSGI server with a logger to avoid stdout output +* Adds robust functional testing to Glance +* Add migration script for checksum column +* Fixed an oops. Didn't realized Repository.latest returned a 0-based version number, and forgot to reversed() the downgrade test +* OK, migrations are finally under control and properly tested +* Remove non-existing files from MANIFEST.in +* Removed glance-combined. Fixed README +* Removed glance-commit +* Re-raise _safe_kill() exception in non-3-arg form to avoid pep8 deprecation error +* Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv +* Bug #737979: glance-control uses fixed path to Python interpreter, breaking virtualenv +* Removes glance-combined and fixes TypeError from bad function calls in glance-manage +* Start eventlet WSGI server with a logger to avoid stdout output +* Pass boolean values to glance.client as strings, not integers +* Small adjustment on wait_for_servers()... fixed infinite loop possibility +* Adds robust functional testing to Glance +* Ensure Content-type set to application/octet-stream for GET /images/ +* Ensure Content-Length sent for GET /images/ +* HTTPBackend.get() needed options in kwargs +* Remove glance-combined (use glance-control all start). Fix glance-manage to call the setup_logging() and add_logging_options() methods according to the way they are called in glance-api and glance-registry +* Support account:user:key in Swift URIs. Adds unit tests for various calls to parse_swift_tokens() +* Adds documentation on configuring logging and a unit test for checking simple log output +* Support account:user:key in Swift URIs. Adds unit tests for various calls to parse_swift_tokens() +* Cherry pick r86 from bug720816 +* Cherry pick r87 from bug720816 +* Fixed run_tests.py addError() method since I noted it was faulty in another branch.. +* Tiny pep8'ers +* I stole the colorized code from nova +* Fix typo +* A quick patch to allow running the test suite on an alternate db backend +* Merged trunk -resolved conflicts +* [Add] colorization stolen from nova +* Don't require swift module for unit-tests +* Pep8 fix +* Backing out unit-test workaround +* Changed to have 2 slashes +* Allow unit-tests to run without swift module +* Remove spurios comment in test file +* Add Glance CLI tool +* Silly mistake when resolving merge conflict...fixed +* Fixes passing of None values in metadata by turning them into strings. Also fixes the passing of the deleted column by converting it to and from a bool. The test for passing metadata was updated to include these values +* Adds documentation on configuring logging and a test that log_file works. It didn't, so this also inludes fixes for setting up log handling :) +* fix data passing +* add failing test for None and deleted +* Uses logger instead of logging in migration.py +* Using logger in migration api instead of logging directly +* Only clean up in the cleanup method. 
Also, we don't need the separate URI now +* Use unregister_models instead of os.unlink to clean up after ourselves +* Fixed unregister_models to actually work +* Fixed migration test to use a second DB URL +* Replaced use of has_key with get + default value +* Make it clear that the checksum is an MD5 checksum in docs +* Adds checksumming to Glance +* Whoops! Left out a self.db_path +* Allow tests to run on an alternate dburi given via environment variables +* Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module +* Remove last vestiges of account in Swift store +* Quick fixup on registry.get_client() +* Public? => Public: per Cory's comment. Added a little more robust exception handling to some methods in bin/glance +* Fixes for Devin and Rick's reviews +* Adds disk_format and container_format to Image, and removes the type column +* Fixes client update_image to work like create_image. Also fixes some messed up exceptions that were causing a try, except to reraise +* Final review fixes. Makes disk_format and container_format optional. Makes glance-upload --type put the type in properties +* remove test skip +* Put account in glance.conf.sample's swift_store_auth_address, use real swift.common.client.ClientException, ensure tests work with older installed versions of Swift (which do not have, for example, swift.common.client.Connection.get_auth method) +* Work around Eventlet exception clearing by memorizing exception context and re-raising using 3-arg form +* Adds bin/glance to setup.py +* Fixes from Rick's review #1 +* Reverts Image `type` back to the old behavior of being nullable +* Work around Eventlet exception clearing +* Add sys.path mangling to glance-upload +* Add sys.path adjustment magic to glance-upload +* Adds ability for Swift to be used as a full-fledged backend. Adds POST/PUT capabilities to the SwiftBackend Adds lots of unit tests for both FilesystemBackend and SwiftBackend Removes now-unused tests.unit.fakeswifthttp module +* Couple tiny cleanups noticed when readin merge diff. +* bin/glance-admin => bin/glance, since it's really just the CLI tool to interact with Glance. Added lots of documentation and more logging statements in some critical areas (like the glance.registry calls.. +* Adds lots of unit tests for verifying exceptions are raised properly with invalid or mismatched disk and container formats +* Makes --kernel and --ramdisk required arguments for glance-upload since Nova currently requires them +* Removing image_type required behavior +* Removing requirement to pass kernel and ramdisk +* Add test cases for missing and invalid disk and container formats +* Requiring kernel and ramdisk args in glance-upload +* Make disk_format and container_format required +* Make disk_format and container_format required +* Adds an admin tool to Glance (bin/glance-admin) that allows a user to administer the Glance server: +* Make sure validate_image() doesn't throw exception on missing status when updating image +* Adds disk_format and container_format to Image, and removes the type column +* This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server +* Add debugging output to assert in test_misc. Trying to debug what Hudson fails on.. 
+* Fixups from Rick's review +* Removes now-unnecessary @validates decorator on model +* I should probably rebase this commit considering all the previous commits weren't actually addressing the issue. The fact that I had glance-api and glance-registry installed on my local machine was causing the test runs to improperly return a passing result +* Use Nova's path trick in all bins.. +* Add path to glance-control +* Removes image type validation in the Glance registry +* Adding vhd as recognized image type +* Reverting the removal of validation +* Removing image type validation +* Adds --pid-file option to bin/glance-control +* Add %default for image type in glance-upload +* Adds Location: header to return from API server for POST /images, per APP spec +* Cleanups from Soren's review +* Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library +* Adds Location: header to return from API server for POST /images, per APP spec +* This adds a test case for LP Bug 704854 -- Exception raised by Registry server gets eaten by API server +* Adds --pid-file option to bin/glance-control +* Add an ImportError check when importing migrate.exceptions, as the location of that module changed in a recent version of the sqlalchemy-migrate library +* Adds sql_idle_timeout to reestablish connections to database after given period of time +* Add sql_idle_timeout +* Removes lockfile and custom python-daemon server initialization in favour of paste.deploy +* Review 3 fixups +* Remove get_config_file_options() from glance-control +* Fixes for Rick review #2 +* Remove no-longer-needed imports.. +* Remove extraneous debug import.. +* Changes the server daemon programs to be configured only via paste.deploy configuration files. Removed ability to configure server options from CLI options when starting the servers with the exception of --verbose and --debug, which are useful during debugging +* Adds glance-combined and glance-manage to setup.py +* Fix merge conflicts +* Adds glance-combined and glance-manage to setup.py +* Fixes bug 714454 +* ReStructure Text files need to end in .rst, not .py ;) +* Update README, remove some vestigial directories, and other small tweaks +* Removing dubious advice +* Adds facilities for configuring Glance's servers via configuration files +* Use fix_path on find_config_file() too +* Fixups from Rick's review +* Including tests/ in pep8 +* Typo fixes, clarifying +* Updating README, rmdir some empty dirs +* Adds bin/glance-control program server daemonization wrapper program based on Swift's swift-init script +* Ignore build and deploy-related files +* Adds sqlalchemy migrations +* Fix bug 712575. 
Make BASE = models.BASE +* Make sure BASE is the models.BASE, not a new declarative_base() object +* Had to reverse search order of directories for finding config files +* Removes lockfile and custom python-daemon server initialization in favour of paste.deploy +* Adds facilities for configuring Glance's servers via configuration files +* Creating indexes +* Adding migration test +* Fixing migration import errors +* Small cleanups +* glance-manage uses common options +* Merging in glance/cactus +* Pep8 fix +* Pep8 fixes +* Refactoring into option groups +* Hopefully-final versioning (0.1.7), no review needed +* Final versioning, no review needed +* Adding db_sync to mirror nova +* Adding some basic documentation +* Better logging +* Adding image_properties migration +* Adding migration for images table +* Adding migration management commands +* Remove debugging output that wasn't supposed to go into this branch (yet) :) +* Adds --debug option for DEBUG-level logging. --verbose now only outputs INFO-level log records +* Typo add_option -> add_options +* Fixes from Rick's review. Thanks, Rick +* Adds --sql-connection option +* First round of logging functionality: +* Merged use-optparse +* Removes glance.common.db.sqlalchemy and moves registration of models and create_engine into glance.registry.db.api +* pep8-er in bin/glance-combined +* Fixes lp710789 - use-optparse breaks daemonized process stop +* Adds bin/glance-combined. Useful in testing.. +* Tiny pep8 fixup in setup.py +* Rework what comes back from parse_options()[0] to not stringify option values. Keep them typed +* Remove use of gflags entirely. Use optparse +* Removing unecessary param to get_all_public +* Merging trunk +* Adding back some missing code +* Cleaning up some code +* Makes Glance's versioning non-static. Uses Nova's versioning scheme +* Adds/updates the copyright info on most of the files in glance and copies over the Authors check from Nova +* Removing sqlalchemy dir +* Removed methods from sqlalchemy/api +* Refactor update/create +* Messed up a permission somehow +* Refactoring destroy +* feh +* A few more +* A few more I missed +* version bumped after tarball cut. no review needed.. +* Bump version +* Removing authors test for now +* PEP8 cleanup +* PEP8 cleanup +* Should fix the sphinx issue +* Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs +* Make sphinx conditional +* bumps version after tarball release of 0.1.4 +* Bump version +* Added bzr to pip-requires and refixed some pep8 stuff +* Authors check +* A few more copyrights +* Copyright year change +* Pylint cleanup +* Added copyright info +* Adds architecture docs and enables Graphviz sphinx extension. Also cleans up source code formatting in docs +* bumps release version. ready for Bexar final release +* Version bump after release +* added sphinx and argparse into tools/pip-requires so that setup.py works. 
this bug also prevents nova from creating a virtualenv +* fixes setup install pip dependencies +* Version bump for release +* Fixes bug #706636: Make sure pep8 failures will return failure for run_tests.sh +* Make run_tests.sh return failure when pep8 returns fail, and fix the pep8 error in /bin/glance-upload +* This patch: * Converts dashes to underscores when extracting image-properties from HTTP headers (we already do this for 'regular' image attributes * Update image_properties on image PUTs rather than trying to create dups +* This patch replaces some remaining references to req.body (which buffers the entire request body into memory!) with the util.has_body method which can determine whether a body is present without reading any of it into memory +* Adding Apache license, fixing long line +* Making glance-upload a first-class binary +* Revove useless test_data.py file, add image uploader +* Fix property create +* Dont buffer entire image stream on PUT +* Adds man pages for glance-registry and glance-api programs. Adds Getting Started guide to the Glance documentation +* Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client +* Duh, it helps to import the class you are inheriting from... +* OK, found a solution to our test or functional dilemma. w00t +* Make compat with chunked transfer +* Removes the last vestiges of Twisted from Glance +* Pull in typo fix +* Add in manpage installation hook. Thanks Soren :) +* Fixes LP Bug #700162: Images greater than 2GB cannot be uploaded using glance.client.Client +* Removes Twisted from tools/install_venv.py and zope.interface from tools/pip-requires. Shaved a full 45 seconds for me off of run_tests.sh -V -f now we're not downloading a giant Twisted tarball.. +* Remove last little vestiges of twisted +* Quick typo fix in docs +* Add run_tests.py to tarball +* Also include run_tests.py in tarball +* Adds man pages for glance-registry and glance-api. Adds Getting Started guide to Glance docs +* Fixes bug #696375: x-image-meta-size not optional despite documentation saying so +* PEP8 fixes in /glance/store/__init__.py +* Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 +* Fix Bug #704038: Unable to start or connect to register server on anything other than 0.0.0.0:9191 +* upgrade version.. +* Fixes Bug#696375: x-image-meta-size is not optional, contrary to documentation +* Increase version after release +* Cut 0.1.2 +* Files missing from the tarball (and you probably need to cut a 0.1.2.) +* Cleanup of RST documentation and addition of docs on an image's status +* Include some files that were left out +* Implements the S3 store to the level of the swift store +* fixes bug698318 +* Fixes suggested by JayPipes review. 
Did not modify docstrings in non-related files +* This merge is in conjunction with lp:~rconradharris/nova/xs-snap-return-image-id-before-snapshot +* Updating docs +* Merging trunk +* Clean up the rest of Glance's PEP8 problems +* PEP-8 Fixes +* Fixing eventlet-raise issue +* Bug #698316: Glance reads the whole image into memory when handling a POST /images request +* Merging trunk +* Fixed pylint/pep8 for glance.store.s3 +* Implement S3 to the level of swift +* removing old methods +* refactoring so update can take image_data +* More PEP8 fixes +* Fix all Glance's pep8 problems +* Remove incorrect doccomments about there being a default for the host parameter, fix misdocumented default port, and remove handling of missing parameters in BaseClient, because the values are always specified by the subclass's __init__ +* Bug #696385: Glance is not pep8-clean +* Bug #696382: Glance client parameter defaults misdocumented +* Fixes a number of things that came up during initial coding of the admin tool: +* Made review changes from Rick +* Duh, use_ssl should not use HTTPConnection.. +* Remove final debugging statement +* merge trunk +* Remove debugging statements +* Fixes a number of things that came up during initial coding of the admin tool: +* fix bug 694382 +* Bug #694382: setup.py refers to parallax-server and teller-server, when these have been renamed +* documentation cleanup and matching to other OpenStack projects. Glance is no longer the red-headed documentation stepchild in OpenStack.. +* Converts timestamp attributes to datetime objects before persisting +* Adding __protected_attributes__, some PEP8 cleanups +* review fixes +* Update sphinx conf to match other OpenStack projects +* Documentation cleanup. Splits out index.rst into multiple section docs +* Converting to datetime before saving image +* Enhances POST /images call to, you know, actually make it work.. +* Make directory for filesystem backend +* doing the merge of this again...somehow the trunk branch never got rev26 :( +* Adds POST /images work that saves image data to a store backend +* Update docs for adding image.. +* Fix Chris minor nit on docstring +* Fixes binaries, updates WSGI file to more recent version from Nova, and fixes an issue in SQLAlchemy API that was being hidden by stubs and only showed up when starting up the actual binaries and testing.. +* Major refactoring.. +* Fix testing/debug left in +* Fixes from review +* Documentation updates and GlanceClient -> Client +* Refactor a bunch of stuff around the image files collection +* Cleanup around x-image-meta and x-image-meta-property HTTP headers in GET/HEAD +* Update /glance/client.py to have GlanceClient do all operations that RegistryClient does +* Merges Glance API with the registry API: * Makes HEAD /images/ return metadata in headers * Make GET /images/ return image data with metadata in headers Updates docs some (more needed) +* Second step in simplifying the Glance API +* This is the first part of simplifying the Glance API and consolidating the Teller and Parallax APIs into a single, unified Glance API +* Adds DELETE call to Teller API +* Fixes Swift URL Parsing in Python 2.6.5 by adding back netloc +* Moving imports into main which will only be executed after we daemonize thus avoiding the premature initialization of epoll +* Delaying eventlet import until after daemonization +* Fix Swift URL parsing for Python 2.6.5 +* Don't leak implementation details in Swift backend. 
Return None on successful delete_object call +* Adds call to Swift's DELETE +* Typo fixed and tiny cleanups +* Adds DELETE to Teller's API +* Just some small cleanups, fixing: * Swapped port numbers (Parallax Port <=> Teller port) * Removing extraneous routes in Teller API * Adding required slashes to do_request +* * Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". This hides the URI stuff behind the API layer in communication between Parallax and Teller. * Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data +* Fixing swapped port numbers, removing extraneous routes in Teller controller, adding required slash for do_request calls +* * Changes Teller API to use REST with opaque ID sent in API calls instead of a "parallax URI". This hides the URI stuff behind the API layer in communication between Parallax and Teller. * Adds unit tests for the only complete Teller API call so far: GET images/, which returns a gzip'd string of image data +* Add files attribute to Parallax client tests +* Adds client classes for Parallax and Teller and fixes some issues where our controller was not returning proper HTTP response codes on errors.. +* Cleanup/fixes for Rick review +* Adds client classes ParallaxClient and (stubbed) TellerClient to new glance.client module +* packaging fixups preparing for release candidate +* Remove symlinks in bin/ +* Packaging fixups +* awesomeness. merging into trunk since my parallax-api is already in trunk I believe. :) +* Moving ATTR helpers into db module +* PUTing and POSTing using image key +* Quick fix...gives base Model an update() method to make it behave like a dict +* Make returned mapping have an 'image' key to help in XML serialization +* Ignore virtualenv directory in bzr +* This patch removes unique index on the 'key' column of image_metadatum and replaces it with a compound UniqueConstraint on 'image_id' and 'key'. The 'key' column remains indexed +* Fixes lp653358 +* Renaming is_cloudfiles_available -> is_swift_available +* Adds compound unique constraint to ImageMetadatum +* Using swift.common.client rather than python-cloudfiles in Teller's Swift backend +* Adds DELETE to the Parallax REST API +* Implements the REST call for updating image metadata in the Parallax API +* Implements Parallax API call to register a new image +* Adds a /images/detail route to the Parallax controller, adds a unit test for it, and cleans up Michael's suggestions +* Works around non-RFC compliance in Python (< 2.6.5) urlparse library +* Workaround for bug in Python 2.6.1 urlparse library +* Adds tests for bad status set on image +* Implements Parallax API call to register a new image +* This patch overhauls the testing in Glance: +* unittest2 -> unittest. For now, since not using unittest2 features yet +* Fixes up test_teller_api.py to use stubout correctly. Fixes a few bugs that showed up in the process, and remove the now-unnecessary FakeParallaxAdapter +* First round of cleaning up the unittests. 
Adds test suite runner, support for virtualenv setup and library dependencies, resolves issues with ImportErrors on cloudfiles, adds pymox/stubout support and splits the backend testing into distinct unittest cases +* With this patch Parallax and teller now work end-to-end with the Swift backend +* Adding missing backend files, fixing typos in comments +* This patch: * Decouples Controller for ParallaxAdapter implementation by adding generic RegistryAdapter and providing a lookup function * Adds base model attributes to Parallax's JSON (created_at, etc) +* Improving symmetry between teller and parallax +* Fixing swift authurl +* Add RegistryAdapter, include ModelBase attributes +* Fixing Teller image tests +* Created teller-server.py in bin/ +* Cleaning up Teller backend +* Rewrote ImageController to inherit from the work Rick Harris did in glance.common. Moved it into teller/api/images.py to make teller match parallax. Fixed tests. Renamed them to distinguish if any parallax tests ever get written +* Adding Image index call, nesting the Image show dict to facilitate XML serialization +* Moving parallax models out of common and into the parallax module +* Updated tests +* Reimplements server.py as a wsgi api inheriting from glance.common +* This patch: * pulls in a number of useful libraries from Nova under the common/ path (we can factor those out to a shared library in Bexar-release) * Defines the models in common.db.sqlalchemy.models.py (this should be factored out into the parallax package soon) * Adds the parallax api-server under /bin (if PyPI was used to pull python-daemon and python-lockfile, you may need to apply a patch I have against it) +* Changes the obj['uri'] to obj['location'] to better sync with the representation within Nova. Adds the image_lookup_fn = ParallaxAdapter.lookup to teller.server +* ImageChunk -> ImageFile, merging APIRouter into API for now +* Adding Apache header to test_data.py +* Small cleanups +* Parallax will return obj['location'] instead of obj['uri'], also maybe a parallax lookup fn would be nice? +* Implements a Parallax adapter for looking up images requested from nova. Adds a size check to SwiftBackend to ensure that the chunks haven't been truncated or anything +* Reconciling parallax modifications with modulization of glance +* Adding Images controller +* Adding API directory and server.py +* Modulify the imports +* Implements Parallax adapter for lookups from Teller, also adds size expectations to the backend adapters +* Adding files from Nova +* Makes glance a module, containing teller and parallax sub-modules +* libify glance into teller and parallax modules. Make nosetests work by making tests and tests/unit/ into packages +* Rearranged the code a little. Added a setup.py. 
Added sphinx doc skeleton +* Added setup.py and sphinx docs +* Reorg to make Monty's build pedanticness side happier +* Implements Swift backend for teller +* ignore all .pyc files +* Merging ricks changes +* Adding basic image controller and mock backends +* Adding description of registry data structure +* Adding teller_server +* adding filesystem and http backends +* Initial check-in diff --git a/code/daisy/HACKING.rst b/code/daisy/HACKING.rst new file mode 100755 index 00000000..769d76ef --- /dev/null +++ b/code/daisy/HACKING.rst @@ -0,0 +1,25 @@ +glance Style Commandments +======================= + +- Step 1: Read the OpenStack Style Commandments + http://docs.openstack.org/developer/hacking/ +- Step 2: Read on + +glance Specific Commandments +-------------------------- + +- [G316] Change assertTrue(isinstance(A, B)) by optimal assert like + assertIsInstance(A, B) +- [G317] Change assertEqual(type(A), B) by optimal assert like + assertIsInstance(A, B) +- [G318] Change assertEqual(A, None) or assertEqual(None, A) by optimal assert like + assertIsNone(A) +- [G319] Validate that debug level logs are not translated +- [G320] For python 3 compatibility, use six.text_type() instead of unicode() +- [G321] Validate that LOG messages, except debug ones, have translations +- [G322] Validate that LOG.info messages use _LI. +- [G323] Validate that LOG.exception messages use _LE. +- [G324] Validate that LOG.error messages use _LE. +- [G325] Validate that LOG.critical messages use _LC. +- [G326] Validate that LOG.warning messages use _LW. +- [G327] Prevent use of deprecated contextlib.nested \ No newline at end of file diff --git a/code/daisy/LICENSE b/code/daisy/LICENSE new file mode 100755 index 00000000..68c771a0 --- /dev/null +++ b/code/daisy/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + diff --git a/code/daisy/MANIFEST.in b/code/daisy/MANIFEST.in new file mode 100755 index 00000000..5fe071de --- /dev/null +++ b/code/daisy/MANIFEST.in @@ -0,0 +1,20 @@ +include run_tests.sh ChangeLog +include README.rst builddeb.sh +include MANIFEST.in pylintrc +include AUTHORS +include run_tests.py +include HACKING.rst +include LICENSE +include ChangeLog +include babel.cfg tox.ini +include openstack-common.conf +include glance/openstack/common/README +include glance/db/sqlalchemy/migrate_repo/README +include glance/db/sqlalchemy/migrate_repo/migrate.cfg +include glance/db/sqlalchemy/migrate_repo/versions/*.sql +graft doc +graft etc +graft glance/locale +graft glance/tests +graft tools +global-exclude *.pyc diff --git a/code/daisy/PKG-INFO b/code/daisy/PKG-INFO new file mode 100755 index 00000000..c916893d --- /dev/null +++ b/code/daisy/PKG-INFO @@ -0,0 +1,30 @@ +Metadata-Version: 1.1 +Name: glance +Version: 2015.1.0 +Summary: OpenStack Image Service +Home-page: http://www.openstack.org/ +Author: OpenStack +Author-email: openstack-dev@lists.openstack.org +License: UNKNOWN +Description: ====== + Glance + ====== + + Glance is a project that defines services for discovering, registering, + retrieving and storing virtual machine images. + + Use the following resources to learn more: + + * `Official Glance documentation `_ + * `Official Client documentation `_ + + +Platform: UNKNOWN +Classifier: Environment :: OpenStack +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 diff --git a/code/daisy/README.rst b/code/daisy/README.rst new file mode 100755 index 00000000..406a999b --- /dev/null +++ b/code/daisy/README.rst @@ -0,0 +1,11 @@ +====== +Glance +====== + +Glance is a project that defines services for discovering, registering, +retrieving and storing virtual machine images. 
+ +Use the following resources to learn more: + +* `Official Glance documentation `_ +* `Official Client documentation `_ diff --git a/code/daisy/babel.cfg b/code/daisy/babel.cfg new file mode 100755 index 00000000..efceab81 --- /dev/null +++ b/code/daisy/babel.cfg @@ -0,0 +1 @@ +[python: **.py] diff --git a/code/daisy/daisy.egg-info/PKG-INFO b/code/daisy/daisy.egg-info/PKG-INFO new file mode 100755 index 00000000..f3a72b7e --- /dev/null +++ b/code/daisy/daisy.egg-info/PKG-INFO @@ -0,0 +1,30 @@ +Metadata-Version: 1.1 +Name: daisy +Version: 2015.1.0 +Summary: OpenStack Image Service +Home-page: http://www.openstack.org/ +Author: OpenStack +Author-email: openstack-dev@lists.openstack.org +License: UNKNOWN +Description: ====== + Dasiy + ====== + + Daisy is a project that defines services for discovering, registering, + retrieving and storing virtual machine images. + + Use the following resources to learn more: + + * `Official Daisy documentation `_ + * `Official Client documentation `_ + + +Platform: UNKNOWN +Classifier: Environment :: OpenStack +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 diff --git a/code/daisy/daisy.egg-info/SOURCES.txt b/code/daisy/daisy.egg-info/SOURCES.txt new file mode 100755 index 00000000..3fa13d6a --- /dev/null +++ b/code/daisy/daisy.egg-info/SOURCES.txt @@ -0,0 +1,525 @@ +.coveragerc +.mailmap +.testr.conf +AUTHORS +CONTRIBUTING.rst +ChangeLog +HACKING.rst +LICENSE +MANIFEST.in +README.rst +babel.cfg +openstack-common.conf +pylintrc +requirements.txt +run_tests.sh +setup.cfg +setup.py +test-requirements.txt +tox.ini +doc/source/architecture.rst +doc/source/authentication.rst +doc/source/cache.rst +doc/source/common-image-properties.rst +doc/source/conf.py +doc/source/configuring.rst +doc/source/controllingservers.rst +doc/source/db.rst +doc/source/formats.rst +doc/source/daisyapi.rst +doc/source/daisyclient.rst +doc/source/daisymetadefcatalogapi.rst +doc/source/identifiers.rst +doc/source/index.rst +doc/source/installing.rst +doc/source/metadefs-concepts.rst +doc/source/notifications.rst +doc/source/policies.rst +doc/source/property-protections.rst +doc/source/statuses.rst +doc/source/images/architecture.png +doc/source/images/image_status_transition.png +doc/source/images_src/architecture.graphml +doc/source/images_src/image_status_transition.dot +doc/source/images_src/image_status_transition.png +doc/source/man/footer.rst +doc/source/man/general_options.rst +doc/source/man/daisyapi.rst +doc/source/man/daisycachecleaner.rst +doc/source/man/daisycachemanage.rst +doc/source/man/daisycacheprefetcher.rst +doc/source/man/daisycachepruner.rst +doc/source/man/daisycontrol.rst +doc/source/man/daisymanage.rst +doc/source/man/daisyregistry.rst +doc/source/man/daisyreplicator.rst +doc/source/man/daisyscrubber.rst +doc/source/man/openstack_options.rst +etc/daisy-api-paste.ini +etc/daisy-api.conf +etc/daisy-cache.conf +etc/daisy-manage.conf +etc/daisy-registry-paste.ini +etc/daisy-registry.conf +etc/daisy-scrubber.conf +etc/daisy-search-paste.ini +etc/daisy-search.conf +etc/daisy-swift.conf.sample +etc/policy.json +etc/property-protections-policies.conf.sample +etc/property-protections-roles.conf.sample +etc/schema-image.json +etc/search-policy.json 
+etc/metadefs/README +etc/metadefs/compute-aggr-disk-filter.json +etc/metadefs/compute-aggr-iops-filter.json +etc/metadefs/compute-aggr-num-instances.json +etc/metadefs/compute-guest-shutdown.json +etc/metadefs/compute-host-capabilities.json +etc/metadefs/compute-hypervisor.json +etc/metadefs/compute-instance-data.json +etc/metadefs/compute-libvirt-image.json +etc/metadefs/compute-libvirt.json +etc/metadefs/compute-quota.json +etc/metadefs/compute-randomgen.json +etc/metadefs/compute-trust.json +etc/metadefs/compute-vcputopology.json +etc/metadefs/compute-vmware-flavor.json +etc/metadefs/compute-vmware-quota-flavor.json +etc/metadefs/compute-vmware.json +etc/metadefs/compute-watchdog.json +etc/metadefs/compute-xenapi.json +etc/metadefs/daisy-common-image-props.json +etc/metadefs/operating-system.json +etc/metadefs/software-databases.json +etc/metadefs/software-runtimes.json +etc/metadefs/software-webservers.json +etc/oslo-config-generator/daisy-api.conf +etc/oslo-config-generator/daisy-cache.conf +etc/oslo-config-generator/daisy-manage.conf +etc/oslo-config-generator/daisy-registry.conf +etc/oslo-config-generator/daisy-scrubber.conf +daisy/__init__.py +daisy/context.py +daisy/gateway.py +daisy/i18n.py +daisy/listener.py +daisy/location.py +daisy/notifier.py +daisy/opts.py +daisy/schema.py +daisy/scrubber.py +daisy/service.py +daisy/version.py +daisy.egg-info/PKG-INFO +daisy.egg-info/SOURCES.txt +daisy.egg-info/dependency_links.txt +daisy.egg-info/entry_points.txt +daisy.egg-info/not-zip-safe +daisy.egg-info/pbr.json +daisy.egg-info/requires.txt +daisy.egg-info/top_level.txt +daisy/api/__init__.py +daisy/api/authorization.py +daisy/api/cached_images.py +daisy/api/common.py +daisy/api/policy.py +daisy/api/property_protections.py +daisy/api/versions.py +daisy/api/middleware/__init__.py +daisy/api/middleware/cache.py +daisy/api/middleware/cache_manage.py +daisy/api/middleware/context.py +daisy/api/middleware/gzip.py +daisy/api/middleware/version_negotiation.py +daisy/api/v1/__init__.py +daisy/api/v1/controller.py +daisy/api/v1/filters.py +daisy/api/v1/images.py +daisy/api/v1/members.py +daisy/api/v1/router.py +daisy/api/v1/upload_utils.py +daisy/api/v2/__init__.py +daisy/api/v2/image_actions.py +daisy/api/v2/image_data.py +daisy/api/v2/image_members.py +daisy/api/v2/image_tags.py +daisy/api/v2/images.py +daisy/api/v2/metadef_namespaces.py +daisy/api/v2/metadef_objects.py +daisy/api/v2/metadef_properties.py +daisy/api/v2/metadef_resource_types.py +daisy/api/v2/metadef_tags.py +daisy/api/v2/router.py +daisy/api/v2/schemas.py +daisy/api/v2/tasks.py +daisy/api/v2/model/__init__.py +daisy/api/v2/model/metadef_namespace.py +daisy/api/v2/model/metadef_object.py +daisy/api/v2/model/metadef_property_item_type.py +daisy/api/v2/model/metadef_property_type.py +daisy/api/v2/model/metadef_resource_type.py +daisy/api/v2/model/metadef_tag.py +daisy/artifacts/__init__.py +daisy/async/__init__.py +daisy/async/taskflow_executor.py +daisy/async/utils.py +daisy/async/flows/__init__.py +daisy/async/flows/base_import.py +daisy/async/flows/convert.py +daisy/async/flows/introspect.py +daisy/cmd/__init__.py +daisy/cmd/agent_notification.py +daisy/cmd/api.py +daisy/cmd/cache_cleaner.py +daisy/cmd/cache_manage.py +daisy/cmd/cache_prefetcher.py +daisy/cmd/cache_pruner.py +daisy/cmd/control.py +daisy/cmd/index.py +daisy/cmd/manage.py +daisy/cmd/registry.py +daisy/cmd/replicator.py +daisy/cmd/scrubber.py +daisy/cmd/search.py +daisy/cmd/orchestration.py +daisy/common/__init__.py +daisy/common/auth.py +daisy/common/client.py 
+daisy/common/config.py +daisy/common/crypt.py +daisy/common/exception.py +daisy/common/jsonpatchvalidator.py +daisy/common/property_utils.py +daisy/common/rpc.py +daisy/common/semver_db.py +daisy/common/store_utils.py +daisy/common/swift_store_utils.py +daisy/common/utils.py +daisy/common/wsgi.py +daisy/common/wsme_utils.py +daisy/common/artifacts/__init__.py +daisy/common/artifacts/declarative.py +daisy/common/artifacts/definitions.py +daisy/common/artifacts/loader.py +daisy/common/artifacts/serialization.py +daisy/common/location_strategy/__init__.py +daisy/common/location_strategy/location_order.py +daisy/common/location_strategy/store_type.py +daisy/common/scripts/__init__.py +daisy/common/scripts/utils.py +daisy/common/scripts/image_import/__init__.py +daisy/common/scripts/image_import/main.py +daisy/contrib/__init__.py +daisy/contrib/plugins/__init__.py +daisy/contrib/plugins/artifacts_sample/__init__.py +daisy/contrib/plugins/artifacts_sample/base.py +daisy/contrib/plugins/artifacts_sample/setup.cfg +daisy/contrib/plugins/artifacts_sample/setup.py +daisy/contrib/plugins/artifacts_sample/v1/__init__.py +daisy/contrib/plugins/artifacts_sample/v1/artifact.py +daisy/contrib/plugins/artifacts_sample/v2/__init__.py +daisy/contrib/plugins/artifacts_sample/v2/artifact.py +daisy/contrib/plugins/image_artifact/__init__.py +daisy/contrib/plugins/image_artifact/requirements.txt +daisy/contrib/plugins/image_artifact/setup.cfg +daisy/contrib/plugins/image_artifact/setup.py +daisy/contrib/plugins/image_artifact/version_selector.py +daisy/contrib/plugins/image_artifact/v1/__init__.py +daisy/contrib/plugins/image_artifact/v1/image.py +daisy/contrib/plugins/image_artifact/v1_1/__init__.py +daisy/contrib/plugins/image_artifact/v1_1/image.py +daisy/contrib/plugins/image_artifact/v2/__init__.py +daisy/contrib/plugins/image_artifact/v2/image.py +daisy/db/__init__.py +daisy/db/metadata.py +daisy/db/migration.py +daisy/db/registry/__init__.py +daisy/db/registry/api.py +daisy/db/simple/__init__.py +daisy/db/simple/api.py +daisy/db/sqlalchemy/__init__.py +daisy/db/sqlalchemy/api.py +daisy/db/sqlalchemy/artifacts.py +daisy/db/sqlalchemy/metadata.py +daisy/db/sqlalchemy/models.py +daisy/db/sqlalchemy/models_artifacts.py +daisy/db/sqlalchemy/models_metadef.py +daisy/db/sqlalchemy/metadef_api/__init__.py +daisy/db/sqlalchemy/metadef_api/namespace.py +daisy/db/sqlalchemy/metadef_api/object.py +daisy/db/sqlalchemy/metadef_api/property.py +daisy/db/sqlalchemy/metadef_api/resource_type.py +daisy/db/sqlalchemy/metadef_api/resource_type_association.py +daisy/db/sqlalchemy/metadef_api/tag.py +daisy/db/sqlalchemy/metadef_api/utils.py +daisy/db/sqlalchemy/migrate_repo/README +daisy/db/sqlalchemy/migrate_repo/__init__.py +daisy/db/sqlalchemy/migrate_repo/manage.py +daisy/db/sqlalchemy/migrate_repo/migrate.cfg +daisy/db/sqlalchemy/migrate_repo/schema.py +daisy/db/sqlalchemy/migrate_repo/versions/001_add_images_table.py +daisy/db/sqlalchemy/migrate_repo/versions/002_add_image_properties_table.py +daisy/db/sqlalchemy/migrate_repo/versions/003_add_disk_format.py +daisy/db/sqlalchemy/migrate_repo/versions/003_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/003_sqlite_upgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/004_add_checksum.py +daisy/db/sqlalchemy/migrate_repo/versions/005_size_big_integer.py +daisy/db/sqlalchemy/migrate_repo/versions/006_key_to_name.py +daisy/db/sqlalchemy/migrate_repo/versions/006_mysql_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/006_mysql_upgrade.sql 
+daisy/db/sqlalchemy/migrate_repo/versions/006_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/006_sqlite_upgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/007_add_owner.py +daisy/db/sqlalchemy/migrate_repo/versions/008_add_image_members_table.py +daisy/db/sqlalchemy/migrate_repo/versions/009_add_mindisk_and_minram.py +daisy/db/sqlalchemy/migrate_repo/versions/010_default_update_at.py +daisy/db/sqlalchemy/migrate_repo/versions/011_make_mindisk_and_minram_notnull.py +daisy/db/sqlalchemy/migrate_repo/versions/011_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/011_sqlite_upgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/012_id_to_uuid.py +daisy/db/sqlalchemy/migrate_repo/versions/013_add_protected.py +daisy/db/sqlalchemy/migrate_repo/versions/013_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/014_add_image_tags_table.py +daisy/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py +daisy/db/sqlalchemy/migrate_repo/versions/016_add_status_image_member.py +daisy/db/sqlalchemy/migrate_repo/versions/016_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py +daisy/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py +daisy/db/sqlalchemy/migrate_repo/versions/019_migrate_image_locations.py +daisy/db/sqlalchemy/migrate_repo/versions/020_drop_images_table_location.py +daisy/db/sqlalchemy/migrate_repo/versions/021_set_engine_mysql_innodb.py +daisy/db/sqlalchemy/migrate_repo/versions/022_image_member_index.py +daisy/db/sqlalchemy/migrate_repo/versions/023_placeholder.py +daisy/db/sqlalchemy/migrate_repo/versions/024_placeholder.py +daisy/db/sqlalchemy/migrate_repo/versions/025_placeholder.py +daisy/db/sqlalchemy/migrate_repo/versions/026_add_location_storage_information.py +daisy/db/sqlalchemy/migrate_repo/versions/027_checksum_index.py +daisy/db/sqlalchemy/migrate_repo/versions/028_owner_index.py +daisy/db/sqlalchemy/migrate_repo/versions/029_location_meta_data_pickle_to_string.py +daisy/db/sqlalchemy/migrate_repo/versions/030_add_tasks_table.py +daisy/db/sqlalchemy/migrate_repo/versions/031_remove_duplicated_locations.py +daisy/db/sqlalchemy/migrate_repo/versions/032_add_task_info_table.py +daisy/db/sqlalchemy/migrate_repo/versions/033_add_location_status.py +daisy/db/sqlalchemy/migrate_repo/versions/034_add_virtual_size.py +daisy/db/sqlalchemy/migrate_repo/versions/035_add_metadef_tables.py +daisy/db/sqlalchemy/migrate_repo/versions/036_rename_metadef_schema_columns.py +daisy/db/sqlalchemy/migrate_repo/versions/037_add_changes_to_satisfy_models.py +daisy/db/sqlalchemy/migrate_repo/versions/037_sqlite_downgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/037_sqlite_upgrade.sql +daisy/db/sqlalchemy/migrate_repo/versions/038_add_metadef_tags_table.py +daisy/db/sqlalchemy/migrate_repo/versions/039_add_changes_to_satisfy_models_metadef.py +daisy/db/sqlalchemy/migrate_repo/versions/040_add_changes_to_satisfy_metadefs_tags.py +daisy/db/sqlalchemy/migrate_repo/versions/041_add_artifact_tables.py +daisy/db/sqlalchemy/migrate_repo/versions/__init__.py +daisy/domain/__init__.py +daisy/domain/proxy.py +daisy/hacking/__init__.py +daisy/hacking/checks.py +daisy/image_cache/__init__.py +daisy/image_cache/base.py +daisy/image_cache/cleaner.py +daisy/image_cache/client.py +daisy/image_cache/prefetcher.py +daisy/image_cache/pruner.py +daisy/image_cache/drivers/__init__.py +daisy/image_cache/drivers/base.py +daisy/image_cache/drivers/sqlite.py +daisy/image_cache/drivers/xattr.py 
+daisy/locale/daisy-log-critical.pot +daisy/locale/daisy-log-error.pot +daisy/locale/daisy-log-info.pot +daisy/locale/daisy-log-warning.pot +daisy/locale/daisy.pot +daisy/locale/en_GB/LC_MESSAGES/daisy-log-info.po +daisy/locale/fr/LC_MESSAGES/daisy-log-info.po +daisy/locale/pt_BR/LC_MESSAGES/daisy-log-info.po +daisy/openstack/__init__.py +daisy/openstack/common/README +daisy/openstack/common/__init__.py +daisy/openstack/common/_i18n.py +daisy/openstack/common/eventlet_backdoor.py +daisy/openstack/common/fileutils.py +daisy/openstack/common/local.py +daisy/openstack/common/loopingcall.py +daisy/openstack/common/service.py +daisy/openstack/common/systemd.py +daisy/openstack/common/threadgroup.py +daisy/quota/__init__.py +daisy/registry/__init__.py +daisy/registry/api/__init__.py +daisy/registry/api/v1/__init__.py +daisy/registry/api/v1/images.py +daisy/registry/api/v1/members.py +daisy/registry/api/v2/__init__.py +daisy/registry/api/v2/rpc.py +daisy/registry/client/__init__.py +daisy/registry/client/v1/__init__.py +daisy/registry/client/v1/api.py +daisy/registry/client/v1/client.py +daisy/registry/client/v2/__init__.py +daisy/registry/client/v2/api.py +daisy/registry/client/v2/client.py +daisy/search/__init__.py +daisy/search/api/__init__.py +daisy/search/api/v0_1/__init__.py +daisy/search/api/v0_1/router.py +daisy/search/api/v0_1/search.py +daisy/search/plugins/__init__.py +daisy/search/plugins/base.py +daisy/search/plugins/images.py +daisy/search/plugins/images_notification_handler.py +daisy/search/plugins/metadefs.py +daisy/search/plugins/metadefs_notification_handler.py +daisy/orchestration/__init__.py +daisy/orchestration/manager.py +daisy/tests/__init__.py +daisy/tests/stubs.py +daisy/tests/test_hacking.py +daisy/tests/utils.py +daisy/tests/etc/daisy-swift.conf +daisy/tests/etc/policy.json +daisy/tests/etc/property-protections-policies.conf +daisy/tests/etc/property-protections.conf +daisy/tests/etc/schema-image.json +daisy/tests/functional/__init__.py +daisy/tests/functional/store_utils.py +daisy/tests/functional/test_api.py +daisy/tests/functional/test_bin_daisy_cache_manage.py +daisy/tests/functional/test_cache_middleware.py +daisy/tests/functional/test_client_exceptions.py +daisy/tests/functional/test_client_redirects.py +daisy/tests/functional/test_daisy_manage.py +daisy/tests/functional/test_gzip_middleware.py +daisy/tests/functional/test_logging.py +daisy/tests/functional/test_reload.py +daisy/tests/functional/test_scrubber.py +daisy/tests/functional/test_sqlite.py +daisy/tests/functional/test_ssl.py +daisy/tests/functional/db/__init__.py +daisy/tests/functional/db/base.py +daisy/tests/functional/db/base_artifacts.py +daisy/tests/functional/db/base_metadef.py +daisy/tests/functional/db/test_registry.py +daisy/tests/functional/db/test_rpc_endpoint.py +daisy/tests/functional/db/test_simple.py +daisy/tests/functional/db/test_sqlalchemy.py +daisy/tests/functional/v1/__init__.py +daisy/tests/functional/v1/test_api.py +daisy/tests/functional/v1/test_copy_to_file.py +daisy/tests/functional/v1/test_misc.py +daisy/tests/functional/v1/test_multiprocessing.py +daisy/tests/functional/v2/__init__.py +daisy/tests/functional/v2/registry_data_api.py +daisy/tests/functional/v2/test_images.py +daisy/tests/functional/v2/test_metadef_namespaces.py +daisy/tests/functional/v2/test_metadef_objects.py +daisy/tests/functional/v2/test_metadef_properties.py +daisy/tests/functional/v2/test_metadef_resourcetypes.py +daisy/tests/functional/v2/test_metadef_tags.py +daisy/tests/functional/v2/test_schemas.py 
+daisy/tests/functional/v2/test_tasks.py +daisy/tests/integration/__init__.py +daisy/tests/integration/legacy_functional/__init__.py +daisy/tests/integration/legacy_functional/base.py +daisy/tests/integration/legacy_functional/test_v1_api.py +daisy/tests/integration/v2/__init__.py +daisy/tests/integration/v2/base.py +daisy/tests/integration/v2/test_property_quota_violations.py +daisy/tests/integration/v2/test_tasks_api.py +daisy/tests/unit/__init__.py +daisy/tests/unit/base.py +daisy/tests/unit/fake_rados.py +daisy/tests/unit/test_artifact_type_definition_framework.py +daisy/tests/unit/test_artifacts_plugin_loader.py +daisy/tests/unit/test_auth.py +daisy/tests/unit/test_cache_middleware.py +daisy/tests/unit/test_cached_images.py +daisy/tests/unit/test_context.py +daisy/tests/unit/test_context_middleware.py +daisy/tests/unit/test_db.py +daisy/tests/unit/test_db_metadef.py +daisy/tests/unit/test_domain.py +daisy/tests/unit/test_domain_proxy.py +daisy/tests/unit/test_gateway.py +daisy/tests/unit/test_daisy_replicator.py +daisy/tests/unit/test_image_cache.py +daisy/tests/unit/test_image_cache_client.py +daisy/tests/unit/test_jsonpatchmixin.py +daisy/tests/unit/test_manage.py +daisy/tests/unit/test_migrations.py +daisy/tests/unit/test_misc.py +daisy/tests/unit/test_notifier.py +daisy/tests/unit/test_opts.py +daisy/tests/unit/test_policy.py +daisy/tests/unit/test_quota.py +daisy/tests/unit/test_schema.py +daisy/tests/unit/test_scrubber.py +daisy/tests/unit/test_search.py +daisy/tests/unit/test_store_image.py +daisy/tests/unit/test_store_location.py +daisy/tests/unit/test_versions.py +daisy/tests/unit/utils.py +daisy/tests/unit/api/__init__.py +daisy/tests/unit/api/test_cmd.py +daisy/tests/unit/api/test_cmd_cache_manage.py +daisy/tests/unit/api/test_common.py +daisy/tests/unit/api/test_property_protections.py +daisy/tests/unit/api/middleware/__init__.py +daisy/tests/unit/api/middleware/test_cache_manage.py +daisy/tests/unit/async/__init__.py +daisy/tests/unit/async/test_async.py +daisy/tests/unit/async/test_taskflow_executor.py +daisy/tests/unit/async/flows/__init__.py +daisy/tests/unit/async/flows/test_convert.py +daisy/tests/unit/async/flows/test_import.py +daisy/tests/unit/async/flows/test_introspect.py +daisy/tests/unit/common/__init__.py +daisy/tests/unit/common/test_client.py +daisy/tests/unit/common/test_config.py +daisy/tests/unit/common/test_exception.py +daisy/tests/unit/common/test_location_strategy.py +daisy/tests/unit/common/test_property_utils.py +daisy/tests/unit/common/test_rpc.py +daisy/tests/unit/common/test_scripts.py +daisy/tests/unit/common/test_semver.py +daisy/tests/unit/common/test_swift_store_utils.py +daisy/tests/unit/common/test_utils.py +daisy/tests/unit/common/test_wsgi.py +daisy/tests/unit/common/test_wsgi_ipv6.py +daisy/tests/unit/common/scripts/__init__.py +daisy/tests/unit/common/scripts/test_scripts_utils.py +daisy/tests/unit/common/scripts/image_import/__init__.py +daisy/tests/unit/common/scripts/image_import/test_main.py +daisy/tests/unit/v0_1/test_search.py +daisy/tests/unit/v1/__init__.py +daisy/tests/unit/v1/test_api.py +daisy/tests/unit/v1/test_registry_api.py +daisy/tests/unit/v1/test_registry_client.py +daisy/tests/unit/v1/test_upload_utils.py +daisy/tests/unit/v2/__init__.py +daisy/tests/unit/v2/test_image_actions_resource.py +daisy/tests/unit/v2/test_image_data_resource.py +daisy/tests/unit/v2/test_image_members_resource.py +daisy/tests/unit/v2/test_image_tags_resource.py +daisy/tests/unit/v2/test_images_resource.py 
+daisy/tests/unit/v2/test_metadef_resources.py +daisy/tests/unit/v2/test_registry_api.py +daisy/tests/unit/v2/test_registry_client.py +daisy/tests/unit/v2/test_schemas_resource.py +daisy/tests/unit/v2/test_tasks_resource.py +daisy/tests/var/ca.crt +daisy/tests/var/ca.key +daisy/tests/var/certificate.crt +daisy/tests/var/privatekey.key +rally-jobs/README.rst +rally-jobs/daisy.yaml +rally-jobs/extra/README.rst +rally-jobs/extra/fake.img +rally-jobs/plugins/README.rst +rally-jobs/plugins/plugin_sample.py +tools/colorizer.py +tools/install_venv.py +tools/install_venv_common.py +tools/migrate_image_owners.py +tools/with_venv.sh \ No newline at end of file diff --git a/code/daisy/daisy.egg-info/dependency_links.txt b/code/daisy/daisy.egg-info/dependency_links.txt new file mode 100755 index 00000000..8b137891 --- /dev/null +++ b/code/daisy/daisy.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/code/daisy/daisy.egg-info/entry_points.txt b/code/daisy/daisy.egg-info/entry_points.txt new file mode 100755 index 00000000..339ad9c4 --- /dev/null +++ b/code/daisy/daisy.egg-info/entry_points.txt @@ -0,0 +1,43 @@ +[console_scripts] +daisy-api = daisy.cmd.api:main +daisy-cache-cleaner = daisy.cmd.cache_cleaner:main +daisy-cache-manage = daisy.cmd.cache_manage:main +daisy-cache-prefetcher = daisy.cmd.cache_prefetcher:main +daisy-cache-pruner = daisy.cmd.cache_pruner:main +daisy-control = daisy.cmd.control:main +daisy-index = daisy.cmd.index:main +daisy-manage = daisy.cmd.manage:main +daisy-registry = daisy.cmd.registry:main +daisy-replicator = daisy.cmd.replicator:main +daisy-scrubber = daisy.cmd.scrubber:main +daisy-search = daisy.cmd.search:main +daisy-orchestration = daisy.cmd.orchestration:main + +[daisy.common.image_location_strategy.modules] +location_order_strategy = daisy.common.location_strategy.location_order +store_type_strategy = daisy.common.location_strategy.store_type + +[daisy.database.metadata_backend] +sqlalchemy = daisy.db.sqlalchemy.metadata + +[daisy.database.migration_backend] +sqlalchemy = oslo.db.sqlalchemy.migration + +[daisy.flows] +import = daisy.async.flows.base_import:get_flow + +[daisy.flows.import] +convert = daisy.async.flows.convert:get_flow +introspect = daisy.async.flows.introspect:get_flow + +[daisy.search.index_backend] +image = daisy.search.plugins.images:ImageIndex +metadef = daisy.search.plugins.metadefs:MetadefIndex + +[oslo.config.opts] +daisy.api = daisy.opts:list_api_opts +daisy.cache = daisy.opts:list_cache_opts +daisy.manage = daisy.opts:list_manage_opts +daisy.registry = daisy.opts:list_registry_opts +daisy.scrubber = daisy.opts:list_scrubber_opts + diff --git a/code/daisy/daisy.egg-info/not-zip-safe b/code/daisy/daisy.egg-info/not-zip-safe new file mode 100755 index 00000000..8b137891 --- /dev/null +++ b/code/daisy/daisy.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/code/daisy/daisy.egg-info/pbr.json b/code/daisy/daisy.egg-info/pbr.json new file mode 100755 index 00000000..eb36fa57 --- /dev/null +++ b/code/daisy/daisy.egg-info/pbr.json @@ -0,0 +1 @@ +{"is_release": true, "git_version": "93b0d5f"} \ No newline at end of file diff --git a/code/daisy/daisy.egg-info/requires.txt b/code/daisy/daisy.egg-info/requires.txt new file mode 100755 index 00000000..47f8c1ae --- /dev/null +++ b/code/daisy/daisy.egg-info/requires.txt @@ -0,0 +1,40 @@ +pbr>=0.6,!=0.7,<1.0 +greenlet>=0.3.2 +SQLAlchemy>=0.9.7,<=0.9.99 +anyjson>=0.3.3 +eventlet>=0.16.1,!=0.17.0 +PasteDeploy>=1.5.0 +Routes>=1.12.3,!=2.0 +WebOb>=1.2.3 +sqlalchemy-migrate>=0.9.5 +httplib2>=0.7.5 
+kombu>=2.5.0 +pycrypto>=2.6 +iso8601>=0.1.9 +ordereddict +oslo.config>=1.9.3,<1.10.0 # Apache-2.0 +oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0 +oslo.context>=0.2.0,<0.3.0 # Apache-2.0 +oslo.utils>=1.4.0,<1.5.0 # Apache-2.0 +stevedore>=1.3.0,<1.4.0 # Apache-2.0 +taskflow>=0.7.1,<0.8.0 +keystonemiddleware>=1.5.0,<1.6.0 +WSME>=0.6 +posix_ipc +python-swiftclient>=2.2.0,<2.5.0 +oslo.vmware>=0.11.1,<0.12.0 # Apache-2.0 +Paste +jsonschema>=2.0.0,<3.0.0 +python-keystoneclient>=1.1.0,<1.4.0 +pyOpenSSL>=0.11 +six>=1.9.0 +oslo.db>=1.7.0,<1.8.0 # Apache-2.0 +oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0 +oslo.log>=1.0.0,<1.1.0 # Apache-2.0 +oslo.messaging>=1.8.0,<1.9.0 # Apache-2.0 +oslo.policy>=0.3.1,<0.4.0 # Apache-2.0 +oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0 +retrying>=1.2.3,!=1.3.0 # Apache-2.0 +osprofiler>=0.3.0 # Apache-2.0 +glance_store>=0.3.0,<0.5.0 # Apache-2.0 +semantic_version>=2.3.1 diff --git a/code/daisy/daisy.egg-info/top_level.txt b/code/daisy/daisy.egg-info/top_level.txt new file mode 100755 index 00000000..33a08d69 --- /dev/null +++ b/code/daisy/daisy.egg-info/top_level.txt @@ -0,0 +1 @@ +daisy diff --git a/code/daisy/daisy/__init__.py b/code/daisy/daisy/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/__init__.py b/code/daisy/daisy/api/__init__.py new file mode 100755 index 00000000..e7ebaab8 --- /dev/null +++ b/code/daisy/daisy/api/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap + + +def root_app_factory(loader, global_conf, **local_conf): + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/code/daisy/daisy/api/authorization.py b/code/daisy/daisy/api/authorization.py new file mode 100755 index 00000000..015eca95 --- /dev/null +++ b/code/daisy/daisy/api/authorization.py @@ -0,0 +1,899 @@ +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
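+# Ownership-based access control for daisy API domain objects. Images,
+# members, tasks and metadef resources are wrapped in proxies: a caller that
+# owns the resource (or is admin) gets the mutable object back, while any
+# other caller gets an Immutable*Proxy whose mutating methods raise Forbidden.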
+ +import copy + +from daisy.common import exception +import daisy.domain.proxy +from daisy import i18n + +_ = i18n._ + + +def is_image_mutable(context, image): + """Return True if the image is mutable in this context.""" + if context.is_admin: + return True + + if image.owner is None or context.owner is None: + return False + + return image.owner == context.owner + + +def proxy_image(context, image): + if is_image_mutable(context, image): + return ImageProxy(image, context) + else: + return ImmutableImageProxy(image, context) + + +def is_member_mutable(context, member): + """Return True if the image is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return member.member_id == context.owner + + +def proxy_member(context, member): + if is_member_mutable(context, member): + return member + else: + return ImmutableMemberProxy(member) + + +def is_task_mutable(context, task): + """Return True if the task is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return task.owner == context.owner + + +def is_task_stub_mutable(context, task_stub): + """Return True if the task stub is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return task_stub.owner == context.owner + + +def proxy_task(context, task): + if is_task_mutable(context, task): + return task + else: + return ImmutableTaskProxy(task) + + +def proxy_task_stub(context, task_stub): + if is_task_stub_mutable(context, task_stub): + return task_stub + else: + return ImmutableTaskStubProxy(task_stub) + + +class ImageRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, image_repo, context): + self.context = context + self.image_repo = image_repo + proxy_kwargs = {'context': self.context} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def get(self, image_id): + image = self.image_repo.get(image_id) + return proxy_image(self.context, image) + + def list(self, *args, **kwargs): + images = self.image_repo.list(*args, **kwargs) + return [proxy_image(self.context, i) for i in images] + + +class ImageMemberRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, member_repo, image, context): + self.member_repo = member_repo + self.image = image + self.context = context + super(ImageMemberRepoProxy, self).__init__(member_repo) + + def get(self, member_id): + if (self.context.is_admin or + self.context.owner in (self.image.owner, member_id)): + member = self.member_repo.get(member_id) + return proxy_member(self.context, member) + else: + message = _("You cannot get image member for %s") + raise exception.Forbidden(message % member_id) + + def list(self, *args, **kwargs): + members = self.member_repo.list(*args, **kwargs) + if (self.context.is_admin or + self.context.owner == self.image.owner): + return [proxy_member(self.context, m) for m in members] + for member in members: + if member.member_id == self.context.owner: + return [proxy_member(self.context, member)] + message = _("You cannot get image member for %s") + raise exception.Forbidden(message % self.image.image_id) + + def remove(self, image_member): + if (self.image.owner == self.context.owner or + self.context.is_admin): + self.member_repo.remove(image_member) + else: + message = _("You cannot delete image member for %s") + raise exception.Forbidden(message + % self.image.image_id) + + def add(self, image_member): + if 
(self.image.owner == self.context.owner or + self.context.is_admin): + self.member_repo.add(image_member) + else: + message = _("You cannot add image member for %s") + raise exception.Forbidden(message + % self.image.image_id) + + def save(self, image_member, from_state=None): + if (self.context.is_admin or + self.context.owner == image_member.member_id): + self.member_repo.save(image_member, from_state=from_state) + else: + message = _("You cannot update image member %s") + raise exception.Forbidden(message % image_member.member_id) + + +class ImageFactoryProxy(daisy.domain.proxy.ImageFactory): + + def __init__(self, image_factory, context): + self.image_factory = image_factory + self.context = context + kwargs = {'context': self.context} + super(ImageFactoryProxy, self).__init__(image_factory, + proxy_class=ImageProxy, + proxy_kwargs=kwargs) + + def new_image(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create images " + "owned by '%s'.") + raise exception.Forbidden(message % owner) + + return super(ImageFactoryProxy, self).new_image(owner=owner, **kwargs) + + +class ImageMemberFactoryProxy(object): + + def __init__(self, image_member_factory, context): + self.image_member_factory = image_member_factory + self.context = context + + def new_image_member(self, image, member_id): + owner = image.owner + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create image members " + "for the image.") + raise exception.Forbidden(message) + + if image.visibility == 'public': + message = _("Public images do not have members.") + raise exception.Forbidden(message) + + return self.image_member_factory.new_image_member(image, member_id) + + +def _immutable_attr(target, attr, proxy=None): + + def get_attr(self): + value = getattr(getattr(self, target), attr) + if proxy is not None: + value = proxy(value) + return value + + def forbidden(self, *args, **kwargs): + resource = getattr(self, 'resource_name', 'resource') + message = _("You are not permitted to modify '%(attr)s' on this " + "%(resource)s.") + raise exception.Forbidden(message % {'attr': attr, + 'resource': resource}) + + return property(get_attr, forbidden, forbidden) + + +class ImmutableLocations(list): + def forbidden(self, *args, **kwargs): + message = _("You are not permitted to modify locations " + "for this image.") + raise exception.Forbidden(message) + + def __deepcopy__(self, memo): + return ImmutableLocations(copy.deepcopy(list(self), memo)) + + append = forbidden + extend = forbidden + insert = forbidden + pop = forbidden + remove = forbidden + reverse = forbidden + sort = forbidden + __delitem__ = forbidden + __delslice__ = forbidden + __iadd__ = forbidden + __imul__ = forbidden + __setitem__ = forbidden + __setslice__ = forbidden + + +class ImmutableProperties(dict): + def forbidden_key(self, key, *args, **kwargs): + message = _("You are not permitted to modify '%s' on this image.") + raise exception.Forbidden(message % key) + + def forbidden(self, *args, **kwargs): + message = _("You are not permitted to modify this image.") + raise exception.Forbidden(message) + + __delitem__ = forbidden_key + __setitem__ = forbidden_key + pop = forbidden + popitem = forbidden + setdefault = forbidden + update = forbidden + + +class ImmutableTags(set): + def forbidden(self, *args, **kwargs): + message = _("You are not permitted 
to modify tags on this image.") + raise exception.Forbidden(message) + + add = forbidden + clear = forbidden + difference_update = forbidden + intersection_update = forbidden + pop = forbidden + remove = forbidden + symmetric_difference_update = forbidden + update = forbidden + + +class ImmutableImageProxy(object): + def __init__(self, base, context): + self.base = base + self.context = context + self.resource_name = 'image' + + name = _immutable_attr('base', 'name') + image_id = _immutable_attr('base', 'image_id') + name = _immutable_attr('base', 'name') + status = _immutable_attr('base', 'status') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + visibility = _immutable_attr('base', 'visibility') + min_disk = _immutable_attr('base', 'min_disk') + min_ram = _immutable_attr('base', 'min_ram') + protected = _immutable_attr('base', 'protected') + locations = _immutable_attr('base', 'locations', proxy=ImmutableLocations) + checksum = _immutable_attr('base', 'checksum') + owner = _immutable_attr('base', 'owner') + disk_format = _immutable_attr('base', 'disk_format') + container_format = _immutable_attr('base', 'container_format') + size = _immutable_attr('base', 'size') + virtual_size = _immutable_attr('base', 'virtual_size') + extra_properties = _immutable_attr('base', 'extra_properties', + proxy=ImmutableProperties) + tags = _immutable_attr('base', 'tags', proxy=ImmutableTags) + + def delete(self): + message = _("You are not permitted to delete this image.") + raise exception.Forbidden(message) + + def get_member_repo(self): + member_repo = self.base.get_member_repo() + return ImageMemberRepoProxy(member_repo, self, self.context) + + def get_data(self, *args, **kwargs): + return self.base.get_data(*args, **kwargs) + + def set_data(self, *args, **kwargs): + message = _("You are not permitted to upload data for this image.") + raise exception.Forbidden(message) + + +class ImmutableMemberProxy(object): + def __init__(self, base): + self.base = base + self.resource_name = 'image member' + + id = _immutable_attr('base', 'id') + image_id = _immutable_attr('base', 'image_id') + member_id = _immutable_attr('base', 'member_id') + status = _immutable_attr('base', 'status') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + +class ImmutableTaskProxy(object): + def __init__(self, base): + self.base = base + self.resource_name = 'task' + + task_id = _immutable_attr('base', 'task_id') + type = _immutable_attr('base', 'type') + status = _immutable_attr('base', 'status') + owner = _immutable_attr('base', 'owner') + expires_at = _immutable_attr('base', 'expires_at') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + input = _immutable_attr('base', 'input') + message = _immutable_attr('base', 'message') + result = _immutable_attr('base', 'result') + + def run(self, executor): + self.base.run(executor) + + def begin_processing(self): + message = _("You are not permitted to set status on this task.") + raise exception.Forbidden(message) + + def succeed(self, result): + message = _("You are not permitted to set status on this task.") + raise exception.Forbidden(message) + + def fail(self, message): + message = _("You are not permitted to set status on this task.") + raise exception.Forbidden(message) + + +class ImmutableTaskStubProxy(object): + def __init__(self, base): + self.base = base + self.resource_name = 'task stub' + + task_id = 
_immutable_attr('base', 'task_id') + type = _immutable_attr('base', 'type') + status = _immutable_attr('base', 'status') + owner = _immutable_attr('base', 'owner') + expires_at = _immutable_attr('base', 'expires_at') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + +class ImageProxy(daisy.domain.proxy.Image): + + def __init__(self, image, context): + self.image = image + self.context = context + super(ImageProxy, self).__init__(image) + + def get_member_repo(self, **kwargs): + if self.image.visibility == 'public': + message = _("Public images do not have members.") + raise exception.Forbidden(message) + else: + member_repo = self.image.get_member_repo(**kwargs) + return ImageMemberRepoProxy(member_repo, self, self.context) + + +class TaskProxy(daisy.domain.proxy.Task): + + def __init__(self, task): + self.task = task + super(TaskProxy, self).__init__(task) + + +class TaskFactoryProxy(daisy.domain.proxy.TaskFactory): + + def __init__(self, task_factory, context): + self.task_factory = task_factory + self.context = context + super(TaskFactoryProxy, self).__init__( + task_factory, + task_proxy_class=TaskProxy) + + def new_task(self, **kwargs): + owner = kwargs.get('owner', self.context.owner) + + # NOTE(nikhil): Unlike Images, Tasks are expected to have owner. + # We currently do not allow even admins to set the owner to None. + if owner is not None and (owner == self.context.owner + or self.context.is_admin): + return super(TaskFactoryProxy, self).new_task(**kwargs) + else: + message = _("You are not permitted to create this task with " + "owner as: %s") + raise exception.Forbidden(message % owner) + + +class TaskRepoProxy(daisy.domain.proxy.TaskRepo): + + def __init__(self, task_repo, context): + self.task_repo = task_repo + self.context = context + super(TaskRepoProxy, self).__init__(task_repo) + + def get(self, task_id): + task = self.task_repo.get(task_id) + return proxy_task(self.context, task) + + +class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo): + + def __init__(self, task_stub_repo, context): + self.task_stub_repo = task_stub_repo + self.context = context + super(TaskStubRepoProxy, self).__init__(task_stub_repo) + + def list(self, *args, **kwargs): + task_stubs = self.task_stub_repo.list(*args, **kwargs) + return [proxy_task_stub(self.context, t) for t in task_stubs] + + +# Metadef Namespace classes +def is_namespace_mutable(context, namespace): + """Return True if the namespace is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return namespace.owner == context.owner + + +def proxy_namespace(context, namespace): + if is_namespace_mutable(context, namespace): + return namespace + else: + return ImmutableMetadefNamespaceProxy(namespace) + + +class ImmutableMetadefNamespaceProxy(object): + + def __init__(self, base): + self.base = base + self.resource_name = 'namespace' + + namespace_id = _immutable_attr('base', 'namespace_id') + namespace = _immutable_attr('base', 'namespace') + display_name = _immutable_attr('base', 'display_name') + description = _immutable_attr('base', 'description') + owner = _immutable_attr('base', 'owner') + visibility = _immutable_attr('base', 'visibility') + protected = _immutable_attr('base', 'protected') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + def delete(self): + message = _("You are not permitted to delete this namespace.") + raise 
exception.Forbidden(message) + + def save(self): + message = _("You are not permitted to update this namespace.") + raise exception.Forbidden(message) + + +class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace): + + def __init__(self, namespace): + self.namespace_input = namespace + super(MetadefNamespaceProxy, self).__init__(namespace) + + +class MetadefNamespaceFactoryProxy( + daisy.domain.proxy.MetadefNamespaceFactory): + + def __init__(self, meta_namespace_factory, context): + self.meta_namespace_factory = meta_namespace_factory + self.context = context + super(MetadefNamespaceFactoryProxy, self).__init__( + meta_namespace_factory, + meta_namespace_proxy_class=MetadefNamespaceProxy) + + def new_namespace(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create namespace " + "owned by '%s'") + raise exception.Forbidden(message % (owner)) + + return super(MetadefNamespaceFactoryProxy, self).new_namespace( + owner=owner, **kwargs) + + +class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo): + + def __init__(self, namespace_repo, context): + self.namespace_repo = namespace_repo + self.context = context + super(MetadefNamespaceRepoProxy, self).__init__(namespace_repo) + + def get(self, namespace): + namespace_obj = self.namespace_repo.get(namespace) + return proxy_namespace(self.context, namespace_obj) + + def list(self, *args, **kwargs): + namespaces = self.namespace_repo.list(*args, **kwargs) + return [proxy_namespace(self.context, namespace) for + namespace in namespaces] + + +# Metadef Object classes +def is_object_mutable(context, object): + """Return True if the object is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return object.namespace.owner == context.owner + + +def proxy_object(context, object): + if is_object_mutable(context, object): + return object + else: + return ImmutableMetadefObjectProxy(object) + + +class ImmutableMetadefObjectProxy(object): + + def __init__(self, base): + self.base = base + self.resource_name = 'object' + + object_id = _immutable_attr('base', 'object_id') + name = _immutable_attr('base', 'name') + required = _immutable_attr('base', 'required') + description = _immutable_attr('base', 'description') + properties = _immutable_attr('base', 'properties') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + def delete(self): + message = _("You are not permitted to delete this object.") + raise exception.Forbidden(message) + + def save(self): + message = _("You are not permitted to update this object.") + raise exception.Forbidden(message) + + +class MetadefObjectProxy(daisy.domain.proxy.MetadefObject): + + def __init__(self, meta_object): + self.meta_object = meta_object + super(MetadefObjectProxy, self).__init__(meta_object) + + +class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory): + + def __init__(self, meta_object_factory, context): + self.meta_object_factory = meta_object_factory + self.context = context + super(MetadefObjectFactoryProxy, self).__init__( + meta_object_factory, + meta_object_proxy_class=MetadefObjectProxy) + + def new_object(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create object 
" + "owned by '%s'") + raise exception.Forbidden(message % (owner)) + + return super(MetadefObjectFactoryProxy, self).new_object(**kwargs) + + +class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo): + + def __init__(self, object_repo, context): + self.object_repo = object_repo + self.context = context + super(MetadefObjectRepoProxy, self).__init__(object_repo) + + def get(self, namespace, object_name): + meta_object = self.object_repo.get(namespace, object_name) + return proxy_object(self.context, meta_object) + + def list(self, *args, **kwargs): + objects = self.object_repo.list(*args, **kwargs) + return [proxy_object(self.context, meta_object) for + meta_object in objects] + + +# Metadef ResourceType classes +def is_meta_resource_type_mutable(context, meta_resource_type): + """Return True if the meta_resource_type is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + # (lakshmiS): resource type can exist without an association with + # namespace and resource type cannot be created/update/deleted directly( + # they have to be associated/de-associated from namespace) + if meta_resource_type.namespace: + return meta_resource_type.namespace.owner == context.owner + else: + return False + + +def proxy_meta_resource_type(context, meta_resource_type): + if is_meta_resource_type_mutable(context, meta_resource_type): + return meta_resource_type + else: + return ImmutableMetadefResourceTypeProxy(meta_resource_type) + + +class ImmutableMetadefResourceTypeProxy(object): + + def __init__(self, base): + self.base = base + self.resource_name = 'meta_resource_type' + + namespace = _immutable_attr('base', 'namespace') + name = _immutable_attr('base', 'name') + prefix = _immutable_attr('base', 'prefix') + properties_target = _immutable_attr('base', 'properties_target') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + def delete(self): + message = _("You are not permitted to delete this meta_resource_type.") + raise exception.Forbidden(message) + + +class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType): + + def __init__(self, meta_resource_type): + self.meta_resource_type = meta_resource_type + super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) + + +class MetadefResourceTypeFactoryProxy( + daisy.domain.proxy.MetadefResourceTypeFactory): + + def __init__(self, resource_type_factory, context): + self.meta_resource_type_factory = resource_type_factory + self.context = context + super(MetadefResourceTypeFactoryProxy, self).__init__( + resource_type_factory, + resource_type_proxy_class=MetadefResourceTypeProxy) + + def new_resource_type(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create resource_type " + "owned by '%s'") + raise exception.Forbidden(message % (owner)) + + return super(MetadefResourceTypeFactoryProxy, self).new_resource_type( + **kwargs) + + +class MetadefResourceTypeRepoProxy( + daisy.domain.proxy.MetadefResourceTypeRepo): + + def __init__(self, meta_resource_type_repo, context): + self.meta_resource_type_repo = meta_resource_type_repo + self.context = context + super(MetadefResourceTypeRepoProxy, self).__init__( + meta_resource_type_repo) + + def list(self, *args, **kwargs): + meta_resource_types = self.meta_resource_type_repo.list( + *args, **kwargs) + return 
[proxy_meta_resource_type(self.context, meta_resource_type) for + meta_resource_type in meta_resource_types] + + def get(self, *args, **kwargs): + meta_resource_type = self.meta_resource_type_repo.get(*args, **kwargs) + return proxy_meta_resource_type(self.context, meta_resource_type) + + +# Metadef namespace properties classes +def is_namespace_property_mutable(context, namespace_property): + """Return True if the object is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return namespace_property.namespace.owner == context.owner + + +def proxy_namespace_property(context, namespace_property): + if is_namespace_property_mutable(context, namespace_property): + return namespace_property + else: + return ImmutableMetadefPropertyProxy(namespace_property) + + +class ImmutableMetadefPropertyProxy(object): + + def __init__(self, base): + self.base = base + self.resource_name = 'namespace_property' + + property_id = _immutable_attr('base', 'property_id') + name = _immutable_attr('base', 'name') + schema = _immutable_attr('base', 'schema') + + def delete(self): + message = _("You are not permitted to delete this property.") + raise exception.Forbidden(message) + + def save(self): + message = _("You are not permitted to update this property.") + raise exception.Forbidden(message) + + +class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty): + + def __init__(self, namespace_property): + self.meta_object = namespace_property + super(MetadefPropertyProxy, self).__init__(namespace_property) + + +class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory): + + def __init__(self, namespace_property_factory, context): + self.meta_object_factory = namespace_property_factory + self.context = context + super(MetadefPropertyFactoryProxy, self).__init__( + namespace_property_factory, + property_proxy_class=MetadefPropertyProxy) + + def new_namespace_property(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + + if not self.context.is_admin: + if owner is None or owner != self.context.owner: + message = _("You are not permitted to create property " + "owned by '%s'") + raise exception.Forbidden(message % (owner)) + + return super(MetadefPropertyFactoryProxy, self).new_namespace_property( + **kwargs) + + +class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo): + + def __init__(self, namespace_property_repo, context): + self.namespace_property_repo = namespace_property_repo + self.context = context + super(MetadefPropertyRepoProxy, self).__init__(namespace_property_repo) + + def get(self, namespace, object_name): + namespace_property = self.namespace_property_repo.get(namespace, + object_name) + return proxy_namespace_property(self.context, namespace_property) + + def list(self, *args, **kwargs): + namespace_properties = self.namespace_property_repo.list( + *args, **kwargs) + return [proxy_namespace_property(self.context, namespace_property) for + namespace_property in namespace_properties] + + +# Metadef Tag classes +def is_tag_mutable(context, tag): + """Return True if the tag is mutable in this context.""" + if context.is_admin: + return True + + if context.owner is None: + return False + + return tag.namespace.owner == context.owner + + +def proxy_tag(context, tag): + if is_tag_mutable(context, tag): + return tag + else: + return ImmutableMetadefTagProxy(tag) + + +class ImmutableMetadefTagProxy(object): + + def __init__(self, base): + self.base = base + self.resource_name = 'tag' + + tag_id 
= _immutable_attr('base', 'tag_id') + name = _immutable_attr('base', 'name') + created_at = _immutable_attr('base', 'created_at') + updated_at = _immutable_attr('base', 'updated_at') + + def delete(self): + message = _("You are not permitted to delete this tag.") + raise exception.Forbidden(message) + + def save(self): + message = _("You are not permitted to update this tag.") + raise exception.Forbidden(message) + + +class MetadefTagProxy(daisy.domain.proxy.MetadefTag): + pass + + +class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory): + + def __init__(self, meta_tag_factory, context): + self.meta_tag_factory = meta_tag_factory + self.context = context + super(MetadefTagFactoryProxy, self).__init__( + meta_tag_factory, + meta_tag_proxy_class=MetadefTagProxy) + + def new_tag(self, **kwargs): + owner = kwargs.pop('owner', self.context.owner) + if not self.context.is_admin: + if owner is None: + message = _("Owner must be specified to create a tag.") + raise exception.Forbidden(message) + elif owner != self.context.owner: + message = _("You are not permitted to create a tag" + " in the namespace owned by '%s'") + raise exception.Forbidden(message % (owner)) + + return super(MetadefTagFactoryProxy, self).new_tag(**kwargs) + + +class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo): + + def __init__(self, tag_repo, context): + self.tag_repo = tag_repo + self.context = context + super(MetadefTagRepoProxy, self).__init__(tag_repo) + + def get(self, namespace, tag_name): + meta_tag = self.tag_repo.get(namespace, tag_name) + return proxy_tag(self.context, meta_tag) + + def list(self, *args, **kwargs): + tags = self.tag_repo.list(*args, **kwargs) + return [proxy_tag(self.context, meta_tag) for + meta_tag in tags] diff --git a/code/daisy/daisy/api/backends/__init__.py b/code/daisy/daisy/api/backends/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/backends/common.py b/code/daisy/daisy/api/backends/common.py new file mode 100755 index 00000000..745baf9a --- /dev/null +++ b/code/daisy/daisy/api/backends/common.py @@ -0,0 +1,235 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
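+# Shared helpers for the daisy deployment backends: thin wrappers around the
+# registry client (hosts, roles, networks), subprocess execution with error
+# handling, fping-based reachability checks, and CIDR-to-netmask conversion.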
+ +""" +/install endpoint for tecs API +""" +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry + + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +daisy_path = '/var/lib/daisy/' +tecs_backend_name = "tecs" +zenic_backend_name = "zenic" +proton_backend_name = "proton" +os_install_start_time = 0.0 + +def subprocess_call(command,file=None): + if file: + return_code = subprocess.call(command, + shell=True, + stdout=file, + stderr=file) + else: + return_code = subprocess.call(command, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + if return_code != 0: + msg = "execute '%s' failed by subprocess call." % command + raise exception.SubprocessCmdFailed(msg) + +def get_host_detail(req, host_id): + try: + host_detail = registry.get_host_metadata(req.context, host_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return host_detail + +def get_roles_detail(req): + try: + roles = registry.get_roles_detail(req.context) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + +def get_cluster_roles_detail(req, cluster_id): + try: + params = {'cluster_id':cluster_id} + roles = registry.get_roles_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + +def get_hosts_of_role(req, role_id): + try: + hosts = registry.get_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return hosts + +def get_role_detail(req, role_id): + try: + role = registry.get_role_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role + +def update_role(req, role_id,role_meta): + try: + registry.update_role_metadata(req.context, role_id, role_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def update_role_host(req, role_id, role_host): + try: + registry.update_role_host_metadata(req.context, role_id, role_host) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def delete_role_hosts(req, role_id): + try: + registry.delete_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def get_cluster_networks_detail(req, cluster_id): + try: + networks = registry.get_networks_detail(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return networks + +def get_assigned_network(req, host_interface_id, network_id): + try: + assigned_network = registry.get_assigned_network(req.context, host_interface_id, network_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return assigned_network + +def _ping_hosts_test(ips): + ping_cmd = 'fping' + for ip in set(ips): + ping_cmd = ping_cmd + ' ' + ip + obj = 
subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0 or _returncode == 1: + ping_result = stdoutput.split('\n') + unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] + else: + msg = "ping failed beaceuse there is invlid ip in %s" % ips + raise exception.InvalidIP(msg) + return unreachable_hosts + + +def check_ping_hosts(ping_ips, max_ping_times): + if not ping_ips: + LOG.info(_("no ip got for ping test")) + return ping_ips + ping_count = 0 + time_step = 5 + LOG.info(_("begin ping test for %s" % ','.join(ping_ips))) + while True: + if ping_count == 0: + ips = _ping_hosts_test(ping_ips) + else: + ips = _ping_hosts_test(ips) + + ping_count += 1 + if ips: + LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) + if ping_count >= max_ping_times: + LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) + return ips + time.sleep(time_step) + else: + LOG.info(_("ping %s successfully" % ','.join(ping_ips))) + return ips + +def _ping_reachable_to_unreachable_host_test(ip,max_ping_times): + ping_cmd = 'fping' + ping_cmd = ping_cmd + ' ' + ip + ping_count = 0 + time_step = 5 + while True: + obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode != 0: + return True + ping_count += 1 + if ping_count >= max_ping_times: + LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step))) + return False + time.sleep(time_step) + return False + +def _ping_unreachable_to_reachable_host_test(ip, max_ping_times): + ping_count = 0 + time_step = 5 + ping_cmd = 'fping' + ping_cmd = ping_cmd + ' ' + ip + while True: + obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0: + return True + ping_count += 1 + if ping_count >= max_ping_times: + LOG.info(_("ping host %s timeout for %ss" % (ip, ping_count*time_step))) + return False + time.sleep(time_step) + return False + +def check_reboot_ping(ip): + stop_max_ping_times = 360 #ha host reboot may spend 20 min,so timeout time is 30min + start_max_ping_times = 60 + _ping_reachable_to_unreachable_host_test(ip, stop_max_ping_times) + _ping_unreachable_to_reachable_host_test(ip, start_max_ping_times) + time.sleep(5) + +def cidr_to_netmask(cidr): + ip_netmask = cidr.split('/') + if len(ip_netmask) != 2 or not ip_netmask[1]: + raise exception.InvalidNetworkConfig("cidr is not valid") + + cidr_end = ip_netmask[1] + mask = ~(2**(32 - int(cidr_end)) - 1) + inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + netmask = inter_ip(mask) + + return netmask \ No newline at end of file diff --git a/code/daisy/daisy/api/backends/driver.py b/code/daisy/daisy/api/backends/driver.py new file mode 100755 index 00000000..bb065d54 --- /dev/null +++ b/code/daisy/daisy/api/backends/driver.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Driver base-classes: + + (Beginning of) the contract that deployment backends drivers must follow, and shared + types that support that contract +""" + +import sys + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import importutils + +from daisy import i18n +from daisy.common import exception + +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +LOG = logging.getLogger(__name__) + +class DeploymentDriver(object): + """base class for deployment interface. + + """ + def install(self, req, cluster_id): + raise NotImplementedError() + + def upgrade(self, req, cluster_id): + raise NotImplementedError() + + def uninstall(self, req, cluster_id): + raise NotImplementedError() + + def uninstall_progress(self, req, cluster_id): + LOG.info(_("driver no interface for 'uninstall_progress'")) + return {} + + def upgrade_progress(self, req, cluster_id): + LOG.info(_("driver no interface for 'upgrade_progress'")) + return {} + + def exprot_db(self, req, cluster_id): + LOG.info(_("driver no interface for 'exprot_db'")) + return {} + + def update_disk_array(self, req, cluster_id): + LOG.info(_("driver no interface for 'update_disk_array'")) + return {} + +def check_isinstance(obj, cls): + """Checks that obj is of type cls, and lets PyLint infer types.""" + if isinstance(obj, cls): + return obj + raise Exception(_('Expected object of type: %s') % (str(cls))) + +def load_deployment_dirver(backend_name): + """Load a cluster backend installation driver. + """ + backend_driver = "%s.api.API" % backend_name + + LOG.info(_("Loading deployment backend '%s'") % backend_driver) + try: + driver = importutils.import_object_ns('daisy.api.backends',backend_driver) + return check_isinstance(driver, DeploymentDriver) + except ImportError: + LOG.exception(_("Error, unable to load the deployment backends '%s'" % backend_driver)) + return None diff --git a/code/daisy/daisy/api/backends/os.py b/code/daisy/daisy/api/backends/os.py new file mode 100755 index 00000000..65f4ddc1 --- /dev/null +++ b/code/daisy/daisy/api/backends/os.py @@ -0,0 +1,742 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
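+# Bare-metal OS installation backend. Builds the PXE/DHCP server from the
+# DEPLOYMENT network CIDR, assembles per-host disk and network parameters,
+# drives the install through the ironic client, and polls per-host progress
+# until the cluster finishes or the install times out.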
+ +""" +/install endpoint for tecs API +""" +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError +import threading +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.tecs import config +from daisy.api.backends import driver +from daisy.api.network_api import network as neutron +from ironicclient import client as ironic_client +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn + + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF +install_opts = [ + cfg.StrOpt('max_parallel_os_number', default=10, + help='Maximum number of hosts install os at the same time.'), +] +CONF.register_opts(install_opts) +upgrade_opts = [ + cfg.StrOpt('max_parallel_os_upgrade_number', default=10, + help='Maximum number of hosts upgrade os at the same time.'), +] +CONF.register_opts(upgrade_opts) + +host_os_status = { + 'INIT' : 'init', + 'INSTALLING' : 'installing', + 'ACTIVE' : 'active', + 'INSTALL_FAILED': 'install-failed', + 'UPDATING': 'updating', + 'UPDATE_FAILED': 'update-failed' +} + +LINUX_BOND_MODE = {'balance-rr':'0', 'active-backup':'1', 'balance-xor':'2', 'broadcast':'3','802.3ad':'4', 'balance-tlb':'5', 'balance-alb':'6'} + +daisy_tecs_path = tecs_cmn.daisy_tecs_path + +def get_ironicclient(): # pragma: no cover + """Get Ironic client instance.""" + args = {'os_auth_token': 'fake', + 'ironic_url':'http://127.0.0.1:6385/v1'} + return ironic_client.get_client(1, **args) + +def pxe_server_build(req, install_meta): + cluster_id = install_meta['cluster_id'] + try: + networks = registry.get_networks_detail(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + try: + ip_inter = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])]) + inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + network_cidr = [network['cidr'] for network in networks if network['name'] == 'DEPLOYMENT'][0] + if not network_cidr: + msg = "Error:The CIDR is blank of DEPLOYMENT!" 
+ raise exception.Forbidden(msg) + cidr_end=network_cidr.split('/')[1] + ip_addr = network_cidr.split('/')[0] + ip_addr_int=ip_inter(ip_addr) + mask = ~(2**(32 - int(cidr_end)) - 1) + net_mask=inter_ip(mask) + ip_addr_min = inter_ip(ip_addr_int & (mask & 0xffffffff)) + ip_addr_max = inter_ip(ip_addr_int | (~mask & 0xffffffff)) + pxe_server_ip=inter_ip((ip_inter(ip_addr_min))+1) + client_ip_begin=inter_ip((ip_inter(ip_addr_min))+2) + client_ip_end=ip_addr_max + args = {'build_pxe': 'yes', 'eth_name': install_meta['deployment_interface'], 'ip_address': pxe_server_ip, 'net_mask': net_mask, + 'client_ip_begin': client_ip_begin, 'client_ip_end': client_ip_end} + ironic = get_ironicclient() + ironic.daisy.build_pxe(**args) + except exception.Invalid as e: + msg = "build pxe server failed" + raise exception.InvalidNetworkConfig(msg) + +def _get_network_plat(req,host_config, cluster_networks, dhcp_mac): + host_config['dhcp_mac'] = dhcp_mac + if host_config['interfaces']: + count = 0 + host_config_orig = copy.deepcopy(host_config) + for interface in host_config['interfaces']: + count += 1 + if (interface.has_key('assigned_networks') and + interface['assigned_networks']): + assigned_networks = copy.deepcopy(interface['assigned_networks']) + host_config['interfaces'][count-1]['assigned_networks'] = [] + alias = [] + for assigned_network in assigned_networks: + network_name = assigned_network['name'] + cluster_network = [network for network in cluster_networks if network['name'] in network_name][0] + alias.append(cluster_network['alias']) + # convert cidr to netmask + cidr_to_ip = "" + assigned_networks_ip=tecs_cmn.get_host_network_ip(req, host_config_orig, cluster_networks, network_name) + if cluster_network.get('cidr', None): + inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + cidr_to_ip = inter_ip(2**32-2**(32-int(cluster_network['cidr'].split('/')[1]))) + if cluster_network['alias'] == None or len(alias) == 1: + network_type = cluster_network['network_type'] + network_plat = dict(network_type=network_type, + ml2_type=cluster_network['ml2_type'], + capability=cluster_network['capability'], + physnet_name=cluster_network['physnet_name'], + gateway=cluster_network.get('gateway', ""), + ip=assigned_networks_ip, + #ip=cluster_network.get('ip', ""), + netmask=cidr_to_ip, + vlan_id=cluster_network.get('vlan_id', "")) + if network_type == "MANAGEMENT" and cluster_network.get('gateway', "") == "": + msg = "Error: The gateway of network 'MANAGEMENT' is not given!" 
+ raise exception.Forbidden(msg) + host_config['interfaces'][count-1]['assigned_networks'].append(network_plat) + interface['ip']="" + interface['netmask']="" + interface['gateway']="" + + return host_config + +def get_cluster_hosts_config(req, cluster_id): + params = dict(limit=1000000) + try: + cluster_data = registry.get_cluster_metadata(req.context, cluster_id) + networks = registry.get_networks_detail(req.context, cluster_id) + all_roles = registry.get_roles_detail(req.context) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + roles = [role for role in all_roles if role['cluster_id'] == cluster_id] + all_hosts_ids = cluster_data['nodes'] + hosts_config = [] + for host_id in all_hosts_ids: + host_detail = daisy_cmn.get_host_detail(req, host_id) + role_host_db_lv_size_lists = list() + if host_detail.has_key('role') and host_detail['role']: + host_roles = host_detail['role'] + for role in roles: + if role['name'] in host_detail['role'] and role['glance_lv_size']: + host_detail['glance_lv_size'] = role['glance_lv_size'] + if role.get('db_lv_size', None) and host_roles and role['name'] in host_roles: + role_host_db_lv_size_lists.append(role['db_lv_size']) + if role['name'] == 'COMPUTER' and role['name'] in host_detail['role'] and role['nova_lv_size']: + host_detail['nova_lv_size'] = role['nova_lv_size'] + service_disks = tecs_cmn.get_service_disk_list(req, {'role_id':role['id']}) + for service_disk in service_disks: + if service_disk['disk_location'] == 'local' and service_disk['service'] == 'mongodb': + host_detail['mongodb_lv_size'] = service_disk['size'] + break + if role_host_db_lv_size_lists: + host_detail['db_lv_size'] = max(role_host_db_lv_size_lists) + else: + host_detail['db_lv_size'] = 0 + + for interface in host_detail['interfaces']: + if interface['type'] == 'bond'and interface['mode'] in LINUX_BOND_MODE.keys(): + interface['mode'] = LINUX_BOND_MODE[interface['mode']] + + if (host_detail['os_status'] == host_os_status['INIT'] or + host_detail['os_status'] == host_os_status['INSTALLING'] or + host_detail['os_status'] == host_os_status['INSTALL_FAILED']): + host_dhcp_interface = [hi for hi in host_detail['interfaces'] if hi['is_deployment']] + if not host_dhcp_interface: + msg = "cann't find dhcp interface on host %s" % host_detail['id'] + raise exception.InvalidNetworkConfig(msg) + if len(host_dhcp_interface) > 1: + msg = "dhcp interface should only has one on host %s" % host_detail['id'] + raise exception.InvalidNetworkConfig(msg) + + host_config_detail = copy.deepcopy(host_detail) + host_config = _get_network_plat(req,host_config_detail, + networks, + host_dhcp_interface[0]['mac']) + hosts_config.append(tecs_cmn.sort_interfaces_by_pci(host_config)) + return hosts_config + +def check_tfg_exist(): + get_tfg_patch = "ls %s|grep CGSL_VPLAT-.*\.iso$" % daisy_tecs_path + obj = subprocess.Popen(get_tfg_patch, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + tfg_patch_pkg_file = "" + tfg_patch_pkg_name = "" + if stdoutput: + tfg_patch_pkg_name = stdoutput.split('\n')[0] + tfg_patch_pkg_file = daisy_tecs_path + tfg_patch_pkg_name + chmod_for_tfg_bin = 'chmod +x %s' % tfg_patch_pkg_file + daisy_cmn.subprocess_call(chmod_for_tfg_bin) + + if not stdoutput or not tfg_patch_pkg_name: + LOG.info(_("no CGSL_VPLAT iso file got in %s" % daisy_tecs_path)) + return "" + return tfg_patch_pkg_file + +def update_db_host_status(req, host_id, host_status): + """ + Update host status and 
intallation progress to db. + :return: + """ + try: + host_meta = {} + host_meta['os_progress'] = host_status['os_progress'] + host_meta['os_status'] = host_status['os_status'] + host_meta['messages'] = host_status['messages'] + registry.update_host_metadata(req.context, + host_id, + host_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +class OSInstall(): + """ + Class for install OS. + """ + """ Definition for install states.""" + def __init__(self, req, cluster_id): + self.req = req + self.cluster_id = cluster_id + #5s + self.time_step = 5 + # 30 min + self.single_host_install_timeout = 30 * (12*self.time_step) + + self.max_parallel_os_num = int(CONF.max_parallel_os_number) + self.cluster_hosts_install_timeout = (self.max_parallel_os_num/4 + 2 )* 60 * (12*self.time_step) + self.ironicclient = get_ironicclient() + + def _set_boot_or_power_state(self, user, passwd, addr, action): + count = 0 + repeat_times = 24 + while count < repeat_times: + set_obj = self.ironicclient.daisy.set_boot_or_power_state(user, + passwd, + addr, + action) + set_dict = dict([(f, getattr(set_obj, f, '')) for f in ['return_code', 'info']]) + rc = int(set_dict['return_code']) + if rc == 0: + LOG.info(_("set %s to '%s' successfully for %s times by ironic" % (addr,action,count+1))) + break + else: + count += 1 + LOG.info(_("try setting %s to '%s' failed for %s times by ironic" % (addr,action,count))) + time.sleep(count*2) + if count >= repeat_times: + message = "set %s to '%s' failed for 10 mins" % (addr,action) + raise exception.IMPIOprationFailed(message=message) + + def _baremetal_install_os(self, host_detail): + # os_install_disk = 'sda' + os_version_file = host_detail['os_version_file'] + if os_version_file: + test_os_version_exist = 'test -f %s' % os_version_file + daisy_cmn.subprocess_call(test_os_version_exist) + else: + self.message = "no OS version file configed for host %s" % host_detail['id'] + raise exception.NotFound(message=self.message) + + if host_detail.get('root_disk',None): + root_disk = host_detail['root_disk'] + else: + root_disk = 'sda' + if host_detail.get('root_lv_size',None): + root_lv_size_m = host_detail['root_lv_size'] + else: + root_lv_size_m = 51200 + memory_size_b_str = str(host_detail['memory']['total']) + memory_size_b_int = int(memory_size_b_str.strip().split()[0]) + memory_size_m = memory_size_b_int//1024 + memory_size_g = memory_size_m//1024 + swap_lv_size_m = host_detail['swap_lv_size'] + cinder_vg_size_m = 0 + disk_list = [] + disk_storage_size_b = 0 + for key in host_detail['disks']: + disk_list.append(host_detail['disks'][key]['name']) + stroage_size_str = host_detail['disks'][key]['size'] + stroage_size_b_int = int(stroage_size_str.strip().split()[0]) + disk_storage_size_b = disk_storage_size_b + stroage_size_b_int + + disk_list = ','.join(disk_list) + disk_storage_size_m = disk_storage_size_b//(1024*1024) + if host_detail.has_key('root_pwd') and host_detail['root_pwd']: + root_pwd = host_detail['root_pwd'] + else: + root_pwd = 'ossdbg1' + + if host_detail.has_key('isolcpus') and host_detail['isolcpus']: + isolcpus = host_detail['isolcpus'] + else: + isolcpus = None + + if host_detail.get('hugepages',None): + hugepages = host_detail['hugepages'] + else: + hugepages = 0 + + if host_detail.get('hugepagesize',None): + hugepagesize = host_detail['hugepagesize'] + else: + hugepagesize = '1G' + + + + #tfg_patch_pkg_file = check_tfg_exist() + + if (not host_detail['ipmi_user'] or + not host_detail['ipmi_passwd'] or + not 
host_detail['ipmi_addr'] ): + self.message = "Invalid ipmi information configed for host %s" % host_detail['id'] + raise exception.NotFound(message=self.message) + + + + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'pxe') + + kwargs = {'hostname':host_detail['name'], + 'iso_path':os_version_file, + #'tfg_bin':tfg_patch_pkg_file, + 'dhcp_mac':host_detail['dhcp_mac'], + 'storage_size':disk_storage_size_m, + 'memory_size':memory_size_g, + 'interfaces':host_detail['interfaces'], + 'root_lv_size':root_lv_size_m, + 'swap_lv_size':swap_lv_size_m, + 'cinder_vg_size':cinder_vg_size_m, + 'disk_list':disk_list, + 'root_disk':root_disk, + 'root_pwd':root_pwd, + 'isolcpus':isolcpus, + 'hugepagesize':hugepagesize, + 'hugepages':hugepages, + 'reboot':'no'} + + if host_detail.has_key('glance_lv_size'): + kwargs['glance_lv_size'] = host_detail['glance_lv_size'] + else: + kwargs['glance_lv_size'] = 0 + + if host_detail.has_key('db_lv_size') and host_detail['db_lv_size']: + kwargs['db_lv_size'] = host_detail['db_lv_size'] + else: + kwargs['db_lv_size'] = 0 + + if host_detail.has_key('mongodb_lv_size') and host_detail['mongodb_lv_size']: + kwargs['mongodb_lv_size'] = host_detail['mongodb_lv_size'] + else: + kwargs['mongodb_lv_size'] = 0 + + if host_detail.has_key('nova_lv_size') and host_detail['nova_lv_size']: + kwargs['nova_lv_size'] = host_detail['nova_lv_size'] + else: + kwargs['nova_lv_size'] = 0 + install_os_obj = self.ironicclient.daisy.install_os(**kwargs) + install_os_dict = dict([(f, getattr(install_os_obj, f, '')) for f in ['return_code', 'info']]) + rc = int(install_os_dict['return_code']) + if rc != 0: + install_os_description = install_os_dict['info'] + LOG.info(_("install os config failed because of '%s'" % (install_os_description))) + host_status = {'os_status':host_os_status['INSTALL_FAILED'], + 'os_progress':0, + 'messages':install_os_description} + update_db_host_status(self.req, host_detail['id'],host_status) + msg = "ironic install os return failed for host %s" % host_detail['id'] + raise exception.OSInstallFailed(message=msg) + + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'reset') + + + + def _install_os_by_rousource_type(self, hosts_detail): + # all hosts status set to 'init' before install os + for host_detail in hosts_detail: + host_status = {'os_status':host_os_status['INIT'], + 'os_progress':0, + 'messages':''} + update_db_host_status(self.req, host_detail['id'],host_status) + + for host_detail in hosts_detail: + self._baremetal_install_os(host_detail) + + + def _set_disk_start_mode(self, host_detail): + LOG.info(_("Set boot from disk for host %s" % (host_detail['id']))) + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'disk') + LOG.info(_("reboot host %s" % (host_detail['id']))) + self._set_boot_or_power_state(host_detail['ipmi_user'], + host_detail['ipmi_passwd'], + host_detail['ipmi_addr'], + 'reset') + + def _init_progress(self, host_detail, hosts_status): + host_id = host_detail['id'] + + host_status = hosts_status[host_id] = {} + host_status['os_status'] = host_os_status['INSTALLING'] + host_status['os_progress'] = 0 + host_status['count'] = 0 + if host_detail['resource_type'] == 'docker': + host_status['messages'] = "docker container is creating" + else: + host_status['messages'] = "OS installing" + + update_db_host_status(self.req, host_id, host_status) + + def 
_query_host_progress(self, host_detail, host_status, host_last_status): + host_id = host_detail['id'] + install_result_obj = \ + self.ironicclient.daisy.get_install_progress(host_detail['dhcp_mac']) + install_result = dict([(f, getattr(install_result_obj, f, '')) + for f in ['return_code', 'info', 'progress']]) + rc = int(install_result['return_code']) + host_status['os_progress'] = int(install_result['progress']) + if rc == 0: + if host_status['os_progress'] == 100: + time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2)) + LOG.info(_("It takes %s min for host %s to install os" % (time_cost, host_id))) + LOG.info(_("host %s install os completely." % host_id)) + host_status['os_status'] = host_os_status['ACTIVE'] + host_status['messages'] = "OS installed successfully" + # wait for nicfix script complete + time.sleep(10) + self._set_disk_start_mode(host_detail) + else: + if host_status['os_progress'] == host_last_status['os_progress']: + host_status['count'] = host_status['count'] + 1 + LOG.debug(_("host %s has kept %ss when progress is %s." % (host_id, + host_status['count']*self.time_step, host_status['os_progress']))) + else: + LOG.info(_("host %s install failed." % host_id)) + host_status['os_status'] = host_os_status['INSTALL_FAILED'] + host_status['messages'] = install_result['info'] + + def _query_progress(self, hosts_last_status, hosts_detail): + hosts_status = copy.deepcopy(hosts_last_status) + for host_detail in hosts_detail: + host_id = host_detail['id'] + if not hosts_status.has_key(host_id): + self._init_progress(host_detail, hosts_status) + continue + + host_status = hosts_status[host_id] + host_last_status = hosts_last_status[host_id] + #only process installing hosts after init, other hosts info will be kept in hosts_status + if host_status['os_status'] != host_os_status['INSTALLING']: + continue + + self._query_host_progress(host_detail, host_status, host_last_status) + + if host_status['count']*self.time_step >= self.single_host_install_timeout: + host_status['os_status'] = host_os_status['INSTALL_FAILED'] + if host_detail['resource_type'] == 'docker': + host_status['messages'] = "docker container created timeout" + else: + host_status['messages'] = "os installed timeout" + if (host_status['os_progress'] != host_last_status['os_progress'] or\ + host_status['os_status'] != host_last_status['os_status']): + host_status['count'] = 0 + update_db_host_status(self.req, host_id,host_status) + return hosts_status + + def _get_install_status(self, hosts_detail): + query_count = 0 + hosts_last_status = {} + while True: + hosts_install_status = self._query_progress(hosts_last_status, hosts_detail) + # if all hosts install over, break + installing_hosts = [id for id in hosts_install_status.keys() + if hosts_install_status[id]['os_status'] == host_os_status['INSTALLING']] + if not installing_hosts: + break + #after 3h, if some hosts are not 'active', label them to 'failed'. 
+ elif query_count*self.time_step >= self.cluster_hosts_install_timeout: + for host_id,host_status in hosts_install_status.iteritems(): + if (host_status['os_status'] != host_os_status['ACTIVE'] and + host_status['os_status'] != host_os_status['INSTALL_FAILED']): + # label the host install failed because of time out for 3h + host_status['os_status'] = host_os_status['INSTALL_FAILED'] + host_status['messages'] = "cluster os installed timeout" + update_db_host_status(self.req, host_id, host_status) + break + else: + query_count += 1 + hosts_last_status = hosts_install_status + time.sleep(self.time_step) + return hosts_install_status + + def install_os(self, hosts_detail, role_hosts_ids): + if len(hosts_detail) > self.max_parallel_os_num: + install_hosts = hosts_detail[:self.max_parallel_os_num] + hosts_detail = hosts_detail[self.max_parallel_os_num:] + else: + install_hosts = hosts_detail + hosts_detail = [] + + install_hosts_id = [host_detail['id'] for host_detail in install_hosts] + LOG.info(_("Begin install os for hosts %s." % ','.join(install_hosts_id))) + daisy_cmn.os_install_start_time = time.time() + self._install_os_by_rousource_type(install_hosts) + LOG.info(_("Begin to query install progress...")) + # wait to install completely + cluster_install_status = self._get_install_status(install_hosts) + total_time_cost = str(round((time.time() - daisy_cmn.os_install_start_time)/60, 2)) + LOG.info(_("It totally takes %s min for all host to install os" % total_time_cost)) + LOG.info(_("OS install in cluster %s result is:" % self.cluster_id)) + LOG.info(_("%s %s %s" % ('host-id', 'os-status', 'description'))) + + for host_id,host_status in cluster_install_status.iteritems(): + LOG.info(_("%s %s %s" % (host_id, host_status['os_status'], host_status['messages']))) + if host_id in role_hosts_ids: + if host_status['os_status'] == host_os_status['INSTALL_FAILED']: + break + else: + role_hosts_ids.remove(host_id) + return (hosts_detail, role_hosts_ids) + + +def _os_thread_bin(req, host_ip, host_id): + host_meta = {} + password = "ossdbg1" + LOG.info(_("Begin update os for host %s." % (host_ip))) + cmd = 'mkdir -p /var/log/daisy/daisy_update/' + daisy_cmn.subprocess_call(cmd) + + var_log_path = "/var/log/daisy/daisy_update/%s_update_tfg.log" % host_ip + with open(var_log_path, "w+") as fp: + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s -c /var/lib/daisy/tecs/*CGSL_VPLAT*.iso /var/lib/daisy/tecs/tfg_upgrade.sh --dest=/home/daisy_update' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + host_meta['os_progress'] = 30 + host_meta['os_status'] = host_os_status['UPDATING'] + host_meta['messages'] = "" + update_db_host_status(req, host_id, host_meta) + try: + exc_result = subprocess.check_output( + 'clush -S -w %s "/home/daisy_update/tfg_upgrade.sh"' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + if e.returncode == 255 and "reboot" in e.output.strip(): + host_meta['os_progress'] = 100 + host_meta['os_status'] = host_os_status['ACTIVE'] + host_meta['messages'] = "upgrade tfg successfully,os reboot" + LOG.info(_("Update tfg for %s successfully,os reboot!" 
% host_ip))
+ daisy_cmn.check_reboot_ping(host_ip)
+ else:
+ host_meta['os_progress'] = 0
+ host_meta['os_status'] = host_os_status['UPDATE_FAILED']
+ host_meta['messages'] = e.output.strip()[-400:-200].replace('\n',' ')
+ LOG.error(_("Update tfg for %s failed!" % host_ip))
+ update_db_host_status(req, host_id, host_meta)
+ fp.write(e.output.strip())
+ else:
+ host_meta['os_progress'] = 100
+ host_meta['os_status'] = host_os_status['ACTIVE']
+ host_meta['messages'] = "upgrade tfg successfully"
+ update_db_host_status(req, host_id, host_meta)
+ LOG.info(_("Update os for %s successfully!" % host_ip))
+ fp.write(exc_result)
+ if "reboot" in exc_result:
+ daisy_cmn.check_reboot_ping(host_ip)
+# re-raise any exception from the worker thread so that it is recorded in the log file
+def os_thread_bin(req, host_ip, host_id):
+ try:
+ _os_thread_bin(req, host_ip, host_id)
+ except Exception as e:
+ LOG.exception(e.message)
+ raise exception.ThreadBinException(message=e.message)
+
+
+def _get_host_os_version(host_ip, host_pwd='ossdbg1'):
+ version = ""
+ tfg_version_file = '/usr/sbin/tfg_showversion'
+ # if tfg_showversion does not exist on the remote host, the host is running
+ # a legacy TFG image and no version string can be queried
+ try:
+ subprocess.check_output("sshpass -p %s ssh -o StrictHostKeyChecking=no"
+ " %s test -f %s" % (host_pwd, host_ip,
+ tfg_version_file),
+ shell=True, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError:
+ LOG.info(_("Host %s os version is TFG" % host_ip))
+ return version
+ try:
+ process = subprocess.Popen(["sshpass", "-p", "%s" % host_pwd, "ssh",
+ "-o StrictHostKeyChecking=no", "%s" % host_ip,
+ 'tfg_showversion'], shell=False,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ version = process.stdout.read().strip('\n')
+ except subprocess.CalledProcessError:
+ msg = _("Get host %s os version by subprocess failed!" % host_ip)
+ raise exception.SubprocessCmdFailed(message=msg)
+
+ if version:
+ LOG.info(_("Host %s os version is %s" % (host_ip, version)))
+ return version
+ else:
+ msg = _("Get host %s os version by tfg_showversion failed!"
% host_ip) + LOG.error(msg) + raise exception.Invalid(message=msg) + + +def _cmp_os_version(new_os_file, old_os_version, target_host_ip, password='ossdbg1'): + shell_file = '/usr/sbin/tfg_showversion' + if old_os_version: + try: + subprocess.check_output("test -f %s" % shell_file, shell=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + scripts = ["sshpass -p %s scp -r -o StrictHostKeyChecking=no %s:%s " + "/usr/sbin/" % (password, target_host_ip, shell_file)] + tecs_cmn.run_scrip(scripts) + + cmp_script = "tfg_showversion %s %s" % (new_os_file, old_os_version) + try: + result = subprocess.check_output(cmp_script, shell=True, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + return -1 + else: + if new_os_file.find("Mimosa") != -1: + return 0 + else: + msg = _("Please use Mimosa os to upgrade instead of TFG") + LOG.error(msg) + raise exception.Forbidden(message=msg) + return result.find("yes") + + +def upgrade_os(req, hosts_list): + upgrade_hosts = [] + max_parallel_os_upgrade_number = int(CONF.max_parallel_os_upgrade_number) + while hosts_list: + host_meta = {} + threads = [] + if len(hosts_list) > max_parallel_os_upgrade_number: + upgrade_hosts = hosts_list[:max_parallel_os_upgrade_number] + hosts_list = hosts_list[max_parallel_os_upgrade_number:] + else: + upgrade_hosts = hosts_list + hosts_list = [] + + new_os_file = check_tfg_exist() + for host_info in upgrade_hosts: + host_id = host_info.keys()[0] + host_ip = host_info.values()[0] + host_detail = daisy_cmn.get_host_detail(req, host_id) + target_host_os = _get_host_os_version(host_ip, host_detail['root_pwd']) + + if _cmp_os_version(new_os_file, target_host_os, host_ip) == 0: + host_meta['os_progress'] = 10 + host_meta['os_status'] = host_os_status['UPDATING'] + host_meta['messages'] = "os updating,begin copy iso" + update_db_host_status(req, host_id, host_meta) + t = threading.Thread(target=os_thread_bin, args=(req, host_ip, + host_id)) + t.setDaemon(True) + t.start() + threads.append(t) + else: + LOG.warn(_("new os version is lower than or equal to that of " + "host %s, don't need to upgrade!" % host_ip)) + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join update thread %s failed!" % t)) + else: + for host_info in upgrade_hosts: + update_failed_flag = False + host_id = host_info.keys()[0] + host_ip = host_info.values()[0] + host = registry.get_host_metadata(req.context, host_id) + if host['os_status'] == host_os_status['UPDATE_FAILED'] or host['os_status'] == host_os_status['INIT']: + update_failed_flag = True + raise exception.ThreadBinException("%s update tfg failed! %s" % (host_ip, host['messages'])) + if not update_failed_flag: + host_meta = {} + host_meta['os_progress'] = 100 + host_meta['os_status'] = host_os_status['ACTIVE'] + host_meta['messages'] = "os upgrade successfully" + update_db_host_status(req, host_id,host_meta) diff --git a/code/daisy/daisy/api/backends/proton/__init__.py b/code/daisy/daisy/api/backends/proton/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/backends/proton/api.py b/code/daisy/daisy/api/backends/proton/api.py new file mode 100755 index 00000000..677afcf2 --- /dev/null +++ b/code/daisy/daisy/api/backends/proton/api.py @@ -0,0 +1,126 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for proton API +""" +from oslo_log import log as logging + +import threading + +from daisy import i18n + +from daisy.common import exception +from daisy.api.backends import driver +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.proton.common as proton_cmn +import daisy.api.backends.proton.install as instl +import daisy.api.backends.proton.uninstall as unstl + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +proton_state = proton_cmn.PROTON_STATE + + +class API(driver.DeploymentDriver): + """ + The hosts API is a RESTful web service for host data. The API + is as follows:: + + GET /hosts -- Returns a set of brief metadata about hosts + GET /hosts/detail -- Returns a set of detailed metadata about + hosts + HEAD /hosts/ -- Return metadata about an host with id + GET /hosts/ -- Return host data for host with id + POST /hosts -- Store host data and return metadata about the + newly-stored host + PUT /hosts/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /hosts/ -- Delete the host with id + """ + def __init__(self): + super(API, self).__init__() + return + + def install(self, req, cluster_id): + """ + Install PROTON to a cluster. + cluster_id:cluster id + """ + proton_install_task = instl.ProtonInstallTask(req, cluster_id) + proton_install_task.start() + + def _uninstall(self, req, role_id, threads): + try: + for t in threads: + t.setDaemon(True) + t.start() + LOG.info(_("uninstall threads have started," + " please waiting....")) + + for t in threads: + t.join() + except: + LOG.warn(_("Join uninstall thread failed!")) + else: + uninstall_failed_flag = False + role = daisy_cmn.get_role_detail(req, role_id) + if role['progress'] == 100: + unstl.update_progress_to_db( + req, role_id, proton_state['UNINSTALL_FAILED']) + uninstall_failed_flag = True + return + if role['status'] == proton_state['UNINSTALL_FAILED']: + uninstall_failed_flag = True + return + if not uninstall_failed_flag: + LOG.info(_("all uninstall threads have done," + " set role of proton status to 'init'!")) + unstl.update_progress_to_db(req, role_id, + proton_state['INIT']) + + def uninstall(self, req, cluster_id): + """ + Uninstall PROTON to a cluster. 
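+ :param req: The WSGI/Webob Request object
+ :param cluster_id: cluster id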
+ :raises HTTPBadRequest if x-install-cluster is missing + """ + (role_id, hosts_list) = proton_cmn.get_roles_and_hosts_list(req, + cluster_id) + if role_id: + if not hosts_list: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + + unstl.update_progress_to_db(req, role_id, + proton_state['UNINSTALLING'], 0.0) + uninstall_progress_percentage = \ + round(1 * 1.0 / len(hosts_list), 2) * 100 + + threads = [] + for host in hosts_list: + host_detail = proton_cmn.get_host_detail(req, host['host_id']) + t = threading.Thread(target=unstl.thread_bin, + args=(req, + host_detail['interfaces'][0]['ip'], + role_id, + uninstall_progress_percentage)) + threads.append(t) + + self._uninstall(req, role_id, threads) diff --git a/code/daisy/daisy/api/backends/proton/common.py b/code/daisy/daisy/api/backends/proton/common.py new file mode 100755 index 00000000..552119ed --- /dev/null +++ b/code/daisy/daisy/api/backends/proton/common.py @@ -0,0 +1,178 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for proton API +""" +import subprocess +from oslo_log import log as logging +from webob.exc import HTTPBadRequest + +from daisy import i18n + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +import daisy.api.backends.common as daisy_cmn + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +daisy_proton_path = '/var/lib/daisy/proton/' +PROTON_STATE = { + 'INIT': 'init', + 'INSTALLING': 'installing', + 'ACTIVE': 'active', + 'INSTALL_FAILED': 'install-failed', + 'UNINSTALLING': 'uninstalling', + 'UNINSTALL_FAILED': 'uninstall-failed', + 'UPDATING': 'updating', + 'UPDATE_FAILED': 'update-failed', +} + + +def get_host_detail(req, host_id): + try: + host_detail = registry.get_host_metadata(req.context, host_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return host_detail + + +def get_roles_detail(req): + try: + roles = registry.get_roles_detail(req.context) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + + +def get_hosts_of_role(req, role_id): + try: + hosts = registry.get_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return hosts + + +def get_roles_and_hosts_list(req, cluster_id): + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + for role in roles: + if role['deployment_backend'] == daisy_cmn.proton_backend_name: + role_hosts = get_hosts_of_role(req, role['id']) + return (role['id'], role_hosts) + + +def get_role_detail(req, role_id): + try: + role = registry.get_role_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role + + +def check_and_get_proton_version(daisy_proton_path): + proton_version_pkg_name = "" + 
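+ # look under daisy_proton_path for the proton package (a name matching
+ # ZXDTC-PROTON*.bin, exact naming depends on the release), make it executable
+ # and return its file name; an empty string means no package was found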
get_proton_version_pkg = "ls %s| grep ^ZXDTC-PROTON.*\.bin$" \
+ % daisy_proton_path
+ obj = subprocess.Popen(
+ get_proton_version_pkg, shell=True, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ (stdoutput, erroutput) = obj.communicate()
+ if stdoutput:
+ proton_version_pkg_name = stdoutput.split('\n')[0]
+ proton_version_pkg_file = daisy_proton_path + proton_version_pkg_name
+ chmod_for_proton_version = 'chmod +x %s' % proton_version_pkg_file
+ daisy_cmn.subprocess_call(chmod_for_proton_version)
+ return proton_version_pkg_name
+
+
+class ProtonShellExector():
+ """
+ Copy the proton bin package to a remote host and install or uninstall it there.
+ """
+ def __init__(self, mgt_ip, proton_version_name, task_type, rmc_ip=''):
+ self.task_type = task_type
+ self.mgt_ip = mgt_ip
+ self.proton_version_file = daisy_proton_path + proton_version_name
+ self.rmc_ip = rmc_ip
+ self.clush_cmd = ""
+ self.oper_type = {
+ 'install': self._install_proton,
+ 'uninstall': self._uninstall_proton
+ }
+ self.oper_shell = {
+ 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
+ 'CMD_BIN_SCP':
+ "scp %(path)s root@%(ssh_ip)s:/home" %
+ {'path': self.proton_version_file, 'ssh_ip': mgt_ip},
+ 'CMD_BIN_INSTALL': "sudo /home/%s install %s 7777" %
+ (proton_version_name, self.rmc_ip),
+ 'CMD_BIN_UNINSTALL': "sudo /home/%s uninstall" %
+ proton_version_name,
+ 'CMD_BIN_REMOVE': "sudo rm -rf /home/%s" % proton_version_name
+ }
+
+ self._execute()
+
+ def _install_proton(self):
+ self.clush_cmd = \
+ "%s;%s" % (
+ self.oper_shell['CMD_SSHPASS_PRE'] %
+ {"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
+ self.oper_shell['CMD_SSHPASS_PRE'] %
+ {
+ "ssh_ip": "ssh " + self.mgt_ip, "cmd":
+ self.oper_shell['CMD_BIN_INSTALL']
+ }
+ )
+
+ subprocess.check_output(self.clush_cmd, shell=True,
+ stderr=subprocess.STDOUT)
+
+ def _uninstall_proton(self):
+ self.clush_cmd = \
+ "%s;%s" % (
+ self.oper_shell['CMD_SSHPASS_PRE'] %
+ {"ssh_ip": "", "cmd": self.oper_shell['CMD_BIN_SCP']},
+ self.oper_shell['CMD_SSHPASS_PRE'] %
+ {
+ "ssh_ip": "ssh " + self.mgt_ip,
+ "cmd": self.oper_shell['CMD_BIN_UNINSTALL']
+ }
+ )
+
+ subprocess.check_output(self.clush_cmd, shell=True,
+ stderr=subprocess.STDOUT)
+
+ def _execute(self):
+ try:
+ if not self.task_type or not self.mgt_ip:
+ LOG.error(_("<<<task_type or mgt_ip is empty, nothing to execute>>>"))
+ return
+
+ self.oper_type[self.task_type]()
+ except subprocess.CalledProcessError as e:
+ LOG.warn(_("<<<execute proton cmd failed: %s>>>" % e.output.strip()))
+ except Exception as e:
+ LOG.exception(_(e.message))
+ else:
+ LOG.info(_("<<<execute proton cmd '%s' successfully>>>" % self.clush_cmd))
diff --git a/code/daisy/daisy/api/backends/proton/install.py b/code/daisy/daisy/api/backends/proton/install.py
new file mode 100755
index 00000000..59780054
--- /dev/null
+++ b/code/daisy/daisy/api/backends/proton/install.py
@@ -0,0 +1,153 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +""" +/install endpoint for proton API +""" +from oslo_log import log as logging +from threading import Thread + +from daisy import i18n +import daisy.api.v1 + +from daisy.common import exception +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.proton.common as proton_cmn + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + + +proton_state = proton_cmn.PROTON_STATE +daisy_proton_path = proton_cmn.daisy_proton_path + + +def get_proton_ip(req, role_hosts): + proton_ip_list = [] + for role_host in role_hosts: + host_detail = proton_cmn.get_host_detail(req, + role_host['host_id']) + for interface in host_detail['interfaces']: + for network in interface['assigned_networks']: + if network.get("name") == "MANAGEMENT": + proton_ip_list.append(network.get("ip")) + + return proton_ip_list + + + +def get_proton_hosts(req, cluster_id): + all_roles = proton_cmn.get_roles_detail(req) + for role in all_roles: + if role['cluster_id'] == cluster_id and role['name'] == 'PROTON': + role_hosts = proton_cmn.get_hosts_of_role(req, role['id']) + + return get_proton_ip(req, role_hosts) + + +def get_rmc_host(req, cluster_id): + return "10.43.211.63" + + +class ProtonInstallTask(Thread): + """ + Class for install proton bin. + """ + def __init__(self, req, cluster_id): + super(ProtonInstallTask, self).__init__() + self.req = req + self.cluster_id = cluster_id + self.progress = 0 + self.message = "" + self.state = proton_state['INIT'] + self.proton_ip_list = [] + self.install_log_fp = None + self.last_line_num = 0 + self.need_install = False + self.ping_times = 36 + + def _update_install_progress_to_db(self): + """ + Update progress of intallation to db. + :return: + """ + roles = daisy_cmn.get_cluster_roles_detail(self.req, self.cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.proton_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) + for role_host in role_hosts: + if role_host['status'] != proton_state['ACTIVE']: + self.need_install = True + role_host['status'] = self.state + daisy_cmn.update_role_host(self.req, role_host['id'], + role_host) + role['status'] = self.state + role['messages'] = self.message + daisy_cmn.update_role(self.req, role['id'], role) + + def run(self): + try: + self._run() + except (exception.InstallException, + exception.NotFound, + exception.InstallTimeoutException) as e: + LOG.exception(e.message) + else: + self.progress = 100 + self.state = proton_state['ACTIVE'] + self.message = "Proton install successfully" + LOG.info(_("Install PROTON for cluster %s successfully." % + self.cluster_id)) + finally: + self._update_install_progress_to_db() + + def _run(self): + """ + Exectue install file(.bin) with sync mode. 
+ :return: + """ + if not self.cluster_id or not self.req: + raise exception.InstallException( + cluster_id=self.cluster_id, reason="invalid params.") + + self.proton_ip_list = get_proton_hosts(self.req, self.cluster_id) + unreached_hosts = daisy_cmn.check_ping_hosts(self.proton_ip_list, + self.ping_times) + if unreached_hosts: + self.state = proton_state['INSTALL_FAILED'] + self.message = "hosts %s ping failed" % unreached_hosts + raise exception.NotFound(message=self.message) + + proton_version_name = \ + proton_cmn.check_and_get_proton_version(daisy_proton_path) + if not proton_version_name: + self.state = proton_state['INSTALL_FAILED'] + self.message = "PROTON version file not found in %s" % \ + daisy_proton_path + raise exception.NotFound(message=self.message) + + rmc_ip = get_rmc_host(self.req, self.cluster_id) + + for proton_ip in self.proton_ip_list: + proton_cmn.ProtonShellExector(proton_ip, proton_version_name, + 'install', rmc_ip) diff --git a/code/daisy/daisy/api/backends/proton/uninstall.py b/code/daisy/daisy/api/backends/proton/uninstall.py new file mode 100755 index 00000000..e8847fec --- /dev/null +++ b/code/daisy/daisy/api/backends/proton/uninstall.py @@ -0,0 +1,103 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/hosts endpoint for Daisy v1 API +""" +import subprocess + +from oslo_log import log as logging +import threading +from daisy import i18n +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.proton.common as proton_cmn + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +proton_state = proton_cmn.PROTON_STATE +daisy_proton_path = proton_cmn.daisy_proton_path + +# uninstall init progress is 100, when uninstall succefully, +# uninstall progress is 0, and web display progress is reverted +uninstall_proton_progress = 100.0 +uninstall_mutex = threading.Lock() + + +def update_progress_to_db(req, role_id, status, progress_percentage_step=0.0): + """ + Write uninstall progress and status to db, we use global lock object + 'uninstall_mutex' to make sure this function is thread safety. + :param req: http req. + :param role_id_list: Column neeb be update in role table. + :param status: Uninstall status. 
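+ :param progress_percentage_step: per-host share of the overall uninstall progress, subtracted from the global counter on each successful host uninstall.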
+ :return: + """ + global uninstall_mutex + global uninstall_proton_progress + uninstall_mutex.acquire(True) + uninstall_proton_progress -= progress_percentage_step + role = {} + + role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) + if status == proton_state['UNINSTALLING']: + role['status'] = status + role['progress'] = uninstall_proton_progress + role['messages'] = 'Proton uninstalling' + for role_host in role_hosts: + role_host_meta = dict() + role_host_meta['status'] = status + role_host_meta['progress'] = uninstall_proton_progress + daisy_cmn.update_role_host(req, role_host['id'], role_host_meta) + if status == proton_state['UNINSTALL_FAILED']: + role['status'] = status + role['messages'] = 'Uninstall-failed' + for role_host in role_hosts: + role_host_meta = dict() + role_host_meta['status'] = status + daisy_cmn.update_role_host(req, role_host['id'], role_host_meta) + elif status == proton_state['INIT']: + role['status'] = status + role['progress'] = 0 + role['messages'] = 'Proton uninstall successfully' + daisy_cmn.delete_role_hosts(req, role_id) + + daisy_cmn.update_role(req, role_id, role) + uninstall_mutex.release() + + +def _thread_bin(req, host_ip, role_id, uninstall_progress_percentage): + try: + proton_version_name = \ + proton_cmn.check_and_get_proton_version(daisy_proton_path) + proton_cmn.ProtonShellExector(host_ip, proton_version_name, + 'uninstall') + except subprocess.CalledProcessError: + update_progress_to_db(req, role_id, proton_state['UNINSTALL_FAILED']) + LOG.info(_("Uninstall PROTON for %s failed!" % host_ip)) + else: + update_progress_to_db(req, role_id, proton_state['UNINSTALLING'], + uninstall_progress_percentage) + LOG.info(_("Uninstall PROTON for %s successfully!" % host_ip)) + + +def thread_bin(req, host_ip, role_id, uninstall_progress_percentage): + try: + _thread_bin(req, host_ip, role_id, uninstall_progress_percentage) + except Exception as e: + LOG.exception(e.message) diff --git a/code/daisy/daisy/api/backends/tecs/__init__.py b/code/daisy/daisy/api/backends/tecs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/backends/tecs/api.py b/code/daisy/daisy/api/backends/tecs/api.py new file mode 100755 index 00000000..fa7472f5 --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/api.py @@ -0,0 +1,382 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/install endpoint for tecs API +""" +import os +import copy +import subprocess +import time +import commands + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError + +import threading +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.tecs import config +from daisy.api.backends import driver +from daisy.api.network_api import network as neutron +from ironicclient import client as ironic_client +import daisy.api.backends.os as os_handle +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.tecs.install as instl +import daisy.api.backends.tecs.uninstall as unstl +import daisy.api.backends.tecs.upgrade as upgrd +import daisy.api.backends.tecs.disk_array as disk_array + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +CONF = cfg.CONF +upgrade_opts = [ + cfg.StrOpt('max_parallel_os_upgrade_number', default=10, + help='Maximum number of hosts upgrade os at the same time.'), +] +CONF.register_opts(upgrade_opts) + +tecs_state = tecs_cmn.TECS_STATE + +class API(driver.DeploymentDriver): + """ + The hosts API is a RESTful web service for host data. The API + is as follows:: + + GET /hosts -- Returns a set of brief metadata about hosts + GET /hosts/detail -- Returns a set of detailed metadata about + hosts + HEAD /hosts/ -- Return metadata about an host with id + GET /hosts/ -- Return host data for host with id + POST /hosts -- Store host data and return metadata about the + newly-stored host + PUT /hosts/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /hosts/ -- Delete the host with id + """ + + def __init__(self): + super(API, self).__init__() + return + + def install(self, req, cluster_id): + """ + Install TECS to a cluster. 
+ + param req: The WSGI/Webob Request object + cluster_id:cluster id + """ + + tecs_install_task = instl.TECSInstallTask(req, cluster_id) + tecs_install_task.start() + + def _get_roles_and_hosts_ip_list(self, req, cluster_id): + host_ha_list = set() + host_ip_list = set() + role_id_list = set() + hosts_id_list = [] + hosts_list = [] + + roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + if role_hosts: + for role_host in role_hosts: + host = daisy_cmn.get_host_detail(req, role_host['host_id']) + host_ip = tecs_cmn.get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT') + if role['name'] == "CONTROLLER_HA": + host_ha_list.add(host_ip) + host_ip_list.add(host_ip) + hosts_id_list.append({host['id']:host_ip}) + role_id_list.add(role['id']) + for host in hosts_id_list: + if host not in hosts_list: + hosts_list.append(host) + return (role_id_list, host_ip_list, host_ha_list, hosts_list) + + def _query_progress(self, req, cluster_id, action=""): + nodes_list = [] + roles = daisy_cmn.get_roles_detail(req) + (role_id_list,host_ip_list,host_ha_list,hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id) + for host in hosts_list: + node = {} + host_id = host.keys()[0] + host = daisy_cmn.get_host_detail(req, host_id) + node['id'] = host['id'] + node['name'] = host['name'] + + if 0 == cmp("upgrade", action): + node['os-progress'] = host['os_progress'] + node['os-status'] = host['os_status'] + node['os-messages'] = host['messages'] + + if host['status'] == "with-role": + host_roles = [ role for role in roles if role['name'] in host['role'] and role['cluster_id'] == cluster_id] + if host_roles: + node['role-status'] = host_roles[0]['status'] + node['role-progress'] = str(host_roles[0]['progress']) + # node['role-message'] = host_roles[0]['messages'] + nodes_list.append(node) + if nodes_list: + return {'tecs_nodes': nodes_list} + else: + return {'tecs_nodes': "TECS uninstall successfully, the host has been removed from the host_roles table"} + + def uninstall(self, req, cluster_id): + """ + Uninstall TECS to a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + (role_id_list, host_ip_list,host_ha_list, hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id) + if role_id_list: + if not host_ip_list: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + + unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALLING'], hosts_list) + + threads = [] + for host_ip in host_ip_list: + t = threading.Thread(target=unstl.thread_bin,args=(req,host_ip,role_id_list,hosts_list)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("Uninstall threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join uninstall thread %s failed!" 
% t))
+ else:
+ uninstall_failed_flag = False
+ for role_id in role_id_list:
+ role_hosts = daisy_cmn.get_hosts_of_role(req, role_id)
+ for role_host in role_hosts:
+ if role_host['status'] == tecs_state['UNINSTALL_FAILED']:
+ unstl.update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'], hosts_list)
+ uninstall_failed_flag = True
+ break
+ if not uninstall_failed_flag:
+ LOG.info(_("All uninstall threads have done, set all roles status to 'init'!"))
+ unstl.update_progress_to_db(req, role_id_list, tecs_state['INIT'], hosts_list)
+ try:
+ (status, output) = commands.getstatusoutput('rpm -e --nodeps openstack-packstack\
+ openstack-packstack-puppet openstack-puppet-modules puppet')
+ except exception.Invalid as e:
+ raise HTTPBadRequest(explanation=e.msg, request=req)
+
+ def uninstall_progress(self, req, cluster_id):
+ return self._query_progress(req, cluster_id, "uninstall")
+
+ def upgrade(self, req, cluster_id):
+ """
+ Upgrade TECS for a cluster.
+
+ :param req: The WSGI/Webob Request object
+
+ :raises HTTPBadRequest if x-install-cluster is missing
+ """
+ (role_id_list, host_ip_list, host_ha_list, hosts_list) = self._get_roles_and_hosts_ip_list(req, cluster_id)
+ if role_id_list:
+ if not host_ip_list:
+ msg = _("there is no host in cluster %s") % cluster_id
+ raise exception.ThreadBinException(msg)
+ unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 1)
+ if unreached_hosts:
+ self.message = "hosts %s ping failed" % unreached_hosts
+ raise exception.NotFound(message=self.message)
+
+ daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts')
+
+ if os_handle.check_tfg_exist():
+ os_handle.upgrade_os(req, hosts_list)
+ unreached_hosts = daisy_cmn.check_ping_hosts(host_ip_list, 30)
+ if unreached_hosts:
+ self.message = "hosts %s ping failed after tfg upgrade" % unreached_hosts
+ raise exception.NotFound(message=self.message)
+ # check and get TECS version
+ tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(tecs_cmn.daisy_tecs_path)
+ if not tecs_version_pkg_file:
+ self.state = tecs_state['INSTALL_FAILED']
+ self.message = "TECS version file not found in %s" % tecs_cmn.daisy_tecs_path
+ raise exception.NotFound(message=self.message)
+ threads = []
+ LOG.info(_("Begin to update TECS controller nodes, please waiting...."))
+ upgrd.update_progress_to_db(req, role_id_list, tecs_state['UPDATING'], hosts_list)
+ # HA controller nodes are upgraded one by one; any failure aborts the whole upgrade
+ for host_ip in host_ha_list:
+ LOG.info(_("Update TECS controller node %s..." % host_ip))
+ rc = upgrd.thread_bin(req, role_id_list, host_ip, hosts_list)
+ if rc == 0:
+ LOG.info(_("Update TECS for %s successfully" % host_ip))
+ else:
+ LOG.info(_("Update TECS failed for %s, return %s" % (host_ip, rc)))
+ return
+ LOG.info(_("Begin to update TECS other nodes, please waiting...."))
+ max_parallel_upgrade_number = int(CONF.max_parallel_os_upgrade_number)
+ # host_ip_list and host_ha_list are sets; convert the difference to a list
+ # so that it can be sliced into batches below
+ compute_ip_list = list(host_ip_list - host_ha_list)
+ while compute_ip_list:
+ threads = []
+ if len(compute_ip_list) > max_parallel_upgrade_number:
+ upgrade_hosts = compute_ip_list[:max_parallel_upgrade_number]
+ compute_ip_list = compute_ip_list[max_parallel_upgrade_number:]
+ else:
+ upgrade_hosts = compute_ip_list
+ compute_ip_list = []
+ for host_ip in upgrade_hosts:
+ t = threading.Thread(target=upgrd.thread_bin, args=(req, role_id_list, host_ip, hosts_list))
+ t.setDaemon(True)
+ t.start()
+ threads.append(t)
+ try:
+ for t in threads:
+ t.join()
+ except:
+ LOG.warn(_("Join update thread %s failed!"
% t)) + + for role_id in role_id_list: + role_hosts=daisy_cmn.get_hosts_of_role(req,role_id) + for role_host in role_hosts: + if (role_host['status'] == tecs_state['UPDATE_FAILED'] or + role_host['status'] == tecs_state['UPDATING']): + role_id = [role_host['role_id']] + upgrd.update_progress_to_db(req, + role_id, + tecs_state['UPDATE_FAILED'], + hosts_list) + break + elif role_host['status'] == tecs_state['ACTIVE']: + role_id = [role_host['role_id']] + upgrd.update_progress_to_db(req, + role_id, + tecs_state['ACTIVE'], + hosts_list) + + def upgrade_progress(self, req, cluster_id): + return self._query_progress(req, cluster_id, "upgrade") + + + def export_db(self, req, cluster_id): + """ + Export daisy db data to tecs.conf and HA.conf. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + + (tecs_config, mgnt_ip_list) =\ + instl.get_cluster_tecs_config(req, cluster_id) + + config_files = {'tecs_conf':'','ha_conf':''} + tecs_install_path = "/home/tecs_install" + tecs_config_file = '' + if tecs_config: + cluster_conf_path = tecs_install_path + "/" + cluster_id + create_cluster_conf_path =\ + "rm -rf %s;mkdir %s" %(cluster_conf_path, cluster_conf_path) + daisy_cmn.subprocess_call(create_cluster_conf_path) + config.update_tecs_config(tecs_config, cluster_conf_path) + + get_tecs_conf = "ls %s|grep tecs.conf" % cluster_conf_path + obj = subprocess.Popen(get_tecs_conf, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + tecs_conf_file = "" + if stdoutput: + tecs_conf_file = stdoutput.split('\n')[0] + config_files['tecs_conf'] =\ + cluster_conf_path + "/" + tecs_conf_file + + get_ha_conf_cmd = "ls %s|grep HA_1.conf" % cluster_conf_path + obj = subprocess.Popen(get_ha_conf_cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + ha_conf_file = "" + if stdoutput: + ha_conf_file = stdoutput.split('\n')[0] + config_files['ha_conf'] =\ + cluster_conf_path + "/" + ha_conf_file + else: + LOG.info(_("No TECS config files generated.")) + + return config_files + + def update_disk_array(self, req, cluster_id): + (share_disk_info, volume_disk_info) =\ + disk_array.get_disk_array_info(req, cluster_id) + (controller_ha_nodes, computer_ips) =\ + disk_array.get_ha_and_compute_ips(req, cluster_id) + all_nodes_ip = computer_ips + controller_ha_nodes.keys() + + if all_nodes_ip: + compute_error_msg =\ + disk_array.config_compute_multipath(all_nodes_ip) + if compute_error_msg: + return compute_error_msg + else: + LOG.info(_("Config Disk Array multipath successfully")) + + if share_disk_info: + ha_error_msg =\ + disk_array.config_ha_share_disk(share_disk_info, + controller_ha_nodes) + if ha_error_msg: + return ha_error_msg + else: + LOG.info(_("Config Disk Array for HA nodes successfully")) + + if volume_disk_info: + cinder_error_msg =\ + disk_array.config_ha_cinder_volume(volume_disk_info, + controller_ha_nodes.keys()) + if cinder_error_msg: + return cinder_error_msg + else: + LOG.info(_("Config cinder volume for HA nodes successfully")) + + return 'update successfully' diff --git a/code/daisy/daisy/api/backends/tecs/common.py b/code/daisy/daisy/api/backends/tecs/common.py new file mode 100755 index 00000000..6f05b6cb --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/common.py @@ -0,0 +1,364 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for tecs API +""" +import os +import copy +import subprocess +import time +import re +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +import daisy.api.backends.common as daisy_cmn + + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +daisy_tecs_path = '/var/lib/daisy/tecs/' + +TECS_STATE = { + 'INIT' : 'init', + 'INSTALLING' : 'installing', + 'ACTIVE' : 'active', + 'INSTALL_FAILED': 'install-failed', + 'UNINSTALLING': 'uninstalling', + 'UNINSTALL_FAILED': 'uninstall-failed', + 'UPDATING': 'updating', + 'UPDATE_FAILED': 'update-failed', +} + + +def _get_cluster_network(cluster_networks, network_name): + network = [cn for cn in cluster_networks + if cn['name'] in network_name] + if not network or not network[0]: + msg = "network %s is not exist" % (network_name) + raise exception.InvalidNetworkConfig(msg) + else: + return network[0] + +def get_host_interface_by_network(host_detail, network_name): + host_detail_info = copy.deepcopy(host_detail) + interface_list = [hi for hi in host_detail_info['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and network_name == assigned_network['name']] + interface = {} + if interface_list: + interface = interface_list[0] + + if not interface and 'MANAGEMENT' == network_name: + msg = "network %s of host %s is not exist" % (network_name, host_detail_info['id']) + raise exception.InvalidNetworkConfig(msg) + + return interface + +def get_host_network_ip(req, host_detail, cluster_networks, network_name): + interface_network_ip = '' + host_interface = get_host_interface_by_network(host_detail, network_name) + if host_interface: + network = _get_cluster_network(cluster_networks, network_name) + assigned_network = daisy_cmn.get_assigned_network(req, + host_interface['id'], + network['id']) + interface_network_ip = assigned_network['ip'] + + if not interface_network_ip and 'MANAGEMENT' == network_name : + msg = "%s network ip of host %s can't be empty" % (network_name, host_detail['id']) + raise exception.InvalidNetworkConfig(msg) + return interface_network_ip + + +def get_storage_name_ip_dict(req, cluster_id, network_type): + name_ip_list = [] + ip_list = [] + roles = daisy_cmn.get_cluster_roles_detail(req, cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + + networks_list = [network for network in cluster_networks + if network['network_type'] == network_type] + networks_name_list = [network['name'] for network in networks_list] + + for role in roles: + 
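+ # walk every host of every role and record one {"<host name>.<network name>": ip}
+ # entry for each network of the requested network_type, skipping IPs that
+ # have already been collected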
role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + for role_host in role_hosts: + host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) + for network_name in networks_name_list: + ip = get_host_network_ip(req, host_detail, cluster_networks, + network_name) + + name_ip_dict = {} + if ip and ip not in ip_list: + ip_list.append(ip) + name_ip_dict.update({host_detail['name'] + '.' + + network_name: ip}) + name_ip_list.append(name_ip_dict) + + return name_ip_list + + +def get_network_netmask(cluster_networks, network_name): + network = _get_cluster_network(cluster_networks, network_name) + cidr = network['cidr'] + if not cidr: + msg = "cidr of network %s is not exist" % (network_name) + raise exception.InvalidNetworkConfig(msg) + + netmask = daisy_cmn.cidr_to_netmask(cidr) + if not netmask: + msg = "netmask of network %s is not exist" % (network_name) + raise exception.InvalidNetworkConfig(msg) + return netmask + +# every host only have one gateway +def get_network_gateway(cluster_networks, network_name): + network = _get_cluster_network(cluster_networks, network_name) + gateway = network['gateway'] + if not gateway and 'MANAGEMENT' == network_name: + msg = "gateway of network %s can't be empty" % (network_name) + raise exception.InvalidNetworkConfig(msg) + return gateway + +def get_mngt_network_vlan_id(cluster_networks): + mgnt_vlan_id = "" + management_network = [network for network in cluster_networks if network['network_type'] == 'MANAGEMENT'] + if (not management_network or + not management_network[0] or + not management_network[0].has_key('vlan_id')): + msg = "can't get management network vlan id" + raise exception.InvalidNetworkConfig(msg) + else: + mgnt_vlan_id = management_network[0]['vlan_id'] + return mgnt_vlan_id + + +def get_network_vlan_id(cluster_networks, network_type): + vlan_id = "" + general_network = [network for network in cluster_networks + if network['network_type'] == network_type] + if (not general_network or not general_network[0] or + not general_network[0].has_key('vlan_id')): + msg = "can't get %s network vlan id" % network_type + raise exception.InvalidNetworkConfig(msg) + else: + vlan_id = general_network[0]['vlan_id'] + return vlan_id + + +def sort_interfaces_by_pci(host_detail): + """ + Sort interfaces by pci segment, if interface type is bond, + user the pci of first memeber nic.This function is fix bug for + the name length of ovs virtual port, because if the name length large than + 15 characters, the port will create failed. 
+ :param interfaces: interfaces info of the host + :return: + """ + interfaces = eval(host_detail.get('interfaces', None)) \ + if isinstance(host_detail, unicode) else host_detail.get('interfaces', None) + if not interfaces: + LOG.info("This host don't have /interfaces info.") + return host_detail + + tmp_interfaces = copy.deepcopy(interfaces) + if not [interface for interface in tmp_interfaces + if interface.get('name', None) and len(interface['name']) > 8]: + LOG.info("The interfaces name of host is all less than 9 character, no need sort.") + return host_detail + + # add pci segment for the bond nic, the pci is equal to the first member nic pci + slaves_name_list = [] + for interface in tmp_interfaces: + if interface.get('type', None) == "bond" and \ + interface.get('slave1', None) and interface.get('slave2', None): + + slaves_name_list.append(interface['slave1']) + slaves_name_list.append(interface['slave2']) + first_member_nic_name = interface['slave1'] + + tmp_pci = [interface_tmp['pci'] + for interface_tmp in tmp_interfaces + if interface_tmp.get('name', None) and + interface_tmp.get('pci', None) and + interface_tmp['name'] == first_member_nic_name] + + if len(tmp_pci) != 1: + LOG.error("This host have two nics with same pci.") + continue + interface['pci'] = tmp_pci[0] + + tmp_interfaces = [interface for interface in tmp_interfaces + if interface.get('name', None) and + interface['name'] not in slaves_name_list] + + tmp_interfaces = sorted(tmp_interfaces, key = lambda interface: interface['pci']) + for index in range(0, len(tmp_interfaces)): + for interface in interfaces: + if interface['name'] != tmp_interfaces[index]['name']: + continue + + interface['name'] = "b" + str(index) if interface['type'] == "bond" else "e" + str(index) + + tmp_host_detail = copy.deepcopy(host_detail) + tmp_host_detail.update({'interfaces': interfaces}) + return tmp_host_detail + +def check_and_get_tecs_version(daisy_tecs_pkg_path): + tecs_version_pkg_file = "" + get_tecs_version_pkg = "ls %s| grep ^ZXTECS.*\.bin$" % daisy_tecs_pkg_path + obj = subprocess.Popen(get_tecs_version_pkg, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + if stdoutput: + tecs_version_pkg_name = stdoutput.split('\n')[0] + tecs_version_pkg_file = daisy_tecs_pkg_path + tecs_version_pkg_name + chmod_for_tecs_version = 'chmod +x %s' % tecs_version_pkg_file + daisy_cmn.subprocess_call(chmod_for_tecs_version) + return tecs_version_pkg_file + +def get_service_disk_list(req, params): + try: + service_disks = registry.list_service_disk_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return service_disks + +def get_cinder_volume_list(req, params): + try: + cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return cinder_volumes + + +def get_network_configuration_rpm_name(): + cmd = "ls %s | grep ^network-configuration.*\.rpm" % daisy_tecs_path + try: + network_rpm_name = subprocess.check_output( + cmd, shell=True, stderr=subprocess.STDOUT).split('\n')[0] + except subprocess.CalledProcessError: + msg = _("Get network-configuration rpm name by subprocess failed!") + raise exception.SubprocessCmdFailed(message=msg) + return network_rpm_name + + +def run_scrip(script, ip=None, password=None): + script = "\n".join(script) + _PIPE = subprocess.PIPE + if ip: + cmd = ["sshpass", "-p", "%s" % 
password,
+ "ssh", "-o StrictHostKeyChecking=no",
+ "%s" % ip, "bash -x"]
+ else:
+ cmd = ["bash", "-x"]
+ environ = os.environ
+ environ['LANG'] = 'en_US.UTF8'
+ obj = subprocess.Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE,
+ close_fds=True, shell=False, env=environ)
+
+ # abort the script on the first failing command, then feed it to bash via stdin
+ script = "function t(){ exit $? ; } \n trap t ERR \n" + script
+ out, err = obj.communicate(script)
+ return out, err
+
+
+class TecsShellExector(object):
+ """
+ Run config tasks (install/update/uninstall of the network-configuration rpm)
+ on a remote host before the tecs bin is installed.
+ """
+ def __init__(self, mgnt_ip, task_type, params={}):
+ self.task_type = task_type
+ self.mgnt_ip = mgnt_ip
+ self.params = params
+ self.clush_cmd = ""
+ self.rpm_name = get_network_configuration_rpm_name()
+ self.NETCFG_RPM_PATH = daisy_tecs_path + self.rpm_name
+ self.oper_type = {
+ 'install_rpm' : self._install_netcfg_rpm,
+ 'uninstall_rpm' : self._uninstall_netcfg_rpm,
+ 'update_rpm' : self._update_netcfg_rpm,
+ }
+ self.oper_shell = {
+ 'CMD_SSHPASS_PRE': "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s",
+ 'CMD_RPM_UNINSTALL': "rpm -e network-configuration",
+ 'CMD_RPM_INSTALL': "rpm -i /home/%(rpm)s" % {'rpm': self.rpm_name},
+ 'CMD_RPM_UPDATE': "rpm -U /home/%(rpm)s" % {'rpm': self.rpm_name},
+ 'CMD_RPM_SCP': "scp -o StrictHostKeyChecking=no %(path)s root@%(ssh_ip)s:/home" %
+ {'path': self.NETCFG_RPM_PATH, 'ssh_ip': mgnt_ip}
+ }
+ LOG.info(_("<<<network-configuration rpm is %s>>>" % self.rpm_name))
+ self._execute()
+
+ def _uninstall_netcfg_rpm(self):
+ self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
+ {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UNINSTALL']}
+ subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
+
+ def _update_netcfg_rpm(self):
+ self.clush_cmd = self.oper_shell['CMD_SSHPASS_PRE'] % \
+ {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_UPDATE']}
+ subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
+
+ def _install_netcfg_rpm(self):
+ if not os.path.exists(self.NETCFG_RPM_PATH):
+ LOG.error(_("<<<rpm %s does not exist, can not install>>>" % self.NETCFG_RPM_PATH))
+ return
+
+ self.clush_cmd = "%s;%s" % \
+ (self.oper_shell['CMD_SSHPASS_PRE'] %
+ {"ssh_ip":"", "cmd":self.oper_shell['CMD_RPM_SCP']}, \
+ self.oper_shell['CMD_SSHPASS_PRE'] %
+ {"ssh_ip":"ssh -o StrictHostKeyChecking=no " + self.mgnt_ip, "cmd":self.oper_shell['CMD_RPM_INSTALL']})
+ subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT)
+
+ def _execute(self):
+ try:
+ if not self.task_type or not self.mgnt_ip :
+ LOG.error(_("<<<task_type is empty or mgnt_ip %s is invalid, do nothing>>>" % self.mgnt_ip))
+ return
+
+ self.oper_type[self.task_type]()
+ except subprocess.CalledProcessError as e:
+ LOG.warn(_("<<<run cmd on %s failed: %s>>>" % (self.mgnt_ip, e.output.strip())))
+ except Exception as e:
+ LOG.exception(_(e.message))
+ else:
+ LOG.info(_("<<<run cmd '%s' on %s successfully>>>" % (self.clush_cmd, self.mgnt_ip)))
diff --git a/code/daisy/daisy/api/backends/tecs/config.py b/code/daisy/daisy/api/backends/tecs/config.py
new file mode 100755
index 00000000..e594917c
--- /dev/null
+++ b/code/daisy/daisy/api/backends/tecs/config.py
@@ -0,0 +1,832 @@
+# -*- coding: utf-8 -*-
+import os
+import re
+import commands
+import types
+import subprocess
+from oslo_log import log as logging
+from ConfigParser import ConfigParser
+from daisy.common import exception
+from daisy import i18n
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+_LE = i18n._LE
+_LI = i18n._LI
+_LW = i18n._LW
+
+service_map = {
+ 'lb': 'haproxy',
+ 'mongodb': 'mongod',
+ 'ha': '',
+ 'mariadb': 'mariadb',
+ 'amqp': 'rabbitmq-server',
'ceilometer-api':'openstack-ceilometer-api', + 'ceilometer-collector':'openstack-ceilometer-collector,openstack-ceilometer-mend', + 'ceilometer-central':'openstack-ceilometer-central', + 'ceilometer-notification':'openstack-ceilometer-notification', + 'ceilometer-alarm':'openstack-ceilometer-alarm-evaluator,openstack-ceilometer-alarm-notifier', + 'heat-api': 'openstack-heat-api', + 'heat-api-cfn': 'openstack-heat-api-cfn', + 'heat-engine': 'openstack-heat-engine', + 'ironic': 'openstack-ironic-api,openstack-ironic-conductor', + 'horizon': 'httpd', + 'keystone': 'openstack-keystone', + 'glance': 'openstack-glance-api,openstack-glance-registry', + 'cinder-volume': 'openstack-cinder-volume', + 'cinder-scheduler': 'openstack-cinder-scheduler', + 'cinder-api': 'openstack-cinder-api', + 'neutron-metadata': 'neutron-metadata-agent', + 'neutron-lbaas': 'neutron-lbaas-agent', + 'neutron-dhcp': 'neutron-dhcp-agent', + 'neutron-server': 'neutron-server', + 'neutron-l3': 'neutron-l3-agent', + 'compute': 'openstack-nova-compute', + 'nova-cert': 'openstack-nova-cert', + 'nova-sched': 'openstack-nova-scheduler', + 'nova-vncproxy': 'openstack-nova-novncproxy,openstack-nova-consoleauth', + 'nova-conductor': 'openstack-nova-conductor', + 'nova-api': 'openstack-nova-api', + 'nova-cells': 'openstack-nova-cells' + } + + +def add_service_with_host(services, name, host): + if name not in services: + services[name] = [] + services[name].append(host) + + +def add_service_with_hosts(services, name, hosts): + if name not in services: + services[name] = [] + for h in hosts: + services[name].append(h['management']['ip']) + +def test_ping(ping_src_nic, ping_desc_ips): + ping_cmd = 'fping' + for ip in set(ping_desc_ips): + ping_cmd = ping_cmd + ' -I ' + ping_src_nic + ' ' + ip + obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0 or _returncode == 1: + ping_result = stdoutput.split('\n') + unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] + else: + msg = "ping failed beaceuse there is invlid ip in %s" % ping_desc_ips + raise exception.InvalidIP(msg) + return unreachable_hosts + +def get_local_deployment_ip(tecs_deployment_ip): + def _get_ip_segment(full_ip): + if not full_ip: + return None + match = re.search('([0-9]{1,3}\.){3}', full_ip) + if match: + return match.group() + else: + print "can't find ip segment" + return None + + (status, output) = commands.getstatusoutput('ifconfig') + netcard_pattern = re.compile('\S*: ') + ip_str = '([0-9]{1,3}\.){3}[0-9]{1,3}' + ip_pattern = re.compile('(inet %s)' % ip_str) + pattern = re.compile(ip_str) + nic_ip = {} + for netcard in re.finditer(netcard_pattern, str(output)): + nic_name = netcard.group().split(': ')[0] + if nic_name == "lo": + continue + ifconfig_nic_cmd = "ifconfig %s" % nic_name + (status, output) = commands.getstatusoutput(ifconfig_nic_cmd) + if status: + continue + ip = pattern.search(str(output)) + if ip and ip.group() != "127.0.0.1": + nic_ip[nic_name] = ip.group() + + deployment_ip = '' + ip_segment = _get_ip_segment(tecs_deployment_ip) + for nic in nic_ip.keys(): + if ip_segment == _get_ip_segment(nic_ip[nic]): + deployment_ip = nic_ip[nic] + break + if not deployment_ip: + for nic,ip in nic_ip.items(): + if not test_ping(nic,[tecs_deployment_ip]): + deployment_ip = nic_ip[nic] + break + return deployment_ip + + +class AnalsyConfig(object): + def __init__(self, 
all_configs): + self.all_configs = all_configs + + self.services = {} + self.components = [] + self.modes = {} + # self.ha_conf = {} + self.services_in_component = {} + # self.heartbeat = {} + self.lb_components = [] + self.heartbeats = [[], [], []] + self.lb_vip = '' + self.ha_vip = '' + self.db_vip = '' + self.glance_vip = '' + self.public_vip = '' + self.share_disk_services = [] + self.ha_conf = {} + self.child_cell_dict = {} + self.ha_master_host = {} + + def get_heartbeats(self, host_interfaces): + for network in host_interfaces: + #if network.has_key("deployment") and network["deployment"]["ip"]: + # self.heartbeats[0].append(network["deployment"]["ip"]) + self.heartbeats[0].append(network["management"]["ip"]) + if network.has_key("storage") and network["storage"]["ip"]: + self.heartbeats[1].append(network["storage"]["ip"]) + + #delete empty heartbeat line + if not self.heartbeats[0]: + self.heartbeats[0] = self.heartbeats[1] + self.heartbeats[1] = self.heartbeats[2] + if not self.heartbeats[1]: + self.heartbeats[1] = self.heartbeats[2] + + # remove repeated ip + if set(self.heartbeats[1]) == set(self.heartbeats[0]): + self.heartbeats[1] = [] + if set(self.heartbeats[2]) != set(self.heartbeats[0]): + self.heartbeats[1] = self.heartbeats[2] + self.heartbeats[2] = [] + if set(self.heartbeats[2]) == set(self.heartbeats[0]) or set(self.heartbeats[2]) == set(self.heartbeats[1]): + self.heartbeats[2] = [] + + def prepare_child_cell(self, child_cell_name, configs): + cell_compute_hosts = str() + cell_compute_name = child_cell_name[11:] + '_COMPUTER' + for role_name, role_configs in self.all_configs.items(): + if role_name == cell_compute_name: + cell_compute_host = [ + host_interface['management']['ip'] + for host_interface in role_configs['host_interfaces']] + cell_compute_hosts = ",".join(cell_compute_host) + self.all_configs.pop(role_name) + + child_cell_host = configs['host_interfaces'][0]['management']['ip'] + self.child_cell_dict[repr(child_cell_host).strip("u'")] \ + = repr(cell_compute_hosts).strip("u'") + add_service_with_host(self.services, 'CONFIG_CHILD_CELL_DICT', + str(self.child_cell_dict)) + + def prepare_ha_lb(self, role_configs, is_ha, is_lb): + if is_lb: + self.ha_master_host['ip'] = role_configs['host_interfaces'][0]['management']['ip'] + self.ha_master_host['hostname'] = role_configs['host_interfaces'][0]['name'] + self.components.append('CONFIG_LB_INSTALL') + add_service_with_hosts(self.services, + 'CONFIG_LB_BACKEND_HOSTS', + role_configs['host_interfaces']) + self.lb_vip = role_configs['vip'] + if is_ha: + self.ha_vip = role_configs['vip'] + self.share_disk_services += role_configs['share_disk_services'] + local_deployment_ip = get_local_deployment_ip( + role_configs['host_interfaces'][0]['management']['ip']) + if local_deployment_ip: + add_service_with_host( + self.services, 'CONFIG_REPO', + 'http://'+local_deployment_ip+'/tecs_install/') + else: + msg = "can't find ip for yum repo" + raise exception.InvalidNetworkConfig(msg) + self.components.append('CONFIG_HA_INSTALL') + add_service_with_host( + self.services, 'CONFIG_HA_HOST', + role_configs['host_interfaces'][0]['management']['ip']) + add_service_with_hosts(self.services, 'CONFIG_HA_HOSTS', + role_configs['host_interfaces']) + ntp_host = role_configs['ntp_server'] \ + if role_configs['ntp_server'] else role_configs['vip'] + add_service_with_host(self.services, 'CONFIG_NTP_SERVERS', + ntp_host) + + if role_configs['db_vip']: + self.db_vip = role_configs['db_vip'] + add_service_with_host(self.services, 
'CONFIG_MARIADB_HOST', role_configs['db_vip']) + else: + self.db_vip = role_configs['vip'] + add_service_with_host(self.services, 'CONFIG_MARIADB_HOST', role_configs['vip']) + + if role_configs['glance_vip']: + self.glance_vip = role_configs['glance_vip'] + add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['glance_vip']) + else: + self.glance_vip = role_configs['vip'] + add_service_with_host(self.services, 'CONFIG_GLANCE_HOST', role_configs['vip']) + + if role_configs['public_vip']: + vip = role_configs['public_vip'] + self.public_vip = role_configs['public_vip'] + else: + vip = role_configs['vip'] + + self.public_vip = vip + add_service_with_host(self.services, + 'CONFIG_NOVA_VNCPROXY_HOST', vip) + add_service_with_host(self.services, 'CONFIG_PUBLIC_IP', vip) + add_service_with_host(self.services, 'CONFIG_HORIZON_HOST', vip) + + add_service_with_host(self.services, 'CONFIG_ADMIN_IP', vip) + add_service_with_host(self.services, 'CONFIG_INTERNAL_IP', vip) + + def prepare_role_service(self, is_ha, service, role_configs): + host_key_name = "CONFIG_%s_HOST" % service + hosts_key_name = "CONFIG_%s_HOSTS" % service + + add_service_with_hosts(self.services, hosts_key_name, + role_configs['host_interfaces']) + if service != 'LB' and service not in ['NOVA_VNCPROXY', 'MARIADB', 'GLANCE', 'HORIZON']: + add_service_with_host(self.services, host_key_name, + role_configs['vip']) + + if is_ha and service == 'LB': + add_service_with_hosts( + self.services, 'CONFIG_LB_FRONTEND_HOSTS', + role_configs['host_interfaces']) + + def prepare_mode(self, is_ha, is_lb, service): + mode_key = "CONFIG_%s_INSTALL_MODE" % service + if is_ha: + self.modes.update({mode_key: 'HA'}) + elif is_lb: + self.modes.update({mode_key: 'LB'}) + # special process + if service == 'GLANCE': + self.modes.update( + {'CONFIG_GLANCE_API_INSTALL_MODE': 'LB'}) + self.modes.update( + {'CONFIG_GLANCE_REGISTRY_INSTALL_MODE': 'LB'}) + #if s == 'HEAT': + # self.modes.update({'CONFIG_HEAT_API_INSTALL_MODE': 'LB'}) + # self.modes.update({'CONFIG_HEAT_API_CFN_INSTALL_MODE': 'LB'}) + #if s == 'CEILOMETER': + # self.modes.update({'CONFIG_CEILOMETER_API_INSTALL_MODE': 'LB'}) + if service == 'IRONIC': + self.modes.update( + {'CONFIG_IRONIC_API_INSTALL_MODE': 'LB'}) + else: + self.modes.update({mode_key: 'None'}) + + def prepare_services_in_component(self, component, service, role_configs): + if component not in self.services_in_component.keys(): + self.services_in_component[component] = {} + self.services_in_component[component]["service"] = [] + self.services_in_component[component]["service"].append(service_map[service]) + + + if component == "horizon": + self.services_in_component[component]["fip"] = self.public_vip + elif component == "database": + self.services_in_component[component]["fip"] = self.db_vip + elif component == "glance": + self.services_in_component[component]["fip"] = self.glance_vip + else: + self.services_in_component[component]["fip"] = role_configs["vip"] + + + network_name = '' + if component in ['horizon'] and role_configs["host_interfaces"][0].has_key('public'): + network_name = 'public' + else: + network_name = 'management' + + self.services_in_component[component]["netmask"] = \ + role_configs["host_interfaces"][0][network_name]["netmask"] + self.services_in_component[component]["nic_name"] = \ + role_configs["host_interfaces"][0][network_name]["name"] + if component == 'loadbalance' and \ + self.all_configs.has_key('CONTROLLER_LB') and \ + self.all_configs['CONTROLLER_LB']['vip']: + 
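+            # Descriptive note (added): when a separate CONTROLLER_LB role exists and
+            # carries a vip, the loadbalance component's floating IP is taken from that
+            # role's vip rather than from the current role's settings.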
self.services_in_component[component]["fip"] = \ + self.all_configs['CONTROLLER_LB']['vip'] + + def prepare_amqp_mariadb(self): + if self.lb_vip: + amqp_vip = '' + if self.modes['CONFIG_AMQP_INSTALL_MODE'] == 'LB': + amqp_vip = self.lb_vip + add_service_with_host( + self.services, + 'CONFIG_AMQP_CLUSTER_MASTER_NODE_IP', + self.ha_master_host['ip']) + add_service_with_host( + self.services, 'CONFIG_AMQP_CLUSTER_MASTER_NODE_HOSTNAME', + self.ha_master_host['hostname']) + else: + amqp_vip = self.ha_vip + amqp_dict = "{'%s':'%s,%s,%s,%s'}" % (amqp_vip, self.ha_vip, + self.lb_vip, self.glance_vip, self.public_vip) + mariadb_dict = "{'%s':'%s,%s,%s,%s'}" % (self.db_vip, self.ha_vip, + self.lb_vip, self.glance_vip, self.public_vip) + add_service_with_host(self.services, 'CONFIG_LB_HOST', self.lb_vip) + elif self.ha_vip: + amqp_dict = "{'%s':'%s,%s,%s'}" % (self.ha_vip, self.ha_vip, + self.glance_vip, self.public_vip) + mariadb_dict = "{'%s':'%s,%s,%s'}" % (self.db_vip, self.ha_vip, + self.glance_vip, self.public_vip) + else: + amqp_dict = "{}" + mariadb_dict = "{}" + if self.lb_vip or self.ha_vip: + add_service_with_host(self.services, 'CONFIG_MARIADB_DICT', + mariadb_dict) + add_service_with_host(self.services, 'CONFIG_AMQP_DICT', amqp_dict) + + def prepare(self): + for role_name, role_configs in self.all_configs.items(): + if role_name == "OTHER": + continue + + is_ha = re.match(".*_HA$", role_name) is not None + is_lb = re.match(".*_LB$", role_name) is not None + is_child_cell = re.match(".*_CHILD_CELL.*", role_name) is not None + if is_child_cell: + self.prepare_child_cell(role_name, role_configs) + continue + self.prepare_ha_lb(role_configs, is_ha, is_lb) + + for service, component in role_configs['services'].items(): + s = service.strip().upper().replace('-', '_') + self.prepare_role_service(is_ha, s, role_configs) + self.prepare_mode(is_ha, is_lb, s) + + if is_lb: + self.lb_components.append(component) + c = "CONFIG_%s_INSTALL" % \ + component.strip().upper().replace('-', '_') + self.components.append(c) + + if is_ha: + if component == 'log': + continue + self.prepare_services_in_component(component, service, + role_configs) + if is_ha: + self.get_heartbeats(role_configs['host_interfaces']) + + self.prepare_amqp_mariadb() + + def update_conf_with_services(self, tecs): + for s in self.services: + if tecs.has_option("general", s): + print "%s is update" % s + if type(self.services[s]) is types.ListType: + if self.services[s] and not self.services[s][0]: + return + tecs.set("general", s, ','.join(self.services[s])) + else: + print "service %s is not exit in conf file" % s + + def update_conf_with_components(self, tecs): + for s in self.components: + if tecs.has_option("general", s): + print "Component %s is update" % s + tecs.set("general", s, 'y') + else: + print "component %s is not exit in conf file" % s + + def update_conf_with_modes(self, tecs): + for k, v in self.modes.items(): + if tecs.has_option("general", k): + print "mode %s is update" % k + tecs.set("general", k, v) + else: + print "mode %s is not exit in conf file" % k + + def update_tecs_conf(self, tecs): + self.update_conf_with_services(tecs) + self.update_conf_with_components(tecs) + self.update_conf_with_modes(tecs) + + def update_ha_conf(self, ha, ha_nic_name, tecs=None): + print "heartbeat line is update" + heart_beat_list = [] + if self.all_configs['OTHER'].get('dns_config'): + for heartbeat in self.heartbeats: + tmp_list = [] + for name_ip in self.all_configs['OTHER']['dns_config']: + for tmp in heartbeat: + if tmp == 
name_ip.keys()[0]: + tmp_list.append(name_ip.values()[0]) + heart_beat_list.append(tmp_list) + self.heartbeats = heart_beat_list + + for k, v in self.services_in_component.items(): + for name_ip in self.all_configs['OTHER']['dns_config']: + if v['fip'] == name_ip.keys()[0]: + v['fip'] = name_ip.values()[0] + ha.set('DEFAULT', 'heartbeat_link1', ','.join(self.heartbeats[0])) + ha.set('DEFAULT', 'heartbeat_link2', ','.join(self.heartbeats[1])) + ha.set('DEFAULT', 'heartbeat_link3', ','.join(self.heartbeats[2])) + + ha.set('DEFAULT', 'components', ','.join(self.services_in_component.keys())) + + for k, v in self.services_in_component.items(): + print "component %s is update" % k + ha.set('DEFAULT', k, ','.join(v['service'])) + if k == 'glance': + if 'glance' in self.share_disk_services: + ha.set('DEFAULT', 'glance_device_type', 'iscsi') + ha.set('DEFAULT', 'glance_device', '/dev/mapper/vg_glance-lv_glance') + ha.set('DEFAULT', 'glance_fs_type', 'ext4') + else: + ha.set('DEFAULT', 'glance_device_type', 'drbd') + ha.set('DEFAULT', 'glance_device', '/dev/vg_data/lv_glance') + ha.set('DEFAULT', 'glance_fs_type', 'ext4') + # mariadb now not support db cluster, don't support share disk. + if k == "database": + if 'db' in self.share_disk_services: + ha.set('DEFAULT', 'database_device', '/dev/mapper/vg_db-lv_db') + ha.set('DEFAULT', 'database_fs_type', 'ext4') + + if "mongod" in v['service']: + if 'mongodb' in self.share_disk_services: + ha.set('DEFAULT', 'mongod_device', '/dev/mapper/vg_mongodb-lv_mongodb') + ha.set('DEFAULT', 'mongod_fs_type', 'ext4') + ha.set('DEFAULT', 'mongod_local', '') + if tecs: + tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'n') + else: + ha.set('DEFAULT', 'mongod_fs_type', 'ext4') + ha.set('DEFAULT', 'mongod_local', 'yes') + if tecs: + tecs.set("general", 'CONFIG_HA_INSTALL_MONGODB_LOCAL', 'y') + + if k not in self.lb_components: + # if "bond" in v['nic_name']: + # v['nic_name'] = "vport" + ha.set('DEFAULT', k+'_fip', v['fip']) + if ha_nic_name and k not in ['horizon']: + nic_name = ha_nic_name + else: + nic_name = v['nic_name'] + ha.set('DEFAULT', k+'_nic', nic_name) + cidr_netmask = reduce(lambda x, y: x + y, + [bin(int(i)).count('1') for i in v['netmask'].split('.')]) + ha.set('DEFAULT', k+'_netmask', cidr_netmask) + +def update_conf(tecs, key, value): + tecs.set("general", key, value) + +def get_conf(tecs_conf_file, **kwargs): + result = {} + if not kwargs: + return result + + tecs = ConfigParser() + tecs.optionxform = str + tecs.read(tecs_conf_file) + + result = {key : tecs.get("general", kwargs.get(key, None)) + for key in kwargs.keys() + if tecs.has_option("general", kwargs.get(key, None))} + return result + + +def _get_physnics_info(network_type, phynics): + # bond1(active-backup;lacp;eth1-eth2) + # eth0 + # phynet1:eth0 + # phynet1:bond1(active-backup;lacp;eth1-eth2), phynet2:eth3 + phynics_info = [] + if not phynics: + return + + phynic_info = phynics.split("(") + if 2 == len(phynic_info): + phynic_info = phynic_info[1][0:-1].split(";") + phynics_info.extend(phynic_info[-1].split('-')) + else: + phynic_info = phynic_info[0].split(":") + if network_type == 'vlan': + phynics_info.append(phynic_info[1]) + else: + phynics_info.append(phynic_info[0]) + return phynics_info + + +def get_physnics_info(network_type, phynics): + # bond1(active-backup;lacp;eth1-eth2) + # phynet1:eth0 + # phynet1:bond1(active-backup;lacp;eth1-eth2), phynet1:eth3 + phynics_info = [] + if network_type == 'vxlan': + phynics_info.extend(_get_physnics_info(network_type, phynics)) + elif 
network_type == 'vlan': + phynics = phynics.split(',') + for phynic_info in phynics: + phynics_info.extend(_get_physnics_info(network_type, phynic_info)) + return phynics_info + + +def update_conf_with_zenic(tecs, zenic_configs): + zenic_vip = zenic_configs.get('vip') + if not zenic_vip: + return + + auth = zenic_configs.get('auth') + if not auth: + auth = 'restconf:LkfhRDGIPyGzbWGM2uAaNQ==' + + update_conf(tecs, 'CONFIG_ZENIC_USER_AND_PW', auth) + update_conf(tecs, 'CONFIG_ZENIC_API_NODE', '%s:8181' % zenic_vip) + + ml2_drivers = tecs.get( + "general", 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS').split(',') + ml2_drivers.extend(['proxydriver']) + update_conf( + tecs, 'CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS', ','.join(ml2_drivers)) + + +class DvsDaisyConfig(object): + + def __init__(self, tecs, networks_config): + self.tecs = tecs + self.networks_config = networks_config + + # common + self.dvs_network_type = [] + self.dvs_vswitch_type = {} + self.dvs_physnics = [] + self.enable_sdn = False + + # for vlan + self.dvs_physical_mappings = [] + self.dvs_bridge_mappings = [] + + # for vxlan + self.dvs_vtep_ip_ranges = [] + self.dvs_vxlan_info = '' + self.dvs_domain_id = {} + + def config_tecs_for_dvs(self): + self._get_dvs_config() + self._set_dvs_config() + + def _get_dvs_config(self): + network = self.networks_config + vswitch_type = network.get('vswitch_type') + if not vswitch_type: + return + self.dvs_vswitch_type.update(vswitch_type) + + network_type = network['network_config'].get('network_type') + + if network_type in ['vlan']: + self.dvs_network_type.extend(['vlan']) + self._private_network_conf_for_dvs(network) + + elif network_type in ['vxlan']: + self.dvs_network_type.extend(['vxlan']) + self._bearing_network_conf_for_dvs(network) + + def _set_dvs_config(self): + if not self.networks_config.get('enable_sdn') and ( + self.dvs_vswitch_type.get('ovs_agent_patch')) and ( + len(self.dvs_vswitch_type.get('ovs_agent_patch')) > 0): + return + + if not self.dvs_vswitch_type.get('ovs_agent_patch') and not self.dvs_vswitch_type.get('ovdk'): + return + + update_conf(self.tecs, 'CONFIG_DVS_TYPE', self.dvs_vswitch_type) + update_conf(self.tecs, 'CONFIG_DVS_PHYSICAL_NICS', + ",".join(set(self.dvs_physnics))) + + if 'vlan' in self.dvs_network_type: + update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', + self.dvs_bridge_mappings) + update_conf(self.tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', + self.dvs_physical_mappings) + + elif 'vxlan' in self.dvs_network_type: + update_conf(self.tecs, 'CONFIG_DVS_VXLAN_INFO', + self.dvs_vxlan_info) + update_conf(self.tecs, 'CONFIG_DVS_NODE_DOMAIN_ID', + self.dvs_domain_id) + update_conf(self.tecs, 'CONFIG_NEUTRON_ML2_VTEP_IP_RANGES', + self.dvs_vtep_ip_ranges) + + ''' + private_networks_config_for_dvs + { + network_config = { + enable_sdn = '' + network_type = ['vlan'] + } + + vswitch_type = { ===============> such as vxlan + 'ovdk': ['192.168.0.2', '192.168.0.20'] , + 'ovs_agent_patch': ['192.168.0.21', '192.168.0.30'] + } + + physnics_config = { + physical_mappings = eth0 ===============> such as ovs vlan + bridge_mappings = ==========> private->name & physical_name + } + } + ''' + + def _private_network_conf_for_dvs(self, private_network): + self.dvs_vswitch_type.update(private_network.get('vswitch_type')) + self.dvs_bridge_mappings = \ + private_network['physnics_config'].get('bridge_mappings') + self.dvs_physical_mappings = \ + private_network['physnics_config'].get('physical_mappings') + self.dvs_physical_mappings = self.dvs_physical_mappings.encode('utf8') 
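+        # Illustrative example (hypothetical values, mirroring the format shown in the
+        # get_physnics_info() comments above): a vlan physical_mappings string such as
+        #   'phynet1:bond1(active-backup;lacp;eth1-eth2),phynet2:eth3'
+        # is parsed into the member NICs ['eth1', 'eth2', 'eth3'], which are then
+        # joined into CONFIG_DVS_PHYSICAL_NICS by _set_dvs_config().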
+ + self.dvs_physnics.extend( + get_physnics_info('vlan', self.dvs_physical_mappings)) + + ''' + bearing_networks_config + { + network_config = { + enable_sdn = '' + network_type = ['vxlan'] + vtep_ip_ranges=[['192.168.0.2','192.168.0.200']]==>bearing->ip_range + } + + vswitch_type = { ==========> bearing->assigned_network + 'ovdk': ['192.168.0.2', '192.168.0.20'] , + 'ovs_agent_patch': ['192.168.0.21', '192.168.0.30'] + } + + physnics_config = { + vxlan_info = eth0 ======>bearing->assigned_network->host_interface + domain_id = { ==========> bearing->assigned_network + '0': ['192.168.0.2', '192.168.0.20'] , + '1': ['192.168.0.21', '192.168.0.30'] + } + } + } + ''' + + def _bearing_network_conf_for_dvs(self, bearing_network): + self.dvs_vtep_ip_ranges.extend( + bearing_network['network_config'].get('vtep_ip_ranges')) + self.dvs_vswitch_type.update(bearing_network.get('vswitch_type')) + self.dvs_domain_id.update( + bearing_network['physnics_config'].get('dvs_domain_id')) + self.dvs_vxlan_info = \ + bearing_network['physnics_config'].get('vxlan_info') + self.dvs_physnics.extend( + get_physnics_info('vxlan', self.dvs_vxlan_info)) + + +default_tecs_conf_template_path = "/var/lib/daisy/tecs/" +tecs_conf_template_path = default_tecs_conf_template_path + +def private_network_conf(tecs, private_networks_config): + if private_networks_config: + mode_str = { + '0':'(active-backup;off;"%s-%s")', + '1':'(balance-slb;off;"%s-%s")', + '2':'(balance-tcp;active;"%s-%s")' + } + + config_neutron_sriov_bridge_mappings = [] + config_neutron_sriov_physnet_ifaces = [] + config_neutron_ovs_bridge_mappings = [] + config_neutron_ovs_physnet_ifaces = [] + for private_network in private_networks_config: + type = private_network.get('type', None) + name = private_network.get('name', None) + assign_networks = private_network.get('assigned_networks', None) + slave1 = private_network.get('slave1', None) + slave2 = private_network.get('slave2', None) + mode = private_network.get('mode', None) + if not type or not name or not assign_networks or not slave1 or not slave2 or not mode: + break + + for assign_network in assign_networks: + network_type = assign_network.get('network_type', None) + # TODO:why ml2_type & physnet_name is null + ml2_type = assign_network.get('ml2_type', None) + physnet_name = assign_network.get('physnet_name', None) + if not network_type or not ml2_type or not physnet_name: + break + + # ether + if 0 == cmp(type, 'ether') and 0 == cmp(network_type, 'PRIVATE'): + if 0 == cmp(ml2_type, 'sriov'): + config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_sriov_physnet_ifaces.append("%s:%s" % (physnet_name, name)) + elif 0 == cmp(ml2_type, 'ovs'): + config_neutron_ovs_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_ovs_physnet_ifaces.append("%s:%s" % (physnet_name, name)) + # bond + elif 0 == cmp(type, 'bond') and 0 == cmp(network_type, 'PRIVATE'): + if 0 == cmp(ml2_type, 'sriov'): + config_neutron_sriov_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_sriov_physnet_ifaces.append( + "%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2))) + elif 0 == cmp(ml2_type, 'ovs'): + config_neutron_ovs_bridge_mappings.append("%s:%s" % (physnet_name, "br-" + name)) + config_neutron_ovs_physnet_ifaces.append( + "%s:%s" % (physnet_name, name + mode_str[mode] % (slave1, slave2))) + + if config_neutron_sriov_bridge_mappings: + update_conf(tecs, + 'CONFIG_NEUTRON_SRIOV_BRIDGE_MAPPINGS', + 
",".join(config_neutron_sriov_bridge_mappings)) + if config_neutron_sriov_physnet_ifaces: + update_conf(tecs, + 'CONFIG_NEUTRON_SRIOV_PHYSNET_IFACES', + ",".join(config_neutron_sriov_physnet_ifaces)) + if config_neutron_ovs_bridge_mappings : + update_conf(tecs, 'CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS', ",".join(config_neutron_ovs_bridge_mappings)) + if config_neutron_ovs_physnet_ifaces: + update_conf(tecs, 'CONFIG_NEUTRON_OVS_PHYSNET_IFACES', ",".join(config_neutron_ovs_physnet_ifaces)) + +def update_tecs_config(config_data, cluster_conf_path): + print "tecs config data is:" + import pprint + pprint.pprint(config_data) + msg="tecs config data is: %s" % config_data + LOG.info(msg) + + daisy_tecs_path = tecs_conf_template_path + tecs_conf_template_file = os.path.join(daisy_tecs_path, "tecs.conf") + ha_conf_template_file = os.path.join(daisy_tecs_path, "HA.conf") + if not os.path.exists(cluster_conf_path): + os.makedirs(cluster_conf_path) + tecs_conf_out = os.path.join(cluster_conf_path, "tecs.conf") + ha_config_out = os.path.join(cluster_conf_path, "HA_1.conf") + + tecs = ConfigParser() + tecs.optionxform = str + tecs.read(tecs_conf_template_file) + + cluster_data = config_data['OTHER']['cluster_data'] + update_conf(tecs, 'CLUSTER_ID', cluster_data['id']) + if cluster_data.has_key('networking_parameters'): + networking_parameters = cluster_data['networking_parameters'] + if networking_parameters.has_key('base_mac') and networking_parameters['base_mac']: + update_conf(tecs, 'CONFIG_NEUTRON_BASE_MAC', networking_parameters['base_mac']) + if networking_parameters.has_key('gre_id_range') and len(networking_parameters['gre_id_range'])>1 \ + and networking_parameters['gre_id_range'][0] and networking_parameters['gre_id_range'][1]: + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES', ("%s:%s" % (networking_parameters['gre_id_range'][0],networking_parameters['gre_id_range'][1]))) + if networking_parameters.get("vni_range",['1000','3000']) and len(networking_parameters['vni_range'])>1 \ + and networking_parameters['vni_range'][0] and networking_parameters['vni_range'][1]: + update_conf(tecs, 'CONFIG_NEUTRON_ML2_VNI_RANGES', ("%s:%s" % (networking_parameters['vni_range'][0],networking_parameters['vni_range'][1]))) + if networking_parameters.get("segmentation_type","vlan"): + segmentation_type = networking_parameters.get("segmentation_type","vlan") + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES', segmentation_type) + update_conf(tecs, 'CONFIG_NEUTRON_ML2_TYPE_DRIVERS', segmentation_type) + + physic_network_cfg = config_data['OTHER']['physic_network_config'] + if physic_network_cfg.get('json_path', None): + update_conf(tecs, 'CONFIG_NEUTRON_ML2_JSON_PATH', physic_network_cfg['json_path']) + if physic_network_cfg.get('vlan_ranges', None): + update_conf(tecs, 'CONFIG_NEUTRON_ML2_VLAN_RANGES',physic_network_cfg['vlan_ranges']) + if config_data['OTHER']['tecs_installed_hosts']: + update_conf(tecs, 'EXCLUDE_SERVERS', ",".join(config_data['OTHER']['tecs_installed_hosts'])) + + ha = ConfigParser() + ha.optionxform = str + ha.read(ha_conf_template_file) + + config = AnalsyConfig(config_data) + if config_data['OTHER'].has_key('ha_nic_name'): + ha_nic_name = config_data['OTHER']['ha_nic_name'] + else: + ha_nic_name = "" + + config.prepare() + + config.update_tecs_conf(tecs) + config.update_ha_conf(ha, ha_nic_name, tecs) + + update_conf_with_zenic(tecs, config_data['OTHER']['zenic_config']) + if config_data['OTHER']['dvs_config'].has_key('network_config'): + 
config_data['OTHER']['dvs_config']['network_config']['enable_sdn'] = \ + config_data['OTHER']['zenic_config'].get('vip', False) + dvs_config = DvsDaisyConfig(tecs, config_data['OTHER']['dvs_config']) + + dvs_config.config_tecs_for_dvs() + + tecs.write(open(tecs_conf_out, "w+")) + ha.write(open(ha_config_out, "w+")) + + return + + +def test(): + print("Hello, world!") diff --git a/code/daisy/daisy/api/backends/tecs/disk_array.py b/code/daisy/daisy/api/backends/tecs/disk_array.py new file mode 100755 index 00000000..17dc469e --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/disk_array.py @@ -0,0 +1,230 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for tecs API +""" +import os +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +tecs_state = tecs_cmn.TECS_STATE +def _get_service_disk_for_disk_array(req, role_id): + disk_info = [] + service_disks = tecs_cmn.get_service_disk_list(req, {'filters': {'role_id': role_id}}) + for service_disk in service_disks: + share_disk = {} + if service_disk['disk_location'] == 'share': + share_disk['service'] = service_disk['service'] + share_disk['lun'] = service_disk['lun'] + share_disk['data_ips'] = service_disk['data_ips'].split(',') + share_disk['lvm_config'] = {} + share_disk['lvm_config']['size'] = service_disk['size'] + share_disk['lvm_config']['vg_name'] = 'vg_%s' % service_disk['service'] + share_disk['lvm_config']['lv_name'] = 'lv_%s' % service_disk['service'] + share_disk['lvm_config']['fs_type'] = 'ext4' + disk_info.append(share_disk) + return disk_info + +def _get_cinder_volume_for_disk_array(req, role_id): + cinder_volume_info = [] + cinder_volumes = tecs_cmn.get_cinder_volume_list(req, {'filters': {'role_id': role_id}}) + for cinder_volume in cinder_volumes: + cv_info = {} + cv_info['management_ips'] = cinder_volume['management_ips'].split(',') + cv_info['data_ips'] = cinder_volume['data_ips'].split(',') + cv_info['user_name'] = cinder_volume['user_name'] + cv_info['user_pwd'] = cinder_volume['user_pwd'] + index = cinder_volume['backend_index'] + cv_info['backend'] = {index:{}} + cv_info['backend'][index]['volume_driver'] = cinder_volume['volume_driver'] + cv_info['backend'][index]['volume_type'] = cinder_volume['volume_type'] + cv_info['backend'][index]['pools'] = cinder_volume['pools'].split(',') + cinder_volume_info.append(cv_info) + return 
cinder_volume_info + +def get_disk_array_info(req, cluster_id): + share_disk_info = [] + volume_disk_info = {} + cinder_volume_disk_list = [] + roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + if role['name'] == 'CONTROLLER_HA': + share_disks = _get_service_disk_for_disk_array(req, role['id']) + share_disk_info += share_disks + cinder_volumes = _get_cinder_volume_for_disk_array(req, role['id']) + cinder_volume_disk_list += cinder_volumes + if cinder_volume_disk_list: + volume_disk_info['disk_array'] = cinder_volume_disk_list + return (share_disk_info, volume_disk_info) + +def get_host_min_mac(host_interfaces): + macs = [interface['mac'] for interface in host_interfaces + if interface['type'] == 'ether' and interface['mac']] + macs.sort() + return macs[0] + +def get_ha_and_compute_ips(req, cluster_id): + controller_ha_nodes = {} + computer_ips = [] + + roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + for role_host in role_hosts: + #host has installed tecs are exclusive + if (role_host['status'] == tecs_state['ACTIVE'] or + role_host['status'] == tecs_state['UPDATING'] or + role_host['status'] == tecs_state['UPDATE_FAILED']): + continue + host_detail = daisy_cmn.get_host_detail(req, + role_host['host_id']) + host_ip = tecs_cmn.get_host_network_ip(req, + host_detail, + cluster_networks, + 'MANAGEMENT') + if role['name'] == "CONTROLLER_HA": + pxe_mac = [interface['mac'] for interface in host_detail['interfaces'] + if interface['is_deployment'] == True] + if pxe_mac and pxe_mac[0]: + controller_ha_nodes[host_ip] = pxe_mac[0] + else: + min_mac = get_host_min_mac(host_detail['interfaces']) + controller_ha_nodes[host_ip] = min_mac + if role['name'] == "COMPUTER": + computer_ips.append(host_ip) + return (controller_ha_nodes, computer_ips) + +def config_ha_share_disk(share_disk_info, controller_ha_nodes): + + error_msg = "" + cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' + daisy_cmn.subprocess_call(cmd) + with open("/var/lib/daisy/tecs/storage_auto_config/base/control.json", "w") as fp: + json.dump(share_disk_info, fp, indent=2) + + + for host_ip in controller_ha_nodes.keys(): + password = "ossdbg1" + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd) + cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) + daisy_cmn.subprocess_call(cmd) + try: + scp_bin_result = subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip + return error_msg + try: + LOG.info(_("Config share disk for host %s" % host_ip)) + cmd = "cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py share_disk %s" % controller_ha_nodes[host_ip] + exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.info(_("Storage script error message: %s" % e.output)) + error_msg = "config Disk Array share disks on %s failed!" 
% host_ip + return error_msg + return error_msg + +def config_ha_cinder_volume(volume_disk_info, controller_ha_ips): + error_msg = "" + cmd = 'rm -rf /var/lib/daisy/tecs/storage_auto_config/base/*.json' + daisy_cmn.subprocess_call(cmd) + with open("/var/lib/daisy/tecs/storage_auto_config/base/cinder.json", "w") as fp: + json.dump(volume_disk_info, fp, indent=2) + for host_ip in controller_ha_ips: + password = "ossdbg1" + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd) + cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) + daisy_cmn.subprocess_call(cmd) + try: + scp_bin_result = subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip + return error_msg + try: + LOG.info(_("Config cinder volume for host %s" % host_ip)) + cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py cinder_conf %s' % host_ip + exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.info(_("Storage script error message: %s" % e.output)) + error_msg = "config Disk Array cinder volumes on %s failed!" % host_ip + return error_msg + return error_msg + +def config_compute_multipath(all_nodes_ip): + error_msg = "" + for host_ip in all_nodes_ip: + password = "ossdbg1" + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd) + cmd = 'clush -S -w %s "mkdir -p /home/tecs_install"' % (host_ip,) + daisy_cmn.subprocess_call(cmd) + try: + scp_bin_result = subprocess.check_output( + 'scp -o StrictHostKeyChecking=no -r /var/lib/daisy/tecs/storage_auto_config %s:/home/tecs_install' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + error_msg = "scp /var/lib/daisy/tecs/storage_auto_config to %s failed!" % host_ip + return error_msg + try: + LOG.info(_("Config multipath for host %s" % host_ip)) + cmd = 'cd /home/tecs_install/storage_auto_config/; python storage_auto_config.py check_multipath' + exc_result = subprocess.check_output('clush -S -w %s "%s"' % (host_ip,cmd), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + LOG.info(_("Storage script error message: %s" % e.output)) + error_msg = "config Disk Array multipath on %s failed!" % host_ip + return error_msg + return error_msg \ No newline at end of file diff --git a/code/daisy/daisy/api/backends/tecs/install.py b/code/daisy/daisy/api/backends/tecs/install.py new file mode 100755 index 00000000..9477c944 --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/install.py @@ -0,0 +1,1279 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/install endpoint for tecs API +""" +import os +import re +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.tecs import config +from daisy.api.backends import driver +from daisy.api.network_api import network as neutron +from ironicclient import client as ironic_client +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.tecs.disk_array as disk_array +from daisy.api.configset import manager + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +install_opts = [ + cfg.StrOpt('max_parallel_os_number', default=10, + help='Maximum number of hosts install os at the same time.'), +] +CONF.register_opts(install_opts) + +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + + +tecs_state = tecs_cmn.TECS_STATE +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +def _invalid_bond_type(network_type, vswitch_type, bond_mode): + msg = "Invalid bond_mode(%s) for %s in %s network" % ( + bond_mode, vswitch_type, network_type) + raise_exception = False + if bond_mode in ['0', '1', '2', '3', '4']: + return + + if bond_mode and (2 == len(bond_mode.split(';'))): + bond_mode, lacp_mode = bond_mode.split(';') + if network_type in ['vxlan'] and vswitch_type in ['dvs', 'DVS']: + if bond_mode in ['active-backup', 'balance-slb']: + if lacp_mode not in ['off']: + raise_exception = True + else: + raise_exception = True + + elif network_type in ['vlan'] and vswitch_type in ['dvs', 'DVS', + 'ovs', 'OVS']: + if bond_mode in ['balance-tcp']: + if lacp_mode not in ['active', 'passive', 'off']: + raise_exception = True + elif bond_mode in ['active-backup', 'balance-slb']: + if lacp_mode not in ['off']: + raise_exception = True + else: + raise_exception = True + else: + raise_exception = True + + if raise_exception: + raise exception.InstallException(msg) + + +def _get_host_private_networks(host_detail, cluster_private_networks_name): + """ + User member nic pci segment replace the bond pci, we use it generate the mappings.json. 
+ :param host_detail: host infos + :param cluster_private_networks_name: network info in cluster + :return: + """ + host_private_networks = [hi for pn in cluster_private_networks_name + for hi in host_detail['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and pn == assigned_network['name']] + + # If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port + for interface_outer in host_private_networks: + if 0 != cmp(interface_outer.get('type', None), "bond"): + continue + slave1 = interface_outer.get('slave1', None) + slave2 = interface_outer.get('slave2', None) + if not slave1 or not slave2: + continue + interface_outer.pop('pci') + + for interface_inner in host_detail['interfaces']: + if 0 == cmp(interface_inner.get('name', None), slave1): + interface_outer['pci1'] = interface_inner['pci'] + elif 0 == cmp(interface_inner.get('name', None), slave2): + interface_outer['pci2'] = interface_inner['pci'] + return host_private_networks + +def _write_private_network_cfg_to_json(req, cluster_id, private_networks): + """ + Generate cluster private network json. We use the json file after tecs is installed. + :param private_networks: cluster private network params set. + :return: + """ + if not private_networks: + LOG.error("private networks can't be empty!") + return False + + cluster_hosts_network_cfg = {} + hosts_network_cfg = {} + for k in private_networks.keys(): + private_network_info = {} + for private_network in private_networks[k]: + # host_interface + type = private_network.get('type', None) + name = private_network.get('name', None) + assign_networks = private_network.get('assigned_networks', None) + slave1 = private_network.get('slave1', None) + slave2 = private_network.get('slave2', None) + pci = private_network.get('pci', None) + pci1 = private_network.get('pci1', None) + pci2 = private_network.get('pci2', None) + mode = private_network.get('mode', None) + if not type or not name or not assign_networks: + LOG.error("host_interface params invalid in private networks!") + continue + + for assign_network in assign_networks: + # network + #network_type = assign_network.get('network_type', None) + vswitch_type_network = daisy_cmn.get_assigned_network( + req, private_network['id'], assign_network['id']) + + vswitch_type = vswitch_type_network['vswitch_type'] + physnet_name = assign_network.get('name', None) + mtu = assign_network.get('mtu', None) + if not vswitch_type or not physnet_name: + LOG.error("private networks vswitch_type or physnet name is invalid!") + continue + + physnet_name_conf = {} + physnet_name_conf['type'] = type + physnet_name_conf['name'] = name + physnet_name_conf['vswitch_type'] = vswitch_type + if mtu: + physnet_name_conf['mtu'] = mtu + # physnet_name_conf['ml2'] = ml2_type + "(direct)" + if 0 == cmp("bond", type): + if not pci1 or not pci2 or not slave1 or not slave2 or not mode: + LOG.error("when type is 'bond',input params is invalid in private networks!") + continue + physnet_name_conf['slave1'] = slave1 + physnet_name_conf['slave2'] = slave2 + physnet_name_conf['pci1'] = pci1 + physnet_name_conf['pci2'] = pci2 + physnet_name_conf['mode'] = mode + _invalid_bond_type('vlan', 'OVS', mode) + elif 0 == cmp("ether", type): + if not pci: + LOG.error("when type is 'ether',input params is invalid in private networks!") + continue + physnet_name_conf['pci'] = pci + + if not physnet_name_conf: + continue + private_network_info[physnet_name] = physnet_name_conf + + if not private_network_info: + 
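+                    # nothing usable was collected for this assigned network, skip it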
continue + hosts_network_cfg[k] = private_network_info + + if not hosts_network_cfg: + return False + cluster_hosts_network_cfg['hosts'] = hosts_network_cfg + mkdir_daisy_tecs_path = "mkdir -p " + daisy_tecs_path + cluster_id + daisy_cmn.subprocess_call(mkdir_daisy_tecs_path) + mapping_json = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" + with open(mapping_json, "w+") as fp: + fp.write(json.dumps(cluster_hosts_network_cfg)) + return True + +def _conf_private_network(req, cluster_id, host_private_networks_dict, cluster_private_network_dict): + if not host_private_networks_dict: + LOG.info(_("No private network need config")) + return {} + + # different host(with ip) in host_private_networks_dict + config_neutron_ml2_vlan_ranges = [] + for k in host_private_networks_dict.keys(): + host_private_networks = host_private_networks_dict[k] + # different private network plane in host_interface + for host_private_network in host_private_networks: + assigned_networks = host_private_network.get('assigned_networks', None) + if not assigned_networks: + break + private_network_info = \ + [network for assigned_network in assigned_networks + for network in cluster_private_network_dict + if assigned_network and assigned_network['name'] == network['name']] + + host_private_network['assigned_networks'] = private_network_info + config_neutron_ml2_vlan_ranges += \ + ["%(name)s:%(vlan_start)s:%(vlan_end)s" % + {'name':network['name'], 'vlan_start':network['vlan_start'], 'vlan_end':network['vlan_end']} + for network in private_network_info + if network['name'] and network['vlan_start'] and network['vlan_end']] + + physic_network_cfg = {} + if _write_private_network_cfg_to_json(req, cluster_id, host_private_networks_dict): + physic_network_cfg['json_path'] = daisy_tecs_path + "/" + cluster_id + "/" + "mappings.json" + if config_neutron_ml2_vlan_ranges: + host_private_networks_vlan_range = ",".join(list(set(config_neutron_ml2_vlan_ranges))) + physic_network_cfg['vlan_ranges'] = host_private_networks_vlan_range + return physic_network_cfg + + +def _enable_network(host_networks_dict): + for network in host_networks_dict: + if network != []: + return True + return False + + +def _get_dvs_network_type(vxlan, vlan): + if _enable_network(vxlan): + return 'vxlan', vxlan + elif _enable_network(vlan): + return 'vlan', vlan + else: + return None, None + + +def _get_vtep_ip_ranges(ip_ranges): + vtep_ip_ranges = [] + for ip_range in ip_ranges: + ip_range_start = ip_range.get('start') + ip_range_end = ip_range.get('end') + if ip_range_start and ip_range_end: + vtep_ip_ranges.append( + [ip_range_start.encode('utf8'), + ip_range_end.encode('utf8')]) + return vtep_ip_ranges + + +def _get_dvs_vxlan_info(interfaces, mode_str): + vxlan_nic_info = '' + for interface in interfaces: + if interface['type'] == 'ether': + vxlan_nic_info = interface['name'] + elif interface['type'] == 'bond': + _invalid_bond_type('vxlan', 'DVS', interface.get('mode')) + name = interface.get('name', 'bond1') + if interface.get('mode') in ['0', '1', '2', '3', '4']: + try: + bond_mode = mode_str[ + 'vxlan'].get(interface.get('mode')) + except: + bond_mode = mode_str['vxlan']['0'] + vxlan_nic_info = name + bond_mode % ( + interface['slave1'], interface['slave2']) + else: + vxlan_nic_info = "%s(%s;%s-%s)" % ( + name, interface.get('mode'), + interface['slave1'], interface['slave2']) + return vxlan_nic_info + + +def _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip): + domain_id = assign_network.get('dvs_domain_id') + if not domain_id: + 
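+        # networks without an explicit dvs_domain_id fall back to domain '0'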
domain_id = '0' + + domain_ip = dvs_domain_id.get(domain_id, []) + domain_ip.append(host_ip) + domain_ip = {domain_id.encode('utf8'): domain_ip} + return domain_ip + + +def _get_bridge_mappings(interface): + try: + interface = interface['assigned_networks'][0] + except: + return {} + + bridge_mappings = {} + if interface.get('network_type') in ['PRIVATE']: + phynet_name, nic = interface.get( + 'physnet_name').split('_') + phynet_name = interface.get('name') + if phynet_name and nic: + bridge_mappings.update({nic: phynet_name}) + return bridge_mappings + + +def _convert_bridge_mappings2list(bridge_mappings): + bridge_maps = [] + for nic, phynet in bridge_mappings.items(): + bridge_maps.append('%s:br_%s' % (phynet, nic)) + return set(bridge_maps) + + +def _convert_physical_mappings2list(physical_mappings): + physical_maps = [] + for phynet, nic_info in physical_mappings.items(): + physical_maps.append('%s:%s' % (phynet, nic_info)) + return set(physical_maps) + + +def _get_physical_mappings(interface, mode_str, bridge_mappings): + # bridge_mappings = {'eth0':'phynet1': 'bond0':'phynet2'} + vlan_nic_map_info = {} + phynic_name = interface.get('name') + physnet_name = bridge_mappings.get(phynic_name) + if interface['type'] == 'bond': + _invalid_bond_type('vlan', 'DVS', interface.get('mode')) + if interface.get('mode') in ['0', '1', '2', '3', '4']: + try: + bond_mode = mode_str['vlan'].get(interface.get('mode')) + except: + bond_mode = mode_str['vlan']['0'] + vlan_nic_map_info[physnet_name] = phynic_name + bond_mode % ( + interface['slave1'], interface['slave2']) + else: + # interface.get('mode') = active-backup;off + vlan_nic_map_info[physnet_name] = "%s(%s;%s-%s)" % ( + phynic_name, interface.get('mode'), + interface['slave1'], interface['slave2']) + else: + vlan_nic_map_info[physnet_name] = phynic_name + + return vlan_nic_map_info + + +def get_network_config_for_dvs(host_private_networks_dict, + cluster_private_network_dict): + # different private network plane in host_interface + host_private_networks_dict_for_dvs = copy.deepcopy( + host_private_networks_dict) + + for host_private_network in host_private_networks_dict_for_dvs: + private_networks = host_private_network.get( + 'assigned_networks', None) + if not private_networks: + break + private_network_info = \ + [network for private_network in private_networks + for network in cluster_private_network_dict + if private_network and private_network['name'] == network['name']] + host_private_network['assigned_networks'] = private_network_info + return host_private_networks_dict_for_dvs + + +def conf_dvs(req, host_vxlan_networks_dict, host_private_networks_dict): + mode_str = { + 'vxlan': + { + '0': '(active-backup;off;%s-%s)', + '1': '(balance-slb;off;%s-%s)', + }, + 'vlan': { + '0': '(active-backup;off;%s-%s)', + '1': '(balance-slb;off;%s-%s)', + '2': '(balance-tcp;active;%s-%s)' + } + } + + network_type, networks_dict = _get_dvs_network_type( + host_vxlan_networks_dict, host_private_networks_dict) + + if not network_type: + return {} + + dvs_config = {} + + network_config = {} + vswitch_type = {} + physnics_config = {} + installed_dvs = [] + installed_ovs = [] + network_config['network_type'] = network_type + + # for vxlan + network_config['vtep_ip_ranges'] = [] + dvs_domain_id = {} + + # for vlan + bridge_mappings = {} + physical_mappings = {} + + for host_ip, interfaces in networks_dict.items(): + host_ip = host_ip.encode('utf8') + assign_network = daisy_cmn.get_assigned_network( + req, interfaces[0]['id'], + 
interfaces[0]['assigned_networks'][0].get('id')) + + if assign_network['vswitch_type'] in ['dvs', 'DVS']: + installed_dvs.append(host_ip) + elif assign_network['vswitch_type'] in ['ovs', 'OVS']: + installed_ovs.append(host_ip) + + if network_type == 'vxlan': + network_config['vtep_ip_ranges'].extend( + _get_vtep_ip_ranges( + interfaces[0]['assigned_networks'][0].get('ip_ranges'))) + + dvs_domain_id.update( + _get_dvs_domain_id(assign_network, dvs_domain_id, host_ip)) + + if not physnics_config.get('vxlan_info'): + physnics_config['vxlan_info'] = _get_dvs_vxlan_info( + interfaces, mode_str) + + if network_type == 'vlan': + for interface in interfaces: + bridge_mapping = _get_bridge_mappings(interface) + physical_mapping = _get_physical_mappings( + interface, mode_str, bridge_mapping) + bridge_mappings.update(bridge_mapping) + physical_mappings.update(physical_mapping) + + vswitch_type['ovdk'] = installed_dvs + vswitch_type['ovs_agent_patch'] = installed_ovs + physnics_config['dvs_domain_id'] = dvs_domain_id + physnics_config['physical_mappings'] = ",".join( + _convert_physical_mappings2list(physical_mappings)) + physnics_config['bridge_mappings'] = ",".join( + _convert_bridge_mappings2list(bridge_mappings)) + + dvs_config['vswitch_type'] = vswitch_type + dvs_config['network_config'] = network_config + dvs_config['physnics_config'] = physnics_config + + return dvs_config + + +def _get_interfaces_network(req, host_detail, cluster_networks): + has_interfaces = {} + host_mngt_network = tecs_cmn.get_host_interface_by_network(host_detail, 'MANAGEMENT') + host_mgnt_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT') + host_mgnt_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'MANAGEMENT') + host_mngt_network['ip'] = host_mgnt_ip + host_mngt_network['netmask'] = host_mgnt_netmask + has_interfaces['management'] = host_mngt_network + + host_deploy_network = tecs_cmn.get_host_interface_by_network(host_detail, 'DEPLOYMENT') + host_deploy_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'DEPLOYMENT') + #note:"is_deployment" can't label delpoyment network, it only used to label dhcp mac + if host_deploy_network_info: + host_deploy_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT') + host_deploy_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'DEPLOYMENT') + host_deploy_network_info['ip'] = host_deploy_ip + host_deploy_network_info['netmask'] = host_deploy_netmask + has_interfaces['deployment'] = host_deploy_network_info + + + host_storage_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'STORAGE') + if host_storage_network_info: + host_storage_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'STORAGE') + host_storage_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'STORAGE') + host_storage_network_info['ip'] = host_storage_ip + host_storage_network_info['netmask'] = host_storage_netmask + has_interfaces['storage'] = host_storage_network_info + + host_public_network_info = tecs_cmn.get_host_interface_by_network(host_detail, 'PUBLIC') + + if host_public_network_info: + public_vlan_id = tecs_cmn.get_network_vlan_id(cluster_networks, 'PUBLIC') + + if public_vlan_id: + public_nic_name = host_public_network_info['name'] + '.' 
+ public_vlan_id + else: + public_nic_name = host_public_network_info['name'] + + host_public_ip = tecs_cmn.get_host_network_ip(req, host_detail, cluster_networks, 'PUBLIC') + host_public_netmask = tecs_cmn.get_network_netmask(cluster_networks, 'PUBLIC') + host_public_network_info['ip'] = host_public_ip + host_public_network_info['name'] = public_nic_name + host_public_network_info['netmask'] = host_public_netmask + has_interfaces['public'] = host_public_network_info + return has_interfaces + +def _get_host_nic_name(cluster_network, host_detail): + """ + Different networking will generate different ha port name, the rule of generation + is describe in comment. + :param cluster_network: Network info in cluster. + :param host_detail: + :return: + """ + copy_host_detail = copy.deepcopy(host_detail) + + mgr_interface_info = tecs_cmn.get_host_interface_by_network(copy_host_detail, 'MANAGEMENT') + nic_info = [network + for network in cluster_network + for netname in mgr_interface_info.get('assigned_networks', None) + if network.get('name', None) == netname] + + nic_capability = [info['capability'] for info in nic_info if info['network_type'] != "PRIVATE"] + if not nic_capability or nic_capability == [None]: + return mgr_interface_info['name'] + + mgr_nic_info = [mgr_net for mgr_net in nic_info if mgr_net['network_type'] == "MANAGEMENT"][0] + # if private and management plane is unifier + if set(["PRIVATE", "MANAGEMENT"]).issubset(set([info['network_type'] for info in nic_info])): + # if type = 'ether' and 'ovs' not in ml2 and management is 'high' + if "ether" == mgr_interface_info.get('type', None) and \ + "ovs" not in [mgr_interface_info.get('vswitch_type', None)] and \ + "high" == mgr_nic_info['capability']: + return mgr_interface_info['name'] + + # if ip at outer + if mgr_interface_info.get('ip', None) and mgr_interface_info.get('name', None): + return "v_" + mgr_interface_info['name'] + # ip at inner + elif mgr_nic_info.get('ip', None): + return "managent" + + if "low" not in nic_capability: + return mgr_interface_info['name'] + + # if ip at outer + if mgr_interface_info.get('ip', None) and mgr_interface_info.get('name', None): + return "v_" + mgr_interface_info['name'] + + # ip at inner + elif mgr_nic_info.get('ip', None): + return "managent" + +def get_share_disk_services(req, role_id): + service_disks = tecs_cmn.get_service_disk_list(req, {'role_id':role_id}) + share_disk_services = [] + + for service_disk in service_disks: + if service_disk['disk_location'] == 'share': + share_disk_services.append(service_disk['service']) + return share_disk_services + +def get_cluster_tecs_config(req, cluster_id): + LOG.info(_("Get tecs config from database...")) + params = dict(limit=1000000) + roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + try: + all_services = registry.get_services_detail(req.context, **params) + all_components = registry.get_components_detail(req.context, **params) + cluster_data = registry.get_cluster_metadata(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + cluster_private_network_dict = [network for network in cluster_networks if network['network_type'] == 'PRIVATE'] + cluster_private_networks_name = [network['name'] for network in cluster_private_network_dict] + + cluster_vxlan_network_dict = [network for network in cluster_networks if network['network_type'] == 'VXLAN'] + + tecs_config = {} + 
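+    # Rough shape of the dict assembled below (illustrative, not exhaustive):
+    # tecs_config['OTHER'] holds cluster-wide data (cluster_data, ha_nic_name,
+    # tecs_installed_hosts, physic_network_config, dvs_config, zenic_config),
+    # while each deployed role gets an entry like
+    #   tecs_config['CONTROLLER_HA'] = {'services': {...}, 'vip': ...,
+    #       'host_interfaces': [...], 'share_disk_services': [...]}
+    # with ntp_server/public_vip/glance_vip/db_vip added for *_HA roles.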
tecs_config.update({'OTHER':{}}) + other_config = tecs_config['OTHER'] + other_config.update({'cluster_data':cluster_data}) + tecs_installed_hosts = set() + host_private_networks_dict = {} + host_vxlan_network_dict = {} + mgnt_ip_list = set() + host_private_networks_dict_for_dvs = {} + zenic_cfg = {} + + for role in roles: + if role['name'] == 'ZENIC_NFM': + zenic_cfg['vip'] = role['vip'] + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + try: + role_service_ids = registry.get_role_services(req.context, role['id']) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + role_services_detail = [asc for rsci in role_service_ids for asc in all_services if asc['id'] == rsci['service_id']] + component_id_to_name = dict([(ac['id'], ac['name']) for ac in all_components]) + service_components = dict([(scd['name'], component_id_to_name[scd['component_id']]) for scd in role_services_detail]) + + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + + host_interfaces = [] + for role_host in role_hosts: + host_detail = daisy_cmn.get_host_detail(req, role_host['host_id']) + + sorted_host_detail = tecs_cmn.sort_interfaces_by_pci(host_detail) + host_private_networks_list = _get_host_private_networks(sorted_host_detail, + cluster_private_networks_name) + # get ha nic port name + if not other_config.has_key('ha_nic_name') and role['name'] == "CONTROLLER_HA": + mgr_nic_name = _get_host_nic_name(cluster_networks, sorted_host_detail) + mgr_vlan_id = tecs_cmn.get_mngt_network_vlan_id(cluster_networks) + if mgr_vlan_id: + mgr_nic_name = mgr_nic_name + '.' + mgr_vlan_id + other_config.update({'ha_nic_name':mgr_nic_name}) + + has_interfaces = _get_interfaces_network(req, host_detail, cluster_networks) + has_interfaces.update({'name':host_detail['name']}) + host_interfaces.append(has_interfaces) + # mangement network must be configed + host_mgnt_ip = has_interfaces['management']['ip'] + + # host_mgnt_ip used to label who the private networks is + host_private_networks_dict[host_mgnt_ip] = host_private_networks_list + if role['name'] == 'COMPUTER': + host_vxlan_network_list = _get_host_private_networks(sorted_host_detail, ['VXLAN']) + if host_vxlan_network_list: + host_private_networks_dict_for_dvs = {} + host_vxlan_network_dict[host_mgnt_ip] = get_network_config_for_dvs( + host_vxlan_network_list, cluster_vxlan_network_dict) + elif host_private_networks_list: + host_vxlan_network_dict = {} + host_private_networks_dict_for_dvs[host_mgnt_ip] = get_network_config_for_dvs( + host_private_networks_list, cluster_private_network_dict) + + #get host ip of tecs is active + if (role_host['status'] == tecs_state['ACTIVE'] or + role_host['status'] == tecs_state['UPDATING'] or + role_host['status'] == tecs_state['UPDATE_FAILED']): + tecs_installed_hosts.add(host_mgnt_ip) + else: + mgnt_ip_list.add(host_mgnt_ip) + + share_disk_services = get_share_disk_services(req, role['id']) + is_ha = re.match(".*_HA$", role['name']) is not None + if host_interfaces: + if role['public_vip'] and not host_interfaces[0].has_key('public'): + msg = "no public networkplane found while role has public vip" + LOG.error(msg) + raise exception.NotFound(message=msg) + + tecs_config.update({role['name']: {'services': service_components, + 'vip': role['vip'], + 'host_interfaces': host_interfaces, + 'share_disk_services': share_disk_services + }}) + if is_ha: + tecs_config[role['name']]['ntp_server'] = role['ntp_server'] + tecs_config[role['name']]['public_vip'] = role['public_vip'] + 
tecs_config[role['name']]['glance_vip'] = role['glance_vip'] + tecs_config[role['name']]['db_vip'] = role['db_vip'] + + other_config.update({'tecs_installed_hosts':tecs_installed_hosts}) + # replace private network + physic_network_cfg = _conf_private_network(req, cluster_id, host_private_networks_dict, cluster_private_network_dict) + dvs_cfg = conf_dvs(req, host_vxlan_network_dict, host_private_networks_dict_for_dvs) + other_config.update({'physic_network_config':physic_network_cfg}) + other_config.update({'dvs_config':dvs_cfg}) + other_config.update({'zenic_config':zenic_cfg}) + return (tecs_config, mgnt_ip_list) + + +def get_host_name_and_mgnt_ip(tecs_config): + name_ip_list = [] + ip_list = [] + nodes_ips = {'ha': [], 'lb': [], 'computer': []} + + for role_name, role_configs in tecs_config.items(): + if role_name == "OTHER": + continue + for host in role_configs['host_interfaces']: + ip_domain_dict = {} + host_mgt = host['management'] + if host_mgt['ip'] not in ip_list: + ip_list.append(host_mgt['ip']) + ip_domain_dict.update({host['name']: host_mgt['ip']}) + name_ip_list.append(ip_domain_dict) + + if role_name == 'CONTROLLER_HA': + nodes_ips['ha'].append(host_mgt['ip']) + if role_name == 'CONTROLLER_LB': + nodes_ips['lb'].append(host_mgt['ip']) + if role_name == 'COMPUTER': + nodes_ips['computer'].append(host_mgt['ip']) + return name_ip_list, nodes_ips + + +def replace_ip_with_domain_name(req, tecs_config): + domain_ip_list = [] + ip_list = [] + lb_float_ip = tecs_config['CONTROLLER_LB']['vip'] + for role_name, role_configs in tecs_config.items(): + if role_name == "OTHER": + continue + is_ha = re.match(".*_HA$", role_name) is not None + is_lb = re.match(".*_LB$", role_name) is not None + + for host in role_configs['host_interfaces']: + ip_domain_dict = {} + host_mgt = host['management'] + if host_mgt['ip'] not in ip_list: + ip_list.append(host_mgt['ip']) + ip_domain_dict.update({host['name']: host_mgt['ip']}) + domain_ip_list.append(ip_domain_dict) + host_mgt['ip'] = host['name'] + + if is_ha and role_configs.get('vip'): + domain_ip_list.append({'ha-vip': role_configs['vip']}) + if role_configs['ntp_server'] == role_configs['vip']: + role_configs['ntp_server'] = 'ha-vip' + elif role_configs['ntp_server'] == lb_float_ip: + role_configs['ntp_server'] = 'lb-vip' + role_configs['vip'] = 'ha-vip' + + if role_configs.get('public_vip'): + domain_ip_list.append({'public-vip': role_configs['public_vip']}) + role_configs['public_vip'] = 'public-vip' + if role_configs.get('glance_vip'): + domain_ip_list.append({'glance-vip': role_configs['glance_vip']}) + role_configs['glance_vip'] = 'glance-vip' + if role_configs.get('db_vip'): + domain_ip_list.append({'db-vip': role_configs['db_vip']}) + role_configs['db_vip'] = 'db-vip' + + if is_lb and role_configs.get('vip'): + domain_ip_list.append({'lb-vip': role_configs['vip']}) + role_configs['vip'] = 'lb-vip' + return domain_ip_list + + +def config_dnsmasq_server(host_ip_list, domain_ip_list, password='ossdbg1'): + dns_conf = "/etc/dnsmasq.conf" + for host_ip in host_ip_list: + try: + result = subprocess.check_output( + "sshpass -p %s ssh -o StrictHostKeyChecking=no %s " + "test -f %s" % (password, host_ip, dns_conf), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + msg = '%s does not exist in %s' % (dns_conf, host_ip) + LOG.error(msg) + raise exception.NotFound(message=msg) + + config_scripts = [ + "sed -i '/^[^#]/s/no-resolv[[:space:]]*/\#no-resolv/' %s" % dns_conf, + "sed -i '/^[^#]/s/no-poll[[:space:]]*/\#no-poll/' 
%s" % dns_conf, + "cache_size_linenumber=`grep -n 'cache-size=' %s| awk -F ':' " + "'{print $1}'` && [ ! -z $cache_size_linenumber ] && sed -i " + "${cache_size_linenumber}d %s" % (dns_conf, dns_conf), + "echo 'cache-size=3000' >> %s" % dns_conf] + + tecs_cmn.run_scrip(config_scripts, host_ip, password) + + config_ip_scripts = [] + for domain_name_ip in domain_ip_list: + domain_name = domain_name_ip.keys()[0] + domain_ip = domain_name_ip.values()[0] + config_ip_scripts.append( + "controller1_linenumber=`grep -n 'address=/%s' %s| awk -F ':' " + "'{print $1}'` && [ ! -z ${controller1_linenumber} ] && " + "sed -i ${controller1_linenumber}d %s" % + (domain_name, dns_conf, dns_conf)) + config_ip_scripts.append("echo 'address=/%s/%s' >> %s" % + (domain_name, domain_ip, dns_conf)) + tecs_cmn.run_scrip(config_ip_scripts, host_ip, password) + + service_start_scripts = [ + "dns_linenumber=`grep -n \"^[[:space:]]*ExecStart=/usr/sbin/dnsmasq -k\" " + "/usr/lib/systemd/system/dnsmasq.service|cut -d \":\" -f 1` && " + "sed -i \"${dns_linenumber}c ExecStart=/usr/sbin/dnsmasq -k " + "--dns-forward-max=50000\" /usr/lib/systemd/system/dnsmasq.service", + "for i in `ps -elf | grep dnsmasq |grep -v grep | awk -F ' ' '{print $4}'`;do kill -9 $i;done ", + "systemctl daemon-reload && systemctl enable dnsmasq.service && " + "systemctl restart dnsmasq.service"] + tecs_cmn.run_scrip(service_start_scripts, host_ip, password) + + +def config_dnsmasq_client(host_ip_list, ha_ip_list, password='ossdbg1'): + dns_client_file = "/etc/resolv.conf" + config_scripts = ["rm -rf %s" % dns_client_file] + for ha_ip in ha_ip_list: + config_scripts.append("echo 'nameserver %s' >> %s" % + (ha_ip, dns_client_file)) + for host_ip in host_ip_list: + tecs_cmn.run_scrip(config_scripts, host_ip, password) + tecs_cmn.run_scrip(config_scripts) + + +def config_nodes_hosts(host_ip_list, domain_ip, password='ossdbg1'): + hosts_file = "/etc/hosts" + config_scripts = [] + for name_ip in domain_ip: + config_scripts.append("linenumber=`grep -n '%s' /etc/hosts | " + "awk -F '' '{print $1}'` && " + "[ ! 
-z $linenumber ] && " + "sed -i ${linenumber}d %s" % + (name_ip.keys()[0], hosts_file)) + config_scripts.append("echo '%s %s' >> %s" % (name_ip.values()[0], + name_ip.keys()[0], + hosts_file)) + + for host_ip in host_ip_list: + tecs_cmn.run_scrip(config_scripts, host_ip, password) + tecs_cmn.run_scrip(config_scripts) + + +def revise_nova_config(computer_nodes, ha_vip, public_vip, compute_ip_domain, + password='ossdbg1'): + nova_file = "/etc/nova/nova.conf" + for host_ip in computer_nodes: + scripts = [] + if public_vip: + scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " + "awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/public-vip/%s/" %s' % + (public_vip, nova_file)]) + else: + scripts.extend(["linenumber=`grep -n '^novncproxy_base_url' %s | " + "awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/ha-vip/%s/" %s' % + (ha_vip, nova_file)]) + scripts.extend(["linenumber=`grep -n '^vncserver_proxyclient_address' " + "%s | awk -F ':' '{print $1}'`" % nova_file, + 'sed -i "${linenumber}s/127.0.0.1/%s/" %s' % + (compute_ip_domain[host_ip], nova_file), + "systemctl restart openstack-nova-compute.service "]) + tecs_cmn.run_scrip(scripts, host_ip, password) + + +def revise_horizon_config(ha_nodes, ha_vip, public_vip, password='ossdbg1'): + dashboard_file = "/etc/httpd/conf.d/15-horizon_vhost.conf" + for host_ip in ha_nodes: + config_scripts = ["linenumber1=`grep -n 'ServerAlias %s' " + "%s| awk -F ':' '{print $1}'` && " + "[ ! -z ${linenumber1} ] && sed -i " + "${linenumber1}d %s" % (host_ip, + dashboard_file, + dashboard_file), + "linenumber2=`grep -n 'ServerAlias %s' %s| awk -F ':' '" + "{print $1}'` && [ ! -z ${linenumber2} ] && sed -i " + "${linenumber2}d %s" % (ha_vip, dashboard_file, + dashboard_file), + "linenumber3=`grep -n 'ServerAlias %s' %s| awk -F ':' '" + "{print $1}'` && [ ! -z ${linenumber3} ] && sed -i " + "${linenumber3}d %s" % (public_vip, dashboard_file, + dashboard_file), + 'dasboard_linenumber1=`grep -n "ServerAlias localhost" ' + '%s|cut -d ":" -f 1` && sed -i "${dasboard_linenumber1}a ' + 'ServerAlias %s" %s' % (dashboard_file, host_ip, + dashboard_file), + 'dasboard_linenumber1=`grep -n "ServerAlias localhost" %s' + '|cut -d ":" -f 1` && sed -i "${dasboard_linenumber1}a ' + 'ServerAlias %s" %s' % (dashboard_file, ha_vip, + dashboard_file)] + if public_vip: + config_scripts.append('dasboard_linenumber2=`grep -n ' + '"ServerAlias localhost" %s|cut ' + '-d ":" -f 1` && sed -i ' + '"${dasboard_linenumber2}a ' + 'ServerAlias %s" %s' % + (dashboard_file, public_vip, + dashboard_file)) + + tecs_cmn.run_scrip(config_scripts, host_ip, password) + + restart_http_scripts = ['systemctl daemon-reload &&' + 'systemctl restart httpd.service'] + tecs_cmn.run_scrip(restart_http_scripts, ha_vip, password) + + +class TECSInstallTask(Thread): + """ + Class for install tecs bin. 
+ """ + """ Definition for install states.""" + + def __init__(self, req, cluster_id): + super(TECSInstallTask, self).__init__() + self.req = req + self.cluster_id = cluster_id + self.progress = 0 + self.state = tecs_state['INIT'] + self.message = "" + self.tecs_config_file = '' + self.mgnt_ip_list = '' + self.install_log_fp = None + self.last_line_num = 0 + self.need_install = False + self.ping_times = 36 + self.log_file = "/var/log/daisy/tecs_%s_install.log" % self.cluster_id + self.dns_name_ip = [] + self.password = 'ossdbg1' + self.nodes_ips = {} + + def _check_install_log(self, tell_pos): + with open(self.log_file, "r") as tmp_fp: + tmp_fp.seek(tell_pos, os.SEEK_SET) + line_num = self.last_line_num + for lnum, lcontent in enumerate(tmp_fp, 1): + tell_pos = tmp_fp.tell() + line_num += 1 + LOG.debug("<<>>", line_num, lcontent) + if -1 != lcontent.find("Preparing servers"): + self.progress = 3 + + if -1 != lcontent.find("successfully"): + self.progress = 100 + elif -1 != lcontent.find("Error") \ + or -1 != lcontent.find("ERROR") \ + or -1 != lcontent.find("error") \ + or -1 != lcontent.find("not found"): + self.state = tecs_state['INSTALL_FAILED'] + self.message = "Tecs install error, see line %s in '%s'" % (line_num,self.log_file) + raise exception.InstallException(self.message) + self.last_line_num = line_num + return tell_pos + + def _calc_progress(self, path): + """ + Calculate the progress of installing bin. + :param path: directory contain ".pp" and ".log" files + :return: installing progress(between 1~100) + """ + ppcount = logcount = 0 + for file in os.listdir(path): + if file.endswith(".log"): + logcount += 1 + elif file.endswith(".pp"): + ppcount += 1 + + progress = 0 + if 0 != ppcount: + progress = (logcount * 100.00)/ ppcount + return progress + + def _update_install_progress_to_db(self): + """ + Update progress of intallation to db. 
+ :return: + """ + roles = daisy_cmn.get_cluster_roles_detail(self.req,self.cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.tecs_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(self.req, role['id']) + for role_host in role_hosts: + if role_host['status'] != tecs_state['ACTIVE']: + self.need_install = True + role_host['status'] = self.state + role_host['progress'] = self.progress + role_host['messages'] = self.message + daisy_cmn.update_role_host(self.req, role_host['id'], role_host) + role['progress'] = self.progress + role['status'] = self.state + role['messages'] = self.message + daisy_cmn.update_role(self.req, role['id'], role) + + def _generate_tecs_config_file(self, cluster_id, tecs_config): + tecs_config_file = '' + if tecs_config: + cluster_conf_path = daisy_tecs_path + cluster_id + LOG.info(_("Generate tecs config...")) + config.update_tecs_config(tecs_config, cluster_conf_path) + tecs_config_file = cluster_conf_path + "/tecs.conf" + ha_config_file = cluster_conf_path + "/HA_1.conf" + mkdir_tecs_install = "mkdir -p /home/tecs_install/" + daisy_cmn.subprocess_call(mkdir_tecs_install) + cp_ha_conf = "\cp %s /home/tecs_install/" % ha_config_file + tecs_conf = "\cp %s /home/tecs_install/" % ha_config_file + daisy_cmn.subprocess_call(cp_ha_conf) + return tecs_config_file + + def run(self): + try: + start_time = time.time() + self._run() + except Exception as e: + self.state = tecs_state['INSTALL_FAILED'] + self.message = e.message + self._update_install_progress_to_db() + LOG.exception(e.message) + else: + if not self.need_install: + return + self.progress = 100 + self.state = tecs_state['ACTIVE'] + self.message = "Tecs installed successfully" + LOG.info(_("Install TECS for cluster %s successfully." 
+ % self.cluster_id)) + time_cost = str(round((time.time() - start_time)/60, 2)) + LOG.info(_("It totally takes %s min for installing tecs" % time_cost)) + + if self.dns_name_ip: + ha_vip = "" + public_vip = "" + compute_ip_domain = {} + for dns_dict in self.dns_name_ip: + domain_name = dns_dict.keys()[0] + domain_ip = dns_dict.values()[0] + if domain_name == "ha-vip": + ha_vip = domain_ip + if domain_name == "public-vip": + public_vip = domain_ip + if domain_ip in self.nodes_ips['computer']: + compute_ip_domain.update({domain_ip: domain_name}) + + revise_nova_config(self.nodes_ips['computer'], ha_vip, + public_vip, compute_ip_domain) + revise_horizon_config(self.nodes_ips['ha'], ha_vip, public_vip) + + # load neutron conf after installation + roles = registry.get_roles_detail(self.req.context) + for role in roles: + if role['cluster_id'] == self.cluster_id: + backend=manager.configBackend('clushshell', self.req, role['id']) + backend.push_config() + result = config.get_conf(self.tecs_config_file, + neutron_float_ip="CONFIG_NEUTRON_SERVER_HOST", + keystone_float_ip="CONFIG_KEYSTONE_HOST", + neutron_install_mode="CONFIG_NEUTRON_SERVER_INSTALL_MODE", + keystone_install_mode="CONFIG_KEYSTONE_INSTALL_MODE", + lb_float_ip="CONFIG_LB_HOST") + if (result.get('keystone_install_mode', None) == "LB" and + result.get('neutron_install_mode', None) == "LB"): + LOG.info(_("<<>>")) + time.sleep(20) + neutron(self.req, + result.get('lb_float_ip', None), + result.get('lb_float_ip', None), + self.cluster_id) + else: + LOG.info(_("<<>>")) + time.sleep(20) + neutron(self.req, + result.get('neutron_float_ip', None), + result.get('keystone_float_ip', None), + self.cluster_id) + finally: + self._update_install_progress_to_db() + if self.install_log_fp: + self.install_log_fp.close() + + def _run(self): + """ + Exectue install file(.bin) with sync mode. + :return: + """ + + def executor(**params): + # if subprocsee is failed, we need break + if os.path.exists(self.log_file): + params['tell_pos'] = self._check_install_log(params.get('tell_pos', 0)) + LOG.debug(_("<<>>")) + if 100 == self.progress: + return params + if 3 == self.progress: + self._update_install_progress_to_db() + # waiting for 'progress_log_location' file exist + if not params.get("if_progress_file_read", None): + if not os.path.exists(self.progress_log_location): + params['if_progress_file_read'] = False + return params + else: + with open(self.progress_log_location, "r") as fp: + line = fp.readline() + self.progress_logs_path = line.split('\n')[0] + "/manifests" + LOG.info(_("TECS installation log path: %s." + % self.progress_logs_path)) + params['if_progress_file_read'] = True + + # waiting for 'self.progress_logs_path' file exist + if not os.path.exists(self.progress_logs_path): + return params + + LOG.debug(_("<<>>")) + + # cacl progress & sync to db + progress = self._calc_progress(self.progress_logs_path) + + if self.progress != progress and progress >= 3: + self.progress = progress + self.state = tecs_state['INSTALLING'] + self._update_install_progress_to_db() + elif progress == 100: + self.progress = 100 + self.state = tecs_state['ACTIVE'] + self.message = "Tecs installed successfully" + return params + + if not self.cluster_id or \ + not self.req: + raise exception.InstallException("invalid params.") + + self.progress = 0 + self.message = "Preparing for TECS installation" + self._update_install_progress_to_db() + if not self.need_install: + LOG.info(_("No host in cluster %s need to install tecs." 
+ % self.cluster_id)) + return + + (tecs_config, self.mgnt_ip_list) = get_cluster_tecs_config(self.req, self.cluster_id) + # after os is installed successfully, if ping all role hosts + # management ip successfully, begin to install TECS + unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, self.ping_times) + if unreached_hosts: + self.message = "ping hosts %s failed" % ','.join(unreached_hosts) + raise exception.InstallException(self.message) + else: + # os maybe not reboot completely, wait for 20s to ensure ssh successfully. + # ssh test until sucess should better here + time.sleep(20) + + name_ip_list, self.nodes_ips = get_host_name_and_mgnt_ip(tecs_config) + all_nodes = list(set(self.nodes_ips['ha'] + self.nodes_ips['lb'] + + self.nodes_ips['computer'])) + # delete daisy server known_hosts file to avoid + # ssh command failed because of incorrect host key. + daisy_cmn.subprocess_call('rm -rf /root/.ssh/known_hosts') + if tecs_config['OTHER']['cluster_data']['use_dns']: + self.dns_name_ip = replace_ip_with_domain_name(self.req, tecs_config) + storage_ip_list = tecs_cmn.get_storage_name_ip_dict( + self.req, self.cluster_id, 'STORAGE') + + self.dns_name_ip.extend(storage_ip_list) + tecs_config['OTHER'].update({'dns_config': self.dns_name_ip}) + + config_dnsmasq_server(self.nodes_ips['ha'], self.dns_name_ip) + config_dnsmasq_client(all_nodes, self.nodes_ips['ha']) + config_nodes_hosts(all_nodes, self.dns_name_ip) + host_domain = [name_ip.keys()[0] for name_ip in self.dns_name_ip + if name_ip.keys()[0] .find('vip') == -1] + unreached_hosts = daisy_cmn.check_ping_hosts(host_domain, + self.ping_times) + if unreached_hosts: + self.message = "ping hosts %s failed after DNS configuration" %\ + ','.join(unreached_hosts) + raise exception.InstallException(self.message) + else: + config_nodes_hosts(all_nodes, name_ip_list) + # generate tecs config must be after ping check + self.tecs_config_file = self._generate_tecs_config_file(self.cluster_id, + tecs_config) + + # install network-configuration-1.1.1-15.x86_64.rpm + if self.mgnt_ip_list: + for mgnt_ip in self.mgnt_ip_list: + LOG.info(_("begin to install network-configuration on %s"% mgnt_ip)) + tecs_cmn.TecsShellExector(mgnt_ip, 'install_rpm') + # network-configuration will restart network, wait until ping test successfully + time.sleep(10) + unreached_hosts = daisy_cmn.check_ping_hosts(self.mgnt_ip_list, self.ping_times) + if unreached_hosts: + self.message = "ping hosts %s failed after network configuration" % ','.join(unreached_hosts) + raise exception.InstallException(self.message) + + (share_disk_info, volume_disk_info) =\ + disk_array.get_disk_array_info(self.req, self.cluster_id) + if share_disk_info or volume_disk_info: + (controller_ha_nodes, computer_ips) =\ + disk_array.get_ha_and_compute_ips(self.req, self.cluster_id) + else: + controller_ha_nodes = {} + computer_ips = [] + + all_nodes_ip = computer_ips + controller_ha_nodes.keys() + if all_nodes_ip: + LOG.info(_("begin to config multipth ...")) + compute_error_msg = disk_array.config_compute_multipath(all_nodes_ip) + if compute_error_msg: + self.message = compute_error_msg + raise exception.InstallException(self.message) + else: + LOG.info(_("config Disk Array multipath successfully")) + + if share_disk_info: + LOG.info(_("begin to config Disk Array ...")) + ha_error_msg = disk_array.config_ha_share_disk(share_disk_info, + controller_ha_nodes) + if ha_error_msg: + self.message = ha_error_msg + raise exception.InstallException(message=self.message) + else: + 
LOG.info(_("config Disk Array for HA nodes successfully")) + + # check and get TECS version + tecs_version_pkg_file = tecs_cmn.check_and_get_tecs_version(daisy_tecs_path) + if not tecs_version_pkg_file: + self.state = tecs_state['INSTALL_FAILED'] + self.message = "TECS version file not found in %s" % daisy_tecs_path + raise exception.NotFound(message=self.message) + + # use pattern 'tecs_%s_install' to distinguish multi clusters installation + LOG.info(_("Open log file for TECS installation.")) + self.install_log_fp = open(self.log_file, "w+") + + # delete cluster_id file before installing, in case getting old log path + self.progress_log_location = "/var/tmp/packstack/%s" % self.cluster_id + if os.path.exists(self.progress_log_location): + os.remove(self.progress_log_location) + + install_cmd = "sudo %s conf_file %s" % (tecs_version_pkg_file, self.tecs_config_file) + LOG.info(_("Begin to install TECS in cluster %s." % self.cluster_id)) + clush_bin = subprocess.Popen( + install_cmd, shell=True, stdout=self.install_log_fp, stderr=self.install_log_fp) + + self.progress = 1 + self.state = tecs_state['INSTALLING'] + self.message = "TECS installing" + self._update_install_progress_to_db() + # if clush_bin is not terminate + # while not clush_bin.returncode: + params = {} # executor params + execute_times = 0 # executor run times + while True: + time.sleep(5) + if self.progress == 100: + if volume_disk_info: + LOG.info(_("Begin to config cinder volume ...")) + ha_error_msg = disk_array.config_ha_cinder_volume( + volume_disk_info, + controller_ha_nodes.keys()) + if ha_error_msg: + self.message = ha_error_msg + raise exception.InstallException(self.message) + else: + LOG.info(_("Config cinder volume for HA nodes successfully")) + break + elif execute_times >= 1440: + self.state = tecs_state['INSTALL_FAILED'] + self.message = "TECS install timeout for 2 hours" + raise exception.InstallTimeoutException(cluster_id=self.cluster_id) + params = executor( + # just read cluster_id file once in 'while' + if_progress_file_read=params.get("if_progress_file_read", False), + # current fp location of tecs_install.log + tell_pos=params.get("tell_pos", 0)) + + # get clush_bin.returncode + # clush_bin.poll() + execute_times += 1 + + diff --git a/code/daisy/daisy/api/backends/tecs/uninstall.py b/code/daisy/daisy/api/backends/tecs/uninstall.py new file mode 100755 index 00000000..d87d8590 --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/uninstall.py @@ -0,0 +1,155 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/hosts endpoint for Daisy v1 API +""" + +import webob.exc +import subprocess + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread, Lock +import threading +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy.api.v1 import controller +from daisy.api.v1 import filters +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +tecs_state = tecs_cmn.TECS_STATE + +def update_progress_to_db(req, role_id_list, status, hosts_list,host_ip=None): + """ + Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' + to make sure this function is thread safety. + :param req: http req. + :param role_id_list: Column neeb be update in role table. + :param status: Uninstall status. + :return: + """ + for role_id in role_id_list: + role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) + for host_id_ip in hosts_list: + host_ip_tmp=host_id_ip.values()[0] + host_id_tmp=host_id_ip.keys()[0] + if host_ip: + for role_host in role_hosts: + if (host_ip_tmp == host_ip and + role_host['host_id']== host_id_tmp): + role_host_meta = {} + if 0 == cmp(status, tecs_state['UNINSTALLING']): + role_host_meta['progress'] = 10 + role_host_meta['messages'] = 'TECS uninstalling' + if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): + role_host_meta['messages'] = 'TECS uninstalled failed' + elif 0 == cmp(status, tecs_state['ACTIVE']): + role_host_meta['progress'] = 100 + role_host_meta['messages'] = 'TECS uninstalled successfully' + if role_host_meta: + role_host_meta['status'] = status + daisy_cmn.update_role_host(req, + role_host['id'], + role_host_meta) + else: + role = {} + if 0 == cmp(status, tecs_state['UNINSTALLING']): + for role_host in role_hosts: + role_host_meta = {} + role_host_meta['status'] = status + role_host_meta['progress'] = 0 + daisy_cmn.update_role_host(req, + role_host['id'], + role_host_meta) + role['progress']=0 + role['messages'] = 'TECS uninstalling' + if 0 == cmp(status, tecs_state['UNINSTALL_FAILED']): + role['messages'] = 'TECS uninstalled failed' + elif 0 == cmp(status, tecs_state['INIT']): + role['progress'] = 100 + role['messages'] = 'TECS uninstalled successfully' + if role: + role['status'] = status + daisy_cmn.update_role(req, role_id, role) + if 0 == cmp(status, tecs_state['INIT']): + daisy_cmn.delete_role_hosts(req, role_id) + +def _thread_bin(req, host_ip, role_id_list,hosts_list): + # uninstall network-configuration-1.1.1-15.x86_64.rpm + update_progress_to_db(req,role_id_list,tecs_state['UNINSTALLING'],hosts_list,host_ip) + tecs_cmn.TecsShellExector(host_ip, 'uninstall_rpm') + + cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' + daisy_cmn.subprocess_call(cmd) + password = "ossdbg1" + var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_tecs.log" % host_ip + with open(var_log_path, "w+") as fp: + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s "rm -rf /home/daisy_uninstall"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "mkdir -p 
/home/daisy_uninstall"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + try: + scp_bin_result = subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_uninstall' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) + LOG.error(_("scp TECS bin for %s failed!" % host_ip)) + fp.write(e.output.strip()) + + cmd = 'clush -S -w %s "chmod 777 /home/daisy_uninstall/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + try: + exc_result = subprocess.check_output( + 'clush -S -w %s /home/daisy_uninstall/ZXTECS*.bin clean' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, tecs_state['UNINSTALL_FAILED'],hosts_list,host_ip) + LOG.error(_("Uninstall TECS for %s failed!" % host_ip)) + fp.write(e.output.strip()) + else: + update_progress_to_db(req, role_id_list, tecs_state['ACTIVE'], hosts_list,host_ip) + LOG.info(_("Uninstall TECS for %s successfully!" % host_ip)) + fp.write(exc_result) +# this will be raise raise all the exceptions of the thread to log file +def thread_bin(req, host_ip, role_id_list, hosts_list): + try: + _thread_bin(req, host_ip, role_id_list, hosts_list) + except Exception as e: + LOG.exception(e.message) \ No newline at end of file diff --git a/code/daisy/daisy/api/backends/tecs/upgrade.py b/code/daisy/daisy/api/backends/tecs/upgrade.py new file mode 100755 index 00000000..70113960 --- /dev/null +++ b/code/daisy/daisy/api/backends/tecs/upgrade.py @@ -0,0 +1,151 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/update endpoint for Daisy v1 API +""" + +import webob.exc +import subprocess + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread, Lock +import threading +import time +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 +import daisy.registry.client.v1.api as registry +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.api.backends import os as os_handle +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.tecs.common as tecs_cmn + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +tecs_state = tecs_cmn.TECS_STATE + +def update_progress_to_db(req,role_id_list,status,hosts_list,host_ip=None): + """ + Write update progress and status to db, + to make sure this function is thread safety. + :param req: http req. + :param role_id_list: Column neeb be update in role table. + :param status: Update status. 
+ :return: + """ + for role_id in role_id_list: + role_hosts = daisy_cmn.get_hosts_of_role(req, role_id) + for host_id_ip in hosts_list: + host_ip_tmp=host_id_ip.values()[0] + host_id_tmp=host_id_ip.keys()[0] + if host_ip: + for role_host in role_hosts: + if (host_ip_tmp == host_ip and + role_host['host_id']== host_id_tmp): + role_host_meta = {} + if 0 == cmp(status, tecs_state['UPDATING']): + role_host_meta['progress'] = 10 + role_host_meta['messages'] = 'TECS upgrading' + if 0 == cmp(status, tecs_state['UPDATE_FAILED']): + role_host_meta['messages'] = 'TECS upgraded failed' + elif 0 == cmp(status, tecs_state['ACTIVE']): + role_host_meta['progress'] = 100 + role_host_meta['messages'] = 'TECS upgraded successfully' + if role_host_meta: + role_host_meta['status'] = status + daisy_cmn.update_role_host(req, + role_host['id'], + role_host_meta) + else: + role = {} + if 0 == cmp(status, tecs_state['UPDATING']): + for role_host in role_hosts: + role_host_meta = {} + role_host_meta['status'] = status + role_host_meta['progress'] = 0 + role_host_meta['messages'] = 'TECS upgrading' + daisy_cmn.update_role_host(req, + role_host['id'], + role_host_meta) + role['progress']=0 + role['messages'] = 'TECS upgrading' + if 0 == cmp(status, tecs_state['UPDATE_FAILED']): + role['messages'] = 'TECS upgraded failed' + elif 0 == cmp(status, tecs_state['ACTIVE']): + role['progress'] = 100 + role['messages'] = 'TECS upgraded successfully' + if role: + role['status'] = status + daisy_cmn.update_role(req, role_id, role) + +def thread_bin(req,role_id_list, host_ip,hosts_list): + # update network-configuration-1.1.1-15.x86_64.rpm + update_progress_to_db(req,role_id_list,tecs_state['UPDATING'],hosts_list,host_ip) + cmd = 'mkdir -p /var/log/daisy/daisy_update/' + daisy_cmn.subprocess_call(cmd) + password = "ossdbg1" + var_log_path = "/var/log/daisy/daisy_update/%s_update_tecs.log" % host_ip + with open(var_log_path, "w+") as fp: + cmd = '/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "mkdir -p /home/daisy_update"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -b -w %s "rm -rf /home/daisy_update/ZXTECS*.bin"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + tecs_cmn.TecsShellExector(host_ip, 'update_rpm') + try: + scp_bin_result = subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/ZXTECS*.bin --dest=/home/daisy_update' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip) + LOG.error(_("scp TECS bin for %s failed!" % host_ip)) + fp.write(e.output.strip()) + return 1 + + cmd = 'clush -S -w %s "chmod 777 /home/daisy_update/*"' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + try: + exc_result = subprocess.check_output( + 'clush -S -w %s "/home/daisy_update/ZXTECS*.bin upgrade"' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req,role_id_list,tecs_state['UPDATE_FAILED'],hosts_list,host_ip) + LOG.error(_("Update TECS for %s failed!" 
% host_ip)) + fp.write(e.output.strip()) + return 2 + else: + update_progress_to_db(req,role_id_list,tecs_state['ACTIVE'],hosts_list,host_ip) + fp.write(exc_result) + return 0 + diff --git a/code/daisy/daisy/api/backends/zenic/__init__.py b/code/daisy/daisy/api/backends/zenic/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/backends/zenic/api.py b/code/daisy/daisy/api/backends/zenic/api.py new file mode 100755 index 00000000..226144b8 --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/api.py @@ -0,0 +1,194 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for zenic API +""" +import os +import copy +import subprocess +import time +import commands + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError + +import threading +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.zenic import config +from daisy.api.backends import driver +from daisy.api.network_api import network as neutron +from ironicclient import client as ironic_client +import daisy.api.backends.os as os_handle +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.zenic.common as zenic_cmn +import daisy.api.backends.zenic.install as instl +import daisy.api.backends.zenic.uninstall as unstl +import daisy.api.backends.zenic.upgrade as upgrd + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +zenic_state = zenic_cmn.ZENIC_STATE + +class API(driver.DeploymentDriver): + + def __init__(self): + super(API, self).__init__() + return + + def install(self, req, cluster_id): + """ + Install zenic to a cluster. + + param req: The WSGI/Webob Request object + cluster_id:cluster id + """ + + #instl.pxe_server_build(req, install_meta) + # get hosts config which need to install OS + #hosts_need_os = instl.get_cluster_hosts_config(req, cluster_id) + # if have hosts need to install os, ZENIC installataion executed in OSInstallTask + #if hosts_need_os: + #os_install_obj = instl.OSInstallTask(req, cluster_id, hosts_need_os) + #os_install_thread = Thread(target=os_install_obj.run) + #os_install_thread.start() + #else: + LOG.info(_("No host need to install os, begin install ZENIC for cluster %s." % cluster_id)) + zenic_install_task = instl.ZENICInstallTask(req, cluster_id) + zenic_install_task.start() + + LOG.info((_("begin install zenic, please waiting...."))) + time.sleep(5) + LOG.info((_("install zenic successfully"))) + + def uninstall(self, req, cluster_id): + """ + Uninstall ZENIC to a cluster. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id) + if role_id_list: + if not hosts_list: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + + unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], 0.0) + uninstall_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 + + threads = [] + for host in hosts_list: + t = threading.Thread(target=unstl.thread_bin,args=(req,host,role_id_list,uninstall_progress_percentage)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("uninstall threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join uninstall thread %s failed!" % t)) + else: + uninstall_failed_flag = False + for role_id in role_id_list: + role = daisy_cmn.get_role_detail(req, role_id) + if role['progress'] == 100: + unstl.update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) + uninstall_failed_flag = True + break + if role['status'] == zenic_state['UNINSTALL_FAILED']: + uninstall_failed_flag = True + break + if not uninstall_failed_flag: + LOG.info(_("all uninstall threads have done, set all roles status to 'init'!")) + unstl.update_progress_to_db(req, role_id_list, zenic_state['INIT']) + + LOG.info((_("begin uninstall zenic, please waiting...."))) + time.sleep(5) + LOG.info((_("uninstall zenic successfully"))) + + def upgrade(self, req, cluster_id): + """ + update zenic to a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + + """ + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(req, cluster_id) + if not hosts_list: + msg = _("there is no host in cluster %s") % cluster_id + raise exception.ThreadBinException(msg) + + upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], 0.0) + update_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 + + threads = [] + for host in hosts_list: + t = threading.Thread(target=upgrd.thread_bin,args=(req,host,role_id_list,update_progress_percentage)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("upgrade threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join upgrade thread %s failed!" % t)) + else: + update_failed_flag = False + for role_id in role_id_list: + role = daisy_cmn.get_role_detail(req, role_id) + if role['progress'] == 0: + upgrd.update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + update_failed_flag = True + break + if role['status'] == zenic_state['UPDATE_FAILED']: + update_failed_flag = True + break + if not update_failed_flag: + LOG.info(_("all update threads have done, set all roles status to 'active'!")) + upgrd.update_progress_to_db(req, role_id_list, zenic_state['ACTIVE']) + + \ No newline at end of file diff --git a/code/daisy/daisy/api/backends/zenic/common.py b/code/daisy/daisy/api/backends/zenic/common.py new file mode 100755 index 00000000..a08c9f74 --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/common.py @@ -0,0 +1,300 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/install endpoint for zenic API +""" +import os +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +import daisy.api.backends.common as daisy_cmn + + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +daisy_zenic_path = '/var/lib/daisy/zenic/' +ZENIC_STATE = { + 'INIT' : 'init', + 'INSTALLING' : 'installing', + 'ACTIVE' : 'active', + 'INSTALL_FAILED': 'install-failed', + 'UNINSTALLING': 'uninstalling', + 'UNINSTALL_FAILED': 'uninstall-failed', + 'UPDATING': 'updating', + 'UPDATE_FAILED': 'update-failed', +} + +def get_cluster_hosts(req, cluster_id): + try: + cluster_hosts = registry.get_cluster_hosts(req.context, cluster_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return cluster_hosts + +def get_host_detail(req, host_id): + try: + host_detail = registry.get_host_metadata(req.context, host_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return host_detail + +def get_roles_detail(req): + try: + roles = registry.get_roles_detail(req.context) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return roles + +def get_hosts_of_role(req, role_id): + try: + hosts = registry.get_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return hosts + +def get_role_detail(req, role_id): + try: + role = registry.get_role_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role + +def update_role(req, role_id,role_meta): + try: + registry.update_role_metadata(req.context, role_id, role_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def update_role_host(req, role_id, role_host): + try: + registry.update_role_host_metadata(req.context, role_id, role_host) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def delete_role_hosts(req, role_id): + try: + registry.delete_role_host_metadata(req.context, role_id) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + +def _get_cluster_network(cluster_networks, network_type): + network = [cn for cn in cluster_networks + if cn['name'] in network_type] + if not network or not network[0]: + msg = "network %s is not exist" % (network_type) + raise exception.InvalidNetworkConfig(msg) + else: + return network[0] + +def get_host_interface_by_network(host_detail, network_type): + host_detail_info = copy.deepcopy(host_detail) + interface_list = 
[hi for hi in host_detail_info['interfaces'] + for assigned_network in hi['assigned_networks'] + if assigned_network and network_type == assigned_network['name']] + interface = {} + if interface_list: + interface = interface_list[0] + + if not interface: + msg = "network %s of host %s is not exist" % (network_type, host_detail_info['id']) + raise exception.InvalidNetworkConfig(msg) + + return interface + +def get_host_network_ip(req, host_detail, cluster_networks, network_type): + interface_network_ip = '' + host_interface = get_host_interface_by_network(host_detail, network_type) + if host_interface: + network = _get_cluster_network(cluster_networks, network_type) + assigned_network = daisy_cmn.get_assigned_network(req, + host_interface['id'], + network['id']) + interface_network_ip = assigned_network['ip'] + + if not interface_network_ip: + msg = "%s network ip of host %s can't be empty" % (network_type, host_detail['id']) + raise exception.InvalidNetworkConfig(msg) + return interface_network_ip + +def get_deploy_node_cfg(req, host_detail, cluster_networks): + host_deploy_network = get_host_interface_by_network(host_detail, 'DEPLOYMENT') + host_deploy_ip = get_host_network_ip(req, host_detail, cluster_networks, 'DEPLOYMENT') + if not host_deploy_ip: + msg = "deployment ip of host %s can't be empty" % host_detail['id'] + raise exception.InvalidNetworkConfig(msg) + host_deploy_macname = host_deploy_network['name'] + if not host_deploy_macname: + msg = "deployment macname of host %s can't be empty" % host_detail['id'] + raise exception.InvalidNetworkConfig(msg) + + host_mgt_ip = get_host_network_ip(req, host_detail, cluster_networks, 'MANAGEMENT') + if not host_mgt_ip: + msg = "management ip of host %s can't be empty" % host_detail['id'] + raise exception.InvalidNetworkConfig(msg) + + memmode = 'tiny' + host_memory = 0 + + if host_detail.has_key('memory'): + host_memory = (int(host_detail['memory']['total'].strip().split()[0]))/(1024*1024) + + if host_memory < 8: + memmode = 'tiny' + elif host_memory < 16: + memmode = 'small' + elif host_memory < 32: + memmode = 'medium' + else: + memmode = 'large' + + + deploy_node_cfg = {} + deploy_node_cfg.update({'hostid':host_detail['id']}) + deploy_node_cfg.update({'hostname':host_detail['name']}) + deploy_node_cfg.update({'nodeip':host_deploy_ip}) + deploy_node_cfg.update({'MacName':host_deploy_macname}) + deploy_node_cfg.update({'memmode':memmode}) + deploy_node_cfg.update({'mgtip':host_mgt_ip}) + return deploy_node_cfg + +def get_roles_and_hosts_list(req, cluster_id): + roles_id_list = set() + hosts_id_list = set() + hosts_list = [] + + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + for role in roles: + if role['deployment_backend'] != daisy_cmn.zenic_backend_name: + continue + role_hosts = daisy_cmn.get_hosts_of_role(req, role['id']) + if role_hosts: + for role_host in role_hosts: + if role_host['host_id'] not in hosts_id_list: + host = daisy_cmn.get_host_detail(req, role_host['host_id']) + host_ip = get_host_network_ip(req, host, cluster_networks, 'MANAGEMENT') + hosts_id_list.add(host['id']) + + host_cfg = {} + host_cfg['mgtip'] = host_ip + host_cfg['rootpwd'] = host['root_pwd'] + hosts_list.append(host_cfg) + + roles_id_list.add(role['id']) + + return (roles_id_list, hosts_list) + +def check_and_get_zenic_version(daisy_zenic_pkg_path): + zenic_version_pkg_file = "" + zenic_version_pkg_name = "" + get_zenic_version_pkg = "ls %s| grep ^ZENIC.*\.zip$" % 
daisy_zenic_pkg_path + obj = subprocess.Popen(get_zenic_version_pkg, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + if stdoutput: + zenic_version_pkg_name = stdoutput.split('\n')[0] + zenic_version_pkg_file = daisy_zenic_pkg_path + zenic_version_pkg_name + chmod_for_zenic_version = 'chmod +x %s' % zenic_version_pkg_file + daisy_cmn.subprocess_call(chmod_for_zenic_version) + return (zenic_version_pkg_file,zenic_version_pkg_name) + +class ZenicShellExector(): + """ + Class config task before install zenic bin. + """ + def __init__(self, mgt_ip, task_type, params={}): + self.task_type = task_type + self.mgt_ip = mgt_ip + self.params = params + self.clush_cmd = "" + self.PKG_NAME = self.params['pkg_name'] + self.PKG_PATH = daisy_zenic_path + self.PKG_NAME + self.CFG_PATH =daisy_zenic_path + mgt_ip + "_zenic.conf" + self.oper_type = { + 'install' : self._install_pkg + } + self.oper_shell = { + 'CMD_SSHPASS_PRE' : "sshpass -p ossdbg1 %(ssh_ip)s %(cmd)s", + 'CMD_CFG_SCP' : "scp %(path)s root@%(ssh_ip)s:/etc/zenic/config" % + {'path': self.CFG_PATH, 'ssh_ip':mgt_ip}, + 'CMD_PKG_UNZIP' : "unzip /home/workspace/%(pkg_name)s -d /home/workspace/PKG" % {'pkg_name':self.PKG_NAME}, + 'CMD_PKG_SCP' : "scp %(path)s root@%(ssh_ip)s:/home/workspace/" % + {'path': self.PKG_PATH, 'ssh_ip':mgt_ip} + } + + self._execute() + + def _install_pkg(self): + if not os.path.exists(self.CFG_PATH): + LOG.error(_("<<>>" % self.CFG_PATH)) + return + + if not os.path.exists(self.PKG_PATH): + LOG.error(_("<<>>" % self.PKG_PATH)) + return + + self.clush_cmd = "%s;%s;%s" % \ + (self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip":"", "cmd":self.oper_shell['CMD_PKG_SCP']}, \ + self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip":"", "cmd":self.oper_shell['CMD_CFG_SCP']}, \ + self.oper_shell['CMD_SSHPASS_PRE'] % + {"ssh_ip":"ssh " + self.mgt_ip, "cmd":self.oper_shell['CMD_PKG_UNZIP']}) + + subprocess.check_output(self.clush_cmd, shell = True, stderr=subprocess.STDOUT) + + def _execute(self): + try: + if not self.task_type or not self.mgt_ip : + LOG.error(_("<<>>")) + return + + self.oper_type[self.task_type]() + except subprocess.CalledProcessError as e: + LOG.warn(_("<<>>" % e.output.strip())) + except Exception as e: + LOG.exception(_(e.message)) + else: + LOG.info(_("<<>>" % self.clush_cmd)) diff --git a/code/daisy/daisy/api/backends/zenic/config.py b/code/daisy/daisy/api/backends/zenic/config.py new file mode 100755 index 00000000..3231c80e --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/config.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- +import os +import re +import commands +import types +import subprocess +from ConfigParser import ConfigParser +from daisy.common import exception + + + +default_zenic_conf_template_path = "/var/lib/daisy/zenic/" +zenic_conf_template_path = default_zenic_conf_template_path + +def update_conf(zenic, key, value): + zenic.set("general", key, value) + +def get_conf(zenic_conf_file, **kwargs): + result = {} + if not kwargs: + return result + + zenic = ConfigParser() + zenic.optionxform = str + zenic.read(zenic_conf_file) + + result = {key : zenic.get("general", kwargs.get(key, None)) + for key in kwargs.keys() + if zenic.has_option("general", kwargs.get(key, None))} + return result + +def get_nodeid(deploy_ip,zbp_ips): + nodeid = 0 + i = 0 + for ip in zbp_ips: + if deploy_ip == ip: + break + else: + i=i+1 + + if i == 0: + nodeid = 1 + elif i == 1: + nodeid = 256 + else: + nodeid = i + + return nodeid + + +def 
update_zenic_conf(config_data, cluster_conf_path): + print "zenic config data is:" + import pprint + pprint.pprint(config_data) + + daisy_zenic_path = zenic_conf_template_path + zenic_conf_template_file = os.path.join(daisy_zenic_path, "zenic.conf") + if not os.path.exists(cluster_conf_path): + os.makedirs(cluster_conf_path) + + zenic = ConfigParser() + zenic.optionxform = str + zenic.read(zenic_conf_template_file) + + zbpips = '' + for ip in config_data['zbp_ips']: + if not zbpips: + zbpips = ip + else: + zbpips = zbpips + ',' + ip + update_conf(zenic, 'zbpips', zbpips) + update_conf(zenic, 'zbp_node_num', config_data['zbp_node_num']) + nodelist = '1,256' + if len(config_data['zbp_ips']) > 2: + for i in range(2,len(config_data['zbp_ips'])): + nodelist = nodelist + ',' + 'i' + update_conf(zenic, 'zbpnodelist',nodelist) + + zampips = '' + for ip in config_data['zamp_ips']: + if not zampips: + zampips = ip + else: + zampips = zampips + ',' + ip + update_conf(zenic, 'zampips', zampips) + update_conf(zenic, 'zamp_node_num', config_data['zamp_node_num']) + + + mongodbips = '' + for ip in config_data['mongodb_ips']: + if not mongodbips: + mongodbips = ip + else: + mongodbips = mongodbips + ',' + ip + update_conf(zenic, 'mongodbips', mongodbips) + update_conf(zenic, 'mongodb_node_num', config_data['mongodb_node_num']) + + update_conf(zenic, 'zamp_vip', config_data['zamp_vip']) + update_conf(zenic, 'mongodb_vip', config_data['mongodb_vip']) + + + deploy_hosts = config_data['deploy_hosts'] + for deploy_host in deploy_hosts: + nodeip = deploy_host['nodeip'] + hostname = deploy_host['hostname'] + MacName = deploy_host['MacName'] + memmode = deploy_host['memmode'] + + update_conf(zenic,'nodeip',nodeip) + update_conf(zenic,'hostname',hostname) + update_conf(zenic,'MacName',MacName) + update_conf(zenic,'memmode',memmode) + + nodeid = get_nodeid(nodeip,config_data['zbp_ips']) + update_conf(zenic,'nodeid',nodeid) + + if nodeip in config_data['zamp_ips']: + update_conf(zenic,'needzamp','y') + else: + update_conf(zenic,'needzamp','n') + + zenic_conf = "%s_zenic.conf" % deploy_host['mgtip'] + zenic_conf_cluster_out = os.path.join(cluster_conf_path, zenic_conf) + zenic_conf_out = os.path.join(daisy_zenic_path, zenic_conf) + zenic.write(open(zenic_conf_cluster_out, "w+")) + + with open(zenic_conf_cluster_out,'r') as fr,open(zenic_conf_out,'w') as fw: + for line in fr.readlines(): + fw.write(line.replace(' ', '')) + return + + + +def test(): + print("Hello, world!") diff --git a/code/daisy/daisy/api/backends/zenic/install.py b/code/daisy/daisy/api/backends/zenic/install.py new file mode 100755 index 00000000..4485def7 --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/install.py @@ -0,0 +1,450 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
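# ---------------------------------------------------------------------------
# Illustrative aside, not part of this patch: update_zenic_conf() above reads
# the zenic.conf template with ConfigParser, overrides keys in its [general]
# section per deploy host, and writes one "<mgt_ip>_zenic.conf" per node.
# A minimal sketch of that read/modify/write pattern; the template path, key
# names and helper name are hypothetical, and the template is assumed to
# already contain a [general] section (as the code above assumes):
import os
from ConfigParser import ConfigParser


def write_node_conf(template_file, out_dir, mgt_ip, values):
    """Copy the template, override [general] keys, write <mgt_ip>_zenic.conf."""
    conf = ConfigParser()
    conf.optionxform = str  # keep option names case-sensitive, as above
    conf.read(template_file)
    for key, value in values.items():
        conf.set("general", key, str(value))
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_file = os.path.join(out_dir, "%s_zenic.conf" % mgt_ip)
    with open(out_file, "w+") as fp:
        conf.write(fp)
    return out_file
# ---------------------------------------------------------------------------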
+ +""" +/install endpoint for zenic API +""" +import os +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError + +from threading import Thread, Lock +import threading + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +import daisy.registry.client.v1.api as registry +from daisy.api.backends.zenic import config +from daisy.api.backends import driver +from daisy.api.network_api import network as neutron +from ironicclient import client as ironic_client +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.zenic.common as zenic_cmn + + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +install_opts = [ + cfg.StrOpt('max_parallel_os_number', default=10, + help='Maximum number of hosts install os at the same time.'), +] +CONF.register_opts(install_opts) + +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + + +host_os_status = { + 'INIT' : 'init', + 'INSTALLING' : 'installing', + 'ACTIVE' : 'active', + 'FAILED': 'install-failed' +} + +zenic_state = zenic_cmn.ZENIC_STATE +daisy_zenic_path = zenic_cmn.daisy_zenic_path + +install_zenic_progress=0.0 +install_mutex = threading.Lock() + +def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0): + """ + Write install progress and status to db, we use global lock object 'install_mutex' + to make sure this function is thread safety. + :param req: http req. + :param role_id_list: Column neeb be update in role table. + :param status: install status. 
+ :return: + """ + + global install_mutex + global install_zenic_progress + install_mutex.acquire(True) + install_zenic_progress += progress_percentage_step + role = {} + for role_id in role_id_list: + if 0 == cmp(status, zenic_state['INSTALLING']): + role['status'] = status + role['progress'] = install_zenic_progress + if 0 == cmp(status, zenic_state['INSTALL_FAILED']): + role['status'] = status + elif 0 == cmp(status, zenic_state['ACTIVE']): + role['status'] = status + role['progress'] = 100 + daisy_cmn.update_role(req, role_id, role) + install_mutex.release() + +def _ping_hosts_test(ips): + ping_cmd = 'fping' + for ip in set(ips): + ping_cmd = ping_cmd + ' ' + ip + obj = subprocess.Popen(ping_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdoutput, erroutput) = obj.communicate() + _returncode = obj.returncode + if _returncode == 0 or _returncode == 1: + ping_result = stdoutput.split('\n') + unreachable_hosts = [result.split()[0] for result in ping_result if result and result.split()[2] != 'alive'] + else: + msg = "ping failed beaceuse there is invlid ip in %s" % ips + raise exception.InvalidIP(msg) + return unreachable_hosts + +def _check_ping_hosts(ping_ips, max_ping_times): + if not ping_ips: + LOG.info(_("no ip got for ping test")) + return ping_ips + ping_count = 0 + time_step = 5 + LOG.info(_("begin ping test for %s" % ','.join(ping_ips))) + while True: + if ping_count == 0: + ips = _ping_hosts_test(ping_ips) + else: + ips = _ping_hosts_test(ips) + + ping_count += 1 + if ips: + LOG.debug(_("ping host %s for %s times" % (','.join(ips), ping_count))) + if ping_count >= max_ping_times: + LOG.info(_("ping host %s timeout for %ss" % (','.join(ips), ping_count*time_step))) + return ips + time.sleep(time_step) + else: + LOG.info(_("ping host %s success" % ','.join(ping_ips))) + time.sleep(120) + LOG.info(_("120s after ping host %s success" % ','.join(ping_ips))) + return ips + + +def _get_host_private_networks(host_detail, cluster_private_networks_name): + host_private_networks = [hi for pn in cluster_private_networks_name + for hi in host_detail['interfaces'] if pn in hi['assigned_networks']] + + # If port type is bond,use pci segment of member port replace pci1 & pci2 segments of bond port + for interface_outer in host_private_networks: + if 0 != cmp(interface_outer.get('type', None), "bond"): + continue + slave1 = interface_outer.get('slave1', None) + slave2 = interface_outer.get('slave2', None) + if not slave1 or not slave2: + continue + interface_outer.pop('pci') + for interface_inner in host_detail['interfaces']: + if 0 == cmp(interface_inner.get('name', None), slave1): + interface_outer['pci1'] = interface_inner['pci'] + elif 0 == cmp(interface_inner.get('name', None), slave2): + interface_outer['pci2'] = interface_inner['pci'] + return host_private_networks + + +def get_cluster_zenic_config(req, cluster_id): + LOG.info(_("get zenic config from database...")) + params = dict(limit=1000000) + + zenic_config = {} + + deploy_hosts = [] + deploy_host_cfg = {} + + mgt_ip = '' + zbp_ip_list = set() + mgt_ip_list = set() + + zamp_ip_list = set() + zamp_vip = '' + + mongodb_ip_list = set() + mongodb_vip= '' + + cluster_networks = daisy_cmn.get_cluster_networks_detail(req, cluster_id) + + all_roles = zenic_cmn.get_roles_detail(req) + + roles = [role for role in all_roles if (role['cluster_id'] == cluster_id and role['deployment_backend'] == daisy_cmn.zenic_backend_name)] + for role in roles: + if not (role['name'] == 'ZENIC_CTL' or role['name'] == 'ZENIC_NFM'): + 
continue + if role['name'] == 'ZENIC_NFM': + if not zamp_vip: + zamp_vip = role['vip'] + if not mongodb_vip: + mongodb_vip = role['mongodb_vip'] + role_hosts = zenic_cmn.get_hosts_of_role(req, role['id']) + + for role_host in role_hosts: + mgt_ip = '' + for deploy_host in deploy_hosts: + if role_host['host_id'] == deploy_host['hostid']: + mgt_ip = deploy_host['mgtip'] + deploy_ip = deploy_host['nodeip'] + break + if not mgt_ip: + host_detail = zenic_cmn.get_host_detail(req, role_host['host_id']) + deploy_host_cfg = zenic_cmn.get_deploy_node_cfg(req, host_detail, cluster_networks) + deploy_hosts.append(deploy_host_cfg) + mgt_ip = deploy_host_cfg['mgtip'] + deploy_ip = deploy_host_cfg['nodeip'] + + + mgt_ip_list.add(mgt_ip) + if role['name'] == 'ZENIC_CTL': + zbp_ip_list.add(deploy_ip) + elif role['name'] == 'ZENIC_NFM': + zamp_ip_list.add(deploy_ip) + mongodb_ip_list.add(deploy_ip) + else: + LOG.warn(_("<<>>" % role['name'])) + + zenic_config.update({'deploy_hosts':deploy_hosts}) + zenic_config.update({'zbp_ips':zbp_ip_list}) + zenic_config.update({'zbp_node_num':len(zbp_ip_list)}) + zenic_config.update({'zamp_ips':zamp_ip_list}) + zenic_config.update({'zamp_node_num':len(zamp_ip_list)}) + zenic_config.update({'mongodb_ips':mongodb_ip_list}) + zenic_config.update({'mongodb_node_num':len(mongodb_ip_list)}) + zenic_config.update({'zamp_vip':zamp_vip}) + zenic_config.update({'mongodb_vip':mongodb_vip}) + return (zenic_config, mgt_ip_list) + +def generate_zenic_config_file(cluster_id, zenic_config): + LOG.info(_("generate zenic config...")) + if zenic_config: + cluster_conf_path = daisy_zenic_path + cluster_id + config.update_zenic_conf(zenic_config, cluster_conf_path) + +def thread_bin(req,host, role_id_list, pkg_name, install_progress_percentage): + host_ip = host['mgtip'] + password = host['rootpwd'] + + cmd = 'mkdir -p /var/log/daisy/daisy_install/' + daisy_cmn.subprocess_call(cmd) + + var_log_path = "/var/log/daisy/daisy_install/%s_install_zenic.log" % host_ip + with open(var_log_path, "w+") as fp: + + cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd,fp) + + cmd = 'clush -S -b -w %s mkdir -p /home/workspace' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + cmd = 'clush -S -b -w %s mkdir -p /etc/zenic' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + cmd = 'clush -S -b -w %s rm -rf /etc/zenic/config' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + cmd = 'clush -S -b -w %s rm -rf /home/zenic' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,) + daisy_cmn.subprocess_call(cmd,fp) + + pkg_file = daisy_zenic_path + pkg_name + cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip,pkg_name) + daisy_cmn.subprocess_call(cmd,fp) + + + cfg_file = daisy_zenic_path + host_ip + "_zenic.conf" + try: + exc_result = subprocess.check_output( + 'sshpass -p ossdbg1 scp %s root@%s:/etc/zenic/config' % (cfg_file,host_ip,), + shell=True, stderr=fp) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) + fp.write(e.output.strip()) + exit() + else: + LOG.info(_("scp zenic config for %s successfully!" 
% host_ip)) + fp.write(exc_result) + + + try: + exc_result = subprocess.check_output( + 'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (pkg_file,host_ip,), + shell=True, stderr=fp) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + LOG.info(_("scp zenic pkg for %s failed!" % host_ip)) + fp.write(e.output.strip()) + exit() + else: + LOG.info(_("scp zenic pkg for %s successfully!" % host_ip)) + fp.write(exc_result) + + cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,pkg_name,) + daisy_cmn.subprocess_call(cmd) + + try: + exc_result = subprocess.check_output( + 'clush -S -b -w %s /home/workspace/unipack/node_install.sh' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + LOG.info(_("install zenic for %s failed!" % host_ip)) + fp.write(e.output.strip()) + exit() + else: + LOG.info(_("install zenic for %s successfully!" % host_ip)) + fp.write(exc_result) + + try: + exc_result = subprocess.check_output( + 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['INSTALL_FAILED']) + LOG.info(_("start zenic for %s failed!" % host_ip)) + fp.write(e.output.strip()) + exit() + else: + update_progress_to_db(req, role_id_list, zenic_state['INSTALLING'], install_progress_percentage) + LOG.info(_("start zenic for %s successfully!" % host_ip)) + fp.write(exc_result) + +class ZENICInstallTask(Thread): + """ + Class for install tecs bin. + """ + """ Definition for install states.""" + INSTALL_STATES = { + 'INIT' : 'init', + 'INSTALLING' : 'installing', + 'ACTIVE' : 'active', + 'FAILED': 'install-failed' + } + + def __init__(self, req, cluster_id): + super(ZENICInstallTask, self).__init__() + self.req = req + self.cluster_id = cluster_id + self.progress = 0 + self.state = ZENICInstallTask.INSTALL_STATES['INIT'] + self.message = "" + self.zenic_config_file = '' + self.mgt_ip_list = '' + self.install_log_fp = None + self.last_line_num = 0 + self.need_install = False + self.ping_times = 36 + self.log_file = "/var/log/daisy/zenic_%s_install.log" % self.cluster_id + + + + + def run(self): + try: + self._run() + except (exception.InstallException, + exception.NotFound, + exception.InstallTimeoutException) as e: + LOG.exception(e.message) + else: + if not self.need_install: + return + self.progress = 100 + self.state = zenic_state['ACTIVE'] + self.message = "Zenic install successfully" + LOG.info(_("install Zenic for cluster %s successfully." 
+ % self.cluster_id)) + + def _run(self): + + (zenic_config, self.mgt_ip_list) = get_cluster_zenic_config(self.req, self.cluster_id) + + if not self.mgt_ip_list: + msg = _("there is no host in cluster %s") % self.cluster_id + raise exception.ThreadBinException(msg) + + unreached_hosts = _check_ping_hosts(self.mgt_ip_list, self.ping_times) + if unreached_hosts: + self.state = zenic_state['INSTALL_FAILED'] + self.message = "hosts %s ping failed" % unreached_hosts + raise exception.NotFound(message=self.message) + + generate_zenic_config_file(self.cluster_id, zenic_config) + + + # check and get ZENIC version + (zenic_version_pkg_file,zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path) + if not zenic_version_pkg_file: + self.state = zenic_state['INSTALL_FAILED'] + self.message = "ZENIC version file not found in %s" % daisy_zenic_path + raise exception.NotFound(message=self.message) + + (role_id_list, hosts_list) = zenic_cmn.get_roles_and_hosts_list(self.req, self.cluster_id) + + update_progress_to_db(self.req, role_id_list, zenic_state['INSTALLING'], 0.0) + install_progress_percentage = round(1*1.0/len(hosts_list), 2)*100 + + threads = [] + for host in hosts_list: + t = threading.Thread(target=thread_bin,args=(self.req,host,role_id_list,zenic_version_pkg_name,install_progress_percentage)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("install threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join install thread %s failed!" % t)) + else: + install_failed_flag = False + for role_id in role_id_list: + role = daisy_cmn.get_role_detail(self.req, role_id) + if role['progress'] == 0: + update_progress_to_db(self.req, role_id_list, zenic_state['INSTALL_FAILED']) + install_failed_flag = True + break + if role['status'] == zenic_state['INSTALL_FAILED']: + install_failed_flag = True + break + if not install_failed_flag: + LOG.info(_("all install threads have done, set all roles status to 'active'!")) + update_progress_to_db(self.req, role_id_list, zenic_state['ACTIVE']) + + diff --git a/code/daisy/daisy/api/backends/zenic/uninstall.py b/code/daisy/daisy/api/backends/zenic/uninstall.py new file mode 100755 index 00000000..7c492cdc --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/uninstall.py @@ -0,0 +1,106 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
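# Both the install module above and the uninstall module below serialize their
# role-table updates behind a module-level lock; the following is a minimal,
# standalone sketch of that pattern (record_progress and update_role are
# hypothetical stand-ins for update_progress_to_db and daisy_cmn.update_role,
# not part of this patch).
import threading

_mutex = threading.Lock()
_progress = 0.0

def record_progress(role_ids, status, step, update_role):
    global _progress
    with _mutex:  # same effect as the acquire(True)/release() pair used in the patch
        _progress += step
        for role_id in role_ids:
            update_role(role_id, {'status': status, 'progress': _progress})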
+ +""" +/hosts endpoint for Daisy v1 API +""" + +import os +import webob.exc +import subprocess + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden + +from threading import Thread, Lock +import threading +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.api.backends.zenic.common import ZenicShellExector +import daisy.api.backends.common as daisy_cmn +import daisy.api.backends.zenic.common as zenic_cmn + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +zenic_state = zenic_cmn.ZENIC_STATE + +uninstall_zenic_progress=100.0 +uninstall_mutex = threading.Lock() + +def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0): + """ + Write uninstall progress and status to db, we use global lock object 'uninstall_mutex' + to make sure this function is thread safety. + :param req: http req. + :param role_id_list: Column neeb be update in role table. + :param status: Uninstall status. + :return: + """ + + global uninstall_mutex + global uninstall_zenic_progress + uninstall_mutex.acquire(True) + uninstall_zenic_progress -= progress_percentage_step + role = {} + for role_id in role_id_list: + if 0 == cmp(status, zenic_state['UNINSTALLING']): + role['status'] = status + role['progress'] = uninstall_zenic_progress + if 0 == cmp(status, zenic_state['UNINSTALL_FAILED']): + role['status'] = status + elif 0 == cmp(status, zenic_state['INIT']): + role['status'] = status + role['progress'] = 0 + daisy_cmn.update_role(req, role_id, role) + uninstall_mutex.release() + +def thread_bin(req, host, role_id_list,uninstall_progress_percentage): + host_ip = host['mgtip'] + password = host['rootpwd'] + cmd = 'mkdir -p /var/log/daisy/daisy_uninstall/' + daisy_cmn.subprocess_call(cmd) + var_log_path = "/var/log/daisy/daisy_uninstall/%s_uninstall_zenic.log" % host_ip + with open(var_log_path, "w+") as fp: + cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password) + daisy_cmn.subprocess_call(cmd,fp) + + try: + exc_result = subprocess.check_output( + 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['UNINSTALL_FAILED']) + fp.write(e.output.strip()) + else: + update_progress_to_db(req, role_id_list, zenic_state['UNINSTALLING'], uninstall_progress_percentage) + fp.write(exc_result) + + diff --git a/code/daisy/daisy/api/backends/zenic/upgrade.py b/code/daisy/daisy/api/backends/zenic/upgrade.py new file mode 100755 index 00000000..54f63d35 --- /dev/null +++ b/code/daisy/daisy/api/backends/zenic/upgrade.py @@ -0,0 +1,158 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+/update endpoint for Daisy v1 API
+"""
+
+import os
+import webob.exc
+import subprocess
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from webob.exc import HTTPBadRequest
+from webob.exc import HTTPForbidden
+
+from threading import Thread, Lock
+import threading
+from daisy import i18n
+from daisy import notifier
+
+from daisy.api import policy
+import daisy.api.v1
+
+from daisy.common import exception
+from daisy.common import property_utils
+from daisy.common import utils
+from daisy.common import wsgi
+from daisy.api.v1 import controller
+from daisy.api.v1 import filters
+from daisy.api.backends.zenic.common import ZenicShellExector
+import daisy.api.backends.common as daisy_cmn
+import daisy.api.backends.zenic.common as zenic_cmn
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+_LE = i18n._LE
+_LI = i18n._LI
+_LW = i18n._LW
+
+zenic_state = zenic_cmn.ZENIC_STATE
+daisy_zenic_path = zenic_cmn.daisy_zenic_path
+
+update_zenic_progress = 0.0
+update_mutex = threading.Lock()
+
+def update_progress_to_db(req, role_id_list, status, progress_percentage_step=0.0):
+    """
+    Write update progress and status to db; the global lock object 'update_mutex'
+    makes sure this function is thread safe.
+    :param req: http req.
+    :param role_id_list: Role ids whose rows will be updated in the role table.
+    :param status: Update status.
+    :return:
+    """
+    global update_mutex
+    global update_zenic_progress
+    update_mutex.acquire(True)
+    update_zenic_progress += progress_percentage_step
+    role = {}
+    for role_id in role_id_list:
+        if 0 == cmp(status, zenic_state['UPDATING']):
+            role['status'] = status
+            role['progress'] = update_zenic_progress
+        if 0 == cmp(status, zenic_state['UPDATE_FAILED']):
+            role['status'] = status
+        elif 0 == cmp(status, zenic_state['ACTIVE']):
+            role['status'] = status
+            role['progress'] = 100
+        daisy_cmn.update_role(req, role_id, role)
+    update_mutex.release()
+
+
+def thread_bin(req, host, role_id_list, update_progress_percentage):
+    (zenic_version_pkg_file, zenic_version_pkg_name) = zenic_cmn.check_and_get_zenic_version(daisy_zenic_path)
+    if not zenic_version_pkg_file:
+        update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
+        message = "ZENIC version file not found in %s" % daisy_zenic_path
+        raise exception.NotFound(message=message)
+
+    host_ip = host['mgtip']
+    password = host['rootpwd']
+
+    cmd = 'mkdir -p /var/log/daisy/daisy_upgrade/'
+    daisy_cmn.subprocess_call(cmd)
+
+    var_log_path = "/var/log/daisy/daisy_upgrade/%s_upgrade_zenic.log" % host_ip
+    with open(var_log_path, "w+") as fp:
+        cmd = '/var/lib/daisy/zenic/trustme.sh %s %s' % (host_ip, password)
+        daisy_cmn.subprocess_call(cmd, fp)
+        cmd = 'clush -S -b -w %s /home/zenic/node_stop.sh' % (host_ip,)
+        daisy_cmn.subprocess_call(cmd, fp)
+
+        cmd = 'clush -S -b -w %s rm -rf /home/workspace/%s' % (host_ip, zenic_version_pkg_name)
+        daisy_cmn.subprocess_call(cmd, fp)
+
+        cmd = 'clush -S -b -w %s rm -rf /home/workspace/unipack' % (host_ip,)
+        daisy_cmn.subprocess_call(cmd, fp)
+
+        try:
+            exc_result = subprocess.check_output(
+                'sshpass -p ossdbg1 scp %s root@%s:/home/workspace/' % (zenic_version_pkg_file, host_ip,),
+                shell=True, stderr=fp)
+        except subprocess.CalledProcessError as e:
+            update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED'])
+            LOG.info(_("scp zenic pkg for %s failed!" % host_ip))
+            fp.write(e.output.strip())
+            exit()
+        else:
+            LOG.info(_("scp zenic pkg for %s successfully!"
% host_ip)) + fp.write(exc_result) + + cmd = 'clush -S -b -w %s unzip /home/workspace/%s -d /home/workspace/unipack' % (host_ip,zenic_version_pkg_name,) + daisy_cmn.subprocess_call(cmd) + + try: + exc_result = subprocess.check_output( + 'clush -S -b -w %s /home/workspace/unipack/node_upgrade.sh' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + LOG.info(_("Upgrade zenic for %s failed!" % host_ip)) + fp.write(e.output.strip()) + else: + update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) + LOG.info(_("Upgrade zenic for %s successfully!" % host_ip)) + fp.write(exc_result) + + try: + exc_result = subprocess.check_output( + 'clush -S -b -w %s /home/zenic/node_start.sh' % (host_ip,), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_progress_to_db(req, role_id_list, zenic_state['UPDATE_FAILED']) + LOG.info(_("Start zenic for %s failed!" % host_ip)) + fp.write(e.output.strip()) + else: + update_progress_to_db(req, role_id_list, zenic_state['UPDATING'], update_progress_percentage) + LOG.info(_("Start zenic for %s successfully!" % host_ip)) + fp.write(exc_result) + + diff --git a/code/daisy/daisy/api/cached_images.py b/code/daisy/daisy/api/cached_images.py new file mode 100755 index 00000000..363a2909 --- /dev/null +++ b/code/daisy/daisy/api/cached_images.py @@ -0,0 +1,125 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Controller for Image Cache Management API +""" + +import webob.exc + +from daisy.api import policy +from daisy.api.v1 import controller +from daisy.common import exception +from daisy.common import wsgi +from daisy import image_cache + + +class Controller(controller.BaseController): + """ + A controller for managing cached images. + """ + + def __init__(self): + self.cache = image_cache.ImageCache() + self.policy = policy.Enforcer() + + def _enforce(self, req): + """Authorize request against 'manage_image_cache' policy""" + try: + self.policy.enforce(req.context, 'manage_image_cache', {}) + except exception.Forbidden: + raise webob.exc.HTTPForbidden() + + def get_cached_images(self, req): + """ + GET /cached_images + + Returns a mapping of records about cached images. + """ + self._enforce(req) + images = self.cache.get_cached_images() + return dict(cached_images=images) + + def delete_cached_image(self, req, image_id): + """ + DELETE /cached_images/ + + Removes an image from the cache. + """ + self._enforce(req) + self.cache.delete_cached_image(image_id) + + def delete_cached_images(self, req): + """ + DELETE /cached_images - Clear all active cached images + + Removes all images from the cache. + """ + self._enforce(req) + return dict(num_deleted=self.cache.delete_all_cached_images()) + + def get_queued_images(self, req): + """ + GET /queued_images + + Returns a mapping of records about queued images. 
+ """ + self._enforce(req) + images = self.cache.get_queued_images() + return dict(queued_images=images) + + def queue_image(self, req, image_id): + """ + PUT /queued_images/ + + Queues an image for caching. We do not check to see if + the image is in the registry here. That is done by the + prefetcher... + """ + self._enforce(req) + self.cache.queue_image(image_id) + + def delete_queued_image(self, req, image_id): + """ + DELETE /queued_images/ + + Removes an image from the cache. + """ + self._enforce(req) + self.cache.delete_queued_image(image_id) + + def delete_queued_images(self, req): + """ + DELETE /queued_images - Clear all active queued images + + Removes all images from the cache. + """ + self._enforce(req) + return dict(num_deleted=self.cache.delete_all_queued_images()) + + +class CachedImageDeserializer(wsgi.JSONRequestDeserializer): + pass + + +class CachedImageSerializer(wsgi.JSONResponseSerializer): + pass + + +def create_resource(): + """Cached Images resource factory method""" + deserializer = CachedImageDeserializer() + serializer = CachedImageSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/common.py b/code/daisy/daisy/api/common.py new file mode 100755 index 00000000..71a98990 --- /dev/null +++ b/code/daisy/daisy/api/common.py @@ -0,0 +1,220 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re + +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import units + +from daisy.common import exception +from daisy.common import wsgi +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +CONF = cfg.CONF + +_CACHED_THREAD_POOL = {} + + +def size_checked_iter(response, image_meta, expected_size, image_iter, + notifier): + image_id = image_meta['id'] + bytes_written = 0 + + def notify_image_sent_hook(env): + image_send_notification(bytes_written, expected_size, + image_meta, response.request, notifier) + + # Add hook to process after response is fully sent + if 'eventlet.posthooks' in response.request.environ: + response.request.environ['eventlet.posthooks'].append( + (notify_image_sent_hook, (), {})) + + try: + for chunk in image_iter: + yield chunk + bytes_written += len(chunk) + except Exception as err: + with excutils.save_and_reraise_exception(): + msg = (_LE("An error occurred reading from backend storage for " + "image %(image_id)s: %(err)s") % {'image_id': image_id, + 'err': err}) + LOG.error(msg) + + if expected_size != bytes_written: + msg = (_LE("Backend storage for image %(image_id)s " + "disconnected after writing only %(bytes_written)d " + "bytes") % {'image_id': image_id, + 'bytes_written': bytes_written}) + LOG.error(msg) + raise exception.DaisyException(_("Corrupt image download for " + "image %(image_id)s") % + {'image_id': image_id}) + + +def image_send_notification(bytes_written, expected_size, image_meta, request, + notifier): + """Send an image.send message to the notifier.""" + try: + context = request.context + payload = { + 'bytes_sent': bytes_written, + 'image_id': image_meta['id'], + 'owner_id': image_meta['owner'], + 'receiver_tenant_id': context.tenant, + 'receiver_user_id': context.user, + 'destination_ip': request.remote_addr, + } + if bytes_written != expected_size: + notify = notifier.error + else: + notify = notifier.info + + notify('image.send', payload) + + except Exception as err: + msg = (_LE("An error occurred during image.send" + " notification: %(err)s") % {'err': err}) + LOG.error(msg) + + +def get_remaining_quota(context, db_api, image_id=None): + """Method called to see if the user is allowed to store an image. + + Checks if it is allowed based on the given size in glance based on their + quota and current usage. + + :param context: + :param db_api: The db_api in use for this configuration + :param image_id: The image that will be replaced with this new data size + :return: The number of bytes the user has remaining under their quota. + None means infinity + """ + + # NOTE(jbresnah) in the future this value will come from a call to + # keystone. 
+ users_quota = CONF.user_storage_quota + + # set quota must have a number optionally followed by B, KB, MB, + # GB or TB without any spaces in between + pattern = re.compile('^(\d+)((K|M|G|T)?B)?$') + match = pattern.match(users_quota) + + if not match: + LOG.error(_LE("Invalid value for option user_storage_quota: " + "%(users_quota)s") + % {'users_quota': users_quota}) + raise exception.InvalidOptionValue(option='user_storage_quota', + value=users_quota) + + quota_value, quota_unit = (match.groups())[0:2] + # fall back to Bytes if user specified anything other than + # permitted values + quota_unit = quota_unit or "B" + factor = getattr(units, quota_unit.replace('B', 'i'), 1) + users_quota = int(quota_value) * factor + + if users_quota <= 0: + return + + usage = db_api.user_get_storage_usage(context, + context.owner, + image_id=image_id) + return users_quota - usage + + +def check_quota(context, image_size, db_api, image_id=None): + """Method called to see if the user is allowed to store an image. + + Checks if it is allowed based on the given size in glance based on their + quota and current usage. + + :param context: + :param image_size: The size of the image we hope to store + :param db_api: The db_api in use for this configuration + :param image_id: The image that will be replaced with this new data size + :return: + """ + + remaining = get_remaining_quota(context, db_api, image_id=image_id) + + if remaining is None: + return + + user = getattr(context, 'user', '') + + if image_size is None: + # NOTE(jbresnah) When the image size is None it means that it is + # not known. In this case the only time we will raise an + # exception is when there is no room left at all, thus we know + # it will not fit + if remaining <= 0: + LOG.warn(_LW("User %(user)s attempted to upload an image of" + " unknown size that will exceed the quota." + " %(remaining)d bytes remaining.") + % {'user': user, 'remaining': remaining}) + raise exception.StorageQuotaFull(image_size=image_size, + remaining=remaining) + return + + if image_size > remaining: + LOG.warn(_LW("User %(user)s attempted to upload an image of size" + " %(size)d that will exceed the quota. %(remaining)d" + " bytes remaining.") + % {'user': user, 'size': image_size, 'remaining': remaining}) + raise exception.StorageQuotaFull(image_size=image_size, + remaining=remaining) + + return remaining + + +def memoize(lock_name): + def memoizer_wrapper(func): + @lockutils.synchronized(lock_name) + def memoizer(lock_name): + if lock_name not in _CACHED_THREAD_POOL: + _CACHED_THREAD_POOL[lock_name] = func() + + return _CACHED_THREAD_POOL[lock_name] + + return memoizer(lock_name) + + return memoizer_wrapper + + +def get_thread_pool(lock_name, size=1024): + """Initializes eventlet thread pool. + + If thread pool is present in cache, then returns it from cache + else create new pool, stores it in cache and return newly created + pool. + + @param lock_name: Name of the lock. + @param size: Size of eventlet pool. 
+ + @return: eventlet pool + """ + @memoize(lock_name) + def _get_thread_pool(): + return wsgi.get_asynchronous_eventlet_pool(size=size) + + return _get_thread_pool diff --git a/code/daisy/daisy/api/configset/__init__.py b/code/daisy/daisy/api/configset/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/configset/clush.py b/code/daisy/daisy/api/configset/clush.py new file mode 100755 index 00000000..4663705d --- /dev/null +++ b/code/daisy/daisy/api/configset/clush.py @@ -0,0 +1,143 @@ + +import subprocess +import daisy.registry.client.v1.api as registry +from daisy.api.backends.tecs import config +from oslo_log import log as logging +import webob.exc + +LOG = logging.getLogger(__name__) +CONFIG_MAP = { + 'cinder_config': '/etc/cinder/cinder.conf', + 'cinder_api_paste_ini': '/etc/cinder/api-paste.ini', + 'glance_api_config': '/etc/glance/glance-api.conf', + 'glance_api_paste_ini': '/etc/glance/glance-api-paste.ini', + } + +class config_clushshell(): + """ Class for clush backend.""" + def __init__(self, req, role_id): + if not req and not role_id: + LOG.error("<<>>") + return + + self.context = req.context + self.role_id = role_id + + self.CLUSH_CMD = "clush -S -w %(management_ip)s \"%(sub_command)s\"" + self.SUB_COMMAND = "openstack-config --set %(config_file)s %(section)s %(key)s %(value)s" + + def _openstack_set_config(self, host_ip, config_set): + """ + Set all config items on one host + :param host_ip: + :param config_set: + :return: + """ + if not host_ip or not config_set: + LOG.debug('<<>>') + return + + sub_command_by_one_host = [] + for config in config_set['config']: + if config['config_version'] == config['running_version']: + continue + + config_file = registry.get_config_file_metadata(self.context, config['config_file_id']) + sub_command_by_one_host.append( + self.SUB_COMMAND % \ + {'config_file':config_file['name'] ,'section':config['section'], + 'key':config['key'], 'value':config['value']}) + + try: + sub_command_by_one_host = ";".join(sub_command_by_one_host) + clush_cmd = self.CLUSH_CMD % {'management_ip':host_ip, 'sub_command':sub_command_by_one_host} + subprocess.check_output(clush_cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + msg = ("<<>>" % (host_ip, e.output.strip())) + LOG.exception(msg) + raise webob.exc.HTTPServerError(explanation=msg) + else: + msg = ("<<>>" % host_ip) + LOG.info(msg) + config['running_version'] = config['config_version'] + + def push_config(self): + """ + Push config to remote host. 
+ :param req: http req + :param role_id: host role id + :return: + """ + self.role_info = registry.get_role_metadata(self.context, self.role_id) + if not self.role_info or not self.role_info.get('config_set_id'): + LOG.error("<<>>") + return + + config_set = registry.get_config_set_metadata(self.context, self.role_info['config_set_id']) + if not config_set or not config_set.has_key('config'): + LOG.info("<<>>") + return + + config_set['config'] = \ + [config for config in config_set['config'] + if config.has_key('config_version') and config.has_key('running_version') + and config['config_version'] != config['running_version']] + + if not config_set['config']: + LOG.info('<<>>' % + self.role_id) + return + + self.role_hosts = registry.get_role_host_metadata(self.context, self.role_id) + current_count = 0 + all_host_config_sets = [] + for role_host in self.role_hosts: + host = registry.get_host_metadata(self.context, role_host['host_id']) + #change by 10166727--------start------------- + host_ip=[] + for interface in host['interfaces']: + find_flag=interface['ip'].find(':') + if find_flag<0: + host_ip=[interface['ip']] + else: + ip_list_tmp=interface['ip'].split(",") + for ip_list in ip_list_tmp: + if ip_list.split(':')[0] == "MANAGEMENT": + host_ip=[str(ip_list.split(':')[1])] + #change by 10166727--------end--------------- + if not host_ip: + continue + host_ip = host_ip[0] + + if 0 != subprocess.call('/var/lib/daisy/tecs/trustme.sh %s %s' % (host_ip, 'ossdbg1'), + shell=True, + stderr=subprocess.STDOUT): + raise Exception("trustme.sh error!") + if not config_set.has_key("config"): + continue + + self._openstack_set_config(host_ip, config_set) + all_host_config_sets.append(config_set) + registry.update_configs_metadata_by_role_hosts(self.context, all_host_config_sets) + + LOG.debug("Update config for host:%s successfully!" % host_ip) + + self._host_service_restart(host_ip) + current_count +=1 + self.role_info['config_set_update_progress'] = round(current_count*1.0/len(self.role_hosts), 2)*100 + registry.update_role_metadata(self.context, self.role_id, self.role_info) + + def _host_service_restart(self,host_ip): + """ """ + for service in self.role_info['service_name']: + for service_detail_name in config.service_map.get(service).split(','): + cmd = "" + if self.role_info['name'] == "CONTROLLER_HA": + cmd = "clush -S -w %s [ `systemctl is-active %s` != 'active' ] && systemctl restart %s" % \ + (host_ip, service_detail_name, service_detail_name) + else: + cmd = "clush -S -w %s systemctl restart %s" % (host_ip, service_detail_name) + if 0 != subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE): + LOG.error("Service %s restart failed in host:%s." 
% (service_detail_name, host_ip)) + + \ No newline at end of file diff --git a/code/daisy/daisy/api/configset/manager.py b/code/daisy/daisy/api/configset/manager.py new file mode 100755 index 00000000..afb46a0e --- /dev/null +++ b/code/daisy/daisy/api/configset/manager.py @@ -0,0 +1,16 @@ +from daisy.api.configset.clush import config_clushshell + +class configBackend(): + def __init__(self, type, req, role_id): + self.type = type + self._instance = None + + if type == "clushshell": + self._instance = config_clushshell(req, role_id) + elif type == "puppet": + pass + + def push_config(self): + self._instance.push_config() + + \ No newline at end of file diff --git a/code/daisy/daisy/api/configset/puppet.py b/code/daisy/daisy/api/configset/puppet.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/middleware/__init__.py b/code/daisy/daisy/api/middleware/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/middleware/cache.py b/code/daisy/daisy/api/middleware/cache.py new file mode 100755 index 00000000..f12e8994 --- /dev/null +++ b/code/daisy/daisy/api/middleware/cache.py @@ -0,0 +1,323 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Transparent image file caching middleware, designed to live on +Glance API nodes. When images are requested from the API node, +this middleware caches the returned image file to local filesystem. + +When subsequent requests for the same image file are received, +the local cached copy of the image file is returned. +""" + +import re + +from oslo_log import log as logging +import webob + +from daisy.api.common import size_checked_iter +from daisy.api import policy +from daisy.api.v1 import images +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n +from daisy import image_cache +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_LI = i18n._LI +_LE = i18n._LE +_LW = i18n._LW + +PATTERNS = { + ('v1', 'GET'): re.compile(r'^/v1/images/([^\/]+)$'), + ('v1', 'DELETE'): re.compile(r'^/v1/images/([^\/]+)$'), + ('v2', 'GET'): re.compile(r'^/v2/images/([^\/]+)/file$'), + ('v2', 'DELETE'): re.compile(r'^/v2/images/([^\/]+)$') +} + + +class CacheFilter(wsgi.Middleware): + + def __init__(self, app): + self.cache = image_cache.ImageCache() + self.serializer = images.ImageSerializer() + self.policy = policy.Enforcer() + LOG.info(_LI("Initialized image cache middleware")) + super(CacheFilter, self).__init__(app) + + def _verify_metadata(self, image_meta): + """ + Sanity check the 'deleted' and 'size' metadata values. + """ + # NOTE: admins can see image metadata in the v1 API, but shouldn't + # be able to download the actual image data. 
+ if image_meta['status'] == 'deleted' and image_meta['deleted']: + raise exception.NotFound() + + if not image_meta['size']: + # override image size metadata with the actual cached + # file size, see LP Bug #900959 + image_meta['size'] = self.cache.get_image_size(image_meta['id']) + + @staticmethod + def _match_request(request): + """Determine the version of the url and extract the image id + + :returns tuple of version and image id if the url is a cacheable, + otherwise None + """ + for ((version, method), pattern) in PATTERNS.items(): + if request.method != method: + continue + match = pattern.match(request.path_info) + if match is None: + continue + image_id = match.group(1) + # Ensure the image id we got looks like an image id to filter + # out a URI like /images/detail. See LP Bug #879136 + if image_id != 'detail': + return (version, method, image_id) + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg, request=req) + + def _get_v1_image_metadata(self, request, image_id): + """ + Retrieves image metadata using registry for v1 api and creates + dictionary-like mash-up of image core and custom properties. + """ + try: + image_metadata = registry.get_image_metadata(request.context, + image_id) + return utils.create_mashup_dict(image_metadata) + except exception.NotFound as e: + LOG.debug("No metadata found for image '%s'" % image_id) + raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) + + def _get_v2_image_metadata(self, request, image_id): + """ + Retrieves image and for v2 api and creates adapter like object + to access image core or custom properties on request. + """ + db_api = daisy.db.get_api() + image_repo = daisy.db.ImageRepo(request.context, db_api) + try: + image = image_repo.get(image_id) + # Storing image object in request as it is required in + # _process_v2_request call. + request.environ['api.cache.image'] = image + + return policy.ImageTarget(image) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg, request=request) + + def process_request(self, request): + """ + For requests for an image file, we check the local image + cache. If present, we return the image file, appending + the image metadata in headers. If not present, we pass + the request on to the next application in the pipeline. 
+ """ + match = self._match_request(request) + try: + (version, method, image_id) = match + except TypeError: + # Trying to unpack None raises this exception + return None + + self._stash_request_info(request, image_id, method, version) + + if request.method != 'GET' or not self.cache.is_cached(image_id): + return None + method = getattr(self, '_get_%s_image_metadata' % version) + image_metadata = method(request, image_id) + + # Deactivated images shall not be served from cache + if image_metadata['status'] == 'deactivated': + return None + + try: + self._enforce(request, 'download_image', target=image_metadata) + except exception.Forbidden: + return None + + LOG.debug("Cache hit for image '%s'", image_id) + image_iterator = self.get_from_cache(image_id) + method = getattr(self, '_process_%s_request' % version) + + try: + return method(request, image_id, image_iterator, image_metadata) + except exception.NotFound: + msg = _LE("Image cache contained image file for image '%s', " + "however the registry did not contain metadata for " + "that image!") % image_id + LOG.error(msg) + self.cache.delete_cached_image(image_id) + + @staticmethod + def _stash_request_info(request, image_id, method, version): + """ + Preserve the image id, version and request method for later retrieval + """ + request.environ['api.cache.image_id'] = image_id + request.environ['api.cache.method'] = method + request.environ['api.cache.version'] = version + + @staticmethod + def _fetch_request_info(request): + """ + Preserve the cached image id, version for consumption by the + process_response method of this middleware + """ + try: + image_id = request.environ['api.cache.image_id'] + method = request.environ['api.cache.method'] + version = request.environ['api.cache.version'] + except KeyError: + return None + else: + return (image_id, method, version) + + def _process_v1_request(self, request, image_id, image_iterator, + image_meta): + # Don't display location + if 'location' in image_meta: + del image_meta['location'] + image_meta.pop('location_data', None) + self._verify_metadata(image_meta) + + response = webob.Response(request=request) + raw_response = { + 'image_iterator': image_iterator, + 'image_meta': image_meta, + } + return self.serializer.show(response, raw_response) + + def _process_v2_request(self, request, image_id, image_iterator, + image_meta): + # We do some contortions to get the image_metadata so + # that we can provide it to 'size_checked_iter' which + # will generate a notification. + # TODO(mclaren): Make notification happen more + # naturally once caching is part of the domain model. + image = request.environ['api.cache.image'] + self._verify_metadata(image_meta) + response = webob.Response(request=request) + response.app_iter = size_checked_iter(response, image_meta, + image_meta['size'], + image_iterator, + notifier.Notifier()) + # NOTE (flwang): Set the content-type, content-md5 and content-length + # explicitly to be consistent with the non-cache scenario. + # Besides, it's not worth the candle to invoke the "download" method + # of ResponseSerializer under image_data. Because method "download" + # will reset the app_iter. Then we have to call method + # "size_checked_iter" to avoid missing any notification. 
But after + # call "size_checked_iter", we will lose the content-md5 and + # content-length got by the method "download" because of this issue: + # https://github.com/Pylons/webob/issues/86 + response.headers['Content-Type'] = 'application/octet-stream' + response.headers['Content-MD5'] = image.checksum + response.headers['Content-Length'] = str(image.size) + return response + + def process_response(self, resp): + """ + We intercept the response coming back from the main + images Resource, removing image file from the cache + if necessary + """ + status_code = self.get_status_code(resp) + if not 200 <= status_code < 300: + return resp + + try: + (image_id, method, version) = self._fetch_request_info( + resp.request) + except TypeError: + return resp + + if method == 'GET' and status_code == 204: + # Bugfix:1251055 - Don't cache non-existent image files. + # NOTE: Both GET for an image without locations and DELETE return + # 204 but DELETE should be processed. + return resp + + method_str = '_process_%s_response' % method + try: + process_response_method = getattr(self, method_str) + except AttributeError: + LOG.error(_LE('could not find %s') % method_str) + # Nothing to do here, move along + return resp + else: + return process_response_method(resp, image_id, version=version) + + def _process_DELETE_response(self, resp, image_id, version=None): + if self.cache.is_cached(image_id): + LOG.debug("Removing image %s from cache", image_id) + self.cache.delete_cached_image(image_id) + return resp + + def _process_GET_response(self, resp, image_id, version=None): + image_checksum = resp.headers.get('Content-MD5') + if not image_checksum: + # API V1 stores the checksum in a different header: + image_checksum = resp.headers.get('x-image-meta-checksum') + + if not image_checksum: + LOG.error(_LE("Checksum header is missing.")) + + # fetch image_meta on the basis of version + image_metadata = None + if version: + method = getattr(self, '_get_%s_image_metadata' % version) + image_metadata = method(resp.request, image_id) + # NOTE(zhiyan): image_cache return a generator object and set to + # response.app_iter, it will be called by eventlet.wsgi later. + # So we need enforce policy firstly but do it by application + # since eventlet.wsgi could not catch webob.exc.HTTPForbidden and + # return 403 error to client then. + self._enforce(resp.request, 'download_image', target=image_metadata) + + resp.app_iter = self.cache.get_caching_iter(image_id, image_checksum, + resp.app_iter) + return resp + + def get_status_code(self, response): + """ + Returns the integer status code from the response, which + can be either a Webob.Response (used in testing) or httplib.Response + """ + if hasattr(response, 'status_int'): + return response.status_int + return response.status + + def get_from_cache(self, image_id): + """Called if cache hit""" + with self.cache.open_for_read(image_id) as cache_file: + chunks = utils.chunkiter(cache_file) + for chunk in chunks: + yield chunk diff --git a/code/daisy/daisy/api/middleware/cache_manage.py b/code/daisy/daisy/api/middleware/cache_manage.py new file mode 100755 index 00000000..11c5ffe3 --- /dev/null +++ b/code/daisy/daisy/api/middleware/cache_manage.py @@ -0,0 +1,85 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Image Cache Management API +""" + +from oslo_log import log as logging +import routes + +from daisy.api import cached_images +from daisy.common import wsgi +from daisy import i18n + +LOG = logging.getLogger(__name__) +_LI = i18n._LI + + +class CacheManageFilter(wsgi.Middleware): + def __init__(self, app): + mapper = routes.Mapper() + resource = cached_images.create_resource() + + mapper.connect("/v1/cached_images", + controller=resource, + action="get_cached_images", + conditions=dict(method=["GET"])) + + mapper.connect("/v1/cached_images/{image_id}", + controller=resource, + action="delete_cached_image", + conditions=dict(method=["DELETE"])) + + mapper.connect("/v1/cached_images", + controller=resource, + action="delete_cached_images", + conditions=dict(method=["DELETE"])) + + mapper.connect("/v1/queued_images/{image_id}", + controller=resource, + action="queue_image", + conditions=dict(method=["PUT"])) + + mapper.connect("/v1/queued_images", + controller=resource, + action="get_queued_images", + conditions=dict(method=["GET"])) + + mapper.connect("/v1/queued_images/{image_id}", + controller=resource, + action="delete_queued_image", + conditions=dict(method=["DELETE"])) + + mapper.connect("/v1/queued_images", + controller=resource, + action="delete_queued_images", + conditions=dict(method=["DELETE"])) + + self._mapper = mapper + self._resource = resource + + LOG.info(_LI("Initialized image cache management middleware")) + super(CacheManageFilter, self).__init__(app) + + def process_request(self, request): + # Map request to our resource object if we can handle it + match = self._mapper.match(request.path_info, request.environ) + if match: + request.environ['wsgiorg.routing_args'] = (None, match) + return self._resource(request) + # Pass off downstream if we don't match the request path + else: + return None diff --git a/code/daisy/daisy/api/middleware/context.py b/code/daisy/daisy/api/middleware/context.py new file mode 100755 index 00000000..21edc52f --- /dev/null +++ b/code/daisy/daisy/api/middleware/context.py @@ -0,0 +1,137 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.serialization import jsonutils +from oslo_config import cfg +from oslo_log import log as logging +import webob.exc + +from daisy.api import policy +from daisy.common import wsgi +import daisy.context +from daisy import i18n + +_ = i18n._ + +context_opts = [ + cfg.BoolOpt('owner_is_tenant', default=True, + help=_('When true, this option sets the owner of an image ' + 'to be the tenant. 
Otherwise, the owner of the ' + ' image will be the authenticated user issuing the ' + 'request.')), + cfg.StrOpt('admin_role', default='admin', + help=_('Role used to identify an authenticated user as ' + 'administrator.')), + cfg.BoolOpt('allow_anonymous_access', default=False, + help=_('Allow unauthenticated users to access the API with ' + 'read-only privileges. This only applies when using ' + 'ContextMiddleware.')), +] + +CONF = cfg.CONF +CONF.register_opts(context_opts) + +LOG = logging.getLogger(__name__) + + +class BaseContextMiddleware(wsgi.Middleware): + def process_response(self, resp): + try: + request_id = resp.request.context.request_id + except AttributeError: + LOG.warn(_('Unable to retrieve request id from context')) + else: + resp.headers['x-openstack-request-id'] = 'req-%s' % request_id + return resp + + +class ContextMiddleware(BaseContextMiddleware): + def __init__(self, app): + self.policy_enforcer = policy.Enforcer() + super(ContextMiddleware, self).__init__(app) + + def process_request(self, req): + """Convert authentication information into a request context + + Generate a daisy.context.RequestContext object from the available + authentication headers and store on the 'context' attribute + of the req object. + + :param req: wsgi request object that will be given the context object + :raises webob.exc.HTTPUnauthorized: when value of the X-Identity-Status + header is not 'Confirmed' and + anonymous access is disallowed + """ + if req.headers.get('X-Identity-Status') == 'Confirmed': + req.context = self._get_authenticated_context(req) + elif CONF.allow_anonymous_access: + req.context = self._get_anonymous_context() + else: + raise webob.exc.HTTPUnauthorized() + + def _get_anonymous_context(self): + kwargs = { + 'user': None, + 'tenant': None, + 'roles': [], + 'is_admin': False, + 'read_only': True, + 'policy_enforcer': self.policy_enforcer, + } + return daisy.context.RequestContext(**kwargs) + + def _get_authenticated_context(self, req): + # NOTE(bcwaldon): X-Roles is a csv string, but we need to parse + # it into a list to be useful + roles_header = req.headers.get('X-Roles', '') + roles = [r.strip().lower() for r in roles_header.split(',')] + + # NOTE(bcwaldon): This header is deprecated in favor of X-Auth-Token + deprecated_token = req.headers.get('X-Storage-Token') + + service_catalog = None + if req.headers.get('X-Service-Catalog') is not None: + try: + catalog_header = req.headers.get('X-Service-Catalog') + service_catalog = jsonutils.loads(catalog_header) + except ValueError: + raise webob.exc.HTTPInternalServerError( + _('Invalid service catalog json.')) + + kwargs = { + 'user': req.headers.get('X-User-Id'), + 'tenant': req.headers.get('X-Tenant-Id'), + 'roles': roles, + 'is_admin': CONF.admin_role.strip().lower() in roles, + 'auth_token': req.headers.get('X-Auth-Token', deprecated_token), + 'owner_is_tenant': CONF.owner_is_tenant, + 'service_catalog': service_catalog, + 'policy_enforcer': self.policy_enforcer, + } + + return daisy.context.RequestContext(**kwargs) + + +class UnauthenticatedContextMiddleware(BaseContextMiddleware): + def process_request(self, req): + """Create a context without an authorized user.""" + kwargs = { + 'user': None, + 'tenant': None, + 'roles': [], + 'is_admin': True, + } + + req.context = daisy.context.RequestContext(**kwargs) diff --git a/code/daisy/daisy/api/middleware/gzip.py b/code/daisy/daisy/api/middleware/gzip.py new file mode 100755 index 00000000..c208a6c9 --- /dev/null +++ b/code/daisy/daisy/api/middleware/gzip.py @@ 
-0,0 +1,66 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Use gzip compression if the client accepts it. +""" + +import re + +from oslo_log import log as logging + +from daisy.common import wsgi +from daisy import i18n + +LOG = logging.getLogger(__name__) +_LI = i18n._LI + + +class GzipMiddleware(wsgi.Middleware): + + re_zip = re.compile(r'\bgzip\b') + + def __init__(self, app): + LOG.info(_LI("Initialized gzip middleware")) + super(GzipMiddleware, self).__init__(app) + + def process_response(self, response): + request = response.request + accept_encoding = request.headers.get('Accept-Encoding', '') + + if self.re_zip.search(accept_encoding): + # NOTE(flaper87): Webob removes the content-md5 when + # app_iter is called. We'll keep it and reset it later + checksum = response.headers.get("Content-MD5") + + # NOTE(flaper87): We'll use lazy for images so + # that they can be compressed without reading + # the whole content in memory. Notice that using + # lazy will set response's content-length to 0. + content_type = response.headers["Content-Type"] + lazy = content_type == "application/octet-stream" + + # NOTE(flaper87): Webob takes care of the compression + # process, it will replace the body either with a + # compressed body or a generator - used for lazy com + # pression - depending on the lazy value. + # + # Webob itself will set the Content-Encoding header. + response.encode_content(lazy=lazy) + + if checksum: + response.headers['Content-MD5'] = checksum + + return response diff --git a/code/daisy/daisy/api/middleware/version_negotiation.py b/code/daisy/daisy/api/middleware/version_negotiation.py new file mode 100755 index 00000000..0bdd6382 --- /dev/null +++ b/code/daisy/daisy/api/middleware/version_negotiation.py @@ -0,0 +1,109 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
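# A compact restatement of the negotiation rules the filter below implements;
# match_version_string here is an editorial sketch that assumes both API
# versions are enabled, not part of the patch.
def match_version_string(subject, enable_v1=True, enable_v2=True):
    if subject in ('v1', 'v1.0', 'v1.1') and enable_v1:
        return 1
    elif subject in ('v2', 'v2.0', 'v2.1', 'v2.2') and enable_v2:
        return 2
    raise ValueError(subject)

assert match_version_string('v1.1') == 1
assert match_version_string('v2.0') == 2  # anything else falls back to the version list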
+ +""" +A filter middleware that inspects the requested URI for a version string +and/or Accept headers and attempts to negotiate an API controller to +return +""" + +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.api import versions +from daisy.common import wsgi +from daisy import i18n + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + + +class VersionNegotiationFilter(wsgi.Middleware): + + def __init__(self, app): + self.versions_app = versions.Controller() + super(VersionNegotiationFilter, self).__init__(app) + + def process_request(self, req): + """Try to find a version first in the accept header, then the URL""" + msg = _("Determining version of request: %(method)s %(path)s" + " Accept: %(accept)s") + args = {'method': req.method, 'path': req.path, 'accept': req.accept} + LOG.debug(msg % args) + + # If the request is for /versions, just return the versions container + # TODO(bcwaldon): deprecate this behavior + if req.path_info_peek() == "versions": + return self.versions_app + + accept = str(req.accept) + if accept.startswith('application/vnd.openstack.images-'): + LOG.debug("Using media-type versioning") + token_loc = len('application/vnd.openstack.images-') + req_version = accept[token_loc:] + else: + LOG.debug("Using url versioning") + # Remove version in url so it doesn't conflict later + req_version = self._pop_path_info(req) + + try: + version = self._match_version_string(req_version) + except ValueError: + LOG.warn(_LW("Unknown version. Returning version choices.")) + return self.versions_app + + req.environ['api.version'] = version + req.path_info = ''.join(('/v', str(version), req.path_info)) + LOG.debug("Matched version: v%d", version) + LOG.debug('new path %s', req.path_info) + return None + + def _match_version_string(self, subject): + """ + Given a string, tries to match a major and/or + minor version number. + + :param subject: The string to check + :returns version found in the subject + :raises ValueError if no acceptable version could be found + """ + if subject in ('v1', 'v1.0', 'v1.1') and CONF.enable_v1_api: + major_version = 1 + elif subject in ('v2', 'v2.0', 'v2.1', 'v2.2') and CONF.enable_v2_api: + major_version = 2 + else: + raise ValueError() + + return major_version + + def _pop_path_info(self, req): + """ + 'Pops' off the next segment of PATH_INFO, returns the popped + segment. Do NOT push it onto SCRIPT_NAME. + """ + path = req.path_info + if not path: + return None + while path.startswith('/'): + path = path[1:] + idx = path.find('/') + if idx == -1: + idx = len(path) + r = path[:idx] + req.path_info = path[idx:] + return r diff --git a/code/daisy/daisy/api/network_api.py b/code/daisy/daisy/api/network_api.py new file mode 100755 index 00000000..7bc34d7d --- /dev/null +++ b/code/daisy/daisy/api/network_api.py @@ -0,0 +1,202 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/Initialize network configuration about neutron +""" +import time +from oslo_log import log as logging +import daisy.registry.client.v1.api as registry +from webob.exc import HTTPBadRequest +from neutronclient.v2_0 import client as clientv20 +from daisy.common import exception +LOG = logging.getLogger(__name__) + +class network(object): + """ + network config + """ + def __init__(self, req, neutron_host, keystone_host, cluster_id): + registry.configure_registry_client() + auth_url = 'http://' + keystone_host + ':35357/v2.0' + end_url = 'http://' + neutron_host + ':9696' + params = {'username': 'admin', + 'ca_cert': None, + 'tenant_name': 'admin', + 'insecure': False, + 'auth_url': auth_url, + 'timeout': 30, + 'password': 'keystone', + 'endpoint_url': end_url, + 'auth_strategy': 'keystone' + } + self.cluster_id = cluster_id + self.neutron = clientv20.Client(**params) + try: + cluster = registry.get_cluster_metadata(req.context, cluster_id) + except exception.Invalid as e: + LOG.exception(e.msg) + raise HTTPBadRequest(explanation=e.msg, request=req) + LOG.info("<<>>", cluster, neutron_host, keystone_host) + if 'logic_networks' in cluster and cluster['logic_networks'] is not None: + self.nets = cluster['logic_networks'] + #self._flat_network_uniqueness_check() + if 'routers' in cluster and cluster['routers'] is not None: + self.routers = cluster['routers'] + else: + self.routers = [] + self._network_check() + self.name_mappings = {} + self._network_config() + + def _router_create(self, name): + body = {} + body['router'] = {"name": name, "admin_state_up": True} + router = self.neutron.create_router(body) + return router['router']['id'] + + def _subnet_create(self, net_id, **kwargs): + body = {} + body['subnet'] = {'enable_dhcp': True, + 'network_id': net_id, + 'ip_version': 4 + } + for k in kwargs.keys(): + body['subnet'][k] = kwargs[k] + LOG.info("<<>>", body) + subnet = self.neutron.create_subnet(body) + return subnet['subnet']['id'] + + def _router_link(self): + for router in self.routers: + router_id = self._router_create(router['name']) + if 'external_logic_network' in router: + body = {'network_id': self.name_mappings[router['external_logic_network']]} + self.neutron.add_gateway_router(router_id, body) + if 'subnets' in router: + for i in router['subnets']: + body = {'subnet_id': self.name_mappings[i]} + self.neutron.add_interface_router(router_id, body) + + def _net_subnet_same_router_check(self, ex_network, subnet): + for router in self.routers: + if 'external_logic_network' in router and router['external_logic_network'] == ex_network: + if 'subnets' in router: + for i in router['subnets']: + if i == subnet: + return True + return False + + def _subnet_check_and_create(self, net_id, subnet): + kwargs = {} + key_list = ['name', 'cidr', 'floating_ranges', 'dns_nameservers'] + for i in key_list: + if i not in subnet: + raise exception.Invalid() + kwargs['name'] = subnet['name'] + kwargs['cidr'] = subnet['cidr'] + if len(subnet['dns_nameservers']) != 0: + kwargs['dns_nameservers'] = subnet['dns_nameservers'] + kwargs['allocation_pools'] = [] + if len(subnet['floating_ranges']) != 0: + for pool in subnet['floating_ranges']: + if len(pool) != 2: + raise exception.Invalid() + else: + alloc_pool = {} + alloc_pool['start'] = pool[0] + alloc_pool['end'] = pool[1] + kwargs['allocation_pools'].append(alloc_pool) + if 'gateway' in subnet and subnet['gateway'] is not None: + kwargs['gateway_ip'] = subnet['gateway'] + subnet_id = self._subnet_create(net_id, **kwargs) + return subnet_id + + 
+    def _network_check(self):
+        execute_times = 0
+        while True:
+            try:
+                nets = self.neutron.list_networks()
+            except Exception:
+                LOG.info("cannot connect to neutron server, sleep 5s and retry")
+                time.sleep(5)
+                execute_times += 1
+                if execute_times >= 60:
+                    LOG.info("failed to connect to neutron server")
+                    break
+            else:
+                LOG.info("connected to neutron server successfully")
+                if 'networks' in nets and len(nets['networks']) > 0:
+                    raise exception.Invalid()
+                break
+
+    def _flat_network_uniqueness_check(self):
+        flat_mapping = []
+        for net in self.nets:
+            if net['physnet_name'] in flat_mapping:
+                raise exception.Invalid()
+            else:
+                if net['segmentation_type'].strip() == 'flat':
+                    flat_mapping.append(net['physnet_name'])
+
+    def _network_config(self):
+        for net in self.nets:
+            body = {}
+            if net['type'] == 'external':
+                body['network'] = {'name': net['name'],
+                                   'router:external': True,
+                                   'provider:network_type': net['segmentation_type']}
+                if net['segmentation_type'].strip() == 'flat':
+                    body['network']['provider:physical_network'] = net['physnet_name']
+                elif net['segmentation_type'].strip() == 'vxlan':
+                    if 'segmentation_id' in net and net['segmentation_id'] is not None:
+                        body['network']['provider:segmentation_id'] = net['segmentation_id']
+                else:
+                    if 'segmentation_id' in net and net['segmentation_id'] is not None:
+                        body['network']['provider:segmentation_id'] = net['segmentation_id']
+                    body['network']['provider:physical_network'] = net['physnet_name']
+                if net['shared']:
+                    body['network']['shared'] = True
+                else:
+                    body['network']['shared'] = False
+                external = self.neutron.create_network(body)
+                self.name_mappings[net['name']] = external['network']['id']
+                last_create_subnet = []
+                for subnet in net['subnets']:
+                    if self._net_subnet_same_router_check(net['name'], subnet['name']):
+                        last_create_subnet.append(subnet)
+                    else:
+                        subnet_id = self._subnet_check_and_create(external['network']['id'], subnet)
+                        self.name_mappings[subnet['name']] = subnet_id
+                for subnet in last_create_subnet:
+                    subnet_id = self._subnet_check_and_create(external['network']['id'], subnet)
+                    self.name_mappings[subnet['name']] = subnet_id
+            else:
+                body['network'] = {'name': net['name'],
+                                   'provider:network_type': net['segmentation_type']}
+                if net['segmentation_type'].strip() == 'vlan':
+                    body['network']['provider:physical_network'] = net['physnet_name']
+                if 'segmentation_id' in net and net['segmentation_id'] is not None:
+                    body['network']['provider:segmentation_id'] = net['segmentation_id']
+                if net['shared']:
+                    body['network']['shared'] = True
+                else:
+                    body['network']['shared'] = False
+                inner = self.neutron.create_network(body)
+                self.name_mappings[net['name']] = inner['network']['id']
+                for subnet in net['subnets']:
+                    subnet_id = self._subnet_check_and_create(inner['network']['id'], subnet)
+                    self.name_mappings[subnet['name']] = subnet_id
+        self._router_link()
diff --git a/code/daisy/daisy/api/policy.py b/code/daisy/daisy/api/policy.py
new file mode 100755
index 00000000..c8de0b8b
--- /dev/null
+++ b/code/daisy/daisy/api/policy.py
@@ -0,0 +1,699 @@
+# Copyright (c) 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Policy Engine For Glance""" + +import copy + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_policy import policy + +from daisy.common import exception +import daisy.domain.proxy +from daisy import i18n + + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + +DEFAULT_RULES = policy.Rules.from_dict({ + 'context_is_admin': 'role:admin', + 'default': '@', + 'manage_image_cache': 'role:admin', +}) + +_ = i18n._ +_LI = i18n._LI +_LW = i18n._LW + + +class Enforcer(policy.Enforcer): + """Responsible for loading and enforcing rules""" + + def __init__(self): + if CONF.find_file(CONF.oslo_policy.policy_file): + kwargs = dict(rules=None, use_conf=True) + else: + kwargs = dict(rules=DEFAULT_RULES, use_conf=False) + super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs) + + def add_rules(self, rules): + """Add new rules to the Rules object""" + self.set_rules(rules, overwrite=False, use_conf=self.use_conf) + + def enforce(self, context, action, target): + """Verifies that the action is valid on the target in this context. + + :param context: Glance request context + :param action: String representing the action to be checked + :param target: Dictionary representing the object of the action. + :raises: `daisy.common.exception.Forbidden` + :returns: A non-False value if access is allowed. + """ + credentials = { + 'roles': context.roles, + 'user': context.user, + 'tenant': context.tenant, + } + return super(Enforcer, self).enforce(action, target, credentials, + do_raise=True, + exc=exception.Forbidden, + action=action) + + def check(self, context, action, target): + """Verifies that the action is valid on the target in this context. + + :param context: Glance request context + :param action: String representing the action to be checked + :param target: Dictionary representing the object of the action. + :returns: A non-False value if access is allowed. + """ + credentials = { + 'roles': context.roles, + 'user': context.user, + 'tenant': context.tenant, + } + return super(Enforcer, self).enforce(action, target, credentials) + + def check_is_admin(self, context): + """Check if the given context is associated with an admin role, + as defined via the 'context_is_admin' RBAC rule. + + :param context: Glance request context + :returns: A non-False value if context role is admin. 
+ """ + return self.check(context, 'context_is_admin', context.to_dict()) + + +class ImageRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, image_repo, context, policy): + self.context = context + self.policy = policy + self.image_repo = image_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def get(self, image_id): + try: + image = super(ImageRepoProxy, self).get(image_id) + except exception.NotFound: + self.policy.enforce(self.context, 'get_image', {}) + raise + else: + self.policy.enforce(self.context, 'get_image', ImageTarget(image)) + return image + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_images', {}) + return super(ImageRepoProxy, self).list(*args, **kwargs) + + def save(self, image, from_state=None): + self.policy.enforce(self.context, 'modify_image', image.target) + return super(ImageRepoProxy, self).save(image, from_state=from_state) + + def add(self, image): + self.policy.enforce(self.context, 'add_image', image.target) + return super(ImageRepoProxy, self).add(image) + + +class ImageProxy(daisy.domain.proxy.Image): + + def __init__(self, image, context, policy): + self.image = image + self.target = ImageTarget(image) + self.context = context + self.policy = policy + super(ImageProxy, self).__init__(image) + + @property + def visibility(self): + return self.image.visibility + + @visibility.setter + def visibility(self, value): + if value == 'public': + self.policy.enforce(self.context, 'publicize_image', self.target) + self.image.visibility = value + + @property + def locations(self): + return ImageLocationsProxy(self.image.locations, + self.context, self.policy) + + @locations.setter + def locations(self, value): + if not isinstance(value, (list, ImageLocationsProxy)): + raise exception.Invalid(_('Invalid locations: %s') % value) + self.policy.enforce(self.context, 'set_image_location', self.target) + new_locations = list(value) + if (set([loc['url'] for loc in self.image.locations]) - + set([loc['url'] for loc in new_locations])): + self.policy.enforce(self.context, 'delete_image_location', + self.target) + self.image.locations = new_locations + + def delete(self): + self.policy.enforce(self.context, 'delete_image', self.target) + return self.image.delete() + + def deactivate(self): + LOG.debug('Attempting deactivate') + target = ImageTarget(self.image) + self.policy.enforce(self.context, 'deactivate', target=target) + LOG.debug('Deactivate allowed, continue') + self.image.deactivate() + + def reactivate(self): + LOG.debug('Attempting reactivate') + target = ImageTarget(self.image) + self.policy.enforce(self.context, 'reactivate', target=target) + LOG.debug('Reactivate allowed, continue') + self.image.reactivate() + + def get_data(self, *args, **kwargs): + self.policy.enforce(self.context, 'download_image', self.target) + return self.image.get_data(*args, **kwargs) + + def set_data(self, *args, **kwargs): + self.policy.enforce(self.context, 'upload_image', self.target) + return self.image.set_data(*args, **kwargs) + + def get_member_repo(self, **kwargs): + member_repo = self.image.get_member_repo(**kwargs) + return ImageMemberRepoProxy(member_repo, self.context, self.policy) + + +class ImageFactoryProxy(daisy.domain.proxy.ImageFactory): + + def __init__(self, image_factory, context, policy): + self.image_factory = image_factory + self.context = context + self.policy = policy + proxy_kwargs = 
{'context': self.context, 'policy': self.policy} + super(ImageFactoryProxy, self).__init__(image_factory, + proxy_class=ImageProxy, + proxy_kwargs=proxy_kwargs) + + def new_image(self, **kwargs): + if kwargs.get('visibility') == 'public': + self.policy.enforce(self.context, 'publicize_image', {}) + return super(ImageFactoryProxy, self).new_image(**kwargs) + + +class ImageMemberFactoryProxy(daisy.domain.proxy.ImageMembershipFactory): + + def __init__(self, member_factory, context, policy): + super(ImageMemberFactoryProxy, self).__init__( + member_factory, + image_proxy_class=ImageProxy, + image_proxy_kwargs={'context': context, 'policy': policy}) + + +class ImageMemberRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, member_repo, context, policy): + self.member_repo = member_repo + self.target = ImageTarget(self.member_repo.image) + self.context = context + self.policy = policy + + def add(self, member): + self.policy.enforce(self.context, 'add_member', self.target) + self.member_repo.add(member) + + def get(self, member_id): + self.policy.enforce(self.context, 'get_member', self.target) + return self.member_repo.get(member_id) + + def save(self, member, from_state=None): + self.policy.enforce(self.context, 'modify_member', self.target) + self.member_repo.save(member, from_state=from_state) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_members', self.target) + return self.member_repo.list(*args, **kwargs) + + def remove(self, member): + self.policy.enforce(self.context, 'delete_member', self.target) + self.member_repo.remove(member) + + +class ImageLocationsProxy(object): + + __hash__ = None + + def __init__(self, locations, context, policy): + self.locations = locations + self.context = context + self.policy = policy + + def __copy__(self): + return type(self)(self.locations, self.context, self.policy) + + def __deepcopy__(self, memo): + # NOTE(zhiyan): Only copy location entries, others can be reused. 
+ return type(self)(copy.deepcopy(self.locations, memo), + self.context, self.policy) + + def _get_checker(action, func_name): + def _checker(self, *args, **kwargs): + self.policy.enforce(self.context, action, {}) + method = getattr(self.locations, func_name) + return method(*args, **kwargs) + return _checker + + count = _get_checker('get_image_location', 'count') + index = _get_checker('get_image_location', 'index') + __getitem__ = _get_checker('get_image_location', '__getitem__') + __contains__ = _get_checker('get_image_location', '__contains__') + __len__ = _get_checker('get_image_location', '__len__') + __cast = _get_checker('get_image_location', '__cast') + __cmp__ = _get_checker('get_image_location', '__cmp__') + __iter__ = _get_checker('get_image_location', '__iter__') + + append = _get_checker('set_image_location', 'append') + extend = _get_checker('set_image_location', 'extend') + insert = _get_checker('set_image_location', 'insert') + reverse = _get_checker('set_image_location', 'reverse') + __iadd__ = _get_checker('set_image_location', '__iadd__') + __setitem__ = _get_checker('set_image_location', '__setitem__') + + pop = _get_checker('delete_image_location', 'pop') + remove = _get_checker('delete_image_location', 'remove') + __delitem__ = _get_checker('delete_image_location', '__delitem__') + __delslice__ = _get_checker('delete_image_location', '__delslice__') + + del _get_checker + + +class TaskProxy(daisy.domain.proxy.Task): + + def __init__(self, task, context, policy): + self.task = task + self.context = context + self.policy = policy + super(TaskProxy, self).__init__(task) + + +class TaskStubProxy(daisy.domain.proxy.TaskStub): + + def __init__(self, task_stub, context, policy): + self.task_stub = task_stub + self.context = context + self.policy = policy + super(TaskStubProxy, self).__init__(task_stub) + + +class TaskRepoProxy(daisy.domain.proxy.TaskRepo): + + def __init__(self, task_repo, context, task_policy): + self.context = context + self.policy = task_policy + self.task_repo = task_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(TaskRepoProxy, + self).__init__(task_repo, + task_proxy_class=TaskProxy, + task_proxy_kwargs=proxy_kwargs) + + def get(self, task_id): + self.policy.enforce(self.context, 'get_task', {}) + return super(TaskRepoProxy, self).get(task_id) + + def add(self, task): + self.policy.enforce(self.context, 'add_task', {}) + super(TaskRepoProxy, self).add(task) + + def save(self, task): + self.policy.enforce(self.context, 'modify_task', {}) + super(TaskRepoProxy, self).save(task) + + +class TaskStubRepoProxy(daisy.domain.proxy.TaskStubRepo): + + def __init__(self, task_stub_repo, context, task_policy): + self.context = context + self.policy = task_policy + self.task_stub_repo = task_stub_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(TaskStubRepoProxy, + self).__init__(task_stub_repo, + task_stub_proxy_class=TaskStubProxy, + task_stub_proxy_kwargs=proxy_kwargs) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_tasks', {}) + return super(TaskStubRepoProxy, self).list(*args, **kwargs) + + +class TaskFactoryProxy(daisy.domain.proxy.TaskFactory): + + def __init__(self, task_factory, context, policy): + self.task_factory = task_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(TaskFactoryProxy, self).__init__( + task_factory, + task_proxy_class=TaskProxy, + task_proxy_kwargs=proxy_kwargs) + + 
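+# Example (illustrative sketch, not executed by this module): every proxy in
+# this file funnels authorization through Enforcer in the same way, e.g.
+#
+#     enforcer = Enforcer()
+#     enforcer.enforce(req.context, 'get_image', ImageTarget(image))
+#
+# raises daisy.common.exception.Forbidden when the 'get_image' rule in the
+# configured policy file denies the request context, and returns a truthy
+# value otherwise; check() is the non-raising variant of the same call.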
+class ImageTarget(object): + SENTINEL = object() + + def __init__(self, target): + """Initialize the object + + :param target: Object being targetted + """ + self.target = target + + def __getitem__(self, key): + """Return the value of 'key' from the target. + + If the target has the attribute 'key', return it. + + :param key: value to retrieve + """ + key = self.key_transforms(key) + + value = getattr(self.target, key, self.SENTINEL) + if value is self.SENTINEL: + extra_properties = getattr(self.target, 'extra_properties', None) + if extra_properties is not None: + value = extra_properties[key] + else: + value = None + return value + + def key_transforms(self, key): + if key == 'id': + key = 'image_id' + + return key + + +# Metadef Namespace classes +class MetadefNamespaceProxy(daisy.domain.proxy.MetadefNamespace): + + def __init__(self, namespace, context, policy): + self.namespace_input = namespace + self.context = context + self.policy = policy + super(MetadefNamespaceProxy, self).__init__(namespace) + + +class MetadefNamespaceRepoProxy(daisy.domain.proxy.MetadefNamespaceRepo): + + def __init__(self, namespace_repo, context, namespace_policy): + self.context = context + self.policy = namespace_policy + self.namespace_repo = namespace_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefNamespaceRepoProxy, + self).__init__(namespace_repo, + namespace_proxy_class=MetadefNamespaceProxy, + namespace_proxy_kwargs=proxy_kwargs) + + def get(self, namespace): + self.policy.enforce(self.context, 'get_metadef_namespace', {}) + return super(MetadefNamespaceRepoProxy, self).get(namespace) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_metadef_namespaces', {}) + return super(MetadefNamespaceRepoProxy, self).list(*args, **kwargs) + + def save(self, namespace): + self.policy.enforce(self.context, 'modify_metadef_namespace', {}) + return super(MetadefNamespaceRepoProxy, self).save(namespace) + + def add(self, namespace): + self.policy.enforce(self.context, 'add_metadef_namespace', {}) + return super(MetadefNamespaceRepoProxy, self).add(namespace) + + +class MetadefNamespaceFactoryProxy( + daisy.domain.proxy.MetadefNamespaceFactory): + + def __init__(self, meta_namespace_factory, context, policy): + self.meta_namespace_factory = meta_namespace_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefNamespaceFactoryProxy, self).__init__( + meta_namespace_factory, + meta_namespace_proxy_class=MetadefNamespaceProxy, + meta_namespace_proxy_kwargs=proxy_kwargs) + + +# Metadef Object classes +class MetadefObjectProxy(daisy.domain.proxy.MetadefObject): + + def __init__(self, meta_object, context, policy): + self.meta_object = meta_object + self.context = context + self.policy = policy + super(MetadefObjectProxy, self).__init__(meta_object) + + +class MetadefObjectRepoProxy(daisy.domain.proxy.MetadefObjectRepo): + + def __init__(self, object_repo, context, object_policy): + self.context = context + self.policy = object_policy + self.object_repo = object_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefObjectRepoProxy, + self).__init__(object_repo, + object_proxy_class=MetadefObjectProxy, + object_proxy_kwargs=proxy_kwargs) + + def get(self, namespace, object_name): + self.policy.enforce(self.context, 'get_metadef_object', {}) + return super(MetadefObjectRepoProxy, self).get(namespace, object_name) + + def list(self, *args, 
**kwargs): + self.policy.enforce(self.context, 'get_metadef_objects', {}) + return super(MetadefObjectRepoProxy, self).list(*args, **kwargs) + + def save(self, meta_object): + self.policy.enforce(self.context, 'modify_metadef_object', {}) + return super(MetadefObjectRepoProxy, self).save(meta_object) + + def add(self, meta_object): + self.policy.enforce(self.context, 'add_metadef_object', {}) + return super(MetadefObjectRepoProxy, self).add(meta_object) + + +class MetadefObjectFactoryProxy(daisy.domain.proxy.MetadefObjectFactory): + + def __init__(self, meta_object_factory, context, policy): + self.meta_object_factory = meta_object_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefObjectFactoryProxy, self).__init__( + meta_object_factory, + meta_object_proxy_class=MetadefObjectProxy, + meta_object_proxy_kwargs=proxy_kwargs) + + +# Metadef ResourceType classes +class MetadefResourceTypeProxy(daisy.domain.proxy.MetadefResourceType): + + def __init__(self, meta_resource_type, context, policy): + self.meta_resource_type = meta_resource_type + self.context = context + self.policy = policy + super(MetadefResourceTypeProxy, self).__init__(meta_resource_type) + + +class MetadefResourceTypeRepoProxy( + daisy.domain.proxy.MetadefResourceTypeRepo): + + def __init__(self, resource_type_repo, context, resource_type_policy): + self.context = context + self.policy = resource_type_policy + self.resource_type_repo = resource_type_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefResourceTypeRepoProxy, self).__init__( + resource_type_repo, + resource_type_proxy_class=MetadefResourceTypeProxy, + resource_type_proxy_kwargs=proxy_kwargs) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'list_metadef_resource_types', {}) + return super(MetadefResourceTypeRepoProxy, self).list(*args, **kwargs) + + def get(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_metadef_resource_type', {}) + return super(MetadefResourceTypeRepoProxy, self).get(*args, **kwargs) + + def add(self, resource_type): + self.policy.enforce(self.context, + 'add_metadef_resource_type_association', {}) + return super(MetadefResourceTypeRepoProxy, self).add(resource_type) + + +class MetadefResourceTypeFactoryProxy( + daisy.domain.proxy.MetadefResourceTypeFactory): + + def __init__(self, resource_type_factory, context, policy): + self.resource_type_factory = resource_type_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefResourceTypeFactoryProxy, self).__init__( + resource_type_factory, + resource_type_proxy_class=MetadefResourceTypeProxy, + resource_type_proxy_kwargs=proxy_kwargs) + + +# Metadef namespace properties classes +class MetadefPropertyProxy(daisy.domain.proxy.MetadefProperty): + + def __init__(self, namespace_property, context, policy): + self.namespace_property = namespace_property + self.context = context + self.policy = policy + super(MetadefPropertyProxy, self).__init__(namespace_property) + + +class MetadefPropertyRepoProxy(daisy.domain.proxy.MetadefPropertyRepo): + + def __init__(self, property_repo, context, object_policy): + self.context = context + self.policy = object_policy + self.property_repo = property_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefPropertyRepoProxy, self).__init__( + property_repo, + 
property_proxy_class=MetadefPropertyProxy, + property_proxy_kwargs=proxy_kwargs) + + def get(self, namespace, property_name): + self.policy.enforce(self.context, 'get_metadef_property', {}) + return super(MetadefPropertyRepoProxy, self).get(namespace, + property_name) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_metadef_properties', {}) + return super(MetadefPropertyRepoProxy, self).list( + *args, **kwargs) + + def save(self, namespace_property): + self.policy.enforce(self.context, 'modify_metadef_property', {}) + return super(MetadefPropertyRepoProxy, self).save( + namespace_property) + + def add(self, namespace_property): + self.policy.enforce(self.context, 'add_metadef_property', {}) + return super(MetadefPropertyRepoProxy, self).add( + namespace_property) + + +class MetadefPropertyFactoryProxy(daisy.domain.proxy.MetadefPropertyFactory): + + def __init__(self, namespace_property_factory, context, policy): + self.namespace_property_factory = namespace_property_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefPropertyFactoryProxy, self).__init__( + namespace_property_factory, + property_proxy_class=MetadefPropertyProxy, + property_proxy_kwargs=proxy_kwargs) + + +# Metadef Tag classes +class MetadefTagProxy(daisy.domain.proxy.MetadefTag): + + def __init__(self, meta_tag, context, policy): + self.context = context + self.policy = policy + super(MetadefTagProxy, self).__init__(meta_tag) + + +class MetadefTagRepoProxy(daisy.domain.proxy.MetadefTagRepo): + + def __init__(self, tag_repo, context, tag_policy): + self.context = context + self.policy = tag_policy + self.tag_repo = tag_repo + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefTagRepoProxy, + self).__init__(tag_repo, + tag_proxy_class=MetadefTagProxy, + tag_proxy_kwargs=proxy_kwargs) + + def get(self, namespace, tag_name): + self.policy.enforce(self.context, 'get_metadef_tag', {}) + return super(MetadefTagRepoProxy, self).get(namespace, tag_name) + + def list(self, *args, **kwargs): + self.policy.enforce(self.context, 'get_metadef_tags', {}) + return super(MetadefTagRepoProxy, self).list(*args, **kwargs) + + def save(self, meta_tag): + self.policy.enforce(self.context, 'modify_metadef_tag', {}) + return super(MetadefTagRepoProxy, self).save(meta_tag) + + def add(self, meta_tag): + self.policy.enforce(self.context, 'add_metadef_tag', {}) + return super(MetadefTagRepoProxy, self).add(meta_tag) + + def add_tags(self, meta_tags): + self.policy.enforce(self.context, 'add_metadef_tags', {}) + return super(MetadefTagRepoProxy, self).add_tags(meta_tags) + + +class MetadefTagFactoryProxy(daisy.domain.proxy.MetadefTagFactory): + + def __init__(self, meta_tag_factory, context, policy): + self.meta_tag_factory = meta_tag_factory + self.context = context + self.policy = policy + proxy_kwargs = {'context': self.context, 'policy': self.policy} + super(MetadefTagFactoryProxy, self).__init__( + meta_tag_factory, + meta_tag_proxy_class=MetadefTagProxy, + meta_tag_proxy_kwargs=proxy_kwargs) + + +# Catalog Search classes +class CatalogSearchRepoProxy(object): + + def __init__(self, search_repo, context, search_policy): + self.context = context + self.policy = search_policy + self.search_repo = search_repo + + def search(self, *args, **kwargs): + self.policy.enforce(self.context, 'catalog_search', {}) + return self.search_repo.search(*args, **kwargs) + + def plugins_info(self, *args, **kwargs): + 
self.policy.enforce(self.context, 'catalog_plugins', {}) + return self.search_repo.plugins_info(*args, **kwargs) + + def index(self, *args, **kwargs): + self.policy.enforce(self.context, 'catalog_index', {}) + return self.search_repo.index(*args, **kwargs) diff --git a/code/daisy/daisy/api/property_protections.py b/code/daisy/daisy/api/property_protections.py new file mode 100755 index 00000000..7328f711 --- /dev/null +++ b/code/daisy/daisy/api/property_protections.py @@ -0,0 +1,126 @@ +# Copyright 2013 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.common import exception +import daisy.domain.proxy + + +class ProtectedImageFactoryProxy(daisy.domain.proxy.ImageFactory): + + def __init__(self, image_factory, context, property_rules): + self.image_factory = image_factory + self.context = context + self.property_rules = property_rules + kwargs = {'context': self.context, + 'property_rules': self.property_rules} + super(ProtectedImageFactoryProxy, self).__init__( + image_factory, + proxy_class=ProtectedImageProxy, + proxy_kwargs=kwargs) + + def new_image(self, **kwargs): + extra_props = kwargs.pop('extra_properties', {}) + + extra_properties = {} + for key in extra_props.keys(): + if self.property_rules.check_property_rules(key, 'create', + self.context): + extra_properties[key] = extra_props[key] + else: + raise exception.ReservedProperty(property=key) + return super(ProtectedImageFactoryProxy, self).new_image( + extra_properties=extra_properties, **kwargs) + + +class ProtectedImageRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, image_repo, context, property_rules): + self.context = context + self.image_repo = image_repo + self.property_rules = property_rules + proxy_kwargs = {'context': self.context} + super(ProtectedImageRepoProxy, self).__init__( + image_repo, item_proxy_class=ProtectedImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def get(self, image_id): + return ProtectedImageProxy(self.image_repo.get(image_id), + self.context, self.property_rules) + + def list(self, *args, **kwargs): + images = self.image_repo.list(*args, **kwargs) + return [ProtectedImageProxy(image, self.context, self.property_rules) + for image in images] + + +class ProtectedImageProxy(daisy.domain.proxy.Image): + + def __init__(self, image, context, property_rules): + self.image = image + self.context = context + self.property_rules = property_rules + + self.image.extra_properties = ExtraPropertiesProxy( + self.context, + self.image.extra_properties, + self.property_rules) + super(ProtectedImageProxy, self).__init__(self.image) + + +class ExtraPropertiesProxy(daisy.domain.ExtraProperties): + + def __init__(self, context, extra_props, property_rules): + self.context = context + self.property_rules = property_rules + extra_properties = {} + for key in extra_props.keys(): + if self.property_rules.check_property_rules(key, 'read', + self.context): + extra_properties[key] = extra_props[key] + super(ExtraPropertiesProxy, self).__init__(extra_properties) + + def __getitem__(self, key): + 
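+        # Reads are filtered through the 'read' rule: a key the caller is not
+        # allowed to read behaves exactly like a missing key (KeyError), so a
+        # protected property is indistinguishable from an absent one.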
if self.property_rules.check_property_rules(key, 'read', self.context): + return dict.__getitem__(self, key) + else: + raise KeyError + + def __setitem__(self, key, value): + # NOTE(isethi): Exceptions are raised only for actions update, delete + # and create, where the user proactively interacts with the properties. + # A user cannot request to read a specific property, hence reads do + # raise an exception + try: + if self.__getitem__(key) is not None: + if self.property_rules.check_property_rules(key, 'update', + self.context): + return dict.__setitem__(self, key, value) + else: + raise exception.ReservedProperty(property=key) + except KeyError: + if self.property_rules.check_property_rules(key, 'create', + self.context): + return dict.__setitem__(self, key, value) + else: + raise exception.ReservedProperty(property=key) + + def __delitem__(self, key): + if key not in super(ExtraPropertiesProxy, self).keys(): + raise KeyError + + if self.property_rules.check_property_rules(key, 'delete', + self.context): + return dict.__delitem__(self, key) + else: + raise exception.ReservedProperty(property=key) diff --git a/code/daisy/daisy/api/v1/__init__.py b/code/daisy/daisy/api/v1/__init__.py new file mode 100755 index 00000000..4d042010 --- /dev/null +++ b/code/daisy/daisy/api/v1/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +SUPPORTED_FILTERS = ['name', 'status','cluster_id','id','host_id', 'role_id', 'auto_scale','container_format', 'disk_format', + 'min_ram', 'min_disk', 'size_min', 'size_max', + 'is_public', 'changes-since', 'protected'] + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + +# Metadata which only an admin can change once the image is active +ACTIVE_IMMUTABLE = ('size', 'checksum') diff --git a/code/daisy/daisy/api/v1/clusters.py b/code/daisy/daisy/api/v1/clusters.py new file mode 100755 index 00000000..548f3a37 --- /dev/null +++ b/code/daisy/daisy/api/v1/clusters.py @@ -0,0 +1,742 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
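+#
+# NOTE (illustrative, added for clarity): the controller below follows the
+# Glance v1 pattern; the filter and paging names come from SUPPORTED_FILTERS
+# and SUPPORTED_PARAMS in daisy/api/v1/__init__.py, so a request such as
+#     GET /v1/clusters/detail?name=cluster01&limit=10
+# (example path and values) is translated by _get_filters() and
+# _get_query_params() into filters={'name': 'cluster01'} and a params dict
+# containing {'limit': '10'} before it is passed to the registry client.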
+ +""" +/clusters endpoint for Daisy v1 API +""" +import copy + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob.exc import HTTPServerError +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') +CLUSTER_DEFAULT_NETWORKS = ['PUBLIC', 'DEPLOYMENT', 'PRIVATE', 'EXTERNAL', + 'STORAGE', 'VXLAN', 'MANAGEMENT'] + +class Controller(controller.BaseController): + """ + WSGI controller for clusters resource in Daisy v1 API + + The clusters resource API is a RESTful web service for cluster data. The API + is as follows:: + + GET /clusters -- Returns a set of brief metadata about clusters + GET /clusters -- Returns a set of detailed metadata about + clusters + HEAD /clusters/ -- Return metadata about an cluster with id + GET /clusters/ -- Return cluster data for cluster with id + POST /clusters -- Store cluster data and return metadata about the + newly-stored cluster + PUT /clusters/ -- Update cluster metadata and/or upload cluster + data for a previously-reserved cluster + DELETE /clusters/ -- Delete the cluster with id + """ + def check_params(f): + """ + Cluster add and update operation params valid check. + :param f: Function hanle for 'cluster_add' and 'cluster_update'. + :return: f + """ + def wrapper(*args, **kwargs): + controller, req = args + cluster_meta = kwargs.get('cluster_meta', None) + cluster_id = kwargs.get('id', None) + errmsg = (_("I'm params checker.")) + + LOG.debug(_("Params check for cluster-add or cluster-update begin!")) + + def check_params_range(param, type=None): + ''' + param : input a list ,such as [start, end] + check condition: start must less than end, and existed with pair + return True of False + ''' + if len(param) != 2: + msg = '%s range must be existed in pairs.' % type + raise HTTPForbidden(explanation=msg) + if param[0] == None or param[0] == '': + msg = 'The start value of %s range can not be None.' % type + raise HTTPForbidden(explanation=msg) + if param[1] == None: + msg = 'The end value of %s range can not be None.' % type + raise HTTPForbidden(explanation=msg) + if int(param[0]) > int(param[1]): + msg = 'The start value of the %s range must be less than the end value.' % type + raise HTTPForbidden(explanation=msg) + if type not in ['vni']: + if int(param[0]) < 0 or int(param[0]) > 4096: + msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) + raise HTTPForbidden(explanation=msg) + if int(param[1]) < 0 or int(param[1]) > 4096: + msg = 'Invalid value of the end value(%s) of the %s range .' 
% (param[1], type) + raise HTTPForbidden(explanation=msg) + else: + if int(param[0]) < 0 or int(param[0]) > 16777216: + msg = 'Invalid value of the start value(%s) of the %s range .' % (param[0], type) + raise HTTPForbidden(explanation=msg) + if int(param[1]) < 0 or int(param[1]) > 16777216: + msg = 'Invalid value of the end value(%s) of the %s range .' % (param[1], type) + raise HTTPForbidden(explanation=msg) + return True + + def _check_auto_scale(req, cluster_meta): + if cluster_meta.has_key('auto_scale') and cluster_meta['auto_scale'] =='1': + meta = { "auto_scale":'1' } + params = { 'filters': meta } + clusters = registry.get_clusters_detail(req.context, **params) + if clusters: + if cluster_id: + temp_cluster = [cluster for cluster in clusters if cluster['id'] !=cluster_id] + if temp_cluster: + errmsg = (_("already exist cluster auto_scale is true")) + raise HTTPBadRequest(explanation=errmsg) + else: + errmsg = (_("already exist cluster auto_scale is true")) + raise HTTPBadRequest(explanation=errmsg) + + + def _ip_into_int(ip): + """ + Switch ip string to decimalism integer.. + :param ip: ip string + :return: decimalism integer + """ + return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + + def _is_in_network_range(ip, network): + """ + Check ip is in range + :param ip: Ip will be checked, like:192.168.1.2. + :param network: Ip range,like:192.168.0.0/24. + :return: If ip in range,return True,else return False. + """ + network = network.split('/') + mask = ~(2**(32 - int(network[1])) - 1) + return (_ip_into_int(ip) & mask) == (_ip_into_int(network[0]) & mask) + + def _check_param_nonull_and_valid(values_set, keys_set, valids_set={}): + """ + Check operation params is not null and valid. + :param values_set: Params set. + :param keys_set: Params will be checked. + :param valids_set: + :return: + """ + for k in keys_set: + v = values_set.get(k, None) + if type(v) == type(True) and v == None: + errmsg = (_("Segment %s can't be None." % k)) + raise HTTPBadRequest(explanation=errmsg) + elif type(v) != type(True) and not v: + errmsg = (_("Segment %s can't be None." % k)) + raise HTTPBadRequest(explanation=errmsg) + + for (k, v) in valids_set.items(): + # if values_set.get(k, None) and values_set[k] not in v: + if values_set.get(k, None) and -1 == v.find(values_set[k]): + errmsg = (_("Segment %s is out of valid range." % k)) + raise HTTPBadRequest(explanation=errmsg) + + def _get_network_detail(req, cluster_id, networks_list): + all_network_list = [] + if cluster_id: + all_network_list = registry.get_networks_detail(req.context, cluster_id) + + if networks_list: + for net_id in networks_list: + network_detail = registry.get_network_metadata(req.context, net_id) + all_network_list.append(network_detail) + + all_private_network_list = \ + [network for network in all_network_list if network['network_type'] == "PRIVATE"] + return all_private_network_list + + def _check_cluster_add_parameters(req, cluster_meta): + """ + By params set segment,check params is available. 
+ :param req: http req + :param cluster_meta: params set + :return:error message + """ + if cluster_meta.has_key('nodes'): + orig_keys = list(eval(cluster_meta['nodes'])) + for host_id in orig_keys: + controller._raise_404_if_host_deleted(req, host_id) + + if cluster_meta.has_key('networks'): + orig_keys = list(eval(cluster_meta['networks'])) + network_with_same_name = [] + for network_id in orig_keys: + network_name = controller._raise_404_if_network_deleted(req, network_id) + if network_name in CLUSTER_DEFAULT_NETWORKS: + return (_("Network name %s of %s already exits" + " in the cluster, please check." % + (network_name, network_id))) + if network_name in network_with_same_name: + return (_("Network name can't be same with each other in 'networks[]', " + "please check.")) + network_with_same_name.append(network_name) + + # checkout network_params-------------------------------------------------- + if cluster_meta.get('networking_parameters', None): + networking_parameters = eval(cluster_meta['networking_parameters']) + _check_param_nonull_and_valid(networking_parameters, + ['segmentation_type']) + segmentation_type_set = networking_parameters['segmentation_type'].split(",") + for segmentation_type in segmentation_type_set: + if segmentation_type not in ['vlan', 'vxlan', 'flat', 'gre']: + return (_("Segmentation_type of networking_parameters is not valid.")) + if segmentation_type =='vxlan': + _check_param_nonull_and_valid(networking_parameters,['vni_range']) + elif segmentation_type =='gre': + _check_param_nonull_and_valid(networking_parameters,['gre_id_range']) + + vlan_range = networking_parameters.get("vlan_range", None) + vni_range = networking_parameters.get("vni_range", None) + gre_id_range = networking_parameters.get("gre_id_range", None) + #if (vlan_range and len(vlan_range) != 2) \ + # or (vni_range and len(vni_range) != 2) \ + # or (gre_id_range and len(gre_id_range) != 2): + # return (_("Range params must be pair.")) + if vlan_range: + check_params_range(vlan_range, 'vlan') + if vni_range: + check_params_range(vni_range, 'vni') + if gre_id_range: + check_params_range(gre_id_range, 'gre_id') + + # check logic_networks-------------------------------------------------- + subnet_name_set = [] # record all subnets's name + logic_network_name_set = [] # record all logic_network's name + subnets_in_logic_network = {} + external_logic_network_name = [] + if cluster_meta.get('logic_networks', None): + # get physnet_name list + all_private_cluster_networks_list = _get_network_detail( + req, cluster_id, + cluster_meta.get('networks', None) + if not isinstance(cluster_meta.get('networks', None), unicode) + else eval(cluster_meta.get('networks', None))) + if not all_private_cluster_networks_list: + LOG.info("Private network is empty in db, it lead logical network config invalid.") + physnet_name_set = [net['name'] for net in all_private_cluster_networks_list] + + logic_networks = eval(cluster_meta['logic_networks']) + for logic_network in logic_networks: + subnets_in_logic_network[logic_network['name']] = [] + + # We force setting the physnet_name of flat logical network to 'flat'. 
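+                    # (Clarification: the branch below does not reject such a
+                    # network; it coerces physnet_name to 'physnet1' and type
+                    # to 'external' and adds 'physnet1' to the allowed
+                    # physnet set.)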
+ if logic_network.get('segmentation_type', None) == "flat": + if logic_network['physnet_name'] != "physnet1" or logic_network['type'] != "external": + LOG.info("When 'segmentation_type' is flat the 'physnet_name' and 'type' segmentation" + "must be 'physnet1'' and 'external'', but got '%s' and '%s'.We have changed" + "it to the valid value.") + logic_network['physnet_name'] = "physnet1" + logic_network['type'] = "external" + physnet_name_set.append("physnet1") + + _check_param_nonull_and_valid( + logic_network, + ['name', 'type', 'physnet_name', 'segmentation_type', 'shared', 'segmentation_id'], + {'segmentation_type' : networking_parameters['segmentation_type'], + 'physnet_name' : ','.join(physnet_name_set), + 'type' : ','.join(["external", "internal"])}) + + if logic_network['type'] == "external": + external_logic_network_name.append(logic_network['name']) + + logic_network_name_set.append(logic_network['name']) + + # By segmentation_type check segmentation_id is in range + segmentation_id = logic_network.get('segmentation_id', None) + if segmentation_id: + err = "Segmentation_id is out of private network %s of %s.Vaild range is [%s, %s]." + segmentation_type = logic_network.get('segmentation_type', None) + if 0 == cmp(segmentation_type, "vlan"): + private_vlan_range = \ + [(net['vlan_start'], net['vlan_end']) + for net in all_private_cluster_networks_list + if logic_network['physnet_name'] == net['name']] + + if private_vlan_range and \ + not private_vlan_range[0][0] or \ + not private_vlan_range[0][1]: + return (_("Private network plane %s don't config the 'vlan_start' or " + "'vlan_end' parameter.")) + + if int(segmentation_id) not in range(private_vlan_range[0][0], private_vlan_range[0][1]): + return (_(err % ("vlan_range", logic_network['physnet_name'], + private_vlan_range[0][0], private_vlan_range[0][1]))) + elif 0 == cmp(segmentation_type, "vxlan") and vni_range: + if int(segmentation_id) not in range(vni_range[0], vni_range[1]): + return (_("Segmentation_id is out of vni_range.")) + elif 0 == cmp(segmentation_type, "gre") and gre_id_range: + if int(segmentation_id) not in range(gre_id_range[0], gre_id_range[1]): + return (_("Segmentation_id is out of gre_id_range.")) + + # checkout subnets params-------------------------------------------------- + if logic_network.get('subnets', None): + subnet_data = logic_network['subnets'] + for subnet in subnet_data: + _check_param_nonull_and_valid( + subnet, + ['name', 'cidr']) + subnet_name_set.append(subnet['name']) + # By cidr check floating_ranges is in range and not overlap + #---------------start----- + if subnet['gateway'] and not _is_in_network_range(subnet['gateway'], subnet['cidr']): + return (_("Wrong gateway format.")) + if subnet['floating_ranges']: + inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + floating_ranges_with_int_ip = list() + sorted_floating_ranges = list() + sorted_floating_ranges_with_int_ip = list() + for floating_ip in subnet['floating_ranges']: + if len(floating_ip) != 2: + return (_("Floating ip must be paris.")) + ip_start = _ip_into_int(floating_ip[0]) + ip_end = _ip_into_int(floating_ip[1]) + if ip_start > ip_end: + return (_("Wrong floating ip format.")) + floating_ranges_with_int_ip.append([ip_start, ip_end]) + sorted_floating_ranges_with_int_ip = sorted(floating_ranges_with_int_ip, key=lambda x : x[0]) + for ip_range in sorted_floating_ranges_with_int_ip: + ip_start = inter_ip(ip_range[0]) + ip_end = inter_ip(ip_range[1]) + sorted_floating_ranges.append([ip_start, ip_end]) 
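+                            # Example (illustrative): floating_ranges
+                            # [['192.168.1.30', '192.168.1.40'],
+                            #  ['192.168.1.10', '192.168.1.20']]
+                            # are sorted by start address above; the loop below
+                            # then rejects any range outside the cidr and any
+                            # pair of ranges that overlap each other.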
+ + last_rang_ip = [] + for floating in sorted_floating_ranges: + if not _is_in_network_range(floating[0], subnet['cidr']) \ + or not _is_in_network_range(floating[1], subnet['cidr']): + return (_("Floating ip or gateway is out of range cidr.")) + + err_list = [err for err in last_rang_ip if _ip_into_int(floating[0]) < err] + if last_rang_ip and 0 < len(err_list): + return (_("Between floating ip range can not be overlap.")) + last_rang_ip.append(_ip_into_int(floating[1])) + subnets_in_logic_network[logic_network['name']].append(subnet['name']) + + # check external logical network uniqueness + if len(external_logic_network_name) > 1: + return (_("External logical network is uniqueness in the cluster.Got %s." % + ",".join(external_logic_network_name))) + + # check logic_network_name uniqueness + if len(logic_network_name_set) != len(set(logic_network_name_set)): + return (_("Logic network name segment is repetition.")) + + # check subnet_name uniqueness + if len(subnet_name_set) != len(set(subnet_name_set)): + return (_("Subnet name segment is repetition.")) + + cluster_meta['logic_networks'] = unicode(logic_networks) + + # check routers-------------------------------------------------- + subnet_name_set_deepcopy = copy.deepcopy(subnet_name_set) + router_name_set = [] # record all routers name + if cluster_meta.get('routers', None): + router_data = eval(cluster_meta['routers']) + for router in router_data: + _check_param_nonull_and_valid(router, ['name']) + + # check relevance logic_network is valid + external_logic_network_data = router.get('external_logic_network', None) + if external_logic_network_data and \ + external_logic_network_data not in logic_network_name_set: + return (_("Logic_network %s is not valid range." % external_logic_network_data)) + router_name_set.append(router['name']) + + # check relevance subnets is valid + for subnet in router.get('subnets', []): + if subnet not in subnet_name_set: + return (_("Subnet %s is not valid range." % subnet)) + + # subnet cann't relate with two routers + if subnet not in subnet_name_set_deepcopy: + return (_("The subnet can't be related with multiple routers.")) + subnet_name_set_deepcopy.remove(subnet) + + if external_logic_network_data and \ + subnets_in_logic_network[external_logic_network_data] and \ + set(subnets_in_logic_network[external_logic_network_data]). 
\ + issubset(set(router['subnets'])): + return (_("Logic network's subnets is all related with a router, it's not allowed.")) + + # check subnet_name uniqueness + if len(router_name_set) != len(set(router_name_set)): + return (_("Router name segment is repetition.")) + return (_("I'm params checker.")) + _check_auto_scale(req, cluster_meta) + check_result = _check_cluster_add_parameters(req, cluster_meta) + if 0 != cmp(check_result, errmsg): + LOG.exception(_("Params check for cluster-add or cluster-update is failed!")) + raise HTTPBadRequest(explanation=check_result) + + LOG.debug(_("Params check for cluster-add or cluster-update is done!")) + + return f(*args, **kwargs) + return wrapper + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _raise_404_if_host_deleted(self, req, host_id): + host = self.get_host_meta_or_404(req, host_id) + if host['deleted']: + msg = _("Host with identifier %s has been deleted.") % host_id + raise HTTPNotFound(msg) + + def _raise_404_if_network_deleted(self, req, network_id): + network = self.get_network_meta_or_404(req, network_id) + if network['deleted']: + msg = _("Network with identifier %s has been deleted.") % network_id + raise HTTPNotFound(msg) + return network.get('name', None) + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + @utils.mutating + @check_params + def add_cluster(self, req, cluster_meta): + """ + Adds a new cluster to Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about cluster + + :raises HTTPBadRequest if x-cluster-name is missing + """ + self._enforce(req, 'add_cluster') + cluster_name = cluster_meta["name"] + if not cluster_name: + raise ValueError('cluster name is null!') + cluster_name_split = cluster_name.split('_') + for cluster_name_info in cluster_name_split : + if not cluster_name_info.isalnum(): + raise ValueError('cluster name must be numbers or letters or underscores !') + if cluster_meta.get('nodes', None): + orig_keys = list(eval(cluster_meta['nodes'])) + for host_id in orig_keys: + self._raise_404_if_host_deleted(req, host_id) + node = registry.get_host_metadata(req.context, host_id) + if node['status'] == 'in-cluster': + msg = _("Forbidden to add host %s with status " + "'in-cluster' in another cluster") % host_id + raise HTTPForbidden(explanation=msg) + if node.get('interfaces', None): + interfaces = node['interfaces'] + input_host_pxe_info = [interface for interface in interfaces + if interface.get('is_deployment', None) == 1] + if not input_host_pxe_info and node.get('os_status',None) != 'active': + msg = _("The host %s has more than one dhcp server, " + "please choose one interface for deployment") % host_id + raise HTTPServerError(explanation=msg) + print cluster_name + print cluster_meta + cluster_meta = registry.add_cluster_metadata(req.context, cluster_meta) + return {'cluster_meta': cluster_meta} + + @utils.mutating + def delete_cluster(self, req, id): + """ + Deletes a cluster from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about cluster + + :raises HTTPBadRequest if x-cluster-name is missing + """ + self._enforce(req, 'delete_cluster') + + #cluster = self.get_cluster_meta_or_404(req, id) + print "delete_cluster:%s" % id + try: + registry.delete_cluster_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find cluster to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete cluster: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("cluster %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('cluster.delete', cluster) + return Response(body='', status=200) + + @utils.mutating + def get_cluster(self, req, id): + """ + Returns metadata about an cluster in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque cluster identifier + + :raises HTTPNotFound if cluster metadata is not available to user + """ + self._enforce(req, 'get_cluster') + cluster_meta = self.get_cluster_meta_or_404(req, id) + return {'cluster_meta': cluster_meta} + + def detail(self, req): + """ + Returns detailed information for all available clusters + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'clusters': [ + {'id': , + 'name': , + 'nodes': , + 'networks': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... 
+ ]} + """ + self._enforce(req, 'get_clusters') + params = self._get_query_params(req) + try: + clusters = registry.get_clusters_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(clusters=clusters) + + @utils.mutating + @check_params + def update_cluster(self, req, id, cluster_meta): + """ + Updates an existing cluster with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque cluster identifier + + :retval Returns the updated cluster information as a mapping + """ + self._enforce(req, 'update_cluster') + if cluster_meta.has_key('nodes'): + orig_keys = list(eval(cluster_meta['nodes'])) + for host_id in orig_keys: + self._raise_404_if_host_deleted(req, host_id) + node = registry.get_host_metadata(req.context, host_id) + if node['status'] == 'in-cluster': + host_cluster = registry.get_host_clusters(req.context, host_id) + if host_cluster[0]['cluster_id'] != id: + msg = _("Forbidden to add host %s with status " + "'in-cluster' in another cluster") % host_id + raise HTTPForbidden(explanation=msg) + if node.get('interfaces', None): + interfaces = node['interfaces'] + input_host_pxe_info = [interface for interface in interfaces + if interface.get('is_deployment', None) == 1] + if not input_host_pxe_info and node.get('os_status', None) != 'active': + msg = _("The host %s has more than one dhcp server, " + "please choose one interface for deployment") % host_id + raise HTTPServerError(explanation=msg) + if cluster_meta.has_key('networks'): + orig_keys = list(eval(cluster_meta['networks'])) + for network_id in orig_keys: + self._raise_404_if_network_deleted(req, network_id) + orig_cluster_meta = self.get_cluster_meta_or_404(req, id) + + # Do not allow any updates on a deleted cluster. + # Fix for LP Bug #1060930 + if orig_cluster_meta['deleted']: + msg = _("Forbidden to update deleted cluster.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + cluster_meta = registry.update_cluster_metadata(req.context, + id, + cluster_meta) + + except exception.Invalid as e: + msg = (_("Failed to update cluster metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find cluster to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update cluster: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Cluster operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('cluster.update', cluster_meta) + + return {'cluster_meta': cluster_meta} + +class ProjectDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["cluster_meta"] = utils.get_cluster_meta(request) + return result + + def add_cluster(self, request): + return self._deserialize(request) + + def update_cluster(self, request): + return self._deserialize(request) + +class ProjectSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_cluster(self, response, result): + cluster_meta = result['cluster_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(cluster=cluster_meta)) + return response + + def update_cluster(self, response, result): + cluster_meta = result['cluster_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(cluster=cluster_meta)) + return response + + def delete_cluster(self, response, result): + cluster_meta = result['cluster_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(cluster=cluster_meta)) + return response + def get_cluster(self, response, result): + cluster_meta = result['cluster_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(cluster=cluster_meta)) + return response + +def create_resource(): + """Projects resource factory method""" + deserializer = ProjectDeserializer() + serializer = ProjectSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/components.py b/code/daisy/daisy/api/v1/components.py new file mode 100755 index 00000000..f235a56f --- /dev/null +++ b/code/daisy/daisy/api/v1/components.py @@ -0,0 +1,328 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/components endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for components resource in Daisy v1 API + + The components resource API is a RESTful web service for component data. The API + is as follows:: + + GET /components -- Returns a set of brief metadata about components + GET /components/detail -- Returns a set of detailed metadata about + components + HEAD /components/ -- Return metadata about an component with id + GET /components/ -- Return component data for component with id + POST /components -- Store component data and return metadata about the + newly-stored component + PUT /components/ -- Update component metadata and/or upload component + data for a previously-reserved component + DELETE /components/ -- Delete the component with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + @utils.mutating + def add_component(self, req, component_meta): + """ + Adds a new component to Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about component + + :raises HTTPBadRequest if x-component-name is missing + """ + self._enforce(req, 'add_component') + #component_id=component_meta["id"] + #component_owner=component_meta["owner"] + component_name = component_meta["name"] + component_description = component_meta["description"] + #print component_id + #print component_owner + print component_name + print component_description + component_meta = registry.add_component_metadata(req.context, component_meta) + + return {'component_meta': component_meta} + + @utils.mutating + def delete_component(self, req, id): + """ + Deletes a component from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about component + + :raises HTTPBadRequest if x-component-name is missing + """ + self._enforce(req, 'delete_component') + + #component = self.get_component_meta_or_404(req, id) + print "delete_component:%s" % id + try: + registry.delete_component_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find component to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete component: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("component %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('component.delete', component) + return Response(body='', status=200) + + @utils.mutating + def get_component(self, req, id): + """ + Returns metadata about an component in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque component identifier + + :raises HTTPNotFound if component metadata is not available to user + """ + self._enforce(req, 'get_component') + component_meta = self.get_component_meta_or_404(req, id) + return {'component_meta': component_meta} + + def detail(self, req): + """ + Returns detailed information for all available components + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'components': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + self._enforce(req, 'get_components') + params = self._get_query_params(req) + try: + components = registry.get_components_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(components=components) + + @utils.mutating + def update_component(self, req, id, component_meta): + """ + Updates an existing component with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'modify_image') + orig_component_meta = self.get_component_meta_or_404(req, id) + + # Do not allow any updates on a deleted image. 
+ # Fix for LP Bug #1060930 + if orig_component_meta['deleted']: + msg = _("Forbidden to update deleted component.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + component_meta = registry.update_component_metadata(req.context, + id, + component_meta) + + except exception.Invalid as e: + msg = (_("Failed to update component metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find component to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update component: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('component.update', component_meta) + + return {'component_meta': component_meta} + +class ComponentDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["component_meta"] = utils.get_component_meta(request) + return result + + def add_component(self, request): + return self._deserialize(request) + + def update_component(self, request): + return self._deserialize(request) + +class ComponentSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_component(self, response, result): + component_meta = result['component_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(component=component_meta)) + return response + + def delete_component(self, response, result): + component_meta = result['component_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(component=component_meta)) + return response + def get_component(self, response, result): + component_meta = result['component_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(component=component_meta)) + return response + +def create_resource(): + """Components resource factory method""" + deserializer = ComponentDeserializer() + serializer = ComponentSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/config_files.py b/code/daisy/daisy/api/v1/config_files.py new file mode 100755 index 00000000..e9899fdf --- /dev/null +++ b/code/daisy/daisy/api/v1/config_files.py @@ -0,0 +1,325 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/config_files endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for config_files resource in Daisy v1 API + + The config_files resource API is a RESTful web service for config_file data. The API + is as follows:: + + GET /config_files -- Returns a set of brief metadata about config_files + GET /config_files/detail -- Returns a set of detailed metadata about + config_files + HEAD /config_files/ -- Return metadata about an config_file with id + GET /config_files/ -- Return config_file data for config_file with id + POST /config_files -- Store config_file data and return metadata about the + newly-stored config_file + PUT /config_files/ -- Update config_file metadata and/or upload config_file + data for a previously-reserved config_file + DELETE /config_files/ -- Delete the config_file with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params 
from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + @utils.mutating + def add_config_file(self, req, config_file_meta): + """ + Adds a new config_file to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config_file + + :raises HTTPBadRequest if x-config_file-name is missing + """ + self._enforce(req, 'add_config_file') + #config_file_id=config_file_meta["id"] + config_file_name = config_file_meta["name"] + config_file_description = config_file_meta["description"] + #print config_file_id + print config_file_name + print config_file_description + config_file_meta = registry.add_config_file_metadata(req.context, config_file_meta) + + return {'config_file_meta': config_file_meta} + + @utils.mutating + def delete_config_file(self, req, id): + """ + Deletes a config_file from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config_file + + :raises HTTPBadRequest if x-config_file-name is missing + """ + self._enforce(req, 'delete_config_file') + + try: + registry.delete_config_file_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find config_file to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete config_file: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("config_file %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('config_file.delete', config_file) + return Response(body='', status=200) + + @utils.mutating + def get_config_file(self, req, id): + """ + Returns metadata about an config_file in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque config_file identifier + + :raises HTTPNotFound if config_file metadata is not available to user + """ + self._enforce(req, 'get_config_file') + config_file_meta = self.get_config_file_meta_or_404(req, id) + return {'config_file_meta': config_file_meta} + + def detail(self, req): + """ + Returns detailed information for all available config_files + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'config_files': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + self._enforce(req, 'get_config_files') + params = self._get_query_params(req) + try: + config_files = registry.get_config_files_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(config_files=config_files) + + @utils.mutating + def update_config_file(self, req, id, config_file_meta): + """ + Updates an existing config_file with the registry. 
+ + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'modify_image') + orig_config_file_meta = self.get_config_file_meta_or_404(req, id) + + # Do not allow any updates on a deleted image. + # Fix for LP Bug #1060930 + if orig_config_file_meta['deleted']: + msg = _("Forbidden to update deleted config_file.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + config_file_meta = registry.update_config_file_metadata(req.context, + id, + config_file_meta) + + except exception.Invalid as e: + msg = (_("Failed to update config_file metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find config_file to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update config_file: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('config_file operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('config_file.update', config_file_meta) + + return {'config_file_meta': config_file_meta} + +class Config_fileDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["config_file_meta"] = utils.get_config_file_meta(request) + return result + + def add_config_file(self, request): + return self._deserialize(request) + + def update_config_file(self, request): + return self._deserialize(request) + +class Config_fileSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_config_file(self, response, result): + config_file_meta = result['config_file_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_file=config_file_meta)) + return response + + def delete_config_file(self, response, result): + config_file_meta = result['config_file_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_file=config_file_meta)) + return response + + def get_config_file(self, response, result): + config_file_meta = result['config_file_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_file=config_file_meta)) + return response + +def create_resource(): + """config_files resource factory method""" + deserializer = Config_fileDeserializer() + serializer = Config_fileSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/config_sets.py b/code/daisy/daisy/api/v1/config_sets.py new file mode 100755 index 00000000..c275267c --- /dev/null +++ b/code/daisy/daisy/api/v1/config_sets.py @@ -0,0 +1,434 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/config_sets endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry +from daisy.api.configset import manager + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for config_sets resource in Daisy v1 API + + The config_sets resource API is a RESTful web service for config_set data. 
The API + is as follows:: + + GET /config_sets -- Returns a set of brief metadata about config_sets + GET /config_sets/detail -- Returns a set of detailed metadata about + config_sets + HEAD /config_sets/ -- Return metadata about an config_set with id + GET /config_sets/ -- Return config_set data for config_set with id + POST /config_sets -- Store config_set data and return metadata about the + newly-stored config_set + PUT /config_sets/ -- Update config_set metadata and/or upload config_set + data for a previously-reserved config_set + DELETE /config_sets/ -- Delete the config_set with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("cluster with identifier %s has been deleted.") % cluster_id + raise HTTPNotFound(msg) + + @utils.mutating + def add_config_set(self, req, config_set_meta): + """ + Adds a new config_set to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config_set + + :raises HTTPBadRequest if x-config_set-name is missing + """ + self._enforce(req, 'add_config_set') + #config_set_id=config_set_meta["id"] + config_set_name = config_set_meta["name"] + config_set_description = config_set_meta["description"] + #print config_set_id + print config_set_name + print config_set_description + config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta) + + return {'config_set_meta': config_set_meta} + + @utils.mutating + def delete_config_set(self, req, id): + """ + Deletes a config_set from Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config_set + + :raises HTTPBadRequest if x-config_set-name is missing + """ + self._enforce(req, 'delete_config_set') + + try: + registry.delete_config_set_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find config_set to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete config_set: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("config_set %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('config_set.delete', config_set) + return Response(body='', status=200) + + @utils.mutating + def get_config_set(self, req, id): + """ + Returns metadata about an config_set in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque config_set identifier + + :raises HTTPNotFound if config_set metadata is not available to user + """ + self._enforce(req, 'get_config_set') + config_set_meta = self.get_config_set_meta_or_404(req, id) + return {'config_set_meta': config_set_meta} + + def detail(self, req): + """ + Returns detailed information for all available config_sets + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'config_sets': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + self._enforce(req, 'get_config_sets') + params = self._get_query_params(req) + try: + config_sets = registry.get_config_sets_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(config_sets=config_sets) + + @utils.mutating + def update_config_set(self, req, id, config_set_meta): + """ + Updates an existing config_set with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'modify_image') + orig_config_set_meta = self.get_config_set_meta_or_404(req, id) + + # Do not allow any updates on a deleted image. + # Fix for LP Bug #1060930 + if orig_config_set_meta['deleted']: + msg = _("Forbidden to update deleted config_set.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + config_set_meta = registry.update_config_set_metadata(req.context, + id, + config_set_meta) + + except exception.Invalid as e: + msg = (_("Failed to update config_set metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find config_set to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update config_set: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('config_set operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('config_set.update', config_set_meta) + + return {'config_set_meta': config_set_meta} + + def _raise_404_if_role_exist(self,req,config_set_meta): + role_id_list=[] + try: + roles = registry.get_roles_detail(req.context) + for role in roles: + for role_name in eval(config_set_meta['role']): + if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name: + role_id_list.append(role['id']) + break + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role_id_list + + @utils.mutating + def cluster_config_set_update(self, req, config_set_meta): + if config_set_meta.has_key('cluster'): + orig_cluster = str(config_set_meta['cluster']) + self._raise_404_if_cluster_deleted(req, orig_cluster) + try: + if config_set_meta.get('role',None): + role_id_list=self._raise_404_if_role_exist(req,config_set_meta) + if len(role_id_list) == len(eval(config_set_meta['role'])): + for role_id in role_id_list: + backend=manager.configBackend('clushshell', req, role_id) + backend.push_config() + else: + msg = "the role is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + else: + roles = registry.get_roles_detail(req.context) + for role in roles: + if role['cluster_id'] == config_set_meta['cluster']: + backend=manager.configBackend('clushshell', req, role['id']) + backend.push_config() + + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + config_status={"status":"config successful"} + return {'config_set':config_status} + else: + msg = "the cluster is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + + @utils.mutating + def cluster_config_set_progress(self, req, config_set_meta): + role_list = [] + if config_set_meta.has_key('cluster'): + orig_cluster = str(config_set_meta['cluster']) + self._raise_404_if_cluster_deleted(req, orig_cluster) + try: + if config_set_meta.get('role',None): + role_id_list=self._raise_404_if_role_exist(req,config_set_meta) + if len(role_id_list) == len(eval(config_set_meta['role'])): + for role_id in role_id_list: + role_info = {} + role_meta=registry.get_role_metadata(req.context, role_id) + role_info['role-name']=role_meta['name'] + role_info['config_set_update_progress']=role_meta['config_set_update_progress'] + role_list.append(role_info) + else: + msg = "the role is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + else: + roles = registry.get_roles_detail(req.context) + for role in roles: + if role['cluster_id'] == config_set_meta['cluster']: + role_info = {} + role_info['role-name']=role['name'] + role_info['config_set_update_progress']=role['config_set_update_progress'] + role_list.append(role_info) + + except exception.Invalid as e: + raise 
HTTPBadRequest(explanation=e.msg, request=req) + return role_list + + else: + msg = "the cluster is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + +class Config_setDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["config_set_meta"] = utils.get_config_set_meta(request) + return result + + def add_config_set(self, request): + return self._deserialize(request) + + def update_config_set(self, request): + return self._deserialize(request) + + def cluster_config_set_update(self, request): + return self._deserialize(request) + + def cluster_config_set_progress(self, request): + return self._deserialize(request) + +class Config_setSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_config_set(self, response, result): + config_set_meta = result['config_set_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_set=config_set_meta)) + return response + + def delete_config_set(self, response, result): + config_set_meta = result['config_set_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_set=config_set_meta)) + return response + + def get_config_set(self, response, result): + config_set_meta = result['config_set_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_set=config_set_meta)) + return response + + def cluster_config_set_update(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def cluster_config_set_progress(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config_set=result)) + return response + +def create_resource(): + """config_sets resource factory method""" + deserializer = Config_setDeserializer() + serializer = Config_setSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/configs.py b/code/daisy/daisy/api/v1/configs.py new file mode 100755 index 00000000..15bfd303 --- /dev/null +++ b/code/daisy/daisy/api/v1/configs.py @@ -0,0 +1,301 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
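cluster_config_set_update() above walks every role bound to the given cluster (or only the roles named in the request) and asks the configset manager's 'clushshell' backend to push the pending configuration; on success the serializer answers with a 201 and a small status document. A hedged client-side sketch of the request and response shapes, assuming the handler accepts a JSON body of this form and is reachable at the path shown; the URL, port and identifiers are placeholders, not taken from this patch:

    import json

    import requests  # hypothetical direct client; normally daisyclient is used

    # 'role' is sent as a stringified Python list because the handler eval()s it.
    body = {"cluster": "<cluster-id>", "role": "['CONTROLLER_HA']"}
    resp = requests.post("http://127.0.0.1:19292/v1/cluster_config_set_update",
                         headers={"Content-Type": "application/json"},
                         data=json.dumps(body))
    print(resp.status_code)   # 201 when the push is accepted
    print(resp.json())        # {"config_set": {"status": "config successful"}}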
+ +""" +/configs endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for configs resource in Daisy v1 API + + The configs resource API is a RESTful web service for config data. The API + is as follows:: + + GET /configs -- Returns a set of brief metadata about configs + GET /configs/detail -- Returns a set of detailed metadata about + configs + HEAD /configs/ -- Return metadata about an config with id + GET /configs/ -- Return config data for config with id + POST /configs -- Store config data and return metadata about the + newly-stored config + PUT /configs/ -- Update config metadata and/or upload config + data for a previously-reserved config + DELETE /configs/ -- Delete the config with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. 
+ + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + def _raise_404_if_config_set_delete(self, req, config_set_id): + config_set = self.get_config_set_meta_or_404(req, config_set_id) + if config_set['deleted']: + msg = _("config_set with identifier %s has been deleted.") % config_set_id + raise HTTPNotFound(msg) + + def _raise_404_if_config_file_delete(self, req, config_file_id): + config_file = self.get_config_file_meta_or_404(req, config_file_id) + if config_file['deleted']: + msg = _("config_file with identifier %s has been deleted.") % config_file_id + raise HTTPNotFound(msg) + def _raise_404_if_role_exist(self,req,config_meta): + role_id="" + try: + roles = registry.get_roles_detail(req.context) + for role in roles: + if role['cluster_id'] == config_meta['cluster'] and role['name'] == config_meta['role']: + role_id=role['id'] + break + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return role_id + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("cluster with identifier %s has been deleted.") % cluster_id + raise HTTPNotFound(msg) + + @utils.mutating + def add_config(self, req, config_meta): + """ + Adds a new config to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config + + :raises HTTPBadRequest if x-config-name is missing + """ + self._enforce(req, 'add_config') + + if config_meta.has_key('cluster'): + orig_cluster = str(config_meta['cluster']) + self._raise_404_if_cluster_deleted(req, orig_cluster) + + if config_meta.has_key('role'): + role_id=self._raise_404_if_role_exist(req,config_meta) + if not role_id: + msg = "the role name is not exist" + LOG.error(msg) + raise HTTPNotFound(msg) + + config_meta = registry.config_interface_metadata(req.context, config_meta) + return config_meta + + @utils.mutating + def delete_config(self, req, config_meta): + """ + Deletes a config from Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about config + + :raises HTTPBadRequest if x-config-name is missing + """ + self._enforce(req, 'delete_config') + + try: + for id in eval(config_meta['config']): + registry.delete_config_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find config to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete config: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("config %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('config.delete', config) + return Response(body='', status=200) + + @utils.mutating + def get_config(self, req, id): + """ + Returns metadata about an config in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque config identifier + + :raises HTTPNotFound if config metadata is not available to user + """ + self._enforce(req, 'get_config') + config_meta = self.get_config_meta_or_404(req, id) + return {'config_meta': config_meta} + + def detail(self, req): + """ + Returns detailed information for all available configs + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'configs': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... 
+ ]} + """ + self._enforce(req, 'get_configs') + params = self._get_query_params(req) + try: + configs = registry.get_configs_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(configs=configs) + +class ConfigDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["config_meta"] = utils.get_config_meta(request) + return result + + def add_config(self, request): + return self._deserialize(request) + + def delete_config(self, request): + return self._deserialize(request) + +class ConfigSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_config(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def delete_config(self, response, result): + config_meta = result['config_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config=config_meta)) + return response + + def get_config(self, response, result): + config_meta = result['config_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(config=config_meta)) + return response + +def create_resource(): + """configs resource factory method""" + deserializer = ConfigDeserializer() + serializer = ConfigSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/controller.py b/code/daisy/daisy/api/v1/controller.py new file mode 100755 index 00000000..cc47a4d8 --- /dev/null +++ b/code/daisy/daisy/api/v1/controller.py @@ -0,0 +1,369 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
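Every controller in this patch repeats the same _get_filters()/_get_query_params() pair: recognised SUPPORTED_FILTERS are validated and collected under a 'filters' key, while paging-style SUPPORTED_PARAMS are forwarded at the top level to the registry client. A standalone rendition of that shaping, with placeholder filter and param names (they are assumptions, not the module's real tuples):

    SUPPORTED_FILTERS = ('name', 'cluster_id')                   # placeholders
    SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir')


    def get_query_params(query):
        """Split a parsed query-string dict into registry-client kwargs."""
        params = {'filters': {key: value for key, value in query.items()
                              if key in SUPPORTED_FILTERS}}
        for key in SUPPORTED_PARAMS:
            if key in query:
                params[key] = query[key]
        return params

    # get_query_params({'cluster_id': 'abc', 'limit': '20'})
    # -> {'filters': {'cluster_id': 'abc'}, 'limit': '20'}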
+ +import glance_store as store +from oslo_log import log as logging +import webob.exc + +from daisy.common import exception +from daisy import i18n +import daisy.registry.client.v1.api as registry + + +LOG = logging.getLogger(__name__) +_ = i18n._ + + +class BaseController(object): + def get_image_meta_or_404(self, request, image_id): + """ + Grabs the image metadata for an image with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param image_id: The opaque image identifier + + :raises HTTPNotFound if image does not exist + """ + context = request.context + try: + return registry.get_image_metadata(context, image_id) + except exception.NotFound: + msg = "Image with identifier %s not found" % image_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden image access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_host_meta_or_404(self, request, host_id): + """ + Grabs the host metadata for an host with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque host identifier + + :raises HTTPNotFound if host does not exist + """ + context = request.context + try: + return registry.get_host_metadata(context, host_id) + except exception.NotFound: + msg = "Host with identifier %s not found" % host_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden host access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_cluster_meta_or_404(self, request, cluster_id): + """ + Grabs the cluster metadata for an cluster with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param cluster_id: The opaque cluster identifier + + :raises HTTPNotFound if cluster does not exist + """ + context = request.context + try: + return registry.get_cluster_metadata(context, cluster_id) + except exception.NotFound: + msg = "Cluster with identifier %s not found" % cluster_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden host access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + def get_component_meta_or_404(self, request, component_id): + """ + Grabs the component metadata for an component with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param component_id: The opaque component identifier + + :raises HTTPNotFound if component does not exist + """ + context = request.context + try: + return registry.get_component_metadata(context, component_id) + except exception.NotFound: + msg = "Component with identifier %s not found" % component_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden host access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_service_meta_or_404(self, request, service_id): + """ + Grabs the service metadata for an service with a supplied + identifier or raises an 
HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param service_id: The opaque service identifier + + :raises HTTPNotFound if service does not exist + """ + context = request.context + try: + return registry.get_service_metadata(context, service_id) + except exception.NotFound: + msg = "Service with identifier %s not found" % service_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden host access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_role_meta_or_404(self, request, role_id): + """ + Grabs the role metadata for an role with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param role_id: The opaque role identifier + + :raises HTTPNotFound if role does not exist + """ + context = request.context + try: + return registry.get_role_metadata(context, role_id) + except exception.NotFound: + msg = "Role with identifier %s not found" % role_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden host access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_network_meta_or_404(self, request, network_id): + """ + Grabs the network metadata for an network with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param network_id: The opaque network identifier + + :raises HTTPNotFound if network does not exist + """ + context = request.context + try: + return registry.get_network_metadata(context, network_id) + except exception.NotFound: + msg = "Network with identifier %s not found" % network_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden network access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_active_image_meta_or_error(self, request, image_id): + """ + Same as get_image_meta_or_404 except that it will raise a 403 if the + image is deactivated or 404 if the image is otherwise not 'active'. 
+ """ + image = self.get_image_meta_or_404(request, image_id) + if image['status'] == 'deactivated': + msg = "Image %s is deactivated" % image_id + LOG.debug(msg) + msg = _("Image %s is deactivated") % image_id + raise webob.exc.HTTPForbidden( + msg, request=request, content_type='type/plain') + if image['status'] != 'active': + msg = "Image %s is not active" % image_id + LOG.debug(msg) + msg = _("Image %s is not active") % image_id + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + return image + + def update_store_acls(self, req, image_id, location_uri, public=False): + if location_uri: + try: + read_tenants = [] + write_tenants = [] + members = registry.get_image_members(req.context, image_id) + if members: + for member in members: + if member['can_share']: + write_tenants.append(member['member_id']) + else: + read_tenants.append(member['member_id']) + store.set_acls(location_uri, public=public, + read_tenants=read_tenants, + write_tenants=write_tenants, + context=req.context) + except store.UnknownScheme: + msg = _("Store for image_id not found: %s") % image_id + raise webob.exc.HTTPBadRequest(explanation=msg, + request=req, + content_type='text/plain') + + def get_config_file_meta_or_404(self, request, config_file_id): + """ + Grabs the config_file metadata for an config_file with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque config_file identifier + + :raises HTTPNotFound if config_file does not exist + """ + context = request.context + try: + return registry.get_config_file_metadata(context, config_file_id) + except exception.NotFound: + msg = "config_file with identifier %s not found" % config_file_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden config_filke access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_config_set_meta_or_404(self, request, config_set_id): + """ + Grabs the config_set metadata for an config_set with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque config_set identifier + + :raises HTTPNotFound if config_set does not exist + """ + context = request.context + try: + return registry.get_config_set_metadata(context, config_set_id) + except exception.NotFound: + msg = "config_set with identifier %s not found" % config_set_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden config_set access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_config_meta_or_404(self, request, config_id): + """ + Grabs the config metadata for an config with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque config identifier + + :raises HTTPNotFound if config does not exist + """ + context = request.context + try: + return registry.get_config_metadata(context, config_id) + except exception.NotFound: + msg = "config with identifier %s not found" % config_id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden config access" + LOG.debug(msg) + 
raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_service_disk_meta_or_404(self, request, id): + """ + Grabs the config metadata for an config with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque config identifier + + :raises HTTPNotFound if config does not exist + """ + context = request.context + try: + return registry.get_service_disk_detail_metadata(context, id) + except exception.NotFound: + msg = "config with identifier %s not found" % id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden config access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') + + def get_cinder_volume_meta_or_404(self, request, id): + """ + Grabs the config metadata for an config with a supplied + identifier or raises an HTTPNotFound (404) response + + :param request: The WSGI/Webob Request object + :param host_id: The opaque config identifier + + :raises HTTPNotFound if config does not exist + """ + context = request.context + try: + return registry.get_cinder_volume_detail_metadata(context, id) + except exception.NotFound: + msg = "config with identifier %s not found" % id + LOG.debug(msg) + raise webob.exc.HTTPNotFound( + msg, request=request, content_type='text/plain') + except exception.Forbidden: + msg = "Forbidden config access" + LOG.debug(msg) + raise webob.exc.HTTPForbidden(msg, + request=request, + content_type='text/plain') \ No newline at end of file diff --git a/code/daisy/daisy/api/v1/disk_array.py b/code/daisy/daisy/api/v1/disk_array.py new file mode 100755 index 00000000..4efce262 --- /dev/null +++ b/code/daisy/daisy/api/v1/disk_array.py @@ -0,0 +1,668 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
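All of the get_*_meta_or_404() helpers in BaseController follow one template: call the matching registry getter, translate exception.NotFound into a 404 and exception.Forbidden into a 403, both returned as plain-text webob errors. A generic version of that template, illustrative only (BaseController itself keeps the per-type methods above, and the helper name here is assumed):

    import webob.exc

    from daisy.common import exception


    def _meta_or_404(request, fetch, kind, obj_id):
        """Return metadata from `fetch` or raise the matching HTTP error."""
        try:
            return fetch(request.context, obj_id)
        except exception.NotFound:
            msg = "%s with identifier %s not found" % (kind, obj_id)
            raise webob.exc.HTTPNotFound(msg, request=request,
                                         content_type='text/plain')
        except exception.Forbidden:
            msg = "Forbidden %s access" % kind
            raise webob.exc.HTTPForbidden(msg, request=request,
                                          content_type='text/plain')

    # Example: _meta_or_404(req, registry.get_role_metadata, 'Role', role_id)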
+
+"""
+Service disk and cinder volume (disk array) endpoints for Daisy v1 API
+"""
+import time
+import traceback
+import ast
+import webob.exc
+
+from oslo_log import log as logging
+from webob.exc import HTTPBadRequest
+from webob.exc import HTTPConflict
+from webob.exc import HTTPForbidden
+from webob.exc import HTTPNotFound
+from webob.exc import HTTPServerError
+from webob import Response
+
+from threading import Thread
+
+from daisy import i18n
+from daisy import notifier
+
+from daisy.api import policy
+import daisy.api.v1
+
+from daisy.common import exception
+from daisy.common import property_utils
+from daisy.common import utils
+from daisy.common import wsgi
+import daisy.registry.client.v1.api as registry
+from daisy.api.v1 import controller
+from daisy.api.v1 import filters
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+LOG = logging.getLogger(__name__)
+_ = i18n._
+_LE = i18n._LE
+_LI = i18n._LI
+_LW = i18n._LW
+SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
+SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
+ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
+SERVICE_DISK_SERVICE = ('db', 'glance', 'dbbackup', 'mongodb', 'nova')
+DISK_LOCATION = ('local', 'share')
+CINDER_VOLUME_BACKEND_PARAMS = ('management_ips', 'data_ips', 'pools',
+                                'volume_driver', 'volume_type',
+                                'role_id', 'user_name', 'user_pwd')
+CINDER_VOLUME_BACKEND_DRIVER = ['KS3200_IPSAN', 'KS3200_FCSAN',
+                                'FUJISTU_ETERNUS']
+
+
+class Controller(controller.BaseController):
+    """
+    WSGI controller for service_disk and cinder_volume resources in Daisy v1 API
+
+    These resources describe the service disks (local or shared) and the
+    cinder volume backends attached to a role.  The controller provides
+    add, update, delete, detail and list operations for both resource types.
+    """
+    def __init__(self):
+        self.notifier = notifier.Notifier()
+        registry.configure_registry_client()
+        self.policy = policy.Enforcer()
+        if property_utils.is_property_protection_enabled():
+            self.prop_enforcer = property_utils.PropertyRules(self.policy)
+        else:
+            self.prop_enforcer = None
+
+    def _enforce(self, req, action, target=None):
+        """Authorize an action against our policies"""
+        if target is None:
+            target = {}
+        try:
+            self.policy.enforce(req.context, action, target)
+        except exception.Forbidden:
+            raise HTTPForbidden()
+
+    def _get_filters(self, req):
+        """
+        Return a dictionary of query param filters from the request
+
+        :param req: the Request object coming from the wsgi layer
+        :retval a dict of key/value filters
+        """
+        query_filters = {}
+        for param in req.params:
+            if param in SUPPORTED_FILTERS:
+                query_filters[param] = req.params.get(param)
+                if not filters.validate(param, query_filters[param]):
+                    raise HTTPBadRequest(_('Bad value passed to filter '
+                                           '%(filter)s got %(val)s')
+                                         % {'filter': param,
+                                            'val': query_filters[param]})
+        return query_filters
+
+    def _get_query_params(self, req):
+        """
+        Extracts necessary query params from request.
+ + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_role_deleted(self, req, role_id): + role = self.get_role_meta_or_404(req, role_id) + if role is None or role['deleted']: + msg = _("role with identifier %s has been deleted.") % role_id + raise HTTPNotFound(msg) + if role['type'] == 'template': + msg = "role type of %s is 'template'" % role_id + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + def _raise_404_if_service_disk_deleted(self, req, service_disk_id): + service_disk = self.get_service_disk_meta_or_404(req, service_disk_id) + if service_disk is None or service_disk['deleted']: + msg = _("service_disk with identifier %s has been deleted.") % service_disk_id + raise HTTPNotFound(msg) + + def _default_value_set(self, disk_meta): + if (not disk_meta.has_key('disk_location') or + not disk_meta['disk_location'] or + disk_meta['disk_location'] == ''): + disk_meta['disk_location'] = 'local' + if not disk_meta.has_key('lun'): + disk_meta['lun'] = 0 + if not disk_meta.has_key('size'): + disk_meta['size'] = -1 + + def _unique_service_in_role(self, req, disk_meta): + params = {'filters': {'role_id': disk_meta['role_id']}} + service_disks = registry.list_service_disk_metadata(req.context, **params) + for service_disk in service_disks: + if service_disk['service'] == disk_meta['service']: + msg = "disk service %s has existed in role %s" %(disk_meta['service'], disk_meta['role_id']) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + def _service_disk_add_meta_valid(self, req, disk_meta): + if not disk_meta.has_key('role_id'): + msg = "'role_id' must be given" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + self._raise_404_if_role_deleted(req,disk_meta['role_id']) + + if not disk_meta.has_key('service'): + msg = "'service' must be given" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + if disk_meta['service'] not in SERVICE_DISK_SERVICE: + msg = "service '%s' is not supported" % disk_meta['service'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if disk_meta['disk_location'] not in DISK_LOCATION: + msg = "disk_location %s is not supported" % disk_meta['disk_location'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_meta['disk_location'] == 'share' and not disk_meta.has_key('data_ips'): + msg = "'data_ips' must be given when disk_location is share" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if disk_meta['lun'] < 0: + msg = "'lun' should not be less than 0" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + disk_meta['size'] = ast.literal_eval(str(disk_meta['size'])) + if not isinstance(disk_meta['size'], int): + msg = "'size' is not integer" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_meta['size'] < -1: + msg = "'size' is invalid" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + self._unique_service_in_role(req, disk_meta) + + def _service_disk_update_meta_valid(self, req, id, disk_meta): + orig_disk_meta = 
self.get_service_disk_meta_or_404(req, id) + if disk_meta.has_key('role_id'): + self._raise_404_if_role_deleted(req,disk_meta['role_id']) + + if disk_meta.has_key('service'): + if disk_meta['service'] not in SERVICE_DISK_SERVICE: + msg = "service '%s' is not supported" % disk_meta['service'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if disk_meta.has_key('disk_location'): + if disk_meta['disk_location'] not in DISK_LOCATION: + msg = "disk_location '%s' is not supported" % disk_meta['disk_location'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if (disk_meta['disk_location'] == 'share' and + not disk_meta.has_key('data_ips') and + not orig_disk_meta['data_ips']): + msg = "'data_ips' must be given when disk_location is share" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if disk_meta.has_key('size'): + disk_meta['size'] = ast.literal_eval(str(disk_meta['size'])) + if not isinstance(disk_meta['size'], int): + msg = "'size' is not integer" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_meta['size'] < -1: + msg = "'size' is invalid" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + @utils.mutating + def service_disk_add(self, req, disk_meta): + """ + Export daisy db data to tecs.conf and HA.conf. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + + self._enforce(req, 'service_disk_add') + self._default_value_set(disk_meta) + self._service_disk_add_meta_valid(req, disk_meta) + service_disk_meta = registry.add_service_disk_metadata(req.context, disk_meta) + return {'disk_meta': service_disk_meta} + + + @utils.mutating + def service_disk_delete(self, req, id): + """ + Deletes a service_disk from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about service_disk + + :raises HTTPBadRequest if x-service-disk-name is missing + """ + self._enforce(req, 'delete_service_disk') + try: + registry.delete_service_disk_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find service_disk to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete service_disk: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("service_disk %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + + @utils.mutating + def service_disk_update(self, req, id, disk_meta): + self._enforce(req, 'service_disk_update') + self._service_disk_update_meta_valid(req, id, disk_meta) + try: + service_disk_meta = registry.update_service_disk_metadata(req.context, + id, + disk_meta) + + except exception.Invalid as e: + msg = (_("Failed to update role metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find role to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update role: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('role.update', service_disk_meta) + + return {'disk_meta': service_disk_meta} + + + @utils.mutating + def service_disk_detail(self, req, id): + """ + Returns metadata about an role in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque role identifier + + :raises HTTPNotFound if role metadata is not available to user + """ + + self._enforce(req, 'service_disk_detail') + service_disk_meta = self.get_service_disk_meta_or_404(req, id) + return {'disk_meta': service_disk_meta} + + def service_disk_list(self, req): + self._enforce(req, 'service_disk_list') + params = self._get_query_params(req) + filters=params.get('filters',None) + if 'role_id' in filters: + role_id=filters['role_id'] + self._raise_404_if_role_deleted(req, role_id) + try: + service_disks = registry.list_service_disk_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(disk_meta=service_disks) + + def _cinder_volume_list(self, req, params): + try: + cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return cinder_volumes + + def _is_cinder_volume_repeat(self, req, array_disk_info, update_id = None): + cinder_volume_id = None + params = {'filters': {}} + + if update_id: + cinder_volume_metal = self.get_cinder_volume_meta_or_404(req, update_id) + new_management_ips = array_disk_info.get('management_ips', cinder_volume_metal['management_ips']).split(",") + new_pools = array_disk_info.get('pools', cinder_volume_metal['pools']).split(",") + else: + new_management_ips = array_disk_info['management_ips'].split(",") + new_pools = array_disk_info['pools'].split(",") + + org_cinder_volumes = self._cinder_volume_list(req, params) + for cinder_volume in org_cinder_volumes: + if (set(cinder_volume['management_ips'].split(",")) == set(new_management_ips) and + set(cinder_volume['pools'].split(",")) == set(new_pools)): + if cinder_volume['id'] != update_id: + msg = 'cinder_volume array disks conflict with cinder_volume %s' % cinder_volume['id'] + raise HTTPBadRequest(explanation=msg, request=req) + + def _get_cinder_volume_backend_index(self, req, disk_array): + params = {'filters': {}} + cinder_volumes = self._cinder_volume_list(req, params) + index = 1 + while True: + backend_index = "%s-%s" %(disk_array['volume_driver'], index) + flag = True + for cinder_volume in cinder_volumes: + if backend_index == cinder_volume['backend_index']: + index=index+1 + flag = False + break + if flag: + break + return backend_index + + @utils.mutating + def cinder_volume_add(self, req, disk_meta): + """ + Export daisy db data to 
tecs.conf and HA.conf. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + self._enforce(req, 'cinder_volume_add') + if not disk_meta.has_key('role_id'): + msg = "'role_id' must be given" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + self._raise_404_if_role_deleted(req,disk_meta['role_id']) + + disk_arrays = eval(disk_meta['disk_array']) + for disk_array in disk_arrays: + for key in disk_array.keys(): + if (key not in CINDER_VOLUME_BACKEND_PARAMS and + key != 'data_ips'): + msg = "'%s' must be given for cinder volume config" % key + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_array['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER: + msg = "volume_driver %s is not supported" % disk_array['volume_driver'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if (disk_array['volume_driver'] == 'FUJISTU_ETERNUS' and + (not disk_array.has_key('data_ips') or + not disk_array['data_ips'])): + msg = "data_ips must be given when using FUJISTU Disk Array" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + self._is_cinder_volume_repeat(req, disk_array) + disk_array['role_id'] = disk_meta['role_id'] + disk_array['backend_index'] = self._get_cinder_volume_backend_index(req, disk_array) + cinder_volumes = registry.add_cinder_volume_metadata(req.context, disk_array) + return {'disk_meta': cinder_volumes} + + @utils.mutating + def cinder_volume_delete(self, req, id): + """ + Deletes a service_disk from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about service_disk + + :raises HTTPBadRequest if x-service-disk-name is missing + """ + self._enforce(req, 'delete_cinder_volume') + try: + registry.delete_cinder_volume_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find cinder volume to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete cinder volume: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("cindre volume %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + + def _is_data_ips_valid(self, req, update_id, update_meta): + orgin_cinder_volume = self.get_cinder_volume_meta_or_404(req, update_id) + + new_driver = update_meta.get('volume_driver', + orgin_cinder_volume['volume_driver']) + if new_driver != 'FUJISTU_ETERNUS': + return + + new_data_ips = update_meta.get('data_ips', + orgin_cinder_volume['data_ips']) + if not new_data_ips: + msg = "data_ips must be given when using FUJISTU Disk Array" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + @utils.mutating + def cinder_volume_update(self, req, id, disk_meta): + for key in disk_meta.keys(): + if key not in CINDER_VOLUME_BACKEND_PARAMS: + msg = "'%s' must be given for cinder volume config" % key + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if disk_meta.has_key('role_id'): + 
self._raise_404_if_role_deleted(req,disk_meta['role_id']) + if (disk_meta.has_key('volume_driver') and + disk_meta['volume_driver'] not in CINDER_VOLUME_BACKEND_DRIVER): + msg = "volume_driver %s is not supported" % disk_meta['volume_driver'] + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + self._is_cinder_volume_repeat(req, disk_meta, id) + self._is_data_ips_valid(req, id, disk_meta) + + try: + cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, + id, + disk_meta) + + except exception.Invalid as e: + msg = (_("Failed to update cinder_volume metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find cinder_volume to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update cinder_volume: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('cinder_volume operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('cinder_volume.update', cinder_volume_meta) + + return {'disk_meta': cinder_volume_meta} + + @utils.mutating + def cinder_volume_detail(self, req, id): + """ + Returns metadata about an role in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque role identifier + + :raises HTTPNotFound if role metadata is not available to user + """ + self._enforce(req, 'cinder_volume_detail') + cinder_volume_meta = self.get_cinder_volume_meta_or_404(req, id) + return {'disk_meta': cinder_volume_meta} + + def cinder_volume_list(self, req): + self._enforce(req, 'cinder_volume_list') + params = self._get_query_params(req) + filters=params.get('filters',None) + if 'role_id' in filters: + role_id=filters['role_id'] + self._raise_404_if_role_deleted(req, role_id) + cinder_volumes = self._cinder_volume_list(req, params) + return dict(disk_meta=cinder_volumes) + + +class DiskArrayDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["disk_meta"] = utils.get_dict_meta(request) + return result + + def service_disk_add(self, request): + return self._deserialize(request) + + def service_disk_update(self, request): + return self._deserialize(request) + + def cinder_volume_add(self, request): + return self._deserialize(request) + + def cinder_volume_update(self, request): + return self._deserialize(request) + +class DiskArraySerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def service_disk_add(self, response, result): + disk_meta = result['disk_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def service_disk_update(self, response, result): + disk_meta = result['disk_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response 
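+    # The remaining serializer methods follow the same pattern as the two
+    # above: HTTP 201 with an application/json body shaped like
+    # {"disk_meta": {...}}, where the nested keys are whatever the registry
+    # returned for the resource (the shape shown here is illustrative).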
+ + def cinder_volume_add(self, response, result): + disk_meta = result['disk_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def cinder_volume_update(self, response, result): + disk_meta = result['disk_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + +def create_resource(): + """Image members resource factory method""" + deserializer = DiskArrayDeserializer() + serializer = DiskArraySerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/filters.py b/code/daisy/daisy/api/v1/filters.py new file mode 100755 index 00000000..a71b13cc --- /dev/null +++ b/code/daisy/daisy/api/v1/filters.py @@ -0,0 +1,40 @@ +# Copyright 2012, Piston Cloud Computing, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def validate(filter, value): + return FILTER_FUNCTIONS.get(filter, lambda v: True)(value) + + +def validate_int_in_range(min=0, max=None): + def _validator(v): + try: + if max is None: + return min <= int(v) + return min <= int(v) <= max + except ValueError: + return False + return _validator + + +def validate_boolean(v): + return v.lower() in ('none', 'true', 'false', '1', '0') + + +FILTER_FUNCTIONS = {'size_max': validate_int_in_range(), # build validator + 'size_min': validate_int_in_range(), # build validator + 'min_ram': validate_int_in_range(), # build validator + 'protected': validate_boolean, + 'is_public': validate_boolean, } diff --git a/code/daisy/daisy/api/v1/host_template.py b/code/daisy/daisy/api/v1/host_template.py new file mode 100755 index 00000000..b5a15f95 --- /dev/null +++ b/code/daisy/daisy/api/v1/host_template.py @@ -0,0 +1,569 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
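+# A rough sketch of the payloads handled by host_to_template() and
+# template_to_host() further down; the key names come from those handlers,
+# while the values are illustrative assumptions:
+#
+#     # save an existing host's (filtered) config as a named template of a cluster
+#     {"host_id": "<host-id>", "cluster_name": "cluster1",
+#      "host_template_name": "compute-template", "description": "optional"}
+#
+#     # apply a previously saved template back onto a host of that cluster
+#     {"host_id": "<host-id>", "cluster_name": "cluster1",
+#      "host_template_name": "compute-template"}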
+ +""" +/host_Templates endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response +import copy +import json + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry +from daisy.registry.api.v1 import template + +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.common as daisy_cmn +try: + import simplejson as json +except ImportError: + import json + +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = template.SUPPORTED_PARAMS +SUPPORTED_FILTERS = template.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for Templates resource in Daisy v1 API + + The HostTemplates resource API is a RESTful web Template for Template data. The API + is as follows:: + + GET /HostTemplates -- Returns a set of brief metadata about Templates + GET /HostTemplates/detail -- Returns a set of detailed metadata about + HostTemplates + HEAD /HostTemplates/ -- Return metadata about an Template with id + GET /HostTemplates/ -- Return Template data for Template with id + POST /HostTemplates -- Store Template data and return metadata about the + newly-stored Template + PUT /HostTemplates/ -- Update Template metadata and/or upload Template + data for a previously-reserved Template + DELETE /HostTemplates/ -- Delete the Template with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. 
+
+        :param req: the WSGI Request object
+        :retval dict of parameters that can be used by registry client
+        """
+        params = {'filters': self._get_filters(req)}
+
+        for PARAM in SUPPORTED_PARAMS:
+            if PARAM in req.params:
+                params[PARAM] = req.params.get(PARAM)
+        return params
+
+    def _raise_404_if_cluster_deleted(self, req, cluster_id):
+        cluster = self.get_cluster_meta_or_404(req, cluster_id)
+        if cluster['deleted']:
+            msg = _("Cluster with identifier %s has been deleted.") % cluster_id
+            raise HTTPNotFound(msg)
+
+    @utils.mutating
+    def add_template(self, req, host_template):
+        """
+        Adds a new cluster template to Daisy.
+
+        :param req: The WSGI/Webob Request object
+        :param host_template: Mapping of metadata about the template
+
+        :raises HTTPBadRequest if the template name is missing
+        """
+        self._enforce(req, 'add_host_template')
+        template_name = host_template["name"]
+
+        host_template = registry.add_host_template_metadata(req.context, host_template)
+
+        return {'host_template': host_template}
+
+    @utils.mutating
+    def update_host_template(self, req, template_id, host_template):
+        """
+        Updates an existing host template in the registry.
+
+        :param req: The WSGI/Webob Request object
+        :param template_id: The opaque template identifier
+
+        :retval Returns the updated template information as a mapping
+        """
+        self._enforce(req, 'update_host_template')
+        #orig_Template_meta = self.get_Template_meta_or_404(req, id)
+        '''
+        if orig_Template_meta['deleted']:
+            msg = _("Forbidden to update deleted Template.")
+            raise HTTPForbidden(explanation=msg,
+                                request=req,
+                                content_type="text/plain")
+        '''
+        try:
+            host_template = registry.update_host_template_metadata(req.context,
+                                                                    template_id,
+                                                                    host_template)
+
+        except exception.Invalid as e:
+            msg = (_("Failed to update template metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find host_template to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update host_template: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('host_template operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('host_template.update', host_template) + + return {'host_template': host_template} + + def _filter_params(self, host_meta): + for key in host_meta.keys(): + if key=="id" or key=="updated_at" or key=="deleted_at" or key=="created_at" or key=="deleted": + del host_meta[key] + if host_meta.has_key("memory"): + del host_meta['memory'] + + if host_meta.has_key("system"): + del host_meta['system'] + + if host_meta.has_key("disks"): + del host_meta['disks'] + + if host_meta.has_key("os_status"): + del host_meta['os_status'] + + if host_meta.has_key("status"): + del host_meta['status'] + + if host_meta.has_key("messages"): + del host_meta['messages'] + + if host_meta.has_key("cpu"): + del host_meta['cpu'] + + if host_meta.has_key("ipmi_addr"): + del host_meta['ipmi_addr'] + + if host_meta.has_key("interfaces"): + for interface in host_meta['interfaces']: + for key in interface.keys(): + if key=="id" or key=="updated_at" or key=="deleted_at" \ + or key=="created_at" or key=="deleted" or key=="current_speed" \ + or key=="max_speed" or key=="host_id" or key=="state": + del interface[key] + for assigned_network in interface['assigned_networks']: + if assigned_network.has_key("ip"): + assigned_network['ip'] = "" + return host_meta + + @utils.mutating + def get_host_template_detail(self, req, template_id): + """ + delete a existing cluster template with the registry. 
+ + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'get_host_template_detail') + try: + host_template = registry.host_template_detail_metadata(req.context, template_id) + return {'host_template': host_template} + except exception.NotFound as e: + msg = (_("Failed to find host template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to get host template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("host template %(id)s could not be get because it is in use: " + "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('host.delete', host) + return Response(body='', status=200) + + @utils.mutating + def get_host_template_lists(self, req): + self._enforce(req, 'get_template_lists') + params = self._get_query_params(req) + template_meta = {} + try: + host_template_lists = registry.host_template_lists_metadata(req.context, **params) + if host_template_lists and host_template_lists[0]: + template_meta = json.loads(host_template_lists[0]['hosts']) + return {'host_template': template_meta} + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(host_template=host_template_lists) + + @utils.mutating + def host_to_template(self, req, host_template): + """ + host to Template. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-Template-cluster is missing + """ + self._enforce(req, 'host_to_template') + if host_template.get('host_id', None): + origin_host_meta = self.get_host_meta_or_404(req, host_template['host_id']) + host_meta = self._filter_params(origin_host_meta) + if host_template.get('host_template_name', None) and host_template.get('cluster_name', None): + host_meta['name'] = host_template['host_template_name'] + host_meta['description'] = host_template.get('description', None) + params = {'filters':{'cluster_name':host_template['cluster_name']}} + templates = registry.host_template_lists_metadata(req.context, **params) + if templates and templates[0]: + had_host_template = False + if templates[0]['hosts']: + templates[0]['hosts'] = json.loads(templates[0]['hosts']) + else: + templates[0]['hosts'] = [] + for index in range(len(templates[0]['hosts'])): + if host_template['host_template_name'] == templates[0]['hosts'][index]['name']: + had_host_template = True + templates[0]['hosts'][index] = host_meta + break + if not had_host_template: + host_meta['name'] = host_template['host_template_name'] + templates[0]['hosts'].append(host_meta) + templates[0]['hosts'] = json.dumps(templates[0]['hosts']) + host_template = registry.update_host_template_metadata(req.context, + templates[0]['id'], + templates[0]) + else: + param = {"cluster_name": host_template['cluster_name'], "hosts":json.dumps([host_meta])} + host_template = registry.add_host_template_metadata(req.context, param) + return {'host_template': host_template} + + @utils.mutating + def template_to_host(self, req, host_template): + if not host_template.get('cluster_name', None): + msg = "cluster name is null" + raise HTTPNotFound(explanation=msg) + params = {'filters':{'cluster_name':host_template['cluster_name']}} + templates = registry.host_template_lists_metadata(req.context, **params) + hosts_param = [] + host_template_used = {} + if templates and templates[0]: + hosts_param = json.loads(templates[0]['hosts']) + for host in hosts_param: + if host['name'] == host_template['host_template_name']: + host_template_used = host + break + if not host_template_used: + msg = "not host_template %s" % host_template['host_template_name'] + raise HTTPNotFound(explanation=msg, request=req, content_type="text/plain") + if host_template.get('host_id', None): + self.get_host_meta_or_404(req, host_template['host_id']) + else: + msg="host_id is not null" + raise HTTPBadRequest(explanation = msg) + host_id = host_template['host_id'] + params = {'filters':{'name': host_template['cluster_name']}} + clusters = registry.get_clusters_detail(req.context, **params) + if clusters and clusters[0]: + host_template_used['cluster'] = clusters[0]['id'] + if host_template_used.has_key('role') and host_template_used['role']: + role_id_list = [] + host_role_list = [] + if host_template_used.has_key('cluster'): + params = self._get_query_params(req) + role_list = registry.get_roles_detail(req.context, **params) + for role_name in role_list: + if role_name['cluster_id'] == host_template_used['cluster']: + host_role_list = list(host_template_used['role']) + if role_name['name'] in host_role_list: + role_id_list.append(role_name['id']) + host_template_used['role'] = role_id_list + if host_template_used.has_key('name'): + host_template_used.pop('name') + if host_template_used.has_key('dmi_uuid'): + host_template_used.pop('dmi_uuid') + if host_template_used.has_key('ipmi_user'): + host_template_used.pop('ipmi_user') + 
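+            # ipmi_passwd and ipmi_addr are likewise host-specific, so they are
+            # stripped below and the target host keeps its own IPMI settings.
+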
if host_template_used.has_key('ipmi_passwd'): + host_template_used.pop('ipmi_passwd') + if host_template_used.has_key('ipmi_addr'): + host_template_used.pop('ipmi_addr') + host_template_interfaces = host_template_used.get('interfaces', None) + if host_template_interfaces: + template_ether_interface = [interface for interface in host_template_interfaces if interface['type'] == "ether" ] + orig_host_meta = registry.get_host_metadata(req.context, host_id) + orig_host_interfaces = orig_host_meta.get('interfaces', None) + temp_orig_host_interfaces = [ interface for interface in orig_host_interfaces if interface['type'] == "ether" ] + if len(temp_orig_host_interfaces) != len(template_ether_interface): + msg = (_('host_id %s does not match the host_id host_template ' + '%s.') % (host_id, host_template['host_template_name'])) + raise HTTPBadRequest(explanation = msg) + interface_match_flag = 0 + for host_template_interface in host_template_interfaces: + if host_template_interface['type'] == 'ether': + for orig_host_interface in orig_host_interfaces: + if orig_host_interface['pci'] == host_template_interface['pci']: + interface_match_flag += 1 + host_template_interface['mac'] = orig_host_interface['mac'] + if host_template_interface.has_key('ip'): + host_template_interface.pop('ip') + if interface_match_flag != len(template_ether_interface): + msg = (_('host_id %s does not match the host ' + 'host_template %s.') % (host_id, host_template['host_template_name'])) + raise HTTPBadRequest(explanation=msg) + host_template_used['interfaces'] = str(host_template_interfaces) + host_template = registry.update_host_metadata(req.context, host_id, host_template_used) + return {"host_template": host_template} + + @utils.mutating + def delete_host_template(self, req, host_template): + """ + delete a existing host template with the registry. 
+ + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'delete_host_template') + try: + if not host_template.get('cluster_name', None): + msg = "cluster name is null" + raise HTTPNotFound(explanation=msg) + params = {'filters':{'cluster_name':host_template['cluster_name']}} + host_templates = registry.host_template_lists_metadata(req.context, **params) + template_param = [] + had_host_template = False + if host_templates and host_templates[0]: + template_param = json.loads(host_templates[0]['hosts']) + for host in template_param: + if host['name'] == host_template['host_template_name']: + template_param.remove(host) + had_host_template = True + break + if not had_host_template: + msg = "not host template name %s" %host_template['host_template_name'] + raise HTTPNotFound(explanation=msg) + else: + host_templates[0]['hosts'] = json.dumps(template_param) + host_template = registry.update_host_template_metadata(req.context, + host_templates[0]['id'], + host_templates[0]) + return {"host_template": host_template} + else: + msg = "host template cluster name %s is null" %host_template['cluster_name'] + raise HTTPNotFound(explanation=msg) + + except exception.NotFound as e: + msg = (_("Failed to find host template to delete: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("template %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + +class HostTemplateDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["host_template"] = utils.get_template_meta(request) + return result + + def add_host_template(self, request): + return self._deserialize(request) + + def update_host_template(self, request): + return self._deserialize(request) + + + def host_to_template(self, request): + return self._deserialize(request) + + def template_to_host(self, request): + return self._deserialize(request) + + def delete_host_template(self, request): + return self._deserialize(request) + +class HostTemplateSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_host_template(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + + def delete_host_template(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + def get_host_template_detail(self, response, result): + host_template = result['host_template'] + response.status = 201 + 
response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + def update_host_template(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + + def host_to_template(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + + def template_to_host(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + return response + + def get_host_template_lists(self, response, result): + host_template = result['host_template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_template=host_template)) + + +def create_resource(): + """Templates resource factory method""" + deserializer = HostTemplateDeserializer() + serializer = HostTemplateSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/hosts.py b/code/daisy/daisy/api/v1/hosts.py new file mode 100755 index 00000000..219f24c1 --- /dev/null +++ b/code/daisy/daisy/api/v1/hosts.py @@ -0,0 +1,1728 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
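+# For reference, a standalone sketch of the CIDR containment check used by
+# Controller._is_in_network_range() in this module (names and the sample
+# addresses are illustrative):
+#
+#     def ip_to_int(ip):
+#         return reduce(lambda x, y: (x << 8) + y, map(int, ip.split('.')))
+#
+#     def in_network_range(ip, cidr):
+#         net, bits = cidr.split('/')
+#         mask = ~(2 ** (32 - int(bits)) - 1)
+#         return (ip_to_int(ip) & mask) == (ip_to_int(net) & mask)
+#
+#     in_network_range('192.168.1.2', '192.168.0.0/16')   # True
+#     in_network_range('192.169.1.2', '192.168.0.0/16')   # False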
+ +""" +/hosts endpoint for Daisy v1 API +""" +import subprocess +import re +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response +from collections import Counter +from webob.exc import HTTPServerError +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry +import threading +import daisy.api.backends.common as daisy_cmn +import ConfigParser + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') +config = ConfigParser.ConfigParser() +config.read("/home/daisy_install/daisy.conf") +ML2_TYPE = ['ovs', 'dvs', 'ovs,sriov(macvtap)', 'ovs,sriov(direct)', 'sriov(macvtap)', 'sriov(direct)'] + +class Controller(controller.BaseController): + """ + WSGI controller for hosts resource in Daisy v1 API + + The hosts resource API is a RESTful web service for host data. The API + is as follows:: + + GET /nodes -- Returns a set of brief metadata about hosts + GET /nodes -- Returns a set of detailed metadata about + hosts + HEAD /nodes/ -- Return metadata about an host with id + GET /nodes/ -- Return host data for host with id + POST /nodes -- Store host data and return metadata about the + newly-stored host + PUT /nodes/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /nodes/ -- Delete the host with id + """ + support_resource_type = ['baremetal', 'server', 'docker'] + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _raise_404_if_network_deleted(self, req, network_id): + network = self.get_network_meta_or_404(req, network_id) + if network is None or network['deleted']: + msg = _("Network with identifier %s has been deleted.") % network_id + raise HTTPNotFound(msg) + + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster is None or cluster['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % cluster_id + raise HTTPNotFound(msg) + + def _raise_404_if_role_deleted(self, req, role_id): + role = self.get_role_meta_or_404(req, role_id) + if role is None or role['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % role_id + raise HTTPNotFound(msg) + + + def 
_get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def check_bond_slaves_validity(self, bond_slaves_lists, ether_nic_names_list): + ''' + members in bond slaves must be in ether_nic_names_list + len(set(bond_slaves)) == 2, and can not be overlap between slaves members + bond_slaves_lists: [[name1,name2], [name1,name2], ...] + ether_nic_names_list: [name1, name2, ...] + ''' + for bond_slaves in bond_slaves_lists: + LOG.warn('bond_slaves: %s' % bond_slaves) + if len(set(bond_slaves)) != 2: + LOG.error('set(bond_slaves: %s' % set(bond_slaves)) + msg = (_("Bond slaves(%s) must be different nic and existed in ether nics in pairs." % bond_slaves)) + LOG.error(msg) + raise HTTPForbidden(msg) + if not set(bond_slaves).issubset(set(ether_nic_names_list)): + msg = (_("Pay attention: illegal ether nic existed in bond slaves(%s)." % bond_slaves)) + LOG.error(msg) + raise HTTPForbidden(msg) + def validate_ip_format(self, ip_str): + ''' + valid ip_str format = '10.43.178.9' + invalid ip_str format : '123. 233.42.12', spaces existed in field + '3234.23.453.353', out of range + '-2.23.24.234', negative number in field + '1.2.3.4d', letter in field + '10.43.1789', invalid format + ''' + valid_fromat = False + if ip_str.count('.') == 3 and \ + all(num.isdigit() and 0<=int(num)<256 for num in ip_str.rstrip().split('.')): + valid_fromat = True + if valid_fromat == False: + msg = (_("%s invalid ip format!") % ip_str) + LOG.error(msg) + raise HTTPForbidden(msg) + + def _ip_into_int(self, ip): + """ + Switch ip string to decimalism integer.. + :param ip: ip string + :return: decimalism integer + """ + return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + + def _is_in_network_range(self, ip, network): + """ + Check ip is in range + :param ip: Ip will be checked, like:192.168.1.2. + :param network: Ip range,like:192.168.0.0/24. + :return: If ip in range,return True,else return False. 
+ """ + network = network.split('/') + mask = ~(2**(32 - int(network[1])) - 1) + return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask) + + def get_cluster_networks_info(self, req, cluster_id): + ''' + get_cluster_networks_info by cluster id + ''' + all_networks = registry.get_all_networks(req.context) + cluster_networks = [network for network in all_networks if network['cluster_id'] == cluster_id] + return cluster_networks + + def _check_assigned_networks(self, req, cluster_id, assigned_networks): + LOG.info("assigned_networks %s " % assigned_networks) + cluster_networks = self.get_cluster_networks_info(req, cluster_id) + list_of_assigned_networks = [] + for assigned_network in assigned_networks: + LOG.info("assigned_network %s " % assigned_network) + if not assigned_network.has_key('name') or not assigned_network['name']: + msg = "assigned networks '%s' are invalid" % (assigned_networks) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + network_info = [network for network in cluster_networks if network['name'] == assigned_network['name']] + if network_info and network_info[0]: + network_cidr = network_info[0]['cidr'] + LOG.info("network_info %s " % network_info) + if network_info[0]['network_type'] != 'PRIVATE': + if network_cidr: + if assigned_network.has_key('ip') and assigned_network['ip']: + self.validate_ip_format(assigned_network['ip']) + ip_in_cidr = self._is_in_network_range(assigned_network['ip'], network_cidr) + if not ip_in_cidr: + msg = (_("The ip '%s' for network '%s' is not in cidr range." % + (assigned_network['ip'], assigned_network['name']))) + raise HTTPBadRequest(explanation=msg) + else: + msg = "error, cidr of network '%s' is empty" % (assigned_network['name']) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + msg = "can't find network named '%s' in cluster '%s'" % (assigned_network['name'], cluster_id) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + list_of_assigned_networks.append(network_info[0]) + return list_of_assigned_networks + + def _compare_assigned_networks_of_interface(self, interface1, interface2): + for network in interface1: + for network_compare in interface2: + if network['cidr'] == network_compare['cidr']: + return network['name'], network_compare['name'] + return False, False + + def _compare_assigned_networks_between_interfaces( + self, interface_num, assigned_networks_of_interfaces): + for interface_id in range(interface_num): + for interface_id_compare in range(interface_id+1, interface_num): + network1_name, network2_name = self.\ + _compare_assigned_networks_of_interface\ + (assigned_networks_of_interfaces[interface_id], + assigned_networks_of_interfaces[interface_id_compare]) + if network1_name and network2_name: + msg = (_('Network %s and network %s with same ' + 'cidr can not be assigned to different ' + 'interfaces.')) % (network1_name, network2_name) + raise HTTPBadRequest(explanation=msg) + + def _check_add_host_interfaces(self, req, host_meta): + host_meta_interfaces = [] + if host_meta.has_key('interfaces'): + host_meta_interfaces = list(eval(host_meta['interfaces'])) + else: + return + + cluster_id = host_meta.get('cluster', None) + + exist_id = self._verify_interface_among_hosts(req, host_meta) + if exist_id: + host_meta['id'] = exist_id + self.update_host(req, exist_id, host_meta) + LOG.info("<<>>" % exist_id) + return {'host_meta': host_meta} 
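+        # The host is new at this point (no interface matched an existing
+        # record), so when it is being added to a cluster and is not already
+        # installed ('active'), at least one interface must be flagged
+        # is_deployment for PXE; the check below enforces that.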
+ + if self._host_with_bad_pxe_info_in_params(host_meta): + if cluster_id and host_meta.get('os_status', None) != 'active': + msg = _("There is no nic for deployment, please choose " + "one interface to set it's 'is_deployment' True") + raise HTTPServerError(explanation=msg) + + ether_nic_names_list = list() + bond_nic_names_list = list() + bond_slaves_lists = list() + have_assigned_network = False + have_ip_netmask = False + assigned_networks_of_intefaces = [] + interface_num = 0 + for interface in host_meta_interfaces: + assigned_networks_of_one_interface = [] + if interface.get('type', None) != 'bond' and not interface.get('mac', None): + msg = _('The ether interface need a non-null mac ') + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if interface.get('type', None) != 'bond' and not interface.get('pci', None): + msg = "The Interface need a non-null pci" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if interface.get('name', None): + if interface.has_key('type') and interface['type'] == 'bond': + bond_nic_names_list.append(interface['name']) + if interface.get('slaves', None): + bond_slaves_lists.append(interface['slaves']) + else: + msg = (_("Slaves parameter can not be None when nic type was bond.")) + LOG.error(msg) + raise HTTPForbidden(msg) + else: # type == ether or interface without type field + ether_nic_names_list.append(interface['name']) + else: + msg = (_("Nic name can not be None.")) + LOG.error(msg) + raise HTTPForbidden(msg) + + if interface.has_key('is_deployment'): + if interface['is_deployment'] == "True" or interface['is_deployment'] == True: + interface['is_deployment'] = 1 + else: + interface['is_deployment'] = 0 + + if (interface.has_key('assigned_networks') and + interface['assigned_networks'] != [''] and + interface['assigned_networks']): + have_assigned_network = True + if cluster_id: + assigned_networks_of_one_interface = self.\ + _check_assigned_networks(req, + cluster_id, + interface['assigned_networks']) + else: + msg = "cluster must be given first when network plane is allocated" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if (interface.has_key('ip') and interface['ip'] and + interface.has_key('netmask') and interface['netmask']): + have_ip_netmask = True + + if interface.has_key('mac') and interface.has_key('ip'): + host_infos = registry.get_host_interface(req.context, host_meta) + for host_info in host_infos: + if host_info.has_key('host_id'): + host_meta["id"] = host_info['host_id'] + + if interface.has_key('vswitch_type') and interface['vswitch_type'] != '' and interface['vswitch_type'] not in ML2_TYPE: + msg = "vswitch_type %s is not supported" % interface['vswitch_type'] + raise HTTPBadRequest(explanation=msg, request=req, + content_type="text/plain") + interface_num += 1 + assigned_networks_of_intefaces.\ + append(assigned_networks_of_one_interface) + + for interface_id in range(interface_num): + for interface_id_compare in range(interface_id+1, interface_num): + network1_name, network2_name = self.\ + _compare_assigned_networks_of_interface\ + (assigned_networks_of_intefaces[interface_id], + assigned_networks_of_intefaces[interface_id_compare]) + if network1_name and network2_name: + msg = (_('Network %s and network %s with same ' + 'cidr can not be assigned to different ' + 'interfaces.')) % (network1_name, network2_name) + raise HTTPBadRequest(explanation=msg) + + # when assigned_network is 
empty, ip must be config + if not have_assigned_network: + if not have_ip_netmask: + msg = "ip and netmask must be given when network plane is not allocated" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + # check bond slaves validity + self.check_bond_slaves_validity(bond_slaves_lists, ether_nic_names_list) + nic_name_list = ether_nic_names_list + bond_nic_names_list + if len(set(nic_name_list)) != len(nic_name_list): + msg = (_("Nic name must be unique.")) + LOG.error(msg) + raise HTTPForbidden(msg) + + @utils.mutating + def add_host(self, req, host_meta): + """ + Adds a new host to Daisy + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'add_host') + # if host is update in '_verify_interface_among_hosts', no need add host continue. + cluster_id = host_meta.get('cluster', None) + if cluster_id: + self.get_cluster_meta_or_404(req, cluster_id) + if host_meta.has_key('role') and host_meta['role']: + role_id_list = [] + host_roles=[] + if host_meta.has_key('cluster'): + params = self._get_query_params(req) + role_list = registry.get_roles_detail(req.context, **params) + for role_name in role_list: + if role_name['cluster_id'] == host_meta['cluster']: + host_roles = list(eval(host_meta['role'])) + for host_role in host_roles: + if role_name['name'] == host_role: + role_id_list.append(role_name['id']) + continue + if len(role_id_list) != len(host_roles): + msg = "The role of params %s is not exist, please use the right name" % host_roles + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + host_meta['role'] = role_id_list + else: + msg = "cluster params is none" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + + self._check_add_host_interfaces(req, host_meta) + + if host_meta.has_key('resource_type'): + if host_meta['resource_type'] not in self.support_resource_type: + msg = "resource type is not supported, please use it in %s" % self.support_resource_type + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + host_meta['resource_type'] = 'baremetal' + + if host_meta.has_key('os_status'): + if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']: + msg = "os_status is not valid." + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr']: + if not host_meta.has_key('ipmi_user'): + host_meta['ipmi_user'] = 'zteroot' + if not host_meta.has_key('ipmi_passwd'): + host_meta['ipmi_passwd'] = 'superuser' + + host_meta = registry.add_host_metadata(req.context, host_meta) + + return {'host_meta': host_meta} + + @utils.mutating + def delete_host(self, req, id): + """ + Deletes a host from Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'delete_host') + try: + registry.delete_host_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find host to delete: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("Host %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('host.delete', host) + params= {} + discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + for host in discover_hosts: + if host.get('host_id') == id: + LOG.info("delete discover host: %s" % id) + registry.delete_discover_host_metadata(req.context, host['id']) + return Response(body='', status=200) + + @utils.mutating + def get_host(self, req, id): + """ + Returns metadata about an host in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque host identifier + + :raises HTTPNotFound if host metadata is not available to user + """ + self._enforce(req, 'get_host') + host_meta = self.get_host_meta_or_404(req, id) + return {'host_meta': host_meta} + + def detail(self, req): + """ + Returns detailed information for all available nodes + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'nodes': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + self._enforce(req, 'get_hosts') + params = self._get_query_params(req) + try: + nodes = registry.get_hosts_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(nodes=nodes) + + def _compute_hugepage_memory(self, hugepages, memory, hugepagesize='1G'): + hugepage_memory = 0 + if hugepagesize == '2M': + hugepage_memory = 2*1024*int(hugepages) + if hugepagesize == '1G': + hugepage_memory = 1*1024*1024*int(hugepages) + if hugepage_memory > memory: + msg = "The memory hugepages used is bigger than total memory." + raise HTTPBadRequest(explanation=msg) + + def _host_with_no_pxe_info_in_db(self, host_interfaces): + input_host_pxe_info = self._count_host_pxe_info(host_interfaces) + if not input_host_pxe_info: + return True + + def _host_with_bad_pxe_info_in_params(self, host_meta): + input_host_pxe_info = self._count_host_pxe_info(host_meta['interfaces']) + # In default,we think there is only one pxe interface. + if not input_host_pxe_info: + LOG.info("<<>>" + % host_meta.get('name', None)) + return True + # If it not only the exception will be raise. 
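# --- note: worked example for _compute_hugepage_memory() (illustrative) ---
# Assuming the 'total' memory string is reported in kB (which is what the
# callers' memory.strip().split(' ')[0] parsing suggests), the check above
# compares the requested hugepage pool against total memory:
#   hugepagesize='2M', hugepages=2048 -> 2 * 1024 * 2048 = 4,194,304 kB (~4 GiB)
#   hugepagesize='1G', hugepages=8    -> 1024 * 1024 * 8 = 8,388,608 kB (~8 GiB)
# On a host reporting '16434816 kB' of memory both requests pass; 32 one-gigabyte
# pages (33,554,432 kB) would exceed the total and raise HTTPBadRequest.
# --- end note ---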
+ if len(input_host_pxe_info) > 1: + msg = ("There are more than one pxe nics among the same host," + "it isn't allowed.") + raise HTTPBadRequest(explanation=msg) + + def _count_host_pxe_info(self, interfaces): + interfaces = eval(interfaces) + input_host_pxe_info = [interface + for interface in interfaces + if interface.get('is_deployment', None) == "True" or interface.get('is_deployment', None) == "true" + or interface.get('is_deployment', None) == 1] + return input_host_pxe_info + + def _update_networks_phyname(self, req, interface, cluster_id): + phyname_networks = {} + cluster_networks = registry.get_networks_detail(req.context, cluster_id) + for assigned_network in list(interface['assigned_networks']): + network_info_list = [network for network in cluster_networks + if assigned_network['name'] == network['name']] + if network_info_list and network_info_list[0]: + network_info = network_info_list[0] + phyname_networks[network_info['id']] = \ + [network_info['name'], interface['name']] + else: + msg = "can't find network named '%s' in cluster '%s'" % (assigned_network['name'], cluster_id) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + # by cluster id and network_name search interface table + registry.update_phyname_of_network(req.context, phyname_networks) + + def _verify_interface_in_same_host(self, interfaces, id = None): + """ + Verify interface in the input host. + :param interface: host interface info + :return: + """ + # verify interface among the input host + interfaces = eval(interfaces) + same_mac_list = [interface1['name'] + for interface1 in interfaces for interface2 in interfaces + if interface1.get('name', None) and interface1.get('mac', None) and + interface2.get('name', None) and interface2.get('mac', None) and + interface1.get('type', None) and interface2.get('type', None) and + interface1['name'] != interface2['name'] and interface1['mac'] == interface2['mac'] + and interface1['type'] != "bond" and interface2['type'] != "bond"] + # Notice:If interface with same 'mac' is illegal,we need delete code #1,and raise exception in 'if' block. + # This code block is just verify for early warning. + if same_mac_list: + msg = "%s%s" % ("" if not id else "Host id:%s." % id, + "The nic name of interface [%s] with same mac,please check!" % + ",".join(same_mac_list)) + LOG.warn(msg) + + # 1----------------------------------------------------------------- + # if interface with same 'pci', raise exception + same_pci_list = [interface1['name'] + for interface1 in interfaces for interface2 in interfaces + if interface1.get('name', None) and interface1.get('pci', None) and + interface2.get('name', None) and interface2.get('pci', None) and + interface1.get('type', None) and interface2.get('type', None) and + interface1['name'] != interface2['name'] and interface1['pci'] == interface2['pci'] + and interface1['type'] != "bond" and interface2['type'] != "bond"] + + if same_pci_list: + msg = "The nic name of interface [%s] with same pci,please check!" % ",".join(same_pci_list) + raise HTTPForbidden(explanation = msg) + # 1----------------------------------------------------------------- + + def _verify_interface_among_hosts(self, req, host_meta): + """ + Verify interface among the hosts in cluster + :param req: + :param cluster_id: + :param host_meta: + :return:True,host already update False,host need add + """ + # If true, the host need update, not add and update is successful. 
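# --- note: what _verify_interface_in_same_host() flags (illustrative values) ---
# Given an interfaces string such as
#   "[{'name': 'eth0', 'mac': 'fa:16:3e:aa:bb:01', 'pci': '0000:01:00.0', 'type': 'ether'},
#     {'name': 'eth1', 'mac': 'fa:16:3e:aa:bb:01', 'pci': '0000:01:00.1', 'type': 'ether'}]"
# the shared MAC only produces a warning (same_mac_list == ['eth0', 'eth1']),
# while a shared 'pci' value would raise HTTPForbidden. Interfaces of type
# 'bond' are excluded from both comparisons.
# --- end note ---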
+ self._verify_interface_in_same_host(host_meta['interfaces']) + + # host pxe interface info + input_host_pxe_info = self._count_host_pxe_info(host_meta['interfaces']) + # verify interface between exist host and input host in cluster + list_params = { + 'sort_key': u'name', + 'sort_dir': u'asc'} + all_hosts = registry.get_hosts_detail(req.context, **list_params) + exist_nodes = [] + for id in [host['id'] for host in all_hosts]: + host_meta_list = registry.get_host_metadata(req.context, id) + exist_nodes.append(host_meta_list) + if input_host_pxe_info: + input_host_pxe_info = input_host_pxe_info[0] + for exist_node in exist_nodes: + id = exist_node.get('id', None) + exist_node_info = self.get_host(req, id).get('host_meta', None) + if not exist_node_info.get('interfaces', None): + continue + + for interface in exist_node_info['interfaces']: + if interface.get('mac', None) != input_host_pxe_info.get('mac', None) or \ + interface.get('type', None) == "bond": + continue + if exist_node.get('dmi_uuid', None) != host_meta.get('dmi_uuid', None): + msg = "The 'mac' of host interface is exist in db, but 'dmi_uuid' is different." \ + "We think you want update the host, but the host can't find." + raise HTTPForbidden(explanation=msg) + return id + + def _get_swap_lv_size_m(self, memory_size_m): + if memory_size_m <= 4096: + swap_lv_size_m = 4096 + elif memory_size_m <= 16384: + swap_lv_size_m = 8192 + elif memory_size_m <= 65536: + swap_lv_size_m = 32768 + else: + swap_lv_size_m = 65536 + return swap_lv_size_m + + @utils.mutating + def update_host(self, req, id, host_meta): + """ + Updates an existing host with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'update_host') + orig_host_meta = self.get_host_meta_or_404(req, id) + # Do not allow any updates on a deleted image. 
+ # Fix for LP Bug #1060930 + if orig_host_meta['deleted']: + msg = _("Forbidden to update deleted host.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + if host_meta.has_key('interfaces'): + for interface_param in eval(host_meta['interfaces']): + if not interface_param.get('pci', None) and \ + interface_param.get('type', None) != 'bond': + msg = "The Interface need a non-null pci" + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if interface_param.has_key('vswitch_type') and interface_param['vswitch_type'] != '' and interface_param['vswitch_type'] not in ML2_TYPE: + msg = "vswitch_type %s is not supported" % interface_param['vswitch_type'] + raise HTTPBadRequest(explanation=msg, request=req, + content_type="text/plain") + if orig_host_meta.get('interfaces', None): + interfaces_db = orig_host_meta['interfaces'] + interfaces_param = eval(host_meta['interfaces']) + interfaces_db_ether = [interface_db for interface_db in + interfaces_db if interface_db.get('type', None) != 'bond'] + interfaces_param_ether = [interface_param for interface_param in + interfaces_param if interface_param.get('type', None) != 'bond'] + if len(interfaces_param) < len(interfaces_db_ether): + msg = "Forbidden to update part of interfaces" + raise HTTPForbidden(explanation=msg) + pci_count = 0 + for interface_db in interfaces_db: + if interface_db.get('type', None) != 'bond': + for interface_param in interfaces_param_ether: + if interface_param['pci'] == interface_db['pci']: + pci_count += 1 + if interface_param['mac'] != interface_db['mac']: + msg = "Forbidden to modify mac of " \ + "interface with pci %s" % interface_db['pci'] + raise HTTPForbidden(explanation=msg) + if interface_param['type'] != interface_db['type']: + msg = "Forbidden to modify type of " \ + "interface with pci %s" % interface_db['pci'] + raise HTTPForbidden(explanation=msg) + if pci_count != len(interfaces_db_ether): + msg = "Forbidden to modify pci of interface" + raise HTTPForbidden(explanation=msg) + + if host_meta.has_key('cluster'): + self.get_cluster_meta_or_404(req, host_meta['cluster']) + if host_meta.has_key('cluster'): + if orig_host_meta['status'] == 'in-cluster': + host_cluster = registry.get_host_clusters(req.context, id) + if host_meta['cluster'] != host_cluster[0]['cluster_id']: + msg = _("Forbidden to add host %s with status " + "'in-cluster' in another cluster") % id + raise HTTPForbidden(explanation=msg) + + if (host_meta.has_key('resource_type') and + host_meta['resource_type'] not in self.support_resource_type): + msg = "resource type is not supported, please use it in %s" % self.support_resource_type + raise HTTPNotFound(msg) + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('root_disk',None) and host_meta['root_disk'] != orig_host_meta['root_disk']: + msg = _("Forbidden to update root_disk of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['root_disk'] = orig_host_meta['root_disk'] + else: + if host_meta.get('root_disk',None): + root_disk = host_meta['root_disk'] + elif orig_host_meta.get('root_disk',None): + root_disk = str(orig_host_meta['root_disk']) + else: + host_meta['root_disk'] = 'sda' + root_disk = host_meta['root_disk'] + if not orig_host_meta.get('disks',None): + msg = "there is no disks in %s" %orig_host_meta['id'] + raise HTTPNotFound(msg) + if root_disk not in 
orig_host_meta['disks'].keys(): + msg = "There is no disk named %s" % root_disk + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('root_lv_size',None) and int(host_meta['root_lv_size']) != orig_host_meta['root_lv_size']: + msg = _("Forbidden to update root_lv_size of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['root_lv_size'] = str(orig_host_meta['root_lv_size']) + else: + if host_meta.get('root_lv_size',None): + root_lv_size = host_meta['root_lv_size'] + elif orig_host_meta.get('root_lv_size',None): + root_lv_size = str(orig_host_meta['root_lv_size']) + else: + host_meta['root_lv_size'] = '51200' + root_lv_size = host_meta['root_lv_size'] + if not orig_host_meta.get('disks',None): + msg = "there is no disks in %s" %orig_host_meta['id'] + raise HTTPNotFound(msg) + if root_lv_size.isdigit(): + root_lv_size=int(root_lv_size) + root_disk_storage_size_b_str = str(orig_host_meta['disks']['%s' %root_disk]['size']) + root_disk_storage_size_b_int = int(root_disk_storage_size_b_str.strip().split()[0]) + root_disk_storage_size_m = root_disk_storage_size_b_int//(1024*1024) + boot_partition_m = 400 + redundant_partiton_m = 600 + free_root_disk_storage_size_m = root_disk_storage_size_m - boot_partition_m - redundant_partiton_m + if (root_lv_size/4)*4 > free_root_disk_storage_size_m: + msg = "root_lv_size of %s is larger than the free_root_disk_storage_size."%orig_host_meta['id'] + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + if (root_lv_size/4)*4 < 51200: + msg = "root_lv_size of %s is too small ,it must be larger than 51200M."%orig_host_meta['id'] + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + else: + msg = (_("root_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id']) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('swap_lv_size',None) and int(host_meta['swap_lv_size']) != orig_host_meta['swap_lv_size']: + msg = _("Forbidden to update swap_lv_size of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['swap_lv_size'] = str(orig_host_meta['swap_lv_size']) + else: + if host_meta.get('swap_lv_size',None): + swap_lv_size = host_meta['swap_lv_size'] + elif orig_host_meta.get('swap_lv_size',None): + swap_lv_size = str(orig_host_meta['swap_lv_size']) + else: + if not orig_host_meta.get('memory',None): + msg = "there is no memory in %s" %orig_host_meta['id'] + raise HTTPNotFound(msg) + memory_size_b_str = str(orig_host_meta['memory']['total']) + memory_size_b_int = int(memory_size_b_str.strip().split()[0]) + memory_size_m = memory_size_b_int//1024 + swap_lv_size_m = self._get_swap_lv_size_m(memory_size_m) + host_meta['swap_lv_size'] = str(swap_lv_size_m) + swap_lv_size = host_meta['swap_lv_size'] + if swap_lv_size.isdigit(): + swap_lv_size=int(swap_lv_size) + disk_storage_size_b = 0 + for key in orig_host_meta['disks']: + stroage_size_str = orig_host_meta['disks'][key]['size'] + stroage_size_b_int = int(stroage_size_str.strip().split()[0]) + disk_storage_size_b = disk_storage_size_b + 
stroage_size_b_int + disk_storage_size_m = disk_storage_size_b/(1024*1024) + boot_partition_m = 400 + redundant_partiton_m = 600 + if host_meta.get('role',None): + host_role_names = eval(host_meta['role']) + elif orig_host_meta.get('role',None): + host_role_names = orig_host_meta['role'] + else: + host_role_names = None + if host_role_names: + roles_of_host=[] + params = self._get_query_params(req) + role_lists = registry.get_roles_detail(req.context, **params) + for host_role_name in host_role_names: + for role in role_lists: + if host_role_name == role['name'] and role['type'] == 'default': + roles_of_host.append(role) + db_lv_size = 0 + nova_lv_size = 0 + glance_lv_size = 0 + for role_of_host in roles_of_host: + if role_of_host['name'] == 'CONTROLLER_HA': + if role_of_host.get('glance_lv_size',None): + glance_lv_size = role_of_host['glance_lv_size'] + if role_of_host.get('db_lv_size',None): + db_lv_size = role_of_host['db_lv_size'] + if role_of_host['name'] == 'COMPUTER': + nova_lv_size = role_of_host['nova_lv_size'] + free_disk_storage_size_m = disk_storage_size_m - boot_partition_m - redundant_partiton_m - \ + (root_lv_size/4)*4 - (glance_lv_size/4)*4- (nova_lv_size/4)*4- (db_lv_size/4)*4 + else: + free_disk_storage_size_m = disk_storage_size_m - boot_partition_m - \ + redundant_partiton_m - (root_lv_size/4)*4 + if (swap_lv_size/4)*4 > free_disk_storage_size_m: + msg = "the sum of swap_lv_size and glance_lv_size and nova_lv_size and db_lv_size of %s is larger " \ + "than the free_disk_storage_size."%orig_host_meta['id'] + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + if (swap_lv_size/4)*4 < 2000: + msg = "swap_lv_size of %s is too small ,it must be larger than 2000M."%orig_host_meta['id'] + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + else: + msg = (_("swap_lv_size of %s is wrong,please input a number and it must be positive number") %orig_host_meta['id']) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('root_pwd',None) and host_meta['root_pwd'] != orig_host_meta['root_pwd']: + msg = _("Forbidden to update root_pwd of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['root_pwd'] = orig_host_meta['root_pwd'] + else: + if not host_meta.get('root_pwd',None) and not orig_host_meta.get('root_pwd',None): + host_meta['root_pwd'] = 'ossdbg1' + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('isolcpus',None) and host_meta['isolcpus'] != orig_host_meta['isolcpus']: + msg = _("Forbidden to update isolcpus of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['isolcpus'] = orig_host_meta['isolcpus'] + else: + if host_meta.get('isolcpus',None): + isolcpus = host_meta['isolcpus'] + elif orig_host_meta.get('isolcpus',None): + isolcpus = orig_host_meta['isolcpus'] + else: + host_meta['isolcpus'] = None + isolcpus = host_meta['isolcpus'] + if not orig_host_meta.get('cpu',None): + msg = "there is no cpu in %s" %orig_host_meta['id'] + raise HTTPNotFound(msg) + cpu_num = orig_host_meta['cpu']['total'] + if isolcpus: + isolcpus_lists = [value.split('-') for value in isolcpus.split(',')] + isolcpus_list = [] + for 
value in isolcpus_lists: + isolcpus_list = isolcpus_list + value + for value in isolcpus_list: + if int(value)<0 or int(value)>cpu_num -1: + msg = "isolcpus number must be lager than 0 and less than %d" %(cpu_num-1) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + if host_meta.has_key('role'): + role_id_list = [] + if host_meta.has_key('cluster'): + params = self._get_query_params(req) + role_list = registry.get_roles_detail(req.context, **params) + host_roles = list() + for role_name in role_list: + if role_name['cluster_id'] == host_meta['cluster']: + host_roles = list(eval(host_meta['role'])) + for host_role in host_roles: + if role_name['name'] == host_role: + role_id_list.append(role_name['id']) + continue + if len(role_id_list) != len(host_roles) and host_meta['role'] != u"[u'']": + msg = "The role of params %s is not exist, please use the right name" % host_roles + raise HTTPNotFound(msg) + host_meta['role'] = role_id_list + else: + msg = "cluster params is none" + raise HTTPNotFound(msg) + + if host_meta.has_key('interfaces'): + if self._host_with_bad_pxe_info_in_params(host_meta): + msg = _('The parameter interfaces of %s is wrong, there is no interface for pxe.') % id + #raise HTTPBadRequest(explanation=msg) + else: + host_meta_interfaces = list(eval(host_meta['interfaces'])) + ether_nic_names_list = list() + bond_nic_names_list = list() + bond_slaves_lists = list() + interface_num = 0 + assigned_networks_of_interfaces = [] + for interface in host_meta_interfaces: + if interface.get('name', None): + if interface.has_key('type') and interface['type'] == 'bond': + bond_nic_names_list.append(interface['name']) + slave_list = [] + if interface.get('slaves', None): + bond_slaves_lists.append(interface['slaves']) + elif interface.get('slave1', None) and interface.get('slave2', None): + slave_list.append(interface['slave1']) + slave_list.append(interface['slave2']) + bond_slaves_lists.append(slave_list) + else: + msg = (_("Slaves parameter can not be None when nic type was bond.")) + LOG.error(msg) + raise HTTPForbidden(msg) + else: # type == ether or interface without type field + ether_nic_names_list.append(interface['name']) + else: + msg = (_("Nic name can not be None.")) + LOG.error(msg) + raise HTTPForbidden(msg) + if interface.has_key('is_deployment'): + if interface['is_deployment'] == "True" or interface['is_deployment'] == True: + interface['is_deployment'] = 1 + else: + interface['is_deployment'] = 0 + + if (interface.has_key('assigned_networks') and + interface['assigned_networks'] != [''] and + interface['assigned_networks']): + clusters = registry.get_clusters_detail(req.context) + orig_cluster_name = orig_host_meta.get('cluster', None) + orig_cluster_id = None + for cluster in clusters: + if cluster['name'] == orig_cluster_name: + orig_cluster_id = cluster['id'] + cluster_id = host_meta.get('cluster', orig_cluster_id) + if cluster_id: + LOG.info("interface['assigned_networks']: %s" % interface['assigned_networks']) + assigned_networks_of_one_interface = self.\ + _check_assigned_networks(req, + cluster_id, + interface['assigned_' + 'networks']) + self._update_networks_phyname(req, interface, cluster_id) + host_meta['cluster'] = cluster_id + else: + msg = "cluster must be given first when network plane is allocated" + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + assigned_networks_of_interfaces.\ + append(assigned_networks_of_one_interface) + else: + 
assigned_networks_of_interfaces.\ + append([]) + interface_num += 1 + self._compare_assigned_networks_between_interfaces\ + (interface_num, assigned_networks_of_interfaces) + + # check bond slaves validity + self.check_bond_slaves_validity(bond_slaves_lists, ether_nic_names_list) + nic_name_list = ether_nic_names_list + bond_nic_names_list + if len(set(nic_name_list)) != len(nic_name_list): + msg = (_("Nic name must be unique.")) + LOG.error(msg) + raise HTTPForbidden(msg) + else: + if host_meta.has_key('cluster'): + host_interfaces = orig_host_meta.get('interfaces', None) + if host_interfaces: + if host_meta.has_key('os_status'): + if host_meta['os_status'] != 'active': + if self._host_with_no_pxe_info_in_db(str(host_interfaces)): + msg = _("The host has more than one dhcp " + "server, please choose one interface " + "for deployment") + raise HTTPServerError(explanation=msg) + else: + if orig_host_meta.get('os_status', None) != 'active': + if self._host_with_no_pxe_info_in_db(str(host_interfaces)): + msg = _("There is no nic for deployment, " + "please choose one interface to set " + "it's 'is_deployment' True") + raise HTTPServerError(explanation=msg) + + if host_meta.has_key('os_status'): + if host_meta['os_status'] not in ['init', 'installing', 'active', 'failed', 'none']: + msg = "os_status is not valid." + raise HTTPNotFound(msg) + if host_meta['os_status'] == 'init': + if orig_host_meta.get('interfaces', None): + macs = [interface['mac'] for interface in orig_host_meta['interfaces'] + if interface['mac']] + for mac in macs: + delete_host_discovery_info = 'pxe_os_install_clean ' + mac + subprocess.call(delete_host_discovery_info, + shell=True, + stdout=open('/dev/null', 'w'), + stderr=subprocess.STDOUT) + if (not host_meta.has_key('role') and + orig_host_meta.has_key('status') and + orig_host_meta['status'] == 'with-role' and + orig_host_meta['os_status'] != 'init'): + host_meta['role'] = [] + if not host_meta.has_key('os_progress'): + host_meta['os_progress'] = 0 + if not host_meta.has_key('messages'): + host_meta['messages'] = '' + + if ((host_meta.has_key('ipmi_addr') and host_meta['ipmi_addr']) + or orig_host_meta['ipmi_addr']): + if not host_meta.has_key('ipmi_user') and not orig_host_meta['ipmi_user']: + host_meta['ipmi_user'] = 'zteroot' + if not host_meta.has_key('ipmi_passwd') and not orig_host_meta['ipmi_passwd']: + host_meta['ipmi_passwd'] = 'superuser' + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('hugepages',None) and int(host_meta['hugepages']) != orig_host_meta['hugepages']: + msg = _("Forbidden to update hugepages of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['hugepages'] = str(orig_host_meta['hugepages']) + else: + if host_meta.has_key('hugepages'): + if not orig_host_meta.get('memory', {}).get('total', None): + msg = "The host %s has no memory" % id + raise HTTPNotFound(explanation=msg) + memory = orig_host_meta.get('memory', {}).get('total', None) + if host_meta['hugepages'] is None: + host_meta['hugepages'] = 0 + if int(host_meta['hugepages']) < 0: + msg = "The parameter hugepages must be zero or positive integer." 
+ raise HTTPBadRequest(explanation=msg) + if not host_meta.has_key('hugepagesize') and \ + orig_host_meta.get('hugepagesize', None): + self._compute_hugepage_memory(host_meta['hugepages'], + int(memory.strip().split(' ')[0]), + orig_host_meta['hugepagesize']) + if not host_meta.has_key('hugepagesize') and \ + not orig_host_meta.get('hugepagesize', None): + self._compute_hugepage_memory(host_meta['hugepages'], + int(memory.strip().split(' ')[0])) + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('hugepagesize',None) and host_meta['hugepagesize'] != orig_host_meta['hugepagesize']: + msg = _("Forbidden to update hugepagesize of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + else: + host_meta['hugepagesize'] = orig_host_meta['hugepagesize'] + else: + if host_meta.has_key('hugepagesize'): + if not orig_host_meta.get('memory', {}).get('total', None): + msg = "The host %s has no memory" % id + raise HTTPNotFound(explanation=msg) + memory = orig_host_meta.get('memory', {}).get('total', None) + if host_meta['hugepagesize'] is None: + host_meta['hugepagesize'] = '1G' + elif host_meta['hugepagesize'] != '2m' and \ + host_meta['hugepagesize'] != '2M' and \ + host_meta['hugepagesize'] != '1g' and \ + host_meta['hugepagesize'] != '1G': + msg = "The value 0f parameter hugepagesize is not supported." + raise HTTPBadRequest(explanation=msg) + if host_meta['hugepagesize'] == '2m': + host_meta['hugepagesize'] = '2M' + if host_meta['hugepagesize'] == '1g': + host_meta['hugepagesize'] = '1G' + if host_meta['hugepagesize'] == '2M' and \ + int(host_meta['hugepagesize'][0])*1024 > \ + int(memory.strip().split(' ')[0]): + msg = "The host %s forbid to use hugepage because it's " \ + "memory is too small" % id + raise HTTPForbidden(explanation=msg) + if host_meta['hugepagesize'] == '1G' and \ + int(host_meta['hugepagesize'][0])*1024*1024 > \ + int(memory.strip().split(' ')[0]): + msg = "The hugepagesize is too big, you can choose 2M " \ + "for a try." + raise HTTPBadRequest(explanation=msg) + if host_meta.has_key('hugepages'): + self._compute_hugepage_memory(host_meta['hugepages'], + int(memory.strip().split(' ')[0]), + host_meta['hugepagesize']) + if not host_meta.has_key('hugepages') and orig_host_meta.get('hugepages', None): + self._compute_hugepage_memory(orig_host_meta['hugepages'], + int(memory.strip().split(' ')[0]), + host_meta['hugepagesize']) + + if host_meta.get('os_status',None) != 'init' and orig_host_meta.get('os_status',None) == 'active': + if host_meta.get('os_version',None) and host_meta['os_version'] != orig_host_meta['os_version_file']: + msg = _("Forbidden to update os_version of %s when os_status is active if " + "you don't want to install os") % host_meta['name'] + raise HTTPForbidden(explanation=msg) + + try: + host_meta = registry.update_host_metadata(req.context, + id, + host_meta) + + except exception.Invalid as e: + msg = (_("Failed to update host metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find host to update: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.error(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('host.update', host_meta) + + return {'host_meta': host_meta} + + + def update_progress_to_db(self, req, update_info, discover_host_meta): + discover= {} + discover['status'] = update_info['status'] + discover['message'] = update_info['message'] + if update_info.get('host_id'): + discover['host_id'] = update_info['host_id'] + LOG.info("discover:%s", discover) + registry.update_discover_host_metadata(req.context, discover_host_meta['id'], discover) + + def thread_bin(self,req,discover_host_meta): + cmd = 'mkdir -p /var/log/daisy/discover_host/' + daisy_cmn.subprocess_call(cmd) + if not discover_host_meta['passwd']: + msg = "the passwd of ip %s is none."%discover_host_meta['ip'] + LOG.error(msg) + raise HTTPForbidden(msg) + var_log_path = "/var/log/daisy/discover_host/%s_discovery_host.log" % discover_host_meta['ip'] + with open(var_log_path, "w+") as fp: + try: + trustme_result = subprocess.check_output( + '/var/lib/daisy/tecs/trustme.sh %s %s' % (discover_host_meta['ip'],discover_host_meta['passwd']), + shell=True, stderr=subprocess.STDOUT) + if 'Permission denied' in trustme_result: #when passwd was wrong + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "Passwd was wrong, do trustme.sh %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + LOG.warn(_(msg)) + fp.write(msg) + elif 'is unreachable' in trustme_result: #when host ip was unreachable + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "Host ip was unreachable, do trustme.sh %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req,update_info, discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + LOG.warn(_(msg)) + except subprocess.CalledProcessError as e: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + msg = "discover host for %s failed! raise CalledProcessError when execute trustme.sh." % discover_host_meta['ip'] + update_info['message'] = msg + self.update_progress_to_db(req,update_info, discover_host_meta) + LOG.error(_(msg)) + fp.write(e.output.strip()) + return + except: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "discover host for %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req,update_info, discover_host_meta) + LOG.error(_("discover host for %s failed!" % discover_host_meta['ip'])) + fp.write("discover host for %s failed!" 
% discover_host_meta['ip']) + return + + try: + cmd = 'clush -S -b -w %s "rm -rf /home/daisy/discover_host"' % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "mkdir -p /home/daisy/discover_host"' % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd,fp) + cmd = 'clush -S -w %s "chmod 777 /home/daisy/discover_host"' % (discover_host_meta['ip'],) + daisy_cmn.subprocess_call(cmd,fp) + except subprocess.CalledProcessError as e: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + msg = "raise CalledProcessError when execute cmd for host %s." % discover_host_meta['ip'] + update_info['message'] = msg + self.update_progress_to_db(req,update_info, discover_host_meta) + LOG.error(_(msg)) + fp.write(e.output.strip()) + return + + try: + scp_sh_and_rpm_result = subprocess.check_output( + 'clush -S -w %s -c /var/lib/daisy/tecs/getnodeinfo.sh /var/lib/daisy/tecs/jq-1.3-2.el7.x86_64.rpm --dest=/home/daisy/discover_host' % (discover_host_meta['ip'],), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "scp getnodeinfo.sh and jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, discover_host_meta) + LOG.error(_("scp getnodeinfo.sh and jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'])) + fp.write(e.output.strip()) + return + + try: + rpm_install_result = subprocess.check_output( + 'clush -S -w %s rpm -ivh --force /home/daisy/discover_host/jq-1.3-2.el7.x86_64.rpm' % (discover_host_meta['ip'],), + shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "install jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, discover_host_meta) + LOG.error(_("install jq-1.3-2.el7.x86_64.rpm for %s failed!" % discover_host_meta['ip'])) + fp.write(e.output.strip()) + return + + try: + exc_result = subprocess.check_output( + 'clush -S -w %s /home/daisy/discover_host/getnodeinfo.sh' % (discover_host_meta['ip'],), + shell=True, stderr=subprocess.STDOUT) + if 'Failed connect to' in exc_result: #when openstack-ironic-discoverd.service has problem + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "Do getnodeinfo.sh %s failed!" % discover_host_meta['ip'] + self.update_progress_to_db(req, update_info, discover_host_meta) + msg = (_("Do trustme.sh %s failed!" % discover_host_meta['ip'])) + LOG.warn(_(msg)) + fp.write(msg) + else: + update_info = {} + update_info['status'] = 'DISCOVERY_SUCCESSFUL' + update_info['message'] = "discover host for %s successfully!" % discover_host_meta['ip'] + mac_info = re.search(r'"mac": ([^,\n]*)', exc_result) + mac = eval(mac_info.group(1)) + filters = {'mac': mac} + host_interfaces = registry.get_all_host_interfaces(req.context, filters) + if host_interfaces: + update_info['host_id'] = host_interfaces[0]['host_id'] + LOG.info("update_info['host_id']:%s", update_info['host_id']) + self.update_progress_to_db(req,update_info, discover_host_meta) + LOG.info(_("discover host for %s successfully!" % discover_host_meta['ip'])) + fp.write(exc_result) + + except subprocess.CalledProcessError as e: + update_info = {} + update_info['status'] = 'DISCOVERY_FAILED' + update_info['message'] = "discover host for %s failed!" 
% discover_host_meta['ip'] + self.update_progress_to_db(req,update_info, discover_host_meta) + LOG.error(_("discover host for %s failed!" % discover_host_meta['ip'])) + fp.write(e.output.strip()) + return + + + @utils.mutating + def discover_host_bin(self, req, host_meta): + params={} + discover_host_meta_list=registry.get_discover_hosts_detail(req.context, **params) + filters = {} + host_interfaces = registry.get_all_host_interfaces(req.context, filters) + existed_host_ip = [host['ip'] for host in host_interfaces] + LOG.info('existed_host_ip**: %s', existed_host_ip) + + for discover_host in discover_host_meta_list: + if discover_host['status'] != 'DISCOVERY_SUCCESSFUL': + update_info = {} + update_info['status'] = 'DISCOVERING' + update_info['message'] = 'DISCOVERING' + update_info['host_id'] = 'None' + self.update_progress_to_db(req, update_info, discover_host) + threads = [] + for discover_host_meta in discover_host_meta_list: + if discover_host_meta['ip'] in existed_host_ip: + update_info = {} + update_info['status'] = 'DISCOVERY_SUCCESSFUL' + update_info['message'] = "discover host for %s successfully!" % discover_host_meta['ip'] + host_id_list = [host['host_id'] for host in host_interfaces if discover_host_meta['ip'] == host['ip']] + update_info['host_id'] = host_id_list[0] + self.update_progress_to_db(req,update_info, discover_host_meta) + continue + if discover_host_meta['status'] != 'DISCOVERY_SUCCESSFUL': + t = threading.Thread(target=self.thread_bin,args=(req,discover_host_meta)) + t.setDaemon(True) + t.start() + threads.append(t) + LOG.info(_("all host discovery threads have started, please waiting....")) + + try: + for t in threads: + t.join() + except: + LOG.warn(_("Join discover host thread %s failed!" % t)) + + @utils.mutating + def discover_host(self, req, host_meta): + daisy_management_ip=config.get("DEFAULT", "daisy_management_ip") + if daisy_management_ip: + cmd = 'dhcp_linenumber=`grep -n "dhcp_ip=" /var/lib/daisy/tecs/getnodeinfo.sh|cut -d ":" -f 1` && sed -i "${dhcp_linenumber}c dhcp_ip=\'%s\'" /var/lib/daisy/tecs/getnodeinfo.sh'% (daisy_management_ip,) + daisy_cmn.subprocess_call(cmd) + + discovery_host_thread = threading.Thread(target=self.discover_host_bin,args=(req, host_meta)) + discovery_host_thread.start() + return {"status":"begin discover host"} + + @utils.mutating + def add_discover_host(self, req, host_meta): + """ + Adds a new discover host to Daisy + + :param req: The WSGI/Webob Request object + :param host_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'add_discover_host') + LOG.warn("host_meta: %s" % host_meta) + if not host_meta.get('ip', None): + msg = "IP parameter can not be None." + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + else: + discover_hosts_ip = self._get_discover_host_ip(req) + if host_meta['ip'] in discover_hosts_ip: + host = self._get_host_by_ip(req, host_meta['ip']) + if host and host['status'] != 'DISCOVERY_SUCCESSFUL': + host_info = {} + host_info['ip'] = host_meta.get('ip', host.get('ip')) + host_info['passwd'] = host_meta.get('passwd', host.get('passwd')) + host_info['user'] = host_meta.get('user', host.get('user')) + host_info['status'] = 'init' + host_info['message'] = 'None' + host_meta = registry.update_discover_host_metadata(req.context, + host['id'], + host_info) + return {'host_meta': host_meta} + else: + msg = (_("ip %s already existed and this host has been discovered successfully. 
" % host_meta['ip'])) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + self.validate_ip_format(host_meta['ip']) + + if not host_meta.get('user', None): + host_meta['user'] = 'root' + + if not host_meta.get('passwd', None): + msg = "PASSWD parameter can not be None." + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if not host_meta.get('status', None): + host_meta['status'] = 'init' + + try: + discover_host_info = registry.add_discover_host_metadata(req.context, host_meta) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return {'host_meta': discover_host_info} + + @utils.mutating + def delete_discover_host(self, req, id): + """ + Deletes a discover host from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'delete_discover_host') + try: + registry.delete_discover_host_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find host to delete: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("Host %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('host.delete', host) + return Response(body='', status=200) + + def detail_discover_host(self, req): + """ + Returns detailed information for all available nodes + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'nodes': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + + self._enforce(req, 'get_discover_hosts') + params = self._get_query_params(req) + try: + nodes = registry.get_discover_hosts_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + return dict(nodes=nodes) + + def update_discover_host(self, req, id, host_meta): + ''' + ''' + self._enforce(req, 'update_discover_host') + params = {'id': id} + orig_host_meta = registry.get_discover_host_metadata(req.context, id) + if host_meta.get('ip', None): + discover_hosts_ip = self._get_discover_host_ip(req) + if host_meta['ip'] in discover_hosts_ip: + host_status = host_meta.get('status', orig_host_meta['status']) + if host_status == 'DISCOVERY_SUCCESSFUL': + msg = (_("Host with ip %s already has been discovered successfully, can not change host ip to %s " % (orig_host_meta['ip'], host_meta['ip']))) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + self.validate_ip_format(host_meta['ip']) + if orig_host_meta['ip'] != host_meta['ip']: + host_meta['status'] = 'init' + try: + host_meta = registry.update_discover_host_metadata(req.context, + id, + host_meta) + + except exception.Invalid as e: + msg = (_("Failed to update host metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find host to update: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.error(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('host.update', host_meta) + + return {'host_meta': host_meta} + + def _get_discover_host_ip(self, req): + params= {} + hosts_ip = list() + discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + for host in discover_hosts: + if host.get('ip', None): + hosts_ip.append(host['ip']) + return hosts_ip + + def _get_host_by_ip(self, req, host_ip): + params= {} + discover_hosts = registry.get_discover_hosts_detail(req.context, **params) + LOG.info("%s" % discover_hosts) + for host in discover_hosts: + if host.get('ip') == host_ip: + return host + return + + def get_discover_host_detail(self, req, discover_host_id): + ''' + ''' + try: + host_meta = registry.get_discover_host_metadata(req.context, discover_host_id) + except exception.Invalid as e: + msg = (_("Failed to update host metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find host to update: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update host: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.error(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('host.update', host_meta) + + return {'host_meta': host_meta} + +class HostDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["host_meta"] = utils.get_host_meta(request) + return result + + def add_host(self, request): + return self._deserialize(request) + + def update_host(self, request): + return self._deserialize(request) + + def discover_host(self, request): + return self._deserialize(request) + + def add_discover_host(self, request): + return self._deserialize(request) + + def update_discover_host(self, request): + return self._deserialize(request) + +class HostSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + + def delete_host(self, response, result): + 
host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + + def get_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + + def discover_host(self, response, result): + host_meta = result + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host_meta)) + return response + + def add_discover_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + + def update_discover_host(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + + def get_discover_host_detail(self, response, result): + host_meta = result['host_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(host=host_meta)) + return response + +def create_resource(): + """Hosts resource factory method""" + deserializer = HostDeserializer() + serializer = HostSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/images.py b/code/daisy/daisy/api/v1/images.py new file mode 100755 index 00000000..0a9001c3 --- /dev/null +++ b/code/daisy/daisy/api/v1/images.py @@ -0,0 +1,1264 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
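# --- sketch: example host_meta for add_host() (illustrative values) ---
# A plausible request mapping for the hosts controller above, built only from
# the fields its validation code reads; every concrete value here is made up.
# Note that 'role' and 'interfaces' arrive as strings and are eval()'d by the
# controller.
example_host_meta = {
    'name': 'host-192-160-0-10',
    'cluster': '<cluster-uuid>',
    'resource_type': 'baremetal',      # defaulted to 'baremetal' when omitted
    'os_status': 'init',
    'role': "['CONTROLLER_HA']",
    'ipmi_addr': '192.160.0.100',      # ipmi_user/ipmi_passwd fall back to defaults
    'interfaces': str([
        {'name': 'eth0',
         'mac': 'fa:16:3e:aa:bb:01',
         'pci': '0000:01:00.0',
         'type': 'ether',
         'is_deployment': 'True',      # normalized to 1 by the controller
         'ip': '192.160.0.10',
         'netmask': '255.255.254.0',
         'assigned_networks': [{'name': 'MANAGEMENT', 'ip': '192.160.0.10'}]},
    ]),
}
# --- end sketch ---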
+ +""" +/images endpoint for Glance v1 API +""" + +import copy + +import glance_store as store +import glance_store.location +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import strutils +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPMethodNotAllowed +from webob.exc import HTTPNotFound +from webob.exc import HTTPRequestEntityTooLarge +from webob.exc import HTTPServiceUnavailable +from webob import Response + +from daisy.api import common +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.api.v1 import upload_utils +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import store_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + + +def validate_image_meta(req, values): + + name = values.get('name') + disk_format = values.get('disk_format') + container_format = values.get('container_format') + + if 'disk_format' in values: + if disk_format not in CONF.image_format.disk_formats: + msg = _("Invalid disk format '%s' for image.") % disk_format + raise HTTPBadRequest(explanation=msg, request=req) + + if 'container_format' in values: + if container_format not in CONF.image_format.container_formats: + msg = _("Invalid container format '%s' " + "for image.") % container_format + raise HTTPBadRequest(explanation=msg, request=req) + + if name and len(name) > 255: + msg = _('Image name too long: %d') % len(name) + raise HTTPBadRequest(explanation=msg, request=req) + + amazon_formats = ('aki', 'ari', 'ami') + + if disk_format in amazon_formats or container_format in amazon_formats: + if disk_format is None: + values['disk_format'] = container_format + elif container_format is None: + values['container_format'] = disk_format + elif container_format != disk_format: + msg = (_("Invalid mix of disk and container formats. " + "When setting a disk or container format to " + "one of 'aki', 'ari', or 'ami', the container " + "and disk formats must match.")) + raise HTTPBadRequest(explanation=msg, request=req) + + return values + + +def redact_loc(image_meta, copy_dict=True): + """ + Create a shallow copy of image meta with 'location' removed + for security (as it can contain credentials). + """ + if copy_dict: + new_image_meta = copy.copy(image_meta) + else: + new_image_meta = image_meta + new_image_meta.pop('location', None) + new_image_meta.pop('location_data', None) + return new_image_meta + + +class Controller(controller.BaseController): + """ + WSGI controller for images resource in Glance v1 API + + The images resource API is a RESTful web service for image data. 
The API + is as follows:: + + GET /images -- Returns a set of brief metadata about images + GET /images/detail -- Returns a set of detailed metadata about + images + HEAD /images/ -- Return metadata about an image with id + GET /images/ -- Return image data for image with id + POST /images -- Store image data and return metadata about the + newly-stored image + PUT /images/ -- Update image metadata and/or upload image + data for a previously-reserved image + DELETE /images/ -- Delete the image with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _enforce_image_property_quota(self, + image_meta, + orig_image_meta=None, + purge_props=False, + req=None): + if CONF.image_property_quota < 0: + # If value is negative, allow unlimited number of properties + return + + props = image_meta['properties'].keys() + + # NOTE(ameade): If we are not removing existing properties, + # take them in to account + if (not purge_props) and orig_image_meta: + original_props = orig_image_meta['properties'].keys() + props.extend(original_props) + props = set(props) + + if len(props) > CONF.image_property_quota: + msg = (_("The limit has been exceeded on the number of allowed " + "image properties. Attempted: %(num)s, Maximum: " + "%(quota)s") % {'num': len(props), + 'quota': CONF.image_property_quota}) + LOG.warn(msg) + raise HTTPRequestEntityTooLarge(explanation=msg, + request=req, + content_type="text/plain") + + def _enforce_create_protected_props(self, create_props, req): + """ + Check request is permitted to create certain properties + + :param create_props: List of properties to check + :param req: The WSGI/Webob Request object + + :raises HTTPForbidden if request forbidden to create a property + """ + if property_utils.is_property_protection_enabled(): + for key in create_props: + if (self.prop_enforcer.check_property_rules( + key, 'create', req.context) is False): + msg = _("Property '%s' is protected") % key + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _enforce_read_protected_props(self, image_meta, req): + """ + Remove entries from metadata properties if they are read protected + + :param image_meta: Mapping of metadata about image + :param req: The WSGI/Webob Request object + """ + if property_utils.is_property_protection_enabled(): + for key in image_meta['properties'].keys(): + if (self.prop_enforcer.check_property_rules( + key, 'read', req.context) is False): + image_meta['properties'].pop(key) + + def _enforce_update_protected_props(self, update_props, image_meta, + orig_meta, req): + """ + Check request is permitted to update certain properties. Read + permission is required to delete a property. + + If the property value is unchanged, i.e. a noop, it is permitted, + however, it is important to ensure read access first. Otherwise the + value could be discovered using brute force. 
+ + :param update_props: List of properties to check + :param image_meta: Mapping of proposed new metadata about image + :param orig_meta: Mapping of existing metadata about image + :param req: The WSGI/Webob Request object + + :raises HTTPForbidden if request forbidden to create a property + """ + if property_utils.is_property_protection_enabled(): + for key in update_props: + has_read = self.prop_enforcer.check_property_rules( + key, 'read', req.context) + if ((self.prop_enforcer.check_property_rules( + key, 'update', req.context) is False and + image_meta['properties'][key] != + orig_meta['properties'][key]) or not has_read): + msg = _("Property '%s' is protected") % key + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _enforce_delete_protected_props(self, delete_props, image_meta, + orig_meta, req): + """ + Check request is permitted to delete certain properties. Read + permission is required to delete a property. + + Note, the absence of a property in a request does not necessarily + indicate a delete. The requester may not have read access, and so can + not know the property exists. Hence, read access is a requirement for + delete, otherwise the delete is ignored transparently. + + :param delete_props: List of properties to check + :param image_meta: Mapping of proposed new metadata about image + :param orig_meta: Mapping of existing metadata about image + :param req: The WSGI/Webob Request object + + :raises HTTPForbidden if request forbidden to create a property + """ + if property_utils.is_property_protection_enabled(): + for key in delete_props: + if (self.prop_enforcer.check_property_rules( + key, 'read', req.context) is False): + # NOTE(bourke): if read protected, re-add to image_meta to + # prevent deletion + image_meta['properties'][key] = orig_meta[ + 'properties'][key] + elif (self.prop_enforcer.check_property_rules( + key, 'delete', req.context) is False): + msg = _("Property '%s' is protected") % key + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def index(self, req): + """ + Returns the following information for all public, available images: + + * id -- The opaque image identifier + * name -- The name of the image + * disk_format -- The disk image format + * container_format -- The "container" format of the image + * checksum -- MD5 checksum of the image data + * size -- Size of image data in bytes + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'images': [ + {'id': , + 'name': , + 'disk_format': , + 'container_format': , + 'checksum': + 'size': }, ... + ]} + """ + self._enforce(req, 'get_images') + params = self._get_query_params(req) + try: + images = registry.get_images_list(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + return dict(images=images) + + def detail(self, req): + """ + Returns detailed information for all available images + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'images': [ + {'id': , + 'name': , + 'size': , + 'disk_format': , + 'container_format': , + 'checksum': , + 'min_disk': , + 'min_ram': , + 'store': , + 'status': , + 'created_at': , + 'updated_at': , + 'deleted_at': |, + 'properties': {'distro': 'Ubuntu 10.04 LTS', ...}}, ... 
+ ]} + """ + if req.method == 'HEAD': + msg = (_("This operation is currently not permitted on " + "Glance images details.")) + raise HTTPMethodNotAllowed(explanation=msg, + headers={'Allow': 'GET'}, + body_template='${explanation}') + self._enforce(req, 'get_images') + params = self._get_query_params(req) + try: + images = registry.get_images_detail(req.context, **params) + # Strip out the Location attribute. Temporary fix for + # LP Bug #755916. This information is still coming back + # from the registry, since the API server still needs access + # to it, however we do not return this potential security + # information to the API end user... + for image in images: + redact_loc(image, copy_dict=False) + self._enforce_read_protected_props(image, req) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(images=images) + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + + # Fix for LP Bug #1132294 + # Ensure all shared images are returned in v1 + params['member_status'] = 'all' + return params + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS or param.startswith('property-'): + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def meta(self, req, id): + """ + Returns metadata about an image in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque image identifier + :retval similar to 'show' method but without image_data + + :raises HTTPNotFound if image metadata is not available to user + """ + self._enforce(req, 'get_image') + image_meta = self.get_image_meta_or_404(req, id) + image_meta = redact_loc(image_meta) + self._enforce_read_protected_props(image_meta, req) + return { + 'image_meta': image_meta + } + + @staticmethod + def _validate_source(source, req): + """ + Validate if external sources (as specified via the location + or copy-from headers) are supported. Otherwise we reject + with 400 "Bad Request". 
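+
+        Illustrative example (the URL is a placeholder): a request
+        header 'x-glance-api-copy-from: http://example.com/cirros.qcow2'
+        is only accepted if store_utils.validate_external_location()
+        allows the http scheme; otherwise HTTPBadRequest is raised.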
+ """ + if source: + if store_utils.validate_external_location(source): + return source + else: + msg = _("External sources are not supported: '%s'") % source + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + @staticmethod + def _copy_from(req): + return req.headers.get('x-glance-api-copy-from') + + def _external_source(self, image_meta, req): + source = image_meta.get('location') + if source is not None: + self._enforce(req, 'set_image_location') + else: + source = Controller._copy_from(req) + return Controller._validate_source(source, req) + + @staticmethod + def _get_from_store(context, where, dest=None): + try: + loc = glance_store.location.get_location_from_uri(where) + src_store = store.get_store_from_uri(where) + + if dest is not None: + src_store.READ_CHUNKSIZE = dest.WRITE_CHUNKSIZE + + image_data, image_size = src_store.get(loc, context=context) + + except store.RemoteServiceUnavailable as e: + raise HTTPServiceUnavailable(explanation=e.msg) + except store.NotFound as e: + raise HTTPNotFound(explanation=e.msg) + except (store.StoreGetNotSupported, + store.StoreRandomGetNotSupported, + store.UnknownScheme) as e: + raise HTTPBadRequest(explanation=e.msg) + image_size = int(image_size) if image_size else None + return image_data, image_size + + def show(self, req, id): + """ + Returns an iterator that can be used to retrieve an image's + data along with the image metadata. + + :param req: The WSGI/Webob Request object + :param id: The opaque image identifier + + :raises HTTPNotFound if image is not available to user + """ + + self._enforce(req, 'get_image') + + try: + image_meta = self.get_active_image_meta_or_error(req, id) + except HTTPNotFound: + # provision for backward-compatibility breaking issue + # catch the 404 exception and raise it after enforcing + # the policy + with excutils.save_and_reraise_exception(): + self._enforce(req, 'download_image') + else: + target = utils.create_mashup_dict(image_meta) + self._enforce(req, 'download_image', target=target) + + self._enforce_read_protected_props(image_meta, req) + + if image_meta.get('size') == 0: + image_iterator = iter([]) + else: + image_iterator, size = self._get_from_store(req.context, + image_meta['location']) + image_iterator = utils.cooperative_iter(image_iterator) + image_meta['size'] = size or image_meta['size'] + image_meta = redact_loc(image_meta) + return { + 'image_iterator': image_iterator, + 'image_meta': image_meta, + } + + def _reserve(self, req, image_meta): + """ + Adds the image metadata to the registry and assigns + an image identifier if one is not supplied in the request + headers. Sets the image's status to `queued`. 
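+        As a note on the code below: an image whose supplied size is 0
+        is marked 'active' immediately instead of 'queued'.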
+ + :param req: The WSGI/Webob Request object + :param id: The opaque image identifier + :param image_meta: The image metadata + + :raises HTTPConflict if image already exists + :raises HTTPBadRequest if image metadata is not valid + """ + location = self._external_source(image_meta, req) + scheme = image_meta.get('store') + if scheme and scheme not in store.get_known_schemes(): + msg = _("Required store %s is invalid") % scheme + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + content_type='text/plain') + + image_meta['status'] = ('active' if image_meta.get('size') == 0 + else 'queued') + + if location: + try: + backend = store.get_store_from_location(location) + except (store.UnknownScheme, store.BadStoreUri): + msg = _("Invalid location %s") % location + LOG.debug(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + # check the store exists before we hit the registry, but we + # don't actually care what it is at this point + self.get_store_or_400(req, backend) + + # retrieve the image size from remote store (if not provided) + image_meta['size'] = self._get_size(req.context, image_meta, + location) + else: + # Ensure that the size attribute is set to zero for directly + # uploadable images (if not provided). The size will be set + # to a non-zero value during upload + image_meta['size'] = image_meta.get('size', 0) + + try: + image_meta = registry.add_image_metadata(req.context, image_meta) + self.notifier.info("image.create", redact_loc(image_meta)) + return image_meta + except exception.Duplicate: + msg = (_("An image with identifier %s already exists") % + image_meta['id']) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + except exception.Invalid as e: + msg = (_("Failed to reserve image. Got error: %s") % + utils.exception_to_str(e)) + LOG.exception(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden: + msg = _("Forbidden to reserve image.") + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _upload(self, req, image_meta): + """ + Uploads the payload of the request to a backend store in + daisy. If the `x-image-meta-store` header is set, Glance + will attempt to use that scheme; if not, Glance will use the + scheme set by the flag `default_store` to find the backing store. 
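+
+        Illustrative example (whether the scheme is enabled depends on
+        the deployment): sending 'x-image-meta-store: file' selects the
+        filesystem backend, provided glance_store knows that scheme;
+        otherwise get_store_or_400() rejects the request with a 400.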
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about image + + :raises HTTPConflict if image already exists + :retval The location where the image was stored + """ + + scheme = req.headers.get('x-image-meta-store', + CONF.glance_store.default_store) + + store = self.get_store_or_400(req, scheme) + + copy_from = self._copy_from(req) + if copy_from: + try: + image_data, image_size = self._get_from_store(req.context, + copy_from, + dest=store) + except Exception: + upload_utils.safe_kill(req, image_meta['id'], 'queued') + msg = (_LE("Copy from external source '%(scheme)s' failed for " + "image: %(image)s") % + {'scheme': scheme, 'image': image_meta['id']}) + LOG.exception(msg) + return + image_meta['size'] = image_size or image_meta['size'] + else: + try: + req.get_content_type(('application/octet-stream',)) + except exception.InvalidContentType: + upload_utils.safe_kill(req, image_meta['id'], 'queued') + msg = _("Content-Type must be application/octet-stream") + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg) + + image_data = req.body_file + + image_id = image_meta['id'] + LOG.debug("Setting image %s to status 'saving'", image_id) + registry.update_image_metadata(req.context, image_id, + {'status': 'saving'}) + + LOG.debug("Uploading image data for image %(image_id)s " + "to %(scheme)s store", {'image_id': image_id, + 'scheme': scheme}) + + self.notifier.info("image.prepare", redact_loc(image_meta)) + + image_meta, location_data = upload_utils.upload_data_to_store( + req, image_meta, image_data, store, self.notifier) + + self.notifier.info('image.upload', redact_loc(image_meta)) + + return location_data + + def _activate(self, req, image_id, location_data, from_state=None): + """ + Sets the image status to `active` and the image's location + attribute. + + :param req: The WSGI/Webob Request object + :param image_id: Opaque image identifier + :param location_data: Location of where Glance stored this image + """ + image_meta = {} + image_meta['location'] = location_data['url'] + image_meta['status'] = 'active' + image_meta['location_data'] = [location_data] + + try: + s = from_state + image_meta_data = registry.update_image_metadata(req.context, + image_id, + image_meta, + from_state=s) + self.notifier.info("image.activate", redact_loc(image_meta_data)) + self.notifier.info("image.update", redact_loc(image_meta_data)) + return image_meta_data + except exception.Duplicate: + with excutils.save_and_reraise_exception(): + # Delete image data since it has been supersceded by another + # upload and re-raise. + LOG.debug("duplicate operation - deleting image data for " + " %(id)s (location:%(location)s)" % + {'id': image_id, 'location': image_meta['location']}) + upload_utils.initiate_deletion(req, location_data, image_id) + except exception.Invalid as e: + msg = (_("Failed to activate image. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + def _upload_and_activate(self, req, image_meta): + """ + Safely uploads the image data in the request payload + and activates the image in the registry after a successful + upload. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about image + + :retval Mapping of updated image data + """ + location_data = self._upload(req, image_meta) + image_id = image_meta['id'] + LOG.info(_LI("Uploaded data of image %s from request " + "payload successfully.") % image_id) + + if location_data: + try: + image_meta = self._activate(req, + image_id, + location_data, + from_state='saving') + except Exception as e: + with excutils.save_and_reraise_exception(): + if not isinstance(e, exception.Duplicate): + # NOTE(zhiyan): Delete image data since it has already + # been added to store by above _upload() call. + LOG.warn(_LW("Failed to activate image %s in " + "registry. About to delete image " + "bits from store and update status " + "to 'killed'.") % image_id) + upload_utils.initiate_deletion(req, location_data, + image_id) + upload_utils.safe_kill(req, image_id, 'saving') + else: + image_meta = None + + return image_meta + + def _get_size(self, context, image_meta, location): + # retrieve the image size from remote store (if not provided) + try: + return (image_meta.get('size', 0) or + store.get_size_from_backend(location, context=context)) + except store.NotFound as e: + # NOTE(rajesht): The exception is logged as debug message because + # the image is located at third-party server and it has nothing to + # do with daisy. If log.exception is used here, in that case the + # log file might be flooded with exception log messages if + # malicious user keeps on trying image-create using non-existent + # location url. Used log.debug because administrator can + # disable debug logs. + LOG.debug(utils.exception_to_str(e)) + raise HTTPNotFound(explanation=e.msg, content_type="text/plain") + except (store.UnknownScheme, store.BadStoreUri) as e: + # NOTE(rajesht): See above note of store.NotFound + LOG.debug(utils.exception_to_str(e)) + raise HTTPBadRequest(explanation=e.msg, content_type="text/plain") + + def _handle_source(self, req, image_id, image_meta, image_data): + copy_from = self._copy_from(req) + location = image_meta.get('location') + sources = filter(lambda x: x, (copy_from, location, image_data)) + if len(sources) >= 2: + msg = _("It's invalid to provide multiple image sources.") + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + if image_data: + image_meta = self._validate_image_for_activation(req, + image_id, + image_meta) + image_meta = self._upload_and_activate(req, image_meta) + elif copy_from: + msg = _LI('Triggering asynchronous copy from external source') + LOG.info(msg) + pool = common.get_thread_pool("copy_from_eventlet_pool") + pool.spawn_n(self._upload_and_activate, req, image_meta) + else: + if location: + self._validate_image_for_activation(req, image_id, image_meta) + image_size_meta = image_meta.get('size') + if image_size_meta: + try: + image_size_store = store.get_size_from_backend( + location, req.context) + except (store.BadStoreUri, store.UnknownScheme) as e: + LOG.debug(utils.exception_to_str(e)) + raise HTTPBadRequest(explanation=e.msg, + request=req, + content_type="text/plain") + # NOTE(zhiyan): A returned size of zero usually means + # the driver encountered an error. In this case the + # size provided by the client will be used as-is. + if (image_size_store and + image_size_store != image_size_meta): + msg = (_("Provided image size must match the stored" + " image size. 
(provided size: %(ps)d, " + "stored size: %(ss)d)") % + {"ps": image_size_meta, + "ss": image_size_store}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + location_data = {'url': location, 'metadata': {}, + 'status': 'active'} + image_meta = self._activate(req, image_id, location_data) + return image_meta + + def _validate_image_for_activation(self, req, id, values): + """Ensures that all required image metadata values are valid.""" + image = self.get_image_meta_or_404(req, id) + if 'disk_format' not in values: + values['disk_format'] = image['disk_format'] + if 'container_format' not in values: + values['container_format'] = image['container_format'] + if 'name' not in values: + values['name'] = image['name'] + + values = validate_image_meta(req, values) + return values + + @utils.mutating + def create(self, req, image_meta, image_data): + """ + Adds a new image to daisy. Four scenarios exist when creating an + image: + + 1. If the image data is available directly for upload, create can be + passed the image data as the request body and the metadata as the + request headers. The image will initially be 'queued', during + upload it will be in the 'saving' status, and then 'killed' or + 'active' depending on whether the upload completed successfully. + + 2. If the image data exists somewhere else, you can upload indirectly + from the external source using the x-glance-api-copy-from header. + Once the image is uploaded, the external store is not subsequently + consulted, i.e. the image content is served out from the configured + glance image store. State transitions are as for option #1. + + 3. If the image data exists somewhere else, you can reference the + source using the x-image-meta-location header. The image content + will be served out from the external store, i.e. is never uploaded + to the configured glance image store. + + 4. If the image data is not available yet, but you'd like reserve a + spot for it, you can omit the data and a record will be created in + the 'queued' state. This exists primarily to maintain backwards + compatibility with OpenStack/Rackspace API semantics. + + The request body *must* be encoded as application/octet-stream, + otherwise an HTTPBadRequest is returned. + + Upon a successful save of the image data and metadata, a response + containing metadata about the image is returned, including its + opaque identifier. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about image + :param image_data: Actual image data that is to be stored + + :raises HTTPBadRequest if x-image-meta-location is missing + and the request body is not application/octet-stream + image data. 
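+
+        A minimal illustrative request for scenario 2 above (image name,
+        formats and URL are placeholders, not defaults)::
+
+            POST /v1/images
+            x-image-meta-name: cirros
+            x-image-meta-disk-format: qcow2
+            x-image-meta-container-format: bare
+            x-glance-api-copy-from: http://example.com/cirros.qcow2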
+ """ + self._enforce(req, 'add_image') + is_public = image_meta.get('is_public') + if is_public: + self._enforce(req, 'publicize_image') + if Controller._copy_from(req): + self._enforce(req, 'copy_from') + if image_data or Controller._copy_from(req): + self._enforce(req, 'upload_image') + + self._enforce_create_protected_props(image_meta['properties'].keys(), + req) + + self._enforce_image_property_quota(image_meta, req=req) + + image_meta = self._reserve(req, image_meta) + id = image_meta['id'] + + image_meta = self._handle_source(req, id, image_meta, image_data) + + location_uri = image_meta.get('location') + if location_uri: + self.update_store_acls(req, id, location_uri, public=is_public) + + # Prevent client from learning the location, as it + # could contain security credentials + image_meta = redact_loc(image_meta) + + return {'image_meta': image_meta} + + @utils.mutating + def update(self, req, id, image_meta, image_data): + """ + Updates an existing image with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'modify_image') + is_public = image_meta.get('is_public') + if is_public: + self._enforce(req, 'publicize_image') + if Controller._copy_from(req): + self._enforce(req, 'copy_from') + if image_data or Controller._copy_from(req): + self._enforce(req, 'upload_image') + + orig_image_meta = self.get_image_meta_or_404(req, id) + orig_status = orig_image_meta['status'] + + # Do not allow any updates on a deleted image. + # Fix for LP Bug #1060930 + if orig_status == 'deleted': + msg = _("Forbidden to update deleted image.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + if req.context.is_admin is False: + # Once an image is 'active' only an admin can + # modify certain core metadata keys + for key in ACTIVE_IMMUTABLE: + if (orig_status == 'active' and image_meta.get(key) is not None + and image_meta.get(key) != orig_image_meta.get(key)): + msg = _("Forbidden to modify '%s' of active image.") % key + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + # The default behaviour for a PUT /images/ is to + # override any properties that were previously set. This, however, + # leads to a number of issues for the common use case where a caller + # registers an image with some properties and then almost immediately + # uploads an image file along with some more properties. Here, we + # check for a special header value to be false in order to force + # properties NOT to be purged. However we also disable purging of + # properties if an image file is being uploaded... 
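+        # Illustrative example (the image id is a placeholder): a client
+        # that wants to add metadata without wiping properties it set
+        # earlier sends
+        #     PUT /v1/images/<id>
+        #     x-glance-registry-purge-props: false
+        # With the default of True, and no image data in the request,
+        # any property not resent in the PUT is purged.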
+ purge_props = req.headers.get('x-glance-registry-purge-props', True) + purge_props = (strutils.bool_from_string(purge_props) and + image_data is None) + + if image_data is not None and orig_status != 'queued': + raise HTTPConflict(_("Cannot upload to an unqueued image")) + + # Only allow the Location|Copy-From fields to be modified if the + # image is in queued status, which indicates that the user called + # POST /images but originally supply neither a Location|Copy-From + # field NOR image data + location = self._external_source(image_meta, req) + reactivating = orig_status != 'queued' and location + activating = orig_status == 'queued' and (location or image_data) + + # Make image public in the backend store (if implemented) + orig_or_updated_loc = location or orig_image_meta.get('location') + if orig_or_updated_loc: + try: + self.update_store_acls(req, id, orig_or_updated_loc, + public=is_public) + except store.BadStoreUri: + msg = _("Invalid location: %s") % location + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + if reactivating: + msg = _("Attempted to update Location field for an image " + "not in queued status.") + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + # ensure requester has permissions to create/update/delete properties + # according to property-protections.conf + orig_keys = set(orig_image_meta['properties']) + new_keys = set(image_meta['properties']) + self._enforce_update_protected_props( + orig_keys.intersection(new_keys), image_meta, + orig_image_meta, req) + self._enforce_create_protected_props( + new_keys.difference(orig_keys), req) + if purge_props: + self._enforce_delete_protected_props( + orig_keys.difference(new_keys), image_meta, + orig_image_meta, req) + + self._enforce_image_property_quota(image_meta, + orig_image_meta=orig_image_meta, + purge_props=purge_props, + req=req) + + try: + if location: + image_meta['size'] = self._get_size(req.context, image_meta, + location) + + image_meta = registry.update_image_metadata(req.context, + id, + image_meta, + purge_props) + + if activating: + image_meta = self._handle_source(req, id, image_meta, + image_data) + + except exception.Invalid as e: + msg = (_("Failed to update image metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find image to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update image: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Image operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('image.update', redact_loc(image_meta)) + + # Prevent client from learning the location, as it + # could contain security credentials + image_meta = redact_loc(image_meta) + + self._enforce_read_protected_props(image_meta, req) + + return {'image_meta': image_meta} + + @utils.mutating + def delete(self, req, id): + """ + Deletes the image and all its chunks from the Glance + + :param req: The WSGI/Webob Request object + :param id: The opaque image identifier + + :raises HttpBadRequest if image registry is invalid + :raises HttpNotFound if image or any chunk is not available + :raises HttpUnauthorized if image or any chunk is not + deleteable by the requesting user + """ + self._enforce(req, 'delete_image') + + image = self.get_image_meta_or_404(req, id) + if image['protected']: + msg = _("Image is protected") + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + if image['status'] == 'pending_delete': + msg = (_("Forbidden to delete a %s image.") % + image['status']) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + elif image['status'] == 'deleted': + msg = _("Image %s not found.") % id + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, request=req, + content_type="text/plain") + + if image['location'] and CONF.delayed_delete: + status = 'pending_delete' + else: + status = 'deleted' + + ori_status = image['status'] + + try: + # Update the image from the registry first, since we rely on it + # for authorization checks. + # See https://bugs.launchpad.net/glance/+bug/1065187 + image = registry.update_image_metadata(req.context, id, + {'status': status}) + + try: + # The image's location field may be None in the case + # of a saving or queued image, therefore don't ask a backend + # to delete the image if the backend doesn't yet store it. 
+ # See https://bugs.launchpad.net/glance/+bug/747799 + if image['location']: + for loc_data in image['location_data']: + if loc_data['status'] == 'active': + upload_utils.initiate_deletion(req, loc_data, id) + except Exception: + with excutils.save_and_reraise_exception(): + registry.update_image_metadata(req.context, id, + {'status': ori_status}) + + registry.delete_image_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find image to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete image: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("Image %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + self.notifier.info('image.delete', redact_loc(image)) + return Response(body='', status=200) + + def get_store_or_400(self, request, scheme): + """ + Grabs the storage backend for the supplied store name + or raises an HTTPBadRequest (400) response + + :param request: The WSGI/Webob Request object + :param scheme: The backend store scheme + + :raises HTTPBadRequest if store does not exist + """ + try: + return store.get_store_from_scheme(scheme) + except store.UnknownScheme: + msg = _("Store for scheme %s not found") % scheme + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=request, + content_type='text/plain') + + +class ImageDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + try: + result['image_meta'] = utils.get_image_meta_from_headers(request) + except exception.InvalidParameterValue as e: + msg = utils.exception_to_str(e) + LOG.warn(msg, exc_info=True) + raise HTTPBadRequest(explanation=e.msg, request=request) + + image_meta = result['image_meta'] + image_meta = validate_image_meta(request, image_meta) + if request.content_length: + image_size = request.content_length + elif 'size' in image_meta: + image_size = image_meta['size'] + else: + image_size = None + + data = request.body_file if self.has_body(request) else None + + if image_size is None and data is not None: + data = utils.LimitingReader(data, CONF.image_size_cap) + + # NOTE(bcwaldon): this is a hack to make sure the downstream code + # gets the correct image data + request.body_file = data + + elif image_size > CONF.image_size_cap: + max_image_size = CONF.image_size_cap + msg = (_("Denying attempt to upload image larger than %d" + " bytes.") % max_image_size) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, request=request) + + result['image_data'] = data + return result + + def create(self, request): + return self._deserialize(request) + + def update(self, request): + return self._deserialize(request) + + +class ImageSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def _inject_location_header(self, response, image_meta): + location = self._get_image_location(image_meta) + response.headers['Location'] = location.encode('utf-8') + + def _inject_checksum_header(self, response, 
image_meta): + if image_meta['checksum'] is not None: + response.headers['ETag'] = image_meta['checksum'].encode('utf-8') + + def _inject_image_meta_headers(self, response, image_meta): + """ + Given a response and mapping of image metadata, injects + the Response with a set of HTTP headers for the image + metadata. Each main image metadata field is injected + as a HTTP header with key 'x-image-meta-' except + for the properties field, which is further broken out + into a set of 'x-image-meta-property-' headers + + :param response: The Webob Response object + :param image_meta: Mapping of image metadata + """ + headers = utils.image_meta_to_http_headers(image_meta) + + for k, v in headers.items(): + response.headers[k.encode('utf-8')] = v.encode('utf-8') + + def _get_image_location(self, image_meta): + """Build a relative url to reach the image defined by image_meta.""" + return "/v1/images/%s" % image_meta['id'] + + def meta(self, response, result): + image_meta = result['image_meta'] + self._inject_image_meta_headers(response, image_meta) + self._inject_checksum_header(response, image_meta) + return response + + def show(self, response, result): + image_meta = result['image_meta'] + + image_iter = result['image_iterator'] + # image_meta['size'] should be an int, but could possibly be a str + expected_size = int(image_meta['size']) + response.app_iter = common.size_checked_iter( + response, image_meta, expected_size, image_iter, self.notifier) + # Using app_iter blanks content-length, so we set it here... + response.headers['Content-Length'] = str(image_meta['size']) + response.headers['Content-Type'] = 'application/octet-stream' + + self._inject_image_meta_headers(response, image_meta) + self._inject_checksum_header(response, image_meta) + + return response + + def update(self, response, result): + image_meta = result['image_meta'] + response.body = self.to_json(dict(image=image_meta)) + response.headers['Content-Type'] = 'application/json' + self._inject_checksum_header(response, image_meta) + return response + + def create(self, response, result): + image_meta = result['image_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(image=image_meta)) + self._inject_location_header(response, image_meta) + self._inject_checksum_header(response, image_meta) + return response + + +def create_resource(): + """Images resource factory method""" + deserializer = ImageDeserializer() + serializer = ImageSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/install.py b/code/daisy/daisy/api/v1/install.py new file mode 100755 index 00000000..49768bba --- /dev/null +++ b/code/daisy/daisy/api/v1/install.py @@ -0,0 +1,405 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/hosts endpoint for Daisy v1 API +""" +import time +import traceback +import webob.exc + +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError + +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.api import policy +import daisy.api.v1 + +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +import daisy.registry.client.v1.api as registry +from daisy.api.v1 import controller +from daisy.api.v1 import filters +import daisy.api.backends.common as daisy_cmn +from daisy.api.backends import driver +from daisy.api.backends import os as os_handle + +try: + import simplejson as json +except ImportError: + import json + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +# if some backends have order constraint, please add here +# if backend not in the next three order list, we will be +# think it does't have order constraint. +BACKENDS_INSTALL_ORDER = ['proton', 'zenic', 'tecs'] +BACKENDS_UPGRADE_ORDER = ['proton', 'zenic', 'tecs'] +BACKENDS_UNINSTALL_ORDER = [] + + +def get_deployment_backends(req, cluster_id, backends_order): + cluster_roles = daisy_cmn.get_cluster_roles_detail(req,cluster_id) + cluster_backends = set([role['deployment_backend'] for role in cluster_roles if daisy_cmn.get_hosts_of_role(req, role['id'])]) + ordered_backends = [backend for backend in backends_order if backend in cluster_backends] + other_backends = [backend for backend in cluster_backends if backend not in backends_order] + deployment_backends =ordered_backends + other_backends + return deployment_backends + +class InstallTask(object): + """ + Class for install OS and TECS. + """ + """ Definition for install states.""" + def __init__(self, req, cluster_id): + self.req = req + self.cluster_id = cluster_id + + def _backends_install(self): + backends = get_deployment_backends(self.req, self.cluster_id, BACKENDS_INSTALL_ORDER) + if not backends: + LOG.info(_("No backends need to install.")) + return + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + backend_driver.install(self.req, self.cluster_id) + # this will be raise raise all the exceptions of the thread to log file + def run(self): + try: + self._run() + except Exception as e: + LOG.exception(e.message) + + def _run(self): + """ + Exectue os installation with sync mode. + :return: + """ + # get hosts config which need to install OS + all_hosts_need_os = os_handle.get_cluster_hosts_config(self.req, self.cluster_id) + if all_hosts_need_os: + hosts_with_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] == 'with-role'] + hosts_without_role_need_os = [host_detail for host_detail in all_hosts_need_os if host_detail['status'] != 'with-role'] + else: + LOG.info(_("No host need to install os, begin to install " + "backends for cluster %s." 
% self.cluster_id)) + self._backends_install() + return + + run_once_flag = True + # if no hosts with role need os, install backend applications immediately + if not hosts_with_role_need_os: + run_once_flag = False + role_hosts_need_os = [] + LOG.info(_("All of hosts with role is 'active', begin to install " + "backend applications for cluster %s first." % self.cluster_id)) + self._backends_install() + else: + role_hosts_need_os = [host_detail['id'] for host_detail in hosts_with_role_need_os] + + # hosts with role put the head of the list + order_hosts_need_os = hosts_with_role_need_os + hosts_without_role_need_os + while order_hosts_need_os: + os_install = os_handle.OSInstall(self.req, self.cluster_id) + #all os will be installed batch by batch with max_parallel_os_number which was set in daisy-api.conf + (order_hosts_need_os,role_hosts_need_os) = os_install.install_os(order_hosts_need_os,role_hosts_need_os) + # after a batch of os install over, judge if all role hosts install os completely, + # if role_hosts_need_os is empty, install TECS immediately + if run_once_flag and not role_hosts_need_os: + run_once_flag = False + #wait to reboot os after new os installed + time.sleep(10) + LOG.info(_("All hosts with role install successfully, " + "begin to install backend applications for cluster %s." % self.cluster_id)) + self._backends_install() + + +class Controller(controller.BaseController): + """ + WSGI controller for hosts resource in Daisy v1 API + + The hosts resource API is a RESTful web service for host data. The API + is as follows:: + + GET /hosts -- Returns a set of brief metadata about hosts + GET /hosts/detail -- Returns a set of detailed metadata about + hosts + HEAD /hosts/ -- Return metadata about an host with id + GET /hosts/ -- Return host data for host with id + POST /hosts -- Store host data and return metadata about the + newly-stored host + PUT /hosts/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /hosts/ -- Delete the host with id + """ + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % cluster_id + raise webob.exc.HTTPNotFound(msg) + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. 
+ + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + @utils.mutating + def install_cluster(self, req, install_meta): + """ + Install TECS to a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + cluster_id = install_meta['cluster_id'] + self._enforce(req, 'install_cluster') + self._raise_404_if_cluster_deleted(req, cluster_id) + + if install_meta.get("deployment_interface", None): + os_handle.pxe_server_build(req, install_meta) + return {"status": "pxe is installed"} + + # if have hosts need to install os, TECS installataion executed in InstallTask + os_install_obj = InstallTask(req, cluster_id) + os_install_thread = Thread(target=os_install_obj.run) + os_install_thread.start() + return {"status":"begin install"} + + @utils.mutating + def uninstall_cluster(self, req, cluster_id): + """ + Uninstall TECS to a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + self._enforce(req, 'uninstall_cluster') + self._raise_404_if_cluster_deleted(req, cluster_id) + + backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + uninstall_thread = Thread(target=backend_driver.uninstall, args=(req, cluster_id)) + uninstall_thread.start() + return {"status":"begin uninstall"} + + @utils.mutating + def uninstall_progress(self, req, cluster_id): + self._enforce(req, 'uninstall_progress') + self._raise_404_if_cluster_deleted(req, cluster_id) + + all_nodes = {} + backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + if not backends: + LOG.info(_("No backends need to uninstall.")) + return all_nodes + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + nodes_process = backend_driver.uninstall_progress(req, cluster_id) + all_nodes.update(nodes_process) + return all_nodes + + + @utils.mutating + def update_cluster(self, req, cluster_id): + """ + Uninstall TECS to a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + self._enforce(req, 'update_cluster') + self._raise_404_if_cluster_deleted(req, cluster_id) + + backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER) + if not backends: + LOG.info(_("No backends need to update.")) + return {"status":""} + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + update_thread = Thread(target=backend_driver.upgrade, args=(req, cluster_id)) + update_thread.start() + return {"status":"begin update"} + + @utils.mutating + def update_progress(self, req, cluster_id): + self._enforce(req, 'update_progress') + self._raise_404_if_cluster_deleted(req, cluster_id) + + backends = get_deployment_backends(req, cluster_id, BACKENDS_UPGRADE_ORDER) + all_nodes = {} + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + nodes_process = backend_driver.upgrade_progress(req, cluster_id) + all_nodes.update(nodes_process) + return all_nodes + + @utils.mutating + def export_db(self, req, install_meta): + """ + Export daisy db data to tecs.conf and HA.conf. 
+ + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-install-cluster is missing + """ + self._enforce(req, 'export_db') + cluster_id = install_meta['cluster_id'] + self._raise_404_if_cluster_deleted(req, cluster_id) + + all_config_files = {} + backends = get_deployment_backends(req, cluster_id, BACKENDS_INSTALL_ORDER) + if not backends: + LOG.info(_("No backends need to export.")) + return all_config_files + for backend in backends: + backend_driver = driver.load_deployment_dirver(backend) + backend_config_files = backend_driver.export_db(req, cluster_id) + all_config_files.update(backend_config_files) + return all_config_files + + @utils.mutating + def update_disk_array(self, req, cluster_id): + """ + update TECS Disk Array config for a cluster. + + :param req: The WSGI/Webob Request object + + :raises HTTPBadRequest if x-cluster is missing + """ + self._enforce(req, 'update_disk_array') + self._raise_404_if_cluster_deleted(req, cluster_id) + + tecs_backend_name = 'tecs' + backends = get_deployment_backends(req, cluster_id, BACKENDS_UNINSTALL_ORDER) + if tecs_backend_name not in backends: + message = "No tecs backend" + LOG.info(_(message)) + else: + backend_driver = driver.load_deployment_dirver(tecs_backend_name) + message = backend_driver.update_disk_array(req, cluster_id) + return {'status':message} + + +class InstallDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["install_meta"] = utils.get_dict_meta(request) + return result + + def install_cluster(self, request): + return self._deserialize(request) + + def export_db(self, request): + return self._deserialize(request) + + def update_disk_array(self, request): + return {} + +class InstallSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def install_cluster(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def export_db(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def update_disk_array(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + +def create_resource(): + """Image members resource factory method""" + deserializer = InstallDeserializer() + serializer = InstallSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/members.py b/code/daisy/daisy/api/v1/members.py new file mode 100755 index 00000000..ef038e09 --- /dev/null +++ b/code/daisy/daisy/api/v1/members.py @@ -0,0 +1,278 @@ +# Copyright 2012 OpenStack Foundation. +# Copyright 2013 NTT corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_log import log as logging +import webob.exc + +from daisy.api import policy +from daisy.api.v1 import controller +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +CONF = cfg.CONF +CONF.import_opt('image_member_quota', 'daisy.common.config') + + +class Controller(controller.BaseController): + + def __init__(self): + self.policy = policy.Enforcer() + + def _enforce(self, req, action): + """Authorize an action against our policies""" + try: + self.policy.enforce(req.context, action, {}) + except exception.Forbidden: + raise webob.exc.HTTPForbidden() + + def _raise_404_if_host_deleted(self, req, host_id): + host = self.get_host_meta_or_404(req, host_id) + if host['deleted']: + msg = _("Host with identifier %s has been deleted.") % host_id + raise webob.exc.HTTPNotFound(msg) + + def _raise_404_if_project_deleted(self, req, cluster_id): + project = self.get_cluster_meta_or_404(req, cluster_id) + if project['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % cluster_id + raise webob.exc.HTTPNotFound(msg) + + # def get_cluster_hosts(self, req, cluster_id, host_id=None): + # """ + # Return a list of dictionaries indicating the members of the + # image, i.e., those tenants the image is shared with. +# + # :param req: the Request object coming from the wsgi layer + # :param image_id: The opaque image identifier + # :retval The response body is a mapping of the following form:: + + # {'members': [ + # {'host_id': , ...}, ... + # ]} + # """ + # self._enforce(req, 'get_cluster_hosts') + # self._raise_404_if_project_deleted(req, cluster_id) +# + # try: + # members = registry.get_cluster_hosts(req.context, cluster_id, host_id) + # except exception.NotFound: + # msg = _("Project with identifier %s not found") % cluster_id + # LOG.warn(msg) + # raise webob.exc.HTTPNotFound(msg) + # except exception.Forbidden: + # msg = _("Unauthorized project access") + # LOG.warn(msg) + # raise webob.exc.HTTPForbidden(msg) + # return dict(members=members) + + @utils.mutating + def delete(self, req, image_id, id): + """ + Removes a membership from the image. + """ + self._check_can_access_image_members(req.context) + self._enforce(req, 'delete_member') + self._raise_404_if_image_deleted(req, image_id) + + try: + registry.delete_member(req.context, image_id, id) + self._update_store_acls(req, image_id) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + + return webob.exc.HTTPNoContent() + + @utils.mutating + def add_cluster_host(self, req, cluster_id, host_id, body=None): + """ + Adds a host with host_id to project with cluster_id. 
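+
+        On success the pairing is recorded through
+        registry.add_cluster_host() and a 204 No Content response is
+        returned; unknown ids surface as 404 and invalid input as 400.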
+ """ + self._enforce(req, 'add_cluster_host') + self._raise_404_if_project_deleted(req, cluster_id) + self._raise_404_if_host_deleted(req, host_id) + + try: + registry.add_cluster_host(req.context, cluster_id, host_id) + except exception.Invalid as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + + return webob.exc.HTTPNoContent() + + @utils.mutating + def delete_cluster_host(self, req, cluster_id, host_id): + """ + Delete a host with host_id from project with cluster_id. + """ + self._enforce(req, 'delete_cluster_host') + self._raise_404_if_project_deleted(req, cluster_id) + self._raise_404_if_host_deleted(req, host_id) + + try: + registry.delete_cluster_host(req.context, cluster_id, host_id) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + + return webob.exc.HTTPNoContent() + + def default(self, req, image_id, id, body=None): + """This will cover the missing 'show' and 'create' actions""" + raise webob.exc.HTTPMethodNotAllowed() + + def _enforce_image_member_quota(self, req, attempted): + if CONF.image_member_quota < 0: + # If value is negative, allow unlimited number of members + return + + maximum = CONF.image_member_quota + if attempted > maximum: + msg = _("The limit has been exceeded on the number of allowed " + "image members for this image. Attempted: %(attempted)s, " + "Maximum: %(maximum)s") % {'attempted': attempted, + 'maximum': maximum} + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req) + + @utils.mutating + def update(self, req, image_id, id, body=None): + """ + Adds a membership to the image, or updates an existing one. + If a body is present, it is a dict with the following format:: + + {"member": { + "can_share": [True|False] + }} + + If "can_share" is provided, the member's ability to share is + set accordingly. If it is not provided, existing memberships + remain unchanged and new memberships default to False. + """ + self._check_can_access_image_members(req.context) + self._enforce(req, 'modify_member') + self._raise_404_if_image_deleted(req, image_id) + + new_number_of_members = len(registry.get_image_members(req.context, + image_id)) + 1 + self._enforce_image_member_quota(req, new_number_of_members) + + # Figure out can_share + can_share = None + if body and 'member' in body and 'can_share' in body['member']: + can_share = bool(body['member']['can_share']) + try: + registry.add_member(req.context, image_id, id, can_share) + self._update_store_acls(req, image_id) + except exception.Invalid as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + + return webob.exc.HTTPNoContent() + + @utils.mutating + def update_all(self, req, image_id, body): + """ + Replaces the members of the image with those specified in the + body. 
The body is a dict with the following format:: + + {"memberships": [ + {"member_id": , + ["can_share": [True|False]]}, ... + ]} + """ + self._check_can_access_image_members(req.context) + self._enforce(req, 'modify_member') + self._raise_404_if_image_deleted(req, image_id) + + memberships = body.get('memberships') + if memberships: + new_number_of_members = len(body['memberships']) + self._enforce_image_member_quota(req, new_number_of_members) + + try: + registry.replace_members(req.context, image_id, body) + self._update_store_acls(req, image_id) + except exception.Invalid as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + + return webob.exc.HTTPNoContent() + + def get_host_projects(self, req, host_id): + """ + Retrieves list of image memberships for the given member. + + :param req: the Request object coming from the wsgi layer + :param id: the opaque member identifier + :retval The response body is a mapping of the following form:: + + {'multi_projects': [ + {'cluster_id': , ...}, ... + ]} + """ + try: + members = registry.get_host_projects(req.context, host_id) + except exception.NotFound as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPForbidden(explanation=e.msg) + return dict(multi_projects=members) + + def _update_store_acls(self, req, image_id): + image_meta = self.get_image_meta_or_404(req, image_id) + location_uri = image_meta.get('location') + public = image_meta.get('is_public') + self.update_store_acls(req, image_id, location_uri, public) + + +def create_resource(): + """Image members resource factory method""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/networks.py b/code/daisy/daisy/api/v1/networks.py new file mode 100755 index 00000000..40473473 --- /dev/null +++ b/code/daisy/daisy/api/v1/networks.py @@ -0,0 +1,691 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/hosts endpoint for Daisy v1 API +""" +import copy +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +SUPPORT_NETWORK_TYPE = ('PUBLIC', 'PRIVATE', 'STORAGE', 'MANAGEMENT', 'EXTERNAL', 'DEPLOYMENT', 'VXLAN') +SUPPORT_NETWORK_TEMPLATE_TYPE = ('custom', 'template', 'default') +SUPPORT_ML2_TYPE = ('ovs', 'sriov(direct)', 'sriov(macvtap)', + 'ovs,sriov(direct)', 'ovs,sriov(macvtap)') +SUPPORT_NETWORK_CAPABILITY = ('high', 'low') + + +class Controller(controller.BaseController): + """ + WSGI controller for networks resource in Daisy v1 API + + The networks resource API is a RESTful web service for host data. The API + is as follows:: + + GET /networks -- Returns a set of brief metadata about networks + GET /networks/detail -- Returns a set of detailed metadata about + networks + HEAD /networks/ -- Return metadata about an host with id + GET /networks/ -- Return host data for host with id + POST /networks -- Store host data and return metadata about the + newly-stored host + PUT /networks/ -- Update host metadata and/or upload host + data for a previously-reserved host + DELETE /networks/ -- Delete the host with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _raise_404_if_network_deleted(self, req, network_id): + network = self.get_network_meta_or_404(req, network_id) + if network['deleted']: + msg = _("Network with identifier %s has been deleted.") % network_id + raise HTTPNotFound(msg) + def _raise_404_if_cluster_delete(self, req, cluster_id): + cluster_id = self.get_cluster_meta_or_404(req, cluster_id) + if cluster_id['deleted']: + msg = _("cluster_id with identifier %s has been deleted.") % cluster_id + raise HTTPNotFound(msg) + + def _get_network_name_by_cluster_id(self, context, cluster_id): + networks = registry.get_networks_detail(context, cluster_id) + network_name_list = [] + for network in networks: + network_name_list.append(network['name']) + return network_name_list + + + def _get_filters(self, req): + """ + Return a dictionary of query param 
filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def validate_ip_format(self, ip_str): + ''' + valid ip_str format = '10.43.178.9' + invalid ip_str format : '123. 233.42.12', spaces existed in field + '3234.23.453.353', out of range + '-2.23.24.234', negative number in field + '1.2.3.4d', letter in field + '10.43.1789', invalid format + ''' + valid_fromat = False + if ip_str.count('.') == 3 and \ + all(num.isdigit() and 0<=int(num)<256 for num in ip_str.rstrip().split('.')): + valid_fromat = True + if valid_fromat == False: + msg = (_("%s invalid ip format!") % ip_str) + LOG.warn(msg) + raise HTTPForbidden(msg) + + def _ip_into_int(self, ip): + """ + Switch ip string to decimalism integer.. + :param ip: ip string + :return: decimalism integer + """ + return reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + + def _is_in_network_range(self, ip, network): + """ + Check ip is in range + :param ip: Ip will be checked, like:192.168.1.2. + :param network: Ip range,like:192.168.0.0/24. + :return: If ip in range,return True,else return False. + """ + network = network.split('/') + mask = ~(2**(32 - int(network[1])) - 1) + return (self._ip_into_int(ip) & mask) == (self._ip_into_int(network[0]) & mask) + + def _verify_uniqueness_of_network_name(self, req, network_list, network_meta, is_update = False): + """ + Network name is match case and uniqueness in cluster. + :param req: + :param network_list: network plane in cluster + :param network_meta: network plane need be verified + :return: + """ + if not network_list or not network_meta or not network_meta.get('name', None): + msg = _("Input params invalid for verifying uniqueness of network name.") + raise HTTPBadRequest(msg, request=req, content_type="text/plain") + + network_name = network_meta['name'] + for network in network_list['networks']: + if (is_update and + network_name == network['name'] and + network_meta['id'] == network['id']): + return + + # network name don't match case + network_name_list = [network['name'].lower() for network in + network_list['networks'] if network.get('name', None)] + if network_name.lower() in network_name_list: + msg = _("Name of network isn't match case and %s already exits in the cluster." % network_name) + raise HTTPConflict(msg, request=req, content_type="text/plain") + + if not is_update: + # Input networks type can't be same with db record which is all ready exit, + # except PRIVATE network. 
+ network_type_exist_list = \ + [network['network_type'] for network in network_list['networks'] + if network.get('network_type', None) and network['network_type'] != "PRIVATE" + and network['network_type'] != "STORAGE"] + if network_meta.get("network_type", None) in network_type_exist_list: + msg = _("The %s network plane %s must be only, except PRIVATE network." % + (network_meta['network_type'], network_name)) + raise HTTPConflict(msg, request=req, content_type="text/plain") + + def _valid_vlan_range(self, req, network_meta): + if ((network_meta.has_key('vlan_start') and not network_meta.has_key('vlan_end')) or + (not network_meta.has_key('vlan_start') and network_meta.has_key('vlan_end'))): + raise HTTPBadRequest(explanation="vlan-start and vlan-end must be appeared at the same time", request=req) + if network_meta.has_key('vlan_start'): + if not (int(network_meta['vlan_start']) >= 1 and + int(network_meta['vlan_start']) <= 4094): + raise HTTPBadRequest(explanation="vlan-start must be a integer in '1~4096'", request=req) + if network_meta.has_key('vlan_end'): + if not (int(network_meta['vlan_end']) >= 1 and + int(network_meta['vlan_end']) <= 4094): + raise HTTPBadRequest(explanation="vlan-end must be a integer in '1~4096'", request=req) + if int(network_meta['vlan_start']) > int(network_meta['vlan_end']): + raise HTTPBadRequest(explanation="vlan-start must be less than vlan-end", request=req) + + @utils.mutating + def add_network(self, req, network_meta): + """ + Adds a new networks to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about network + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'add_network') + cluster_id = network_meta.get('cluster_id',None) + if cluster_id: + self._raise_404_if_cluster_delete(req, cluster_id) + network_list = self.detail(req, cluster_id) + self._verify_uniqueness_of_network_name(req, network_list, network_meta) + # else: + # if network_meta.get('type',None) != "template": + # raise HTTPBadRequest(explanation="cluster id must be given", request=req) + network_name=network_meta.get('name',None) + network_name_split = network_name.split('_') + for network_name_info in network_name_split : + if not network_name_info.isalnum(): + raise ValueError('network name must be numbers or letters or underscores !') + if not network_meta.has_key('network_type'): + raise HTTPBadRequest(explanation="network-type must be given", request=req) + if network_meta['network_type'] not in SUPPORT_NETWORK_TYPE: + raise HTTPBadRequest(explanation="unsupported network-type", request=req) + + + if (network_meta.has_key('type') and + network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): + raise HTTPBadRequest(explanation="unsupported type", request=req) + + if (network_meta.has_key('capability') and + network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): + raise HTTPBadRequest(explanation="unsupported capability type", request=req) + + self._valid_vlan_range(req, network_meta) + + if network_meta.get('ip_ranges', None): + cidr = None + if not network_meta.has_key('cidr'): + msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + else: + cidr = network_meta['cidr'] + cidr_division = cidr.split('/') + if len(cidr_division) != 2 or ( cidr_division[1] \ + and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): + msg = (_("Wrong CIDR format.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + 
self.validate_ip_format(cidr_division[0]) + + ip_ranges = eval(network_meta['ip_ranges']) + last_ip_range_end = 0 + int_ip_ranges_list = list() + sorted_int_ip_ranges_list = list() + for ip_pair in ip_ranges: + if ['start', 'end'] != ip_pair.keys(): + msg = (_("IP range was not start with 'start:' or end with 'end:'.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + ip_start = ip_pair['start'] + ip_end = ip_pair['end'] + self.validate_ip_format(ip_start) #check ip format + self.validate_ip_format(ip_end) + + if not self._is_in_network_range(ip_start, cidr): + msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) + LOG.warn(msg) + raise HTTPForbidden(msg) + + if not self._is_in_network_range(ip_end, cidr): + msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) + LOG.warn(msg) + raise HTTPForbidden(msg) + + #transform ip format to int when the string format is valid + int_ip_start = self._ip_into_int(ip_start) + int_ip_end = self._ip_into_int(ip_end) + + if int_ip_start > int_ip_end: + msg = (_("Wrong ip range format.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + int_ip_ranges_list.append([int_ip_start, int_ip_end]) + sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) + + for int_ip_range in sorted_int_ip_ranges_list: + if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: + msg = (_("Between ip ranges can not be overlap.")) + LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap + raise HTTPForbidden(msg) + else: + last_ip_range_end = int_ip_range[1] + + if network_meta.get('cidr', None) \ + and network_meta.get('vlan_id', None) \ + and cluster_id: + networks = registry.get_networks_detail(req.context, cluster_id) + for network in networks: + if network['cidr'] and network['vlan_id']: + if network_meta['cidr'] == network['cidr'] \ + and network_meta['vlan_id'] != network['vlan_id']: + msg = (_('Networks with the same cidr must ' + 'have the same vlan_id')) + raise HTTPBadRequest(explanation=msg) + if network_meta['vlan_id'] == network['vlan_id'] \ + and network_meta['cidr'] != network['cidr']: + msg = (_('Networks with the same vlan_id must ' + 'have the same cidr')) + raise HTTPBadRequest(explanation=msg) + + if network_meta.get('gateway', None) and network_meta.get('cidr', None): + gateway = network_meta['gateway'] + cidr = network_meta['cidr'] + + self.validate_ip_format(gateway) + return_flag = self._is_in_network_range(gateway, cidr) + if not return_flag: + msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' % (gateway, cidr))) + raise HTTPBadRequest(explanation=msg) + + network_meta = registry.add_network_metadata(req.context, network_meta) + return {'network_meta': network_meta} + + @utils.mutating + def delete_network(self, req, network_id): + """ + Deletes a network from Daisy. 
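+        Only networks whose type is 'custom' may be deleted.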
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about host + + :raises HTTPBadRequest if x-host-name is missing + """ + self._enforce(req, 'delete_network') + #self._raise_404_if_cluster_deleted(req, cluster_id) + #self._raise_404_if_network_deleted(req, network_id) + network = self.get_network_meta_or_404(req, network_id) + if network['deleted']: + msg = _("Network with identifier %s has been deleted.") % network_id + raise HTTPNotFound(msg) + if network['type'] != 'custom': + msg = _("Type of network was not custom, can not delete this network.") + raise HTTPForbidden(msg) + try: + registry.delete_network_metadata(req.context, network_id) + except exception.NotFound as e: + msg = (_("Failed to find network to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete network: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("Network %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('host.delete', host) + return Response(body='', status=200) + + @utils.mutating + def get_network(self, req, id): + """ + Returns metadata about an network in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque host identifier + + :raises HTTPNotFound if host metadata is not available to user + """ + self._enforce(req, 'get_network') + network_meta = self.get_network_meta_or_404(req, id) + return {'network_meta': network_meta} + + def get_all_network(self, req): + """ + List all network. + :param req: + :return: + """ + self._enforce(req, 'get_all_network') + params = self._get_query_params(req) + try: + networks = registry.get_all_networks(req.context,**params) + except Exception: + raise HTTPBadRequest(explanation="Get all networks failed.", request=req) + return dict(networks=networks) + + def detail(self, req, id): + """ + Returns detailed information for all available hosts + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'networks': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + cluster_id = self._raise_404_if_cluster_delete(req, id) + self._enforce(req, 'get_networks') + params = self._get_query_params(req) + try: + networks = registry.get_networks_detail(req.context, id,**params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(networks=networks) + + @utils.mutating + def update_network(self, req, network_id, network_meta): + """ + Updates an existing host with the registry. 
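+        Deleted networks cannot be updated, and the request may not set type to 'template'.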
+ + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + if network_meta.has_key('name'): + network_name=network_meta.get('name',None) + network_name_split = network_name.split('_') + for network_name_info in network_name_split : + if not network_name_info.isalnum(): + raise ValueError('network name must be numbers or letters or underscores !') + self._enforce(req, 'update_network') + #orig_cluster_meta = self.get_cluster_meta_or_404(req, cluster_id) + orig_network_meta = self.get_network_meta_or_404(req, network_id) + # Do not allow any updates on a deleted network. + if orig_network_meta['deleted']: + msg = _("Forbidden to update deleted host.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + if (network_meta.has_key('network_type') and + network_meta['network_type'] not in SUPPORT_NETWORK_TYPE): + raise HTTPBadRequest(explanation="unsupported network-type", request=req) + if (network_meta.has_key('type') and + network_meta['type'] not in SUPPORT_NETWORK_TEMPLATE_TYPE): + raise HTTPBadRequest(explanation="unsupported type", request=req) + if (network_meta.has_key('type') and + network_meta['type'] == 'template'): + raise HTTPBadRequest(explanation="network template type is not allowed to update", request=req) + + + + if (network_meta.has_key('capability') and + network_meta['capability'] not in SUPPORT_NETWORK_CAPABILITY): + raise HTTPBadRequest(explanation="unsupported capability type", request=req) + + self._valid_vlan_range(req, network_meta) + + network_name = network_meta.get('name', None) + cluster_id = orig_network_meta['cluster_id'] + if network_name and cluster_id: + network_updated = copy.deepcopy(network_meta) + network_updated['id'] = network_id + network_type = network_meta.get('network_type', None) + network_updated['network_type'] = \ + orig_network_meta['network_type'] if not network_type else network_type + network_list = self.detail(req, cluster_id) + self._verify_uniqueness_of_network_name(req, network_list, network_updated, True) + + cidr = network_meta.get('cidr', orig_network_meta['cidr']) + vlan_id = network_meta.get('vlan_id', orig_network_meta['vlan_id']) + if cidr: + cidr_division = cidr.split('/') + if len(cidr_division) != 2 or ( cidr_division[1] \ + and int(cidr_division[1]) > 32 or int(cidr_division[1]) < 0): + msg = (_("Wrong CIDR format.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + self.validate_ip_format(cidr_division[0]) + + if cidr and vlan_id and cluster_id: + networks = registry.get_networks_detail(req.context, cluster_id) + for network in networks: + if network['cidr'] and network['vlan_id']: + if cidr == network['cidr'] \ + and vlan_id != network['vlan_id'] \ + and network['id'] != network_id: + msg = (_('Networks with the same cidr must have ' + 'the same vlan_id')) + raise HTTPBadRequest(explanation=msg) + if vlan_id == network['vlan_id'] \ + and cidr != network['cidr'] \ + and network['id'] != network_id: + msg = (_('Networks with the same vlan_id must ' + 'have the same cidr')) + raise HTTPBadRequest(explanation=msg) + + if network_meta.get('ip_ranges', None): + if not cidr: + msg = (_("When ip range was specified, the CIDR parameter can not be empty.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + ip_ranges = eval(network_meta['ip_ranges']) + last_ip_range_end = 0 + int_ip_ranges_list = list() + sorted_int_ip_ranges_list = list() + for ip_pair in ip_ranges: + if ['start', 'end'] != ip_pair.keys(): + 
msg = (_("IP range was not start with 'start:' or end with 'end:'.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + ip_start = ip_pair['start'] + ip_end = ip_pair['end'] + self.validate_ip_format(ip_start) #check ip format + self.validate_ip_format(ip_end) + + if not self._is_in_network_range(ip_start, cidr): + msg = (_("IP address %s was not in the range of CIDR %s." % (ip_start, cidr))) + LOG.warn(msg) + raise HTTPForbidden(msg) + + if not self._is_in_network_range(ip_end, cidr): + msg = (_("IP address %s was not in the range of CIDR %s." % (ip_end, cidr))) + LOG.warn(msg) + raise HTTPForbidden(msg) + + #transform ip format to int when the string format is valid + int_ip_start = self._ip_into_int(ip_start) + int_ip_end = self._ip_into_int(ip_end) + + if int_ip_start > int_ip_end: + msg = (_("Wrong ip range format.")) + LOG.warn(msg) + raise HTTPForbidden(msg) + int_ip_ranges_list.append([int_ip_start, int_ip_end]) + sorted_int_ip_ranges_list = sorted(int_ip_ranges_list, key=lambda x : x[0]) + LOG.warn("sorted_int_ip_ranges_list: "% sorted_int_ip_ranges_list) + #check ip ranges overlap + for int_ip_range in sorted_int_ip_ranges_list: + if last_ip_range_end and last_ip_range_end >= int_ip_range[0]: + msg = (_("Between ip ranges can not be overlap.")) + LOG.warn(msg) # such as "[10, 15], [12, 16]", last_ip_range_end >= int_ip_range[0], this ip ranges were overlap + raise HTTPForbidden(msg) + else: + last_ip_range_end = int_ip_range[1] + + if network_meta.get('gateway', orig_network_meta['gateway']) and network_meta.get('cidr', orig_network_meta['cidr']): + gateway = network_meta.get('gateway', orig_network_meta['gateway']) + cidr = network_meta.get('cidr', orig_network_meta['cidr']) + self.validate_ip_format(gateway) + return_flag = self._is_in_network_range(gateway, cidr) + if not return_flag: + msg = (_('The gateway %s was not in the same segment with the cidr %s of management network.' % (gateway, cidr))) + raise HTTPBadRequest(explanation=msg) + + try: + network_meta = registry.update_network_metadata(req.context, + network_id, + network_meta) + except exception.Invalid as e: + msg = (_("Failed to update network metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find network to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update network: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Network operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('network.update', network_meta) + + return {'network_meta': network_meta} + +class HostDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["network_meta"] = utils.get_network_meta(request) + return result + + def add_network(self, request): + return self._deserialize(request) + + def update_network(self, request): + return self._deserialize(request) + +class HostSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_network(self, response, result): + network_meta = result['network_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(network=network_meta)) + return response + + def delete_network(self, response, result): + network_meta = result['network_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(network=network_meta)) + return response + + def get_network(self, response, result): + network_meta = result['network_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(network=network_meta)) + return response + +def create_resource(): + """Hosts resource factory method""" + deserializer = HostDeserializer() + serializer = HostSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) + diff --git a/code/daisy/daisy/api/v1/roles.py b/code/daisy/daisy/api/v1/roles.py new file mode 100755 index 00000000..1c47c2bf --- /dev/null +++ b/code/daisy/daisy/api/v1/roles.py @@ -0,0 +1,782 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
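The address checks used throughout the networks controller above (_ip_into_int, _is_in_network_range and the ip_ranges overlap rule in add_network/update_network) come down to integer arithmetic on dotted-quad addresses. A minimal standalone sketch of the same logic, for illustration only (the helper names here are hypothetical and not part of the patch):

    from functools import reduce  # a builtin on Python 2; imported here for clarity

    def ip_to_int(ip):
        # '192.168.0.5' -> 3232235525
        return reduce(lambda acc, octet: (acc << 8) + octet, map(int, ip.split('.')))

    def in_cidr(ip, cidr):
        # True when ip falls inside cidr, e.g. in_cidr('192.168.0.5', '192.168.0.0/24')
        net, prefix = cidr.split('/')
        mask = ~(2 ** (32 - int(prefix)) - 1)
        return (ip_to_int(ip) & mask) == (ip_to_int(net) & mask)

    def ranges_overlap(ip_ranges):
        # ip_ranges: a list of {'start': ..., 'end': ...} dicts, the shape add_network expects
        pairs = sorted(((ip_to_int(r['start']), ip_to_int(r['end'])) for r in ip_ranges),
                       key=lambda pair: pair[0])
        last_end = 0
        for start, end in pairs:
            if last_end and last_end >= start:
                return True   # e.g. ranges 10-15 and 12-16 overlap
            last_end = end
        return False

On top of this, add_network and update_network also require every start and end address to lie inside the network's cidr, and reject any range whose start address is greater than its end address.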
+ +""" +/roles endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE +SUPPORTED_DEPLOYMENT_BACKENDS = ('tecs', 'zenic', 'proton') +SUPPORTED_ROLE = ('CONTROLLER_LB', 'CONTROLLER_HA', 'COMPUTER', 'ZENIC_CTL', 'ZENIC_NFM', + 'ZENIC_MDB', 'PROTON', 'CHILD_CELL_1_COMPUTER', 'CONTROLLER_CHILD_CELL_1') +SUPPORT_DISK_LOCATION = ('local', 'share') + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for roles resource in Daisy v1 API + + The roles resource API is a RESTful web role for role data. The API + is as follows:: + + GET /roles -- Returns a set of brief metadata about roles + GET /roles/detail -- Returns a set of detailed metadata about + roles + HEAD /roles/ -- Return metadata about an role with id + GET /roles/ -- Return role data for role with id + POST /roles -- Store role data and return metadata about the + newly-stored role + PUT /roles/ -- Update role metadata and/or upload role + data for a previously-reserved role + DELETE /roles/ -- Delete the role with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. 
+ + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_host_deleted(self, req, host_id): + host = self.get_host_meta_or_404(req, host_id) + if host['deleted']: + msg = _("Node with identifier %s has been deleted.") % host_id + raise HTTPNotFound(msg) + def _raise_404_if_service_deleted(self, req, service_id): + service = self.get_service_meta_or_404(req, service_id) + if service['deleted']: + msg = _("Service with identifier %s has been deleted.") % service_id + raise HTTPNotFound(msg) + def _raise_404_if_config_set_deleted(self, req, config_set_id): + config_set = self.get_config_set_meta_or_404(req, config_set_id) + if config_set['deleted']: + msg = _("Config_Set with identifier %s has been deleted.") % config_set_id + raise HTTPNotFound(msg) + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("cluster with identifier %s has been deleted.") % cluster_id + raise HTTPNotFound(msg) + + def _get_service_name_list(self, req, role_service_id_list): + service_name_list = [] + for service_id in role_service_id_list: + service_meta = registry.get_service_metadata(req.context, service_id) + service_name_list.append(service_meta['name']) + return service_name_list + + def _get_host_disk_except_os_disk_by_info(self, host_info): + ''' + type(host_info): + host_disk_except_os_disk_lists: disk_size , type = int + ''' + #import pdb;pdb.set_trace() + host_disk_except_os_disk_lists = 0 + os_disk_m = host_info.get('root_lv_size', 51200) + swap_size_m = host_info.get('swap_lv_size', None) + if swap_size_m: + swap_size_m = (swap_size_m / 4)*4 + else: + swap_size_m = 0 + boot_partition_m = 400 + redundant_partiton_m = 600 + if not os_disk_m: + os_disk_m = 51200 + #host_disk = 1024 + host_disks = host_info.get('disks', None) + host_disk_size_m = 0 + if host_disks: + for key, value in host_disks.items(): + disk_size_b = str(value.get('size', None)) + disk_size_b_str = disk_size_b.strip().split()[0] + if disk_size_b_str: + disk_size_b_int = int(disk_size_b_str) + disk_size_m = disk_size_b_int//(1024*1024) + host_disk_size_m = host_disk_size_m + disk_size_m + host_disk_except_os_disk_lists = host_disk_size_m - os_disk_m - swap_size_m - boot_partition_m - redundant_partiton_m + LOG.warn('----start----host_disk_except_os_disk_lists: %s -----end--' % host_disk_except_os_disk_lists) + return host_disk_except_os_disk_lists + + def _check_host_validity(self, **paras): + ''' + paras['db_lv_size'], paras['glance_lv_size'] , paras['disk_size'] + ''' + disk_size = paras.get('disk_size', None) + LOG.warn('--------disk_size:----- %s'% disk_size) + if disk_size: + disk_size_m = int(disk_size) + else: + disk_size_m = 0 + if disk_size_m == 0: #Host hard disk size was 0, think that the host does not need to install the system + return #Don't need to ckeck the validity of hard disk size + + db_lv_size_m = paras.get('db_lv_size', 300) + if db_lv_size_m: + db_lv_size_m = int(db_lv_size_m) + else: + db_lv_size_m = 0 + + glance_lv_size_m = paras.get('glance_lv_size', 17100) + if glance_lv_size_m: + glance_lv_size_m = int(glance_lv_size_m) + else: + glance_lv_size_m = 0 + + nova_lv_size_m = paras.get('nova_lv_size', 0) + if nova_lv_size_m: + nova_lv_size_m = int(nova_lv_size_m) + else: + 
nova_lv_size_m = 0 + if nova_lv_size_m == -1: + nova_lv_size_m = 0 + glance_lv_size_m = (glance_lv_size_m/4)*4 + db_lv_size_m = (db_lv_size_m/4)*4 + nova_lv_size_m = (nova_lv_size_m/4)*4 + if glance_lv_size_m + db_lv_size_m + nova_lv_size_m > disk_size_m: + msg = _("There isn't enough disk space to specify database or glance or nova disk, please specify database or glance or nova disk size again") + LOG.debug(msg) + raise HTTPForbidden(msg) + + def _check_nodes_exist(self, req, nodes): + for role_host_id in nodes: + self._raise_404_if_host_deleted(req, role_host_id) + + def _check_services_exist(self, req, services): + for role_service_id in services: + self._raise_404_if_service_deleted(req, role_service_id) + + def _check_config_set_id_exist(self, req, config_set_id): + self._raise_404_if_config_set_deleted(req, config_set_id) + + def _check_glance_lv_value(self, req, glance_lv_value, role_name, service_name_list): + if int(glance_lv_value) < 0 and int(glance_lv_value) != -1: + msg = _("glance_lv_size can't be negative except -1.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + if not service_name_list or 'glance' not in service_name_list: + msg = _("service 'glance' is not in role %s, so can't " + "set the size of glance lv.") % role_name + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _check_db_lv_size(self, req, db_lv_size, service_name_list): + if int(db_lv_size) < 0 and int(db_lv_size) != -1 : + msg = _("The size of database disk can't be negative except -1.") + LOG.debug(msg) + raise HTTPForbidden(msg) + #Only the role with database service can be formulated the size of a database. + if 'mariadb' not in service_name_list and 'mongodb' not in service_name_list: + msg = _('The role without database service is unable ' + 'to specify the size of the database!') + LOG.debug(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _check_nova_lv_size(self, req, nova_lv_size, role_name): + if role_name != "COMPUTER": + msg = _("The role is not COMPUTER, it can't set logic " + "volume disk for nova.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + if int(nova_lv_size) < 0 and int(nova_lv_size) != -1: + msg = _("The nova_lv_size must be -1 or [0, N).") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except: + msg = _("The nova_lv_size must be -1 or [0, N).") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + def _check_all_lv_size(self, req, db_lv_size, glance_lv_size, nova_lv_size, + host_id_list, cluster_id, argws): + if db_lv_size or glance_lv_size or nova_lv_size: + for host_id in host_id_list: + host_disk_db_glance_nova_size = self.get_host_disk_db_glance_nova_size(req, host_id, cluster_id) + if host_disk_db_glance_nova_size['db_lv_size'] and db_lv_size and \ + int(db_lv_size) < int(host_disk_db_glance_nova_size['db_lv_size']): + argws['db_lv_size'] = host_disk_db_glance_nova_size['db_lv_size'] + else: + argws['db_lv_size'] = db_lv_size + if host_disk_db_glance_nova_size['glance_lv_size'] and glance_lv_size and \ + int(glance_lv_size) < int(host_disk_db_glance_nova_size['glance_lv_size']): + argws['glance_lv_size'] = host_disk_db_glance_nova_size['glance_lv_size'] + else: + argws['glance_lv_size'] = glance_lv_size + if host_disk_db_glance_nova_size['nova_lv_size'] and nova_lv_size and \ + int(nova_lv_size) < 
int(host_disk_db_glance_nova_size['nova_lv_size']): + argws['nova_lv_size'] = host_disk_db_glance_nova_size['nova_lv_size'] + else: + argws['nova_lv_size'] = nova_lv_size + argws['disk_size'] = host_disk_db_glance_nova_size['disk_size'] + LOG.warn('--------host(%s) check_host_validity argws:----- %s'% (host_id, argws)) + self._check_host_validity(**argws) + + def _check_deployment_backend(self, req, deployment_backend): + if deployment_backend not in SUPPORTED_DEPLOYMENT_BACKENDS: + msg = "deployment backend '%s' is not supported." % deployment_backend + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + + def _check_role_type_in_update_role(self, req, role_type, orig_role_meta): + if orig_role_meta['type'].lower() != role_type.lower(): + msg = _("Role type can not be updated to other type.") + LOG.debug(msg) + raise HTTPForbidden(msg) + + def _check_cluster_id_in_role_update(self, req, role_cluster, orig_role_meta): + if orig_role_meta['type'].lower() == 'template': + msg = _("The template role does not belong to any cluster.") + LOG.debug(msg) + raise HTTPForbidden(msg) + orig_role_cluster = orig_role_meta['cluster_id'] + if orig_role_cluster != role_cluster: #Can not change the cluster which the role belongs to + msg = _("Can't update the cluster of the role.") + LOG.debug(msg) + raise HTTPForbidden(msg) + else: + self._raise_404_if_cluster_deleted(req, role_cluster) + + def _check_role_name_in_role_update(self, req, role_meta, orig_role_meta): + role_name = role_meta['name'] + cluster_id = role_meta.get('cluster_id', orig_role_meta['cluster_id']) + if cluster_id: + self.check_cluster_role_name_repetition(req, role_name, cluster_id) + else: #role type was template, cluster id was None + self.check_template_role_name_repetition(req, role_name) + + def _check_all_lv_size_of_nodes_with_role_in_role_update(self, req, role_meta, orig_role_meta, + role_host_id_list): + #check host with this role at the same time + cluster_id = role_meta.get('cluster_id', None) + if not cluster_id: #role with cluster + cluster_id = orig_role_meta['cluster_id'] + if not cluster_id: #without cluster id, raise Error + msg = _("The cluster_id parameter can not be None!") + LOG.debug(msg) + raise HTTPForbidden(msg) + argws = dict() + if role_meta.has_key('db_lv_size'): + db_lv_size = role_meta['db_lv_size'] + else: #The db_lv_size has been specified before. 
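+            # fall back to the db_lv_size already stored on the role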
+ db_lv_size = orig_role_meta.get('db_lv_size') + if role_meta.has_key('glance_lv_size'): + glance_lv_size = role_meta['glance_lv_size'] + else: + glance_lv_size = orig_role_meta.get('glance_lv_size') + if role_meta.has_key('nova_lv_size'): + nova_lv_size = role_meta['nova_lv_size'] + else: + nova_lv_size = orig_role_meta.get('nova_lv_size') + if role_meta.has_key('nodes'): + host_id_list = list(eval(role_meta['nodes'])) + role_host_id_list + else: + host_id_list = role_host_id_list + self._check_all_lv_size(req, db_lv_size, glance_lv_size, + nova_lv_size, host_id_list, cluster_id, argws) + + def _check_ntp_server(self, req, role_name): + if role_name != 'CONTROLLER_HA': + msg = 'The role %s need no ntp_server' % role_name + raise HTTPForbidden(explanation=msg) + + + def _check_role_type_in_role_add(self, req, role_meta): + #role_type == None or not template, cluster id must not be None + role_type = role_meta['type'] + if role_type.lower() != 'template': + role_cluster_id = role_meta.get('cluster_id', None) + if not role_cluster_id: #add role without cluster id parameter, raise error + msg = _("The cluster_id parameter can not be None if role was not a template type.") + LOG.debug(msg) + raise HTTPForbidden(msg) + else: #role_type == template, cluster id is not necessary + if role_meta.has_key('cluster_id'): + msg = _("Tht template role cannot be added to any cluster.") + LOG.debug(msg) + raise HTTPForbidden(msg) + + def _check_all_lv_size_with_role_in_role_add(self, req, role_meta): + cluster_id = role_meta.get('cluster_id', None) + if not cluster_id: #without cluster id, raise Error + msg = _("The cluster_id parameter can not be None!") + LOG.debug(msg) + raise HTTPForbidden(msg) + argws = dict() + db_lv_size = role_meta.get('db_lv_size', 0) + glance_lv_size = role_meta.get('glance_lv_size', 0) + nova_lv_size = role_meta.get('nova_lv_size', 0) + host_id_list = list(eval(role_meta['nodes'])) + self._check_all_lv_size(req, db_lv_size, glance_lv_size, + nova_lv_size, host_id_list, cluster_id, argws) + + def get_host_disk_db_glance_nova_size(self, req, host_id, cluster_id): + ''' + return : + host_disk_db_glance_nova_size['disk_size'] = 1024000 + host_disk_db_glance_nova_size['db_lv_size'] = 1011 + host_disk_db_glance_nova_size['glance_lv_size'] = 1011 + host_disk_db_glance_nova_size['nova_lv_size'] = 1011 + ''' + #import pdb;pdb.set_trace() + host_disk_db_glance_nova_size = dict() + db_lv_size = list() + glance_lv_size = list() + nova_lv_size= list() + disk_size = list() + + host_info = self.get_host_meta_or_404(req, host_id) + if host_info: + if host_info.has_key('deleted') and host_info['deleted']: + msg = _("Node with identifier %s has been deleted.") % host_info['id'] + LOG.debug(msg) + raise HTTPNotFound(msg) + #get host disk infomation + host_disk = self._get_host_disk_except_os_disk_by_info(host_info) + host_disk_db_glance_nova_size['disk_size'] = host_disk + #get role_host db/galnce/nova infomation + cluster_info = self.get_cluster_meta_or_404(req, cluster_id) + if host_info.has_key('cluster'): #host with cluster + if host_info['cluster'] != cluster_info['name']: + #type(host_info['cluster']) = list, type(cluster_info['name']) = str + msg = _("Role and hosts belong to different cluster.") + LOG.debug(msg) + raise HTTPNotFound(msg) + else: + all_roles = registry.get_roles_detail(req.context) + cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id] + #roles infomation saved in cluster_roles + if host_info.has_key('role') and host_info['role']: #host with 
role + for role in cluster_roles: + if role['name'] in host_info['role'] and cluster_roles: + db_lv_size.append(role.get('db_lv_size', None)) + glance_lv_size.append(role.get('glance_lv_size', None)) + nova_lv_size.append(role.get('nova_lv_size', None)) + + if db_lv_size: + host_disk_db_glance_nova_size['db_lv_size'] = max(db_lv_size) + else: #host without cluster + host_disk_db_glance_nova_size['db_lv_size'] = 0 + if glance_lv_size: + host_disk_db_glance_nova_size['glance_lv_size'] = max(glance_lv_size) + else: + host_disk_db_glance_nova_size['glance_lv_size'] = 0 + if nova_lv_size: + host_disk_db_glance_nova_size['nova_lv_size'] = max(nova_lv_size) + else: + host_disk_db_glance_nova_size['nova_lv_size'] = 0 + LOG.warn('--------host(%s)disk_db_glance_nova_size:----- %s'% (host_id, host_disk_db_glance_nova_size)) + return host_disk_db_glance_nova_size + + def check_cluster_role_name_repetition(self, req, role_name, cluster_id): + all_roles = registry.get_roles_detail(req.context) + cluster_roles = [role for role in all_roles if role['cluster_id'] == cluster_id] + cluster_roles_name = [role['name'].lower() for role in cluster_roles] + if role_name.lower() in cluster_roles_name: + msg = _("The role %s has already been in the cluster %s!" % (role_name, cluster_id)) + LOG.debug(msg) + raise HTTPForbidden(msg) + + def check_template_role_name_repetition(self, req, role_name): + all_roles = registry.get_roles_detail(req.context) + template_roles = [role for role in all_roles if role['cluster_id'] == None] + template_roles_name = [role['name'].lower() for role in template_roles] + if role_name.lower() in template_roles_name: + msg = _("The role %s has already been in the the template role." % role_name) + LOG.debug(msg) + raise HTTPForbidden(msg) + + def _check_disk_parameters(self, req, role_meta): + if (role_meta.has_key('disk_location') and + role_meta['disk_location'] not in SUPPORT_DISK_LOCATION): + msg = _("value of disk_location is not supported.") + raise HTTPForbidden(msg) + + def _check_type_role_reasonable(self, req, role_meta): + if role_meta['role_type'] not in SUPPORTED_ROLE: + msg = 'The role type %s is illegal' % role_meta['role_type'] + raise HTTPForbidden(explanation=msg) + + def _check_role_update_parameters(self, req, role_meta, orig_role_meta, + role_service_id_list, role_host_id_list): + role_name = orig_role_meta['name'] + if role_meta.get('type', None): + self._check_role_type_in_update_role(req, role_meta['type'], orig_role_meta) + if role_meta.has_key('ntp_server'): + self._check_ntp_server(req, role_name) + if role_meta.has_key('nodes'): + self._check_nodes_exist(req, list(eval(role_meta['nodes']))) + if role_meta.has_key('services'): + self._check_services_exist(req, list(eval(role_meta['services']))) + role_service_id_list.extend(list(eval(role_meta['services']))) + if role_meta.has_key('config_set_id'): + self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) + if role_meta.has_key('cluster_id'): + self._check_cluster_id_in_role_update(req, str(role_meta['cluster_id']), orig_role_meta) + if role_meta.has_key('name'): + self._check_role_name_in_role_update(req, role_meta, orig_role_meta) + service_name_list = self._get_service_name_list(req, role_service_id_list) + glance_lv_value = role_meta.get('glance_lv_size', orig_role_meta['glance_lv_size']) + if glance_lv_value: + self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) + if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: + self._check_db_lv_size(req, 
role_meta['db_lv_size'], service_name_list) + if role_meta.get('nova_lv_size', None): + self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) + if role_meta.has_key('nodes') or role_host_id_list: + self._check_all_lv_size_of_nodes_with_role_in_role_update(req, role_meta, orig_role_meta, + role_host_id_list) + self._check_disk_parameters(req, role_meta) + if role_meta.has_key('deployment_backend'): + self._check_deployment_backend(req, role_meta['deployment_backend']) + if role_meta.get('role_type', None): + self._check_type_role_reasonable(req, role_meta) + + + def _check_role_add_parameters(self, req, role_meta, role_service_id_list): + role_type = role_meta.get('type', None) + role_name = role_meta.get('name', None) + if role_meta.get('type', None): + self._check_role_type_in_role_add(req, role_meta) + if role_meta.has_key('nodes'): + self._check_nodes_exist(req, list(eval(role_meta['nodes']))) + if role_meta.has_key('services'): + self._check_services_exist(req, list(eval(role_meta['services']))) + role_service_id_list.extend(list(eval(role_meta['services']))) + if role_meta.has_key('config_set_id'): + self._check_config_set_id_exist(req, str(role_meta['config_set_id'])) + if role_meta.has_key('cluster_id'): + orig_cluster = str(role_meta['cluster_id']) + self._raise_404_if_cluster_deleted(req, orig_cluster) + self.check_cluster_role_name_repetition(req, role_name, orig_cluster) + else: + self.check_template_role_name_repetition(req, role_name) + service_name_list = self._get_service_name_list(req, role_service_id_list) + glance_lv_value = role_meta.get('glance_lv_size', None) + if glance_lv_value: + self._check_glance_lv_value(req, glance_lv_value, role_name, service_name_list) + if role_meta.get('db_lv_size', None) and role_meta['db_lv_size']: + self._check_db_lv_size(req, role_meta['db_lv_size'], service_name_list) + if role_meta.get('nova_lv_size', None): + self._check_nova_lv_size(req, role_meta['nova_lv_size'], role_name) + if role_meta.has_key('nodes'): + self._check_all_lv_size_with_role_in_role_add(req, role_meta) + self._check_disk_parameters(req, role_meta) + if role_meta.has_key('deployment_backend'): + self._check_deployment_backend(req, role_meta['deployment_backend']) + else: + role_meta['deployment_backend'] = 'tecs' + if role_meta.get('role_type', None): + self._check_type_role_reasonable(req, role_meta) + + @utils.mutating + def add_role(self, req, role_meta): + """ + Adds a new role to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about role + + :raises HTTPBadRequest if x-role-name is missing + """ + + self._enforce(req, 'add_role') + role_service_id_list = [] + self._check_role_add_parameters(req, role_meta, role_service_id_list) + role_name = role_meta["name"] + role_description = role_meta["description"] + print role_name + print role_description + + role_meta = registry.add_role_metadata(req.context, role_meta) + + return {'role_meta': role_meta} + + @utils.mutating + def delete_role(self, req, id): + """ + Deletes a role from Daisy. 
+ + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about role + + :raises HTTPBadRequest if x-role-name is missing + """ + self._enforce(req, 'delete_role') + + #role = self.get_role_meta_or_404(req, id) + print "delete_role:%s" % id + try: + registry.delete_role_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find role to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete role: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("role %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('role.delete', role) + return Response(body='', status=200) + + @utils.mutating + def get_role(self, req, id): + """ + Returns metadata about an role in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque role identifier + + :raises HTTPNotFound if role metadata is not available to user + """ + self._enforce(req, 'get_role') + role_meta = self.get_role_meta_or_404(req, id) + return {'role_meta': role_meta} + + def detail(self, req): + """ + Returns detailed information for all available roles + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'roles': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... + ]} + """ + self._enforce(req, 'get_roles') + params = self._get_query_params(req) + filters=params.get('filters',None) + if 'cluster_id' in filters: + cluster_id=filters['cluster_id'] + self._raise_404_if_cluster_deleted(req, cluster_id) + + try: + roles = registry.get_roles_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(roles=roles) + + @utils.mutating + def update_role(self, req, id, role_meta): + """ + Updates an existing role with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + orig_role_meta = self.get_role_meta_or_404(req, id) + role_service_list = registry.get_role_services(req.context, id) + role_service_id_list = [ role_service['service_id'] for role_service in role_service_list ] + role_host_info_list = registry.get_role_host_metadata(req.context, id) + role_host_id_list = [role_host['host_id'] for role_host in role_host_info_list] + self._check_role_update_parameters(req, role_meta, orig_role_meta, role_service_id_list, role_host_id_list) + + self._enforce(req, 'modify_image') + #orig_role_meta = self.get_role_meta_or_404(req, id) + + # Do not allow any updates on a deleted image. + # Fix for LP Bug #1060930 + if orig_role_meta['deleted']: + msg = _("Forbidden to update deleted role.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + role_meta = registry.update_role_metadata(req.context, + id, + role_meta) + + except exception.Invalid as e: + msg = (_("Failed to update role metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find role to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update role: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('role.update', role_meta) + + return {'role_meta': role_meta} + +class RoleDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["role_meta"] = utils.get_role_meta(request) + return result + + def add_role(self, request): + return self._deserialize(request) + + def update_role(self, request): + return self._deserialize(request) + +class RoleSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_role(self, response, result): + role_meta = result['role_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(role=role_meta)) + return response + + def delete_role(self, response, result): + role_meta = result['role_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(role=role_meta)) + return response + def get_role(self, response, result): + role_meta = result['role_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(role=role_meta)) + return response + +def create_resource(): + """Roles resource factory method""" + deserializer = RoleDeserializer() + serializer = RoleSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/router.py b/code/daisy/daisy/api/v1/router.py new file mode 100755 index 00000000..bfcbbdce --- /dev/null +++ b/code/daisy/daisy/api/v1/router.py @@ -0,0 +1,574 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
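The role controller above sizes logical volumes against each host's usable disk: _get_host_disk_except_os_disk_by_info subtracts the OS root volume, swap, a boot partition and a redundancy margin from the raw disk total, and _check_host_validity then requires the glance, database and nova volumes (each aligned down to a multiple of 4 MB) to fit in what remains. A standalone sketch of that arithmetic, for illustration only (the helper names and sample figures are made up; the defaults come from the code above):

    def usable_disk_m(total_disk_m, root_lv_m=51200, swap_lv_m=0):
        # mirrors _get_host_disk_except_os_disk_by_info: raw total minus the OS root,
        # 4 MB-aligned swap, a 400 MB boot partition and a 600 MB redundant partition
        return total_disk_m - root_lv_m - (swap_lv_m // 4) * 4 - 400 - 600

    def lv_sizes_fit(disk_m, db_lv_m=300, glance_lv_m=17100, nova_lv_m=0):
        # mirrors _check_host_validity: a disk size of 0 means "no system install";
        # otherwise the 4 MB-aligned volumes must not exceed the usable disk
        if not disk_m:
            return True
        align = lambda m: (m // 4) * 4
        return align(glance_lv_m) + align(db_lv_m) + align(max(nova_lv_m, 0)) <= disk_m

    # A 100 GB disk with the default 50 GB root leaves usable_disk_m(102400) == 50200 MB,
    # which comfortably fits the default database and glance volumes:
    # lv_sizes_fit(usable_disk_m(102400))  -> True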
+ + +#from daisy.api.v1 import images +from daisy.api.v1 import hosts +from daisy.api.v1 import clusters +from daisy.api.v1 import template +from daisy.api.v1 import components +from daisy.api.v1 import services +from daisy.api.v1 import roles +from daisy.api.v1 import members +from daisy.api.v1 import config_files +from daisy.api.v1 import config_sets +from daisy.api.v1 import configs +from daisy.api.v1 import networks +from daisy.api.v1 import install +from daisy.api.v1 import disk_array +from daisy.api.v1 import host_template +from daisy.common import wsgi + +class API(wsgi.Router): + + """WSGI router for Glance v1 API requests.""" + + def __init__(self, mapper): + reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) + + '''images_resource = images.create_resource() + + mapper.connect("/", + controller=images_resource, + action="index") + mapper.connect("/images", + controller=images_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect("/images", + controller=images_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect("/images", + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', 'HEAD', + 'PATCH']}) + mapper.connect("/images/detail", + controller=images_resource, + action='detail', + conditions={'method': ['GET', 'HEAD']}) + mapper.connect("/images/detail", + controller=reject_method_resource, + action='reject', + allowed_methods='GET, HEAD', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH']}) + mapper.connect("/images/{id}", + controller=images_resource, + action="meta", + conditions=dict(method=["HEAD"])) + mapper.connect("/images/{id}", + controller=images_resource, + action="show", + conditions=dict(method=["GET"])) + mapper.connect("/images/{id}", + controller=images_resource, + action="update", + conditions=dict(method=["PUT"])) + mapper.connect("/images/{id}", + controller=images_resource, + action="delete", + conditions=dict(method=["DELETE"])) + mapper.connect("/images/{id}", + controller=reject_method_resource, + action='reject', + allowed_methods='GET, HEAD, PUT, DELETE', + conditions={'method': ['POST', 'PATCH']}) + + members_resource = members.create_resource() + + mapper.connect("/images/{image_id}/members", + controller=members_resource, + action="index", + conditions={'method': ['GET']}) + mapper.connect("/images/{image_id}/members", + controller=members_resource, + action="update_all", + conditions=dict(method=["PUT"])) + mapper.connect("/images/{image_id}/members", + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT', + conditions={'method': ['POST', 'DELETE', 'HEAD', + 'PATCH']}) + mapper.connect("/images/{image_id}/members/{id}", + controller=members_resource, + action="show", + conditions={'method': ['GET']}) + mapper.connect("/images/{image_id}/members/{id}", + controller=members_resource, + action="update", + conditions={'method': ['PUT']}) + mapper.connect("/images/{image_id}/members/{id}", + controller=members_resource, + action="delete", + conditions={'method': ['DELETE']}) + mapper.connect("/images/{image_id}/members/{id}", + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT, DELETE', + conditions={'method': ['POST', 'HEAD', 'PATCH']}) + mapper.connect("/shared-images/{id}", + controller=members_resource, + action="index_shared_images")''' + + + hosts_resource = hosts.create_resource() + + mapper.connect("/nodes", + 
controller=hosts_resource, + action='add_host', + conditions={'method': ['POST']}) + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action='delete_host', + conditions={'method': ['DELETE']}) + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action='update_host', + conditions={'method': ['PUT']}) + mapper.connect("/nodes", + controller=hosts_resource, + action='detail', + conditions={'method': ['GET']}) + + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action='get_host', + conditions={'method': ['GET']}) + mapper.connect("/discover_host/", + controller=hosts_resource, + action='discover_host', + conditions={'method': ['POST']}) + + mapper.connect("/discover/nodes", + controller=hosts_resource, + action='add_discover_host', + conditions={'method': ['POST']}) + + mapper.connect("/discover/nodes/{id}", + controller=hosts_resource, + action='delete_discover_host', + conditions={'method': ['DELETE']}) + + mapper.connect("/discover/nodes", + controller=hosts_resource, + action='detail_discover_host', + conditions={'method': ['GET']}) + + mapper.connect("/discover/nodes/{id}", + controller=hosts_resource, + action='update_discover_host', + conditions={'method': ['PUT']}) + + mapper.connect("/discover/nodes/{discover_host_id}", + controller=hosts_resource, + action='get_discover_host_detail', + conditions={'method': ['GET']}) + + clusters_resource = clusters.create_resource() + + mapper.connect("/clusters", + controller=clusters_resource, + action='add_cluster', + conditions={'method': ['POST']}) + mapper.connect("/clusters/{id}", + controller=clusters_resource, + action='delete_cluster', + conditions={'method': ['DELETE']}) + mapper.connect("/clusters/{id}", + controller=clusters_resource, + action='update_cluster', + conditions={'method': ['PUT']}) + + mapper.connect("/clusters", + controller=clusters_resource, + action='detail', + conditions={'method': ['GET']}) + + mapper.connect("/clusters/{id}", + controller=clusters_resource, + action='get_cluster', + conditions={'method': ['GET']}) + + + mapper.connect("/clusters/{id}", + controller=clusters_resource, + action='update_cluster', + conditions={'method': ['PUT']}) + + template_resource = template.create_resource() + mapper.connect("/template", + controller=template_resource, + action='add_template', + conditions={'method': ['POST']}) + + mapper.connect("/template/{template_id}", + controller=template_resource, + action='update_template', + conditions={'method': ['PUT']}) + + + mapper.connect("/template/{template_id}", + controller=template_resource, + action='delete_template', + conditions={'method': ['DELETE']}) + + mapper.connect("/template/lists", + controller=template_resource, + action='get_template_lists', + conditions={'method': ['GET']}) + + mapper.connect("/template/{template_id}", + controller=template_resource, + action='get_template_detail', + conditions={'method': ['GET']}) + + mapper.connect("/export_db_to_json", + controller=template_resource, + action='export_db_to_json', + conditions={'method': ['POST']}) + + mapper.connect("/import_json_to_template", + controller=template_resource, + action='import_json_to_template', + conditions={'method': ['POST']}) + + mapper.connect("/import_template_to_db", + controller=template_resource, + action='import_template_to_db', + conditions={'method': ['POST']}) + + + host_template_resource = host_template.create_resource() + mapper.connect("/host_template", + controller=host_template_resource, + action='add_host_template', + conditions={'method': 
['POST']}) + mapper.connect("/host_template/{template_id}", + controller=host_template_resource, + action='update_host_template', + conditions={'method': ['PUT']}) + mapper.connect("/host_template", + controller=host_template_resource, + action='delete_host_template', + conditions={'method': ['PUT']}) + mapper.connect("/host_template/lists", + controller=host_template_resource, + action='get_host_template_lists', + conditions={'method': ['GET']}) + mapper.connect("/host_template/{template_id}", + controller=host_template_resource, + action='get_host_template_detail', + conditions={'method': ['GET']}) + mapper.connect("/host_to_template", + controller=host_template_resource, + action='host_to_template', + conditions={'method': ['POST']}) + mapper.connect("/template_to_host", + controller=host_template_resource, + action='template_to_host', + conditions={'method': ['PUT']}) + + components_resource = components.create_resource() + mapper.connect("/components", + controller=components_resource, + action='add_component', + conditions={'method': ['POST']}) + mapper.connect("/components/{id}", + controller=components_resource, + action='delete_component', + conditions={'method': ['DELETE']}) + mapper.connect("/components/detail", + controller=components_resource, + action='detail', + conditions={'method': ['GET']}) + mapper.connect("/components/{id}", + controller=components_resource, + action='get_component', + conditions={'method': ['GET']}) + mapper.connect("/components/{id}", + controller=components_resource, + action='update_component', + conditions={'method': ['PUT']}) + + services_resource = services.create_resource() + mapper.connect("/services", + controller=services_resource, + action='add_service', + conditions={'method': ['POST']}) + mapper.connect("/services/{id}", + controller=services_resource, + action='delete_service', + conditions={'method': ['DELETE']}) + mapper.connect("/services/detail", + controller=services_resource, + action='detail', + conditions={'method': ['GET']}) + mapper.connect("/services/{id}", + controller=services_resource, + action='get_service', + conditions={'method': ['GET']}) + mapper.connect("/services/{id}", + controller=services_resource, + action='update_service', + conditions={'method': ['PUT']}) + + roles_resource = roles.create_resource() + mapper.connect("/roles", + controller=roles_resource, + action='add_role', + conditions={'method': ['POST']}) + mapper.connect("/roles/{id}", + controller=roles_resource, + action='delete_role', + conditions={'method': ['DELETE']}) + mapper.connect("/roles/detail", + controller=roles_resource, + action='detail', + conditions={'method': ['GET']}) + mapper.connect("/roles/{id}", + controller=roles_resource, + action='get_role', + conditions={'method': ['GET']}) + mapper.connect("/roles/{id}", + controller=roles_resource, + action='update_role', + conditions={'method': ['PUT']}) + + members_resource = members.create_resource() + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + controller=members_resource, + action="add_cluster_host", + conditions={'method': ['PUT']}) + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + controller=members_resource, + action="delete_cluster_host", + conditions={'method': ['DELETE']}) + # mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + # controller=members_resource, + # action="get_cluster_hosts", + # conditions={'method': ['GET']}) + # mapper.connect("/clusters/{cluster_id}/nodes", + # controller=members_resource, + # action="get_cluster_hosts", + # 
conditions={'method': ['GET']}) + # mapper.connect("/multi_clusters/nodes/{host_id}", + # controller=members_resource, + # action="get_host_clusters", + # conditions={'method': ['GET']}) + + config_files_resource = config_files.create_resource() + + mapper.connect("/config_files", + controller=config_files_resource, + action="add_config_file", + conditions={'method': ['POST']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="delete_config_file", + conditions={'method': ['DELETE']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="update_config_file", + conditions={'method': ['PUT']}) + + mapper.connect("/config_files/detail", + controller=config_files_resource, + action="detail", + conditions={'method': ['GET']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="get_config_file", + conditions=dict(method=["GET"])) + config_sets_resource = config_sets.create_resource() + + mapper.connect("/config_sets", + controller=config_sets_resource, + action="add_config_set", + conditions={'method': ['POST']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="delete_config_set", + conditions={'method': ['DELETE']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="update_config_set", + conditions={'method': ['PUT']}) + + mapper.connect("/config_sets/detail", + controller=config_sets_resource, + action="detail", + conditions={'method': ['GET']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="get_config_set", + conditions=dict(method=["GET"])) + mapper.connect("/cluster_config_set_update", + controller=config_sets_resource, + action="cluster_config_set_update", + conditions={'method': ['POST']}) + + mapper.connect("/cluster_config_set_progress", + controller=config_sets_resource, + action="cluster_config_set_progress", + conditions={'method': ['POST']}) + + configs_resource = configs.create_resource() + + mapper.connect("/configs", + controller=configs_resource, + action="add_config", + conditions={'method': ['POST']}) + + mapper.connect("/configs_delete", + controller=configs_resource, + action="delete_config", + conditions={'method': ['DELETE']}) + + mapper.connect("/configs/detail", + controller=configs_resource, + action="detail", + conditions={'method': ['GET']}) + + mapper.connect("/configs/{id}", + controller=configs_resource, + action="get_config", + conditions=dict(method=["GET"])) + + networks_resource = networks.create_resource() + + mapper.connect("/networks", + controller=networks_resource, + action='add_network', + conditions={'method': ['POST']}) + mapper.connect("/networks/{network_id}", + controller=networks_resource, + action='delete_network', + conditions={'method': ['DELETE']}) + mapper.connect("/networks/{network_id}", + controller=networks_resource, + action='update_network', + conditions={'method': ['PUT']}) + mapper.connect("/clusters/{id}/networks", + controller=networks_resource, + action='detail', + conditions={'method': ['GET']}) + + mapper.connect("/networks/{id}", + controller=networks_resource, + action='get_network', + conditions={'method': ['GET']}) + + mapper.connect("/networks", + controller=networks_resource, + action='get_all_network', + conditions={'method': ['GET']}) + + install_resource = install.create_resource() + + mapper.connect("/install", + controller=install_resource, + action='install_cluster', + conditions={'method': ['POST']}) + + 
mapper.connect("/export_db", + controller=install_resource, + action='export_db', + conditions={'method': ['POST']}) + + mapper.connect("/uninstall/{cluster_id}", + controller=install_resource, + action='uninstall_cluster', + conditions={'method': ['POST']}) + mapper.connect("/uninstall/{cluster_id}", + controller=install_resource, + action='uninstall_progress', + conditions={'method': ['GET']}) + + mapper.connect("/update/{cluster_id}", + controller=install_resource, + action='update_cluster', + conditions={'method': ['POST']}) + + mapper.connect("/update/{cluster_id}", + controller=install_resource, + action='update_progress', + conditions={'method': ['GET']}) + + mapper.connect("/disk_array/{cluster_id}", + controller=install_resource, + action='update_disk_array', + conditions={'method': ['POST']}) + + #mapper.connect("/update/{cluster_id}/versions/{versions_id}", + # controller=update_resource, + # action='update_cluster_version', + # conditions={'method': ['POST']}) + + array_resource = disk_array.create_resource() + mapper.connect("/service_disk", + controller=array_resource, + action='service_disk_add', + conditions={'method': ['POST']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_update', + conditions={'method': ['PUT']}) + mapper.connect("/service_disk/list", + controller=array_resource, + action='service_disk_list', + conditions={'method': ['GET']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_detail', + conditions={'method': ['GET']}) + + mapper.connect("/cinder_volume", + controller=array_resource, + action='cinder_volume_add', + conditions={'method': ['POST']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_update', + conditions={'method': ['PUT']}) + mapper.connect("/cinder_volume/list", + controller=array_resource, + action='cinder_volume_list', + conditions={'method': ['GET']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_detail', + conditions={'method': ['GET']}) + + super(API, self).__init__(mapper) + + diff --git a/code/daisy/daisy/api/v1/services.py b/code/daisy/daisy/api/v1/services.py new file mode 100755 index 00000000..b9f55d61 --- /dev/null +++ b/code/daisy/daisy/api/v1/services.py @@ -0,0 +1,334 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +/services endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS +SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for services resource in Daisy v1 API + + The services resource API is a RESTful web service for service data. The API + is as follows:: + + GET /services -- Returns a set of brief metadata about services + GET /services/detail -- Returns a set of detailed metadata about + services + HEAD /services/ -- Return metadata about an service with id + GET /services/ -- Return service data for service with id + POST /services -- Store service data and return metadata about the + newly-stored service + PUT /services/ -- Update service metadata and/or upload service + data for a previously-reserved service + DELETE /services/ -- Delete the service with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. 
+ + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_component_deleted(self, req, component_id): + component = self.get_component_meta_or_404(req, component_id) + if component['deleted']: + msg = _("Component with identifier %s has been deleted.") % component_id + raise HTTPNotFound(msg) + + @utils.mutating + def add_service(self, req, service_meta): + """ + Adds a new service to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about service + + :raises HTTPBadRequest if x-service-name is missing + """ + self._enforce(req, 'add_service') + service_name = service_meta["name"] + service_description = service_meta["description"] + + if service_meta.has_key('component_id'): + orig_component_id = str(service_meta['component_id']) + self._raise_404_if_component_deleted(req, orig_component_id) + + print service_name + print service_description + service_meta = registry.add_service_metadata(req.context, service_meta) + + return {'service_meta': service_meta} + + @utils.mutating + def delete_service(self, req, id): + """ + Deletes a service from Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about service + + :raises HTTPBadRequest if x-service-name is missing + """ + self._enforce(req, 'delete_service') + + #service = self.get_service_meta_or_404(req, id) + print "delete_service:%s" % id + try: + registry.delete_service_metadata(req.context, id) + except exception.NotFound as e: + msg = (_("Failed to find service to delete: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete service: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("service %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)}) + LOG.warn(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + #self.notifier.info('service.delete', service) + return Response(body='', status=200) + + @utils.mutating + def get_service(self, req, id): + """ + Returns metadata about an service in the HTTP headers of the + response object + + :param req: The WSGI/Webob Request object + :param id: The opaque service identifier + + :raises HTTPNotFound if service metadata is not available to user + """ + self._enforce(req, 'get_service') + service_meta = self.get_service_meta_or_404(req, id) + return {'service_meta': service_meta} + + def detail(self, req): + """ + Returns detailed information for all available services + + :param req: The WSGI/Webob Request object + :retval The response body is a mapping of the following form:: + + {'services': [ + {'id': , + 'name': , + 'description': , + 'created_at': , + 'updated_at': , + 'deleted_at': |,}, ... 
+ ]} + """ + self._enforce(req, 'get_services') + params = self._get_query_params(req) + try: + services = registry.get_services_detail(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(services=services) + + @utils.mutating + def update_service(self, req, id, service_meta): + """ + Updates an existing service with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'modify_image') + orig_service_meta = self.get_service_meta_or_404(req, id) + + # Do not allow any updates on a deleted image. + # Fix for LP Bug #1060930 + if orig_service_meta['deleted']: + msg = _("Forbidden to update deleted service.") + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + try: + service_meta = registry.update_service_metadata(req.context, + id, + service_meta) + + except exception.Invalid as e: + msg = (_("Failed to update service metadata. Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find service to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update service: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('Host operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('service.update', service_meta) + + return {'service_meta': service_meta} + +class ServiceDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["service_meta"] = utils.get_service_meta(request) + return result + + def add_service(self, request): + return self._deserialize(request) + + def update_service(self, request): + return self._deserialize(request) + +class ServiceSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_service(self, response, result): + service_meta = result['service_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(service=service_meta)) + return response + + def delete_service(self, response, result): + service_meta = result['service_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(service=service_meta)) + return response + def get_service(self, response, result): + service_meta = result['service_meta'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(service=service_meta)) + return response + +def create_resource(): + """Services resource factory method""" + deserializer = ServiceDeserializer() + serializer = ServiceSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/template.py 
b/code/daisy/daisy/api/v1/template.py new file mode 100755 index 00000000..ba491ab7 --- /dev/null +++ b/code/daisy/daisy/api/v1/template.py @@ -0,0 +1,629 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +/Templates endpoint for Daisy v1 API +""" + +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPConflict +from webob.exc import HTTPForbidden +from webob.exc import HTTPNotFound +from webob import Response +import copy +import json + +from daisy.api import policy +import daisy.api.v1 +from daisy.api.v1 import controller +from daisy.api.v1 import filters +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n +from daisy import notifier +import daisy.registry.client.v1.api as registry +from daisy.registry.api.v1 import template + +import daisy.api.backends.tecs.common as tecs_cmn +import daisy.api.backends.common as daisy_cmn +try: + import simplejson as json +except ImportError: + import json + +daisy_tecs_path = tecs_cmn.daisy_tecs_path + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +SUPPORTED_PARAMS = template.SUPPORTED_PARAMS +SUPPORTED_FILTERS = template.SUPPORTED_FILTERS +ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') +CONF.import_opt('image_property_quota', 'daisy.common.config') + +class Controller(controller.BaseController): + """ + WSGI controller for Templates resource in Daisy v1 API + + The Templates resource API is a RESTful web Template for Template data. 
The API + is as follows:: + + GET /Templates -- Returns a set of brief metadata about Templates + GET /Templates/detail -- Returns a set of detailed metadata about + Templates + HEAD /Templates/ -- Return metadata about an Template with id + GET /Templates/ -- Return Template data for Template with id + POST /Templates -- Store Template data and return metadata about the + newly-stored Template + PUT /Templates/ -- Update Template metadata and/or upload Template + data for a previously-reserved Template + DELETE /Templates/ -- Delete the Template with id + """ + + def __init__(self): + self.notifier = notifier.Notifier() + registry.configure_registry_client() + self.policy = policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.prop_enforcer = property_utils.PropertyRules(self.policy) + else: + self.prop_enforcer = None + + def _enforce(self, req, action, target=None): + """Authorize an action against our policies""" + if target is None: + target = {} + try: + self.policy.enforce(req.context, action, target) + except exception.Forbidden: + raise HTTPForbidden() + + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + query_filters = {} + for param in req.params: + if param in SUPPORTED_FILTERS: + query_filters[param] = req.params.get(param) + if not filters.validate(param, query_filters[param]): + raise HTTPBadRequest(_('Bad value passed to filter ' + '%(filter)s got %(val)s') + % {'filter': param, + 'val': query_filters[param]}) + return query_filters + + def _get_query_params(self, req): + """ + Extracts necessary query params from request. + + :param req: the WSGI Request object + :retval dict of parameters that can be used by registry client + """ + params = {'filters': self._get_filters(req)} + + for PARAM in SUPPORTED_PARAMS: + if PARAM in req.params: + params[PARAM] = req.params.get(PARAM) + return params + + def _raise_404_if_cluster_deleted(self, req, cluster_id): + cluster = self.get_cluster_meta_or_404(req, cluster_id) + if cluster['deleted']: + msg = _("Cluster with identifier %s has been deleted.") % cluster_id + raise webob.exc.HTTPNotFound(msg) + + @utils.mutating + def add_template(self, req, template): + """ + Adds a new cluster template to Daisy. + + :param req: The WSGI/Webob Request object + :param image_meta: Mapping of metadata about Template + + :raises HTTPBadRequest if x-Template-name is missing + """ + self._enforce(req, 'add_template') + template_name = template["name"] + + template = registry.add_template_metadata(req.context, template) + + return {'template': template} + + @utils.mutating + def update_template(self, req, template_id, template): + """ + Updates an existing Template with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'update_template') + try: + template = registry.update_template_metadata(req.context, + template_id, + template) + + except exception.Invalid as e: + msg = (_("Failed to update template metadata. 
Got error: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPBadRequest(explanation=msg, + request=req, + content_type="text/plain") + except exception.NotFound as e: + msg = (_("Failed to find template to update: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to update template: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except (exception.Conflict, exception.Duplicate) as e: + LOG.warn(utils.exception_to_str(e)) + raise HTTPConflict(body=_('template operation conflicts'), + request=req, + content_type='text/plain') + else: + self.notifier.info('template.update', template) + + return {'template': template} + @utils.mutating + def delete_template(self, req, template_id): + """ + delete a existing cluster template with the registry. + + :param request: The WSGI/Webob Request object + :param id: The opaque image identifier + + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'delete_template') + try: + registry.delete_template_metadata(req.context, template_id) + except exception.NotFound as e: + msg = (_("Failed to find template to delete: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to delete template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("template %(id)s could not be deleted because it is in use: " + "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + + def _del_general_params(self,param): + del param['created_at'] + del param['updated_at'] + del param['deleted'] + del param['deleted_at'] + del param['id'] + + def _del_cluster_params(self,cluster): + del cluster['networks'] + del cluster['vlan_start'] + del cluster['vlan_end'] + del cluster['vni_start'] + del cluster['vni_end'] + del cluster['gre_id_start'] + del cluster['gre_id_end'] + del cluster['net_l23_provider'] + del cluster['public_vip'] + del cluster['segmentation_type'] + del cluster['base_mac'] + del cluster['name'] + + @utils.mutating + def export_db_to_json(self, req, template): + """ + Template TECS to a cluster. 
+ :param req: The WSGI/Webob Request object + :raises HTTPBadRequest if x-Template-cluster is missing + """ + cluster_name = template.get('cluster_name',None) + type = template.get('type',None) + description = template.get('description',None) + template_name = template.get('template_name',None) + self._enforce(req, 'export_db_to_json') + cinder_volume_list = [] + template_content = {} + template_json = {} + template_id = "" + if not type or type == "tecs": + try: + params = {'filters': {'name':cluster_name}} + clusters = registry.get_clusters_detail(req.context, **params) + if clusters: + cluster_id = clusters[0]['id'] + else: + msg = "the cluster %s is not exist"%cluster_name + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") + + params = {'filters': {'cluster_id':cluster_id}} + cluster = registry.get_cluster_metadata(req.context, cluster_id) + roles = registry.get_roles_detail(req.context, **params) + networks = registry.get_networks_detail(req.context, cluster_id,**params) + for role in roles: + cinder_volume_params = {'filters': {'role_id':role['id']}} + cinder_volumes = registry.list_cinder_volume_metadata(req.context, **cinder_volume_params) + for cinder_volume in cinder_volumes: + if cinder_volume.get('role_id',None): + cinder_volume['role_id'] = role['name'] + self._del_general_params(cinder_volume) + cinder_volume_list.append(cinder_volume) + if role.get('config_set_id',None): + config_set = registry.get_config_set_metadata(req.context, role['config_set_id']) + role['config_set_id'] = config_set['name'] + del role['cluster_id'] + del role['status'] + del role['progress'] + del role['messages'] + del role['config_set_update_progress'] + self._del_general_params(role) + for network in networks: + network_detail = registry.get_network_metadata(req.context, network['id']) + if network_detail.get('ip_ranges',None): + network['ip_ranges'] = network_detail['ip_ranges'] + del network['cluster_id'] + self._del_general_params(network) + if cluster.get('routers',None): + for router in cluster['routers']: + del router['cluster_id'] + self._del_general_params(router) + if cluster.get('logic_networks',None): + for logic_network in cluster['logic_networks']: + for subnet in logic_network['subnets']: + del subnet['logic_network_id'] + del subnet['router_id'] + self._del_general_params(subnet) + del logic_network['cluster_id'] + self._del_general_params(logic_network) + if cluster.get('nodes',None): + del cluster['nodes'] + self._del_general_params(cluster) + self._del_cluster_params(cluster) + template_content['cluster'] = cluster + template_content['roles'] = roles + template_content['networks'] = networks + template_content['cinder_volumes'] = cinder_volume_list + template_json['content'] = json.dumps(template_content) + template_json['type'] = 'tecs' + template_json['name'] = template_name + template_json['description'] = description + + template_host_params = {'cluster_name':cluster_name} + template_hosts = registry.host_template_lists_metadata(req.context, **template_host_params) + if template_hosts: + template_json['hosts'] = template_hosts[0]['hosts'] + else: + template_json['hosts'] = "[]" + + template_params = {'filters': {'name':template_name}} + template_list = registry.template_lists_metadata(req.context, **template_params) + if template_list: + update_template = registry.update_template_metadata(req.context, template_list[0]['id'], template_json) + template_id = template_list[0]['id'] + else: + add_template = 
registry.add_template_metadata(req.context, template_json) + template_id = add_template['id'] + + if template_id: + template_detail = registry.template_detail_metadata(req.context, template_id) + self._del_general_params(template_detail) + template_detail['content'] = json.loads(template_detail['content']) + if template_detail['hosts']: + template_detail['hosts'] = json.loads(template_detail['hosts']) + + tecs_json = daisy_tecs_path + "%s.json"%template_name + cmd = 'rm -rf %s' % (tecs_json,) + daisy_cmn.subprocess_call(cmd) + with open(tecs_json, "w+") as fp: + fp.write(json.dumps(template_detail)) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + return {"template":template_detail} + + @utils.mutating + def import_json_to_template(self, req, template): + template_id = "" + template = json.loads(template.get('template',None)) + template_cluster = copy.deepcopy(template) + template_name = template_cluster.get('name',None) + template_params = {'filters': {'name':template_name}} + try: + if template_cluster.get('content',None): + template_cluster['content'] = json.dumps(template_cluster['content']) + if template_cluster.get('hosts',None): + template_cluster['hosts'] = json.dumps(template_cluster['hosts']) + else: + template_cluster['hosts'] = "[]" + + template_list = registry.template_lists_metadata(req.context, **template_params) + if template_list: + update_template_cluster = registry.update_template_metadata(req.context, template_list[0]['id'], template_cluster) + template_id = template_list[0]['id'] + else: + add_template_cluster = registry.add_template_metadata(req.context, template_cluster) + template_id = add_template_cluster['id'] + + if template_id: + template_detail = registry.template_detail_metadata(req.context, template_id) + del template_detail['deleted'] + del template_detail['deleted_at'] + + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + + return {"template":template_detail} + + @utils.mutating + def import_template_to_db(self, req, template): + cluster_id = "" + template_cluster = {} + cluster_meta = {} + template_meta = copy.deepcopy(template) + template_name = template_meta.get('name',None) + cluster_name = template_meta.get('cluster',None) + template_params = {'filters': {'name':template_name}} + template_list = registry.template_lists_metadata(req.context, **template_params) + if template_list: + template_cluster = template_list[0] + else: + msg = "the template %s is not exist" % template_name + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") + + try: + template_content = json.loads(template_cluster['content']) + template_content_cluster = template_content['cluster'] + template_content_cluster['name'] = cluster_name + template_content_cluster['networking_parameters'] = str(template_content_cluster['networking_parameters']) + template_content_cluster['logic_networks'] = str(template_content_cluster['logic_networks']) + template_content_cluster['logic_networks'] = template_content_cluster['logic_networks'].replace("\'true\'","True") + template_content_cluster['routers'] = str(template_content_cluster['routers']) + + if template_cluster['hosts']: + template_hosts = json.loads(template_cluster['hosts']) + template_host_params = {'cluster_name':cluster_name} + template_host_list = registry.host_template_lists_metadata(req.context, **template_host_params) + if template_host_list: + update_template_meta = {"cluster_name": cluster_name, 
"hosts":json.dumps(template_hosts)} + registry.update_host_template_metadata(req.context, template_host_list[0]['id'], update_template_meta) + else: + template_meta = {"cluster_name": cluster_name, "hosts":json.dumps(template_hosts)} + registry.add_host_template_metadata(req.context, template_meta) + + cluster_params = {'filters': {'name':cluster_name}} + clusters = registry.get_clusters_detail(req.context, **cluster_params) + if clusters: + msg = "the cluster %s is exist" % clusters[0]['name'] + LOG.error(msg) + raise HTTPForbidden(explanation=msg, request=req, content_type="text/plain") + else: + cluster_meta = registry.add_cluster_metadata(req.context, template_content['cluster']) + cluster_id = cluster_meta['id'] + + params = {'filters':{}} + networks = registry.get_networks_detail(req.context, cluster_id,**params) + template_content_networks = template_content['networks'] + for template_content_network in template_content_networks: + template_content_network['ip_ranges'] = str(template_content_network['ip_ranges']) + network_exist = 'false' + for network in networks: + if template_content_network['name'] == network['name']: + update_network_meta = registry.update_network_metadata(req.context, network['id'], template_content_network) + network_exist = 'true' + + if network_exist == 'false': + template_content_network['cluster_id'] = cluster_id + add_network_meta = registry.add_network_metadata(req.context, template_content_network) + + params = {'filters': {'cluster_id':cluster_id}} + roles = registry.get_roles_detail(req.context, **params) + template_content_roles = template_content['roles'] + for template_content_role in template_content_roles: + role_exist = 'false' + del template_content_role['config_set_id'] + for role in roles: + if template_content_role['name'] == role['name']: + update_role_meta = registry.update_role_metadata(req.context, role['id'], template_content_role) + role_exist = 'true' + + if role_exist == 'false': + template_content_role['cluster_id'] = cluster_id + add_role_meta = registry.add_role_metadata(req.context, template_content_role) + + cinder_volumes = registry.list_cinder_volume_metadata(req.context, **params) + template_content_cinder_volumes = template_content['cinder_volumes'] + for template_content_cinder_volume in template_content_cinder_volumes: + cinder_volume_exist = 'false' + roles = registry.get_roles_detail(req.context, **params) + for role in roles: + if template_content_cinder_volume['role_id'] == role['name']: + template_content_cinder_volume['role_id'] = role['id'] + + for cinder_volume in cinder_volumes: + if template_content_cinder_volume['role_id'] == cinder_volume['role_id']: + update_cinder_volume_meta = registry.update_cinder_volume_metadata(req.context, cinder_volume['id'], template_content_cinder_volume) + cinder_volume_exist = 'true' + + if cinder_volume_exist == 'false': + add_cinder_volumes = registry.add_cinder_volume_metadata(req.context, template_content_cinder_volume) + + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return {"template":cluster_meta} + + @utils.mutating + def get_template_detail(self, req, template_id): + """ + delete a existing cluster template with the registry. 
+ :param request: The WSGI/Webob Request object + :param id: The opaque image identifie + :retval Returns the updated image information as a mapping + """ + self._enforce(req, 'get_template_detail') + try: + template = registry.template_detail_metadata(req.context, template_id) + return {'template': template} + except exception.NotFound as e: + msg = (_("Failed to find template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPNotFound(explanation=msg, + request=req, + content_type="text/plain") + except exception.Forbidden as e: + msg = (_("Forbidden to get template: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + raise HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + except exception.InUseByStore as e: + msg = (_("template %(id)s could not be get because it is in use: " + "%(exc)s") % {"id": template_id, "exc": utils.exception_to_str(e)}) + LOG.error(msg) + raise HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + else: + return Response(body='', status=200) + + @utils.mutating + def get_template_lists(self, req): + self._enforce(req, 'get_template_lists') + params = self._get_query_params(req) + try: + template_lists = registry.template_lists_metadata(req.context, **params) + except exception.Invalid as e: + raise HTTPBadRequest(explanation=e.msg, request=req) + return dict(template=template_lists) + +class TemplateDeserializer(wsgi.JSONRequestDeserializer): + """Handles deserialization of specific controller method requests.""" + + def _deserialize(self, request): + result = {} + result["template"] = utils.get_template_meta(request) + return result + + def add_template(self, request): + return self._deserialize(request) + + def update_template(self, request): + return self._deserialize(request) + + def export_db_to_json(self, request): + return self._deserialize(request) + + def import_json_to_template(self, request): + return self._deserialize(request) + + def import_template_to_db(self, request): + return self._deserialize(request) + +class TemplateSerializer(wsgi.JSONResponseSerializer): + """Handles serialization of specific controller method responses.""" + + def __init__(self): + self.notifier = notifier.Notifier() + + def add_template(self, response, result): + template = result['template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(template=template)) + return response + + def delete_template(self, response, result): + template = result['template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(template=template)) + return response + def get_template_detail(self, response, result): + template = result['template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(template=template)) + return response + def update_template(self, response, result): + template = result['template'] + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(dict(template=template)) + return response + + def export_db_to_json(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + + def import_json_to_template(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return 
response + + def import_template_to_db(self, response, result): + response.status = 201 + response.headers['Content-Type'] = 'application/json' + response.body = self.to_json(result) + return response + +def create_resource(): + """Templates resource factory method""" + deserializer = TemplateDeserializer() + serializer = TemplateSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/api/v1/upload_utils.py b/code/daisy/daisy/api/v1/upload_utils.py new file mode 100755 index 00000000..89c2fb4a --- /dev/null +++ b/code/daisy/daisy/api/v1/upload_utils.py @@ -0,0 +1,289 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import glance_store as store_api +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +import webob.exc + +from daisy.common import exception +from daisy.common import store_utils +from daisy.common import utils +import daisy.db +from daisy import i18n +import daisy.registry.client.v1.api as registry + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + + +def initiate_deletion(req, location_data, id): + """ + Deletes image data from the location of backend store. + + :param req: The WSGI/Webob Request object + :param location_data: Location to the image data in a data store + :param id: Opaque image identifier + """ + store_utils.delete_image_location_from_backend(req.context, + id, location_data) + + +def _kill(req, image_id, from_state): + """ + Marks the image status to `killed`. + + :param req: The WSGI/Webob Request object + :param image_id: Opaque image identifier + :param from_state: Permitted current status for transition to 'killed' + """ + # TODO(dosaboy): http://docs.openstack.org/developer/glance/statuses.html + # needs updating to reflect the fact that queued->killed and saving->killed + # are both allowed. + registry.update_image_metadata(req.context, image_id, + {'status': 'killed'}, + from_state=from_state) + + +def safe_kill(req, image_id, from_state): + """ + Mark image killed without raising exceptions if it fails. + + Since _kill is meant to be called from exceptions handlers, it should + not raise itself, rather it should just log its error. + + :param req: The WSGI/Webob Request object + :param image_id: Opaque image identifier + :param from_state: Permitted current status for transition to 'killed' + """ + try: + _kill(req, image_id, from_state) + except Exception: + LOG.exception(_LE("Unable to kill image %(id)s: ") % {'id': image_id}) + + +def upload_data_to_store(req, image_meta, image_data, store, notifier): + """ + Upload image data to specified store. + + Upload image data to the store and cleans up on error. + """ + image_id = image_meta['id'] + + db_api = daisy.db.get_api() + image_size = image_meta.get('size') + + try: + # By default image_data will be passed as CooperativeReader object. 
+ # But if 'user_storage_quota' is enabled and 'remaining' is not None + # then it will be passed as object of LimitingReader to + # 'store_add_to_backend' method. + image_data = utils.CooperativeReader(image_data) + + remaining = daisy.api.common.check_quota( + req.context, image_size, db_api, image_id=image_id) + if remaining is not None: + image_data = utils.LimitingReader(image_data, remaining) + + (uri, + size, + checksum, + location_metadata) = store_api.store_add_to_backend( + image_meta['id'], + image_data, + image_meta['size'], + store, + context=req.context) + + location_data = {'url': uri, + 'metadata': location_metadata, + 'status': 'active'} + + try: + # recheck the quota in case there were simultaneous uploads that + # did not provide the size + daisy.api.common.check_quota( + req.context, size, db_api, image_id=image_id) + except exception.StorageQuotaFull: + with excutils.save_and_reraise_exception(): + LOG.info(_LI('Cleaning up %s after exceeding ' + 'the quota') % image_id) + store_utils.safe_delete_from_backend( + req.context, image_meta['id'], location_data) + + def _kill_mismatched(image_meta, attr, actual): + supplied = image_meta.get(attr) + if supplied and supplied != actual: + msg = (_("Supplied %(attr)s (%(supplied)s) and " + "%(attr)s generated from uploaded image " + "(%(actual)s) did not match. Setting image " + "status to 'killed'.") % {'attr': attr, + 'supplied': supplied, + 'actual': actual}) + LOG.error(msg) + safe_kill(req, image_id, 'saving') + initiate_deletion(req, location_data, image_id) + raise webob.exc.HTTPBadRequest(explanation=msg, + content_type="text/plain", + request=req) + + # Verify any supplied size/checksum value matches size/checksum + # returned from store when adding image + _kill_mismatched(image_meta, 'size', size) + _kill_mismatched(image_meta, 'checksum', checksum) + + # Update the database with the checksum returned + # from the backend store + LOG.debug("Updating image %(image_id)s data. " + "Checksum set to %(checksum)s, size set " + "to %(size)d", {'image_id': image_id, + 'checksum': checksum, + 'size': size}) + update_data = {'checksum': checksum, + 'size': size} + try: + try: + state = 'saving' + image_meta = registry.update_image_metadata(req.context, + image_id, + update_data, + from_state=state) + except exception.Duplicate: + image = registry.get_image_metadata(req.context, image_id) + if image['status'] == 'deleted': + raise exception.NotFound() + else: + raise + except exception.NotFound: + msg = _LI("Image %s could not be found after upload. The image may" + " have been deleted during the upload.") % image_id + LOG.info(msg) + + # NOTE(jculp): we need to clean up the datastore if an image + # resource is deleted while the image data is being uploaded + # + # We get "location_data" from above call to store.add(), any + # exceptions that occur there handle this same issue internally, + # Since this is store-agnostic, should apply to all stores. + initiate_deletion(req, location_data, image_id) + raise webob.exc.HTTPPreconditionFailed(explanation=msg, + request=req, + content_type='text/plain') + + except store_api.StoreAddDisabled: + msg = _("Error in store configuration. 
Adding images to store " + "is disabled.") + LOG.exception(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPGone(explanation=msg, request=req, + content_type='text/plain') + + except exception.Duplicate as e: + msg = (_("Attempt to upload duplicate image: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + # NOTE(dosaboy): do not delete the image since it is likely that this + # conflict is a result of another concurrent upload that will be + # successful. + notifier.error('image.upload', msg) + raise webob.exc.HTTPConflict(explanation=msg, + request=req, + content_type="text/plain") + + except exception.Forbidden as e: + msg = (_("Forbidden upload attempt: %s") % + utils.exception_to_str(e)) + LOG.warn(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPForbidden(explanation=msg, + request=req, + content_type="text/plain") + + except store_api.StorageFull as e: + msg = (_("Image storage media is full: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req, + content_type='text/plain') + + except store_api.StorageWriteDenied as e: + msg = (_("Insufficient permissions on image storage media: %s") % + utils.exception_to_str(e)) + LOG.error(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPServiceUnavailable(explanation=msg, + request=req, + content_type='text/plain') + + except exception.ImageSizeLimitExceeded as e: + msg = (_("Denying attempt to upload image larger than %d bytes.") + % CONF.image_size_cap) + LOG.warn(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req, + content_type='text/plain') + + except exception.StorageQuotaFull as e: + msg = (_("Denying attempt to upload image because it exceeds the " + "quota: %s") % utils.exception_to_str(e)) + LOG.warn(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req, + content_type='text/plain') + + except webob.exc.HTTPError: + # NOTE(bcwaldon): Ideally, we would just call 'raise' here, + # but something in the above function calls is affecting the + # exception context and we must explicitly re-raise the + # caught exception. 
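# excutils.save_and_reraise_exception(), used just below, captures the
# exception currently being handled when the context manager is entered and
# re-raises it with the original traceback when the block exits, so the
# LOG.exception() and safe_kill() calls can run without swallowing the
# HTTP error.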
+ msg = _LE("Received HTTP error while uploading image %s") % image_id + notifier.error('image.upload', msg) + with excutils.save_and_reraise_exception(): + LOG.exception(msg) + safe_kill(req, image_id, 'saving') + + except (ValueError, IOError) as e: + msg = _("Client disconnected before sending all data to backend") + LOG.warn(msg) + safe_kill(req, image_id, 'saving') + raise webob.exc.HTTPBadRequest(explanation=msg, + content_type="text/plain", + request=req) + + except Exception as e: + msg = _("Failed to upload image %s") % image_id + LOG.exception(msg) + safe_kill(req, image_id, 'saving') + notifier.error('image.upload', msg) + raise webob.exc.HTTPInternalServerError(explanation=msg, + request=req, + content_type='text/plain') + + return image_meta, location_data diff --git a/code/daisy/daisy/api/v2/__init__.py b/code/daisy/daisy/api/v2/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/v2/image_actions.py b/code/daisy/daisy/api/v2/image_actions.py new file mode 100755 index 00000000..a982a449 --- /dev/null +++ b/code/daisy/daisy/api/v2/image_actions.py @@ -0,0 +1,89 @@ +# Copyright 2015 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import glance_store +from oslo_log import log as logging +import webob.exc + +from daisy.api import policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI + + +class ImageActionsController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.store_api = store_api or glance_store + self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy) + + @utils.mutating + def deactivate(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + image.deactivate() + image_repo.save(image) + LOG.info(_LI("Image %s is deactivated") % image_id) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidImageStatusTransition as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + + @utils.mutating + def reactivate(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + image.reactivate() + image_repo.save(image) + LOG.info(_LI("Image %s is reactivated") % image_id) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidImageStatusTransition as e: + raise 
webob.exc.HTTPBadRequest(explanation=e.msg) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + + def deactivate(self, response, result): + response.status_int = 204 + + def reactivate(self, response, result): + response.status_int = 204 + + +def create_resource(): + """Image data resource factory method""" + deserializer = None + serializer = ResponseSerializer() + controller = ImageActionsController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/image_data.py b/code/daisy/daisy/api/v2/image_data.py new file mode 100755 index 00000000..c0e62020 --- /dev/null +++ b/code/daisy/daisy/api/v2/image_data.py @@ -0,0 +1,250 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import glance_store +from oslo_log import log as logging +from oslo_utils import excutils +import webob.exc + +import daisy.api.policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + + +class ImageDataController(object): + def __init__(self, db_api=None, store_api=None, + policy_enforcer=None, notifier=None, + gateway=None): + if gateway is None: + db_api = db_api or daisy.db.get_api() + store_api = store_api or glance_store + policy = policy_enforcer or daisy.api.policy.Enforcer() + notifier = notifier or daisy.notifier.Notifier() + gateway = daisy.gateway.Gateway(db_api, store_api, + notifier, policy) + self.gateway = gateway + + def _restore(self, image_repo, image): + """ + Restore the image to queued status. + + :param image_repo: The instance of ImageRepo + :param image: The image will be restored + """ + try: + if image_repo and image: + image.status = 'queued' + image_repo.save(image) + except Exception as e: + msg = (_LE("Unable to restore image %(image_id)s: %(e)s") % + {'image_id': image.image_id, + 'e': utils.exception_to_str(e)}) + LOG.exception(msg) + + @utils.mutating + def upload(self, req, image_id, data, size): + image_repo = self.gateway.get_repo(req.context) + image = None + try: + image = image_repo.get(image_id) + image.status = 'saving' + try: + image_repo.save(image) + image.set_data(data, size) + image_repo.save(image, from_state='saving') + except (exception.NotFound, exception.Conflict): + msg = (_("Image %s could not be found after upload. " + "The image may have been deleted during the " + "upload, cleaning up the chunks uploaded.") % + image_id) + LOG.warn(msg) + # NOTE(sridevi): Cleaning up the uploaded chunks. 
+ try: + image.delete() + except exception.NotFound: + # NOTE(sridevi): Ignore this exception + pass + raise webob.exc.HTTPGone(explanation=msg, + request=req, + content_type='text/plain') + + except ValueError as e: + LOG.debug("Cannot save data for image %(id)s: %(e)s", + {'id': image_id, 'e': utils.exception_to_str(e)}) + self._restore(image_repo, image) + raise webob.exc.HTTPBadRequest( + explanation=utils.exception_to_str(e)) + + except glance_store.StoreAddDisabled: + msg = _("Error in store configuration. Adding images to store " + "is disabled.") + LOG.exception(msg) + self._restore(image_repo, image) + raise webob.exc.HTTPGone(explanation=msg, request=req, + content_type='text/plain') + + except exception.InvalidImageStatusTransition as e: + msg = utils.exception_to_str(e) + LOG.exception(msg) + raise webob.exc.HTTPConflict(explanation=e.msg, request=req) + + except exception.Forbidden as e: + msg = ("Not allowed to upload image data for image %s" % + image_id) + LOG.debug(msg) + raise webob.exc.HTTPForbidden(explanation=msg, request=req) + + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + + except glance_store.StorageFull as e: + msg = _("Image storage media " + "is full: %s") % utils.exception_to_str(e) + LOG.error(msg) + self._restore(image_repo, image) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req) + + except exception.StorageQuotaFull as e: + msg = _("Image exceeds the storage " + "quota: %s") % utils.exception_to_str(e) + LOG.error(msg) + self._restore(image_repo, image) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req) + + except exception.ImageSizeLimitExceeded as e: + msg = _("The incoming image is " + "too large: %s") % utils.exception_to_str(e) + LOG.error(msg) + self._restore(image_repo, image) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg, + request=req) + + except glance_store.StorageWriteDenied as e: + msg = _("Insufficient permissions on image " + "storage media: %s") % utils.exception_to_str(e) + LOG.error(msg) + self._restore(image_repo, image) + raise webob.exc.HTTPServiceUnavailable(explanation=msg, + request=req) + + except webob.exc.HTTPGone as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Failed to upload image data due to HTTP error")) + + except webob.exc.HTTPError as e: + with excutils.save_and_reraise_exception(): + LOG.error(_LE("Failed to upload image data due to HTTP error")) + self._restore(image_repo, image) + + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("Failed to upload image data due to " + "internal error")) + self._restore(image_repo, image) + + def download(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + if image.status == 'deactivated': + msg = _('The requested image has been deactivated. 
' + 'Image data download is forbidden.') + raise exception.Forbidden(message=msg) + if not image.locations: + raise exception.ImageDataNotFound() + except exception.ImageDataNotFound as e: + raise webob.exc.HTTPNoContent(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + + return image + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + + def upload(self, request): + try: + request.get_content_type(('application/octet-stream',)) + except exception.InvalidContentType as e: + raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg) + + image_size = request.content_length or None + return {'size': image_size, 'data': request.body_file} + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + + def download(self, response, image): + offset, chunk_size = 0, None + range_val = response.request.get_content_range() + + if range_val: + # NOTE(flaper87): if not present, both, start + # and stop, will be None. + if range_val.start is not None: + offset = range_val.start + + if range_val.stop is not None: + chunk_size = range_val.stop - offset + + response.headers['Content-Type'] = 'application/octet-stream' + + try: + # NOTE(markwash): filesystem store (and maybe others?) cause a + # problem with the caching middleware if they are not wrapped in + # an iterator very strange + response.app_iter = iter(image.get_data(offset=offset, + chunk_size=chunk_size)) + except glance_store.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except glance_store.RemoteServiceUnavailable as e: + raise webob.exc.HTTPServiceUnavailable(explanation=e.msg) + except (glance_store.StoreGetNotSupported, + glance_store.StoreRandomGetNotSupported) as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + # NOTE(saschpe): "response.app_iter = ..." currently resets Content-MD5 + # (https://github.com/Pylons/webob/issues/86), so it should be set + # afterwards for the time being. + if image.checksum: + response.headers['Content-MD5'] = image.checksum + # NOTE(markwash): "response.app_iter = ..." also erroneously resets the + # content-length + response.headers['Content-Length'] = str(image.size) + + def upload(self, response, result): + response.status_int = 204 + + +def create_resource(): + """Image data resource factory method""" + deserializer = RequestDeserializer() + serializer = ResponseSerializer() + controller = ImageDataController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/image_members.py b/code/daisy/daisy/api/v2/image_members.py new file mode 100755 index 00000000..1d2d615c --- /dev/null +++ b/code/daisy/daisy/api/v2/image_members.py @@ -0,0 +1,351 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
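+
+# NOTE(editor): illustrative sketch only, not part of the original patch.
+# It shows how the v2 member-sharing API implemented in this module is
+# typically driven from a client, assuming python-requests, the usual
+# /v2/images/{image_id}/members routing and placeholder endpoint, token
+# and tenant values; the request bodies ({'member': ...} and
+# {'status': ...}) match the RequestDeserializer defined below.
+def _example_share_image(endpoint, token, image_id, tenant_id):
+    import requests
+
+    headers = {'X-Auth-Token': token, 'Content-Type': 'application/json'}
+    # the image owner shares the image with another tenant
+    requests.post('%s/v2/images/%s/members' % (endpoint, image_id),
+                  json={'member': tenant_id}, headers=headers)
+    # the receiving tenant accepts the share; valid statuses are
+    # 'pending', 'accepted' and 'rejected' (see _MEMBER_SCHEMA below)
+    requests.put('%s/v2/images/%s/members/%s' % (endpoint, image_id,
+                                                 tenant_id),
+                 json={'status': 'accepted'}, headers=headers)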
+ +import copy + +import glance_store +from oslo.serialization import jsonutils +from oslo_log import log as logging +from oslo_utils import timeutils +import six +import webob + +from daisy.api import policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + + +LOG = logging.getLogger(__name__) +_ = i18n._ + + +class ImageMembersController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.store_api = store_api or glance_store + self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy) + + @utils.mutating + def create(self, req, image_id, member_id): + """ + Adds a membership to the image. + :param req: the Request object coming from the wsgi layer + :param image_id: the image identifier + :param member_id: the member identifier + :retval The response body is a mapping of the following form:: + + {'member_id': , + 'image_id': , + 'status': + 'created_at': .., + 'updated_at': ..} + + """ + image_repo = self.gateway.get_repo(req.context) + image_member_factory = self.gateway.get_image_member_factory( + req.context) + try: + image = image_repo.get(image_id) + member_repo = image.get_member_repo() + new_member = image_member_factory.new_image_member(image, + member_id) + member_repo.add(new_member) + + return new_member + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to create members for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + except exception.Duplicate: + msg = _("Member %(member_id)s is duplicated for image " + "%(image_id)s") % {"member_id": member_id, + "image_id": image_id} + LOG.warning(msg) + raise webob.exc.HTTPConflict(explanation=msg) + except exception.ImageMemberLimitExceeded as e: + msg = (_("Image member limit exceeded for image %(id)s: %(e)s:") + % {"id": image_id, "e": utils.exception_to_str(e)}) + LOG.warning(msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + + @utils.mutating + def update(self, req, image_id, member_id, status): + """ + Adds a membership to the image. 
+ :param req: the Request object coming from the wsgi layer + :param image_id: the image identifier + :param member_id: the member identifier + :retval The response body is a mapping of the following form:: + + {'member_id': , + 'image_id': , + 'status': + 'created_at': .., + 'updated_at': ..} + + """ + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + member_repo = image.get_member_repo() + member = member_repo.get(member_id) + member.status = status + member_repo.save(member) + return member + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to update members for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + except ValueError as e: + msg = _("Incorrect request: %s") % utils.exception_to_str(e) + LOG.warning(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) + + def index(self, req, image_id): + """ + Return a list of dictionaries indicating the members of the + image, i.e., those tenants the image is shared with. + + :param req: the Request object coming from the wsgi layer + :param image_id: The image identifier + :retval The response body is a mapping of the following form:: + + {'members': [ + {'member_id': , + 'image_id': , + 'status': + 'created_at': .., + 'updated_at': ..}, .. + ]} + """ + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + member_repo = image.get_member_repo() + members = [] + for member in member_repo.list(): + members.append(member) + return dict(members=members) + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to list members for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + + def show(self, req, image_id, member_id): + """ + Returns the membership of the tenant wrt to the image_id specified. + + :param req: the Request object coming from the wsgi layer + :param image_id: The image identifier + :retval The response body is a mapping of the following form:: + + {'member_id': , + 'image_id': , + 'status': + 'created_at': .., + 'updated_at': ..} + """ + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + member_repo = image.get_member_repo() + member = member_repo.get(member_id) + return member + except (exception.NotFound, exception.Forbidden): + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + + @utils.mutating + def delete(self, req, image_id, member_id): + """ + Removes a membership from the image. 
+ """ + + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + member_repo = image.get_member_repo() + member = member_repo.get(member_id) + member_repo.remove(member) + return webob.Response(body='', status=204) + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to delete members for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + + def __init__(self): + super(RequestDeserializer, self).__init__() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + def create(self, request): + body = self._get_request_body(request) + try: + member_id = body['member'] + if not member_id: + raise ValueError() + except KeyError: + msg = _("Member to be added not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) + except ValueError: + msg = _("Member can't be empty") + raise webob.exc.HTTPBadRequest(explanation=msg) + return dict(member_id=member_id) + + def update(self, request): + body = self._get_request_body(request) + try: + status = body['status'] + except KeyError: + msg = _("Status not specified") + raise webob.exc.HTTPBadRequest(explanation=msg) + return dict(status=status) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema or get_schema() + + def _format_image_member(self, member): + member_view = {} + attributes = ['member_id', 'image_id', 'status'] + for key in attributes: + member_view[key] = getattr(member, key) + member_view['created_at'] = timeutils.isotime(member.created_at) + member_view['updated_at'] = timeutils.isotime(member.updated_at) + member_view['schema'] = '/v2/schemas/member' + member_view = self.schema.filter(member_view) + return member_view + + def create(self, response, image_member): + image_member_view = self._format_image_member(image_member) + body = jsonutils.dumps(image_member_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def update(self, response, image_member): + image_member_view = self._format_image_member(image_member) + body = jsonutils.dumps(image_member_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, image_members): + image_members = image_members['members'] + image_members_view = [] + for image_member in image_members: + image_member_view = self._format_image_member(image_member) + image_members_view.append(image_member_view) + totalview = dict(members=image_members_view) + totalview['schema'] = '/v2/schemas/members' + body = jsonutils.dumps(totalview, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def show(self, response, image_member): + image_member_view = self._format_image_member(image_member) + body = jsonutils.dumps(image_member_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + +_MEMBER_SCHEMA = { + 'member_id': { + 'type': 'string', + 'description': 
_('An identifier for the image member (tenantId)') + }, + 'image_id': { + 'type': 'string', + 'description': _('An identifier for the image'), + 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' + '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), + }, + 'created_at': { + 'type': 'string', + 'description': _('Date and time of image member creation'), + # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the + # format attribute, figure out why (and also fix in images.py) + # 'format': 'date-time', + }, + 'updated_at': { + 'type': 'string', + 'description': _('Date and time of last modification of image member'), + # 'format': 'date-time', + }, + 'status': { + 'type': 'string', + 'description': _('The status of this image member'), + 'enum': [ + 'pending', + 'accepted', + 'rejected' + ] + }, + 'schema': {'type': 'string'} +} + + +def get_schema(): + properties = copy.deepcopy(_MEMBER_SCHEMA) + schema = daisy.schema.Schema('member', properties) + return schema + + +def get_collection_schema(): + member_schema = get_schema() + return daisy.schema.CollectionSchema('members', member_schema) + + +def create_resource(): + """Image Members resource factory method""" + deserializer = RequestDeserializer() + serializer = ResponseSerializer() + controller = ImageMembersController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/image_tags.py b/code/daisy/daisy/api/v2/image_tags.py new file mode 100755 index 00000000..745884ef --- /dev/null +++ b/code/daisy/daisy/api/v2/image_tags.py @@ -0,0 +1,99 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
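+
+# NOTE(editor): illustrative sketch only, not part of the original patch.
+# The controller below adds or removes one tag per call and the serializer
+# replies with 204; a client-side helper might look like this, assuming
+# python-requests, the usual /v2/images/{image_id}/tags/{tag} routing and
+# placeholder endpoint/token values.
+def _example_tag_image(endpoint, token, image_id, tag):
+    import requests
+
+    headers = {'X-Auth-Token': token}
+    url = '%s/v2/images/%s/tags/%s' % (endpoint, image_id, tag)
+    resp = requests.put(url, headers=headers)     # add the tag
+    assert resp.status_code == 204
+    resp = requests.delete(url, headers=headers)  # remove it again
+    assert resp.status_code == 204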
+import glance_store +from oslo_log import log as logging +import webob.exc + +from daisy.api import policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier + + +LOG = logging.getLogger(__name__) +_ = i18n._ + + +class Controller(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.store_api = store_api or glance_store + self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy) + + @utils.mutating + def update(self, req, image_id, tag_value): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + image.tags.add(tag_value) + image_repo.save(image) + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to update tags for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + except exception.Invalid as e: + msg = _("Could not update image: %s") % utils.exception_to_str(e) + LOG.warning(msg) + raise webob.exc.HTTPBadRequest(explanation=msg) + except exception.ImageTagLimitExceeded as e: + msg = (_("Image tag limit exceeded for image %(id)s: %(e)s:") + % {"id": image_id, "e": utils.exception_to_str(e)}) + LOG.warning(msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + + @utils.mutating + def delete(self, req, image_id, tag_value): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + if tag_value not in image.tags: + raise webob.exc.HTTPNotFound() + image.tags.remove(tag_value) + image_repo.save(image) + except exception.NotFound: + msg = _("Image %s not found.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.Forbidden: + msg = _("Not allowed to delete tags for image %s.") % image_id + LOG.warning(msg) + raise webob.exc.HTTPForbidden(explanation=msg) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def update(self, response, result): + response.status_int = 204 + + def delete(self, response, result): + response.status_int = 204 + + +def create_resource(): + """Images resource factory method""" + serializer = ResponseSerializer() + controller = Controller() + return wsgi.Resource(controller, serializer=serializer) diff --git a/code/daisy/daisy/api/v2/images.py b/code/daisy/daisy/api/v2/images.py new file mode 100755 index 00000000..6151ec03 --- /dev/null +++ b/code/daisy/daisy/api/v2/images.py @@ -0,0 +1,957 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
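+
+# NOTE(editor): illustrative sketch only, not part of the original patch.
+# The RequestDeserializer below accepts image updates as a JSON array of
+# patch operations under the 'application/openstack-images-v2.1-json-patch'
+# content type (JSON Schema draft 10). A minimal client-side call might
+# look like this, assuming python-requests and placeholder endpoint/token
+# values; 'os_distro' is just an example of a custom extra property.
+def _example_patch_image(endpoint, token, image_id):
+    import json
+
+    import requests
+
+    headers = {
+        'X-Auth-Token': token,
+        'Content-Type': 'application/openstack-images-v2.1-json-patch',
+    }
+    patch = [
+        {'op': 'replace', 'path': '/name', 'value': 'renamed-image'},
+        {'op': 'add', 'path': '/os_distro', 'value': 'centos7'},
+    ]
+    resp = requests.patch('%s/v2/images/%s' % (endpoint, image_id),
+                          data=json.dumps(patch), headers=headers)
+    return resp.json()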
+ +import re + +import glance_store +from oslo.serialization import jsonutils as json +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import timeutils +import six +import six.moves.urllib.parse as urlparse +import webob.exc + +from daisy.api import policy +from daisy.common import exception +from daisy.common import location_strategy +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + +CONF = cfg.CONF +CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format') +CONF.import_opt('container_formats', 'daisy.common.config', + group='image_format') + + +class ImagesController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.store_api = store_api or glance_store + self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy) + + @utils.mutating + def create(self, req, image, extra_properties, tags): + image_factory = self.gateway.get_image_factory(req.context) + image_repo = self.gateway.get_repo(req.context) + try: + image = image_factory.new_image(extra_properties=extra_properties, + tags=tags, **image) + image_repo.add(image) + except exception.DuplicateLocation as dup: + raise webob.exc.HTTPBadRequest(explanation=dup.msg) + except exception.Invalid as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidParameterValue as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.LimitExceeded as e: + LOG.warn(utils.exception_to_str(e)) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=e.msg, request=req, content_type='text/plain') + except exception.Duplicate as dupex: + raise webob.exc.HTTPConflict(explanation=dupex.msg) + except exception.ReservedProperty as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.ReadonlyProperty as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except TypeError as e: + LOG.debug(utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest( + explanation=utils.exception_to_str(e)) + + return image + + def index(self, req, marker=None, limit=None, sort_key=None, + sort_dir=None, filters=None, member_status='accepted'): + sort_key = ['created_at'] if not sort_key else sort_key + + sort_dir = ['desc'] if not sort_dir else sort_dir + + result = {} + if filters is None: + filters = {} + filters['deleted'] = False + + if limit is None: + limit = CONF.limit_param_default + limit = min(CONF.api_limit_max, limit) + + image_repo = self.gateway.get_repo(req.context) + try: + images = image_repo.list(marker=marker, limit=limit, + sort_key=sort_key, + sort_dir=sort_dir, + filters=filters, + member_status=member_status) + if len(images) != 0 and len(images) == limit: + result['next_marker'] = images[-1].image_id + except (exception.NotFound, exception.InvalidSortKey, + exception.InvalidFilterRangeValue) as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + result['images'] = images + return result + + def show(self, req, image_id): + image_repo = 
self.gateway.get_repo(req.context) + try: + return image_repo.get(image_id) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + + @utils.mutating + def update(self, req, image_id, changes): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + + for change in changes: + change_method_name = '_do_%s' % change['op'] + assert hasattr(self, change_method_name) + change_method = getattr(self, change_method_name) + change_method(req, image, change) + + if changes: + image_repo.save(image) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Invalid as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.InvalidParameterValue as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.StorageQuotaFull as e: + msg = (_("Denying attempt to upload image because it exceeds the" + " quota: %s") % utils.exception_to_str(e)) + LOG.warn(msg) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=msg, request=req, content_type='text/plain') + except exception.LimitExceeded as e: + LOG.exception(utils.exception_to_str(e)) + raise webob.exc.HTTPRequestEntityTooLarge( + explanation=e.msg, request=req, content_type='text/plain') + + return image + + def _do_replace(self, req, image, change): + path = change['path'] + path_root = path[0] + value = change['value'] + if path_root == 'locations': + self._do_replace_locations(image, value) + else: + if hasattr(image, path_root): + setattr(image, path_root, value) + elif path_root in image.extra_properties: + image.extra_properties[path_root] = value + else: + msg = _("Property %s does not exist.") + raise webob.exc.HTTPConflict(msg % path_root) + + def _do_add(self, req, image, change): + path = change['path'] + path_root = path[0] + value = change['value'] + json_schema_version = change.get('json_schema_version', 10) + if path_root == 'locations': + self._do_add_locations(image, path[1], value) + else: + if ((hasattr(image, path_root) or + path_root in image.extra_properties) + and json_schema_version == 4): + msg = _("Property %s already present.") + raise webob.exc.HTTPConflict(msg % path_root) + if hasattr(image, path_root): + setattr(image, path_root, value) + else: + image.extra_properties[path_root] = value + + def _do_remove(self, req, image, change): + path = change['path'] + path_root = path[0] + if path_root == 'locations': + self._do_remove_locations(image, path[1]) + else: + if hasattr(image, path_root): + msg = _("Property %s may not be removed.") + raise webob.exc.HTTPForbidden(msg % path_root) + elif path_root in image.extra_properties: + del image.extra_properties[path_root] + else: + msg = _("Property %s does not exist.") + raise webob.exc.HTTPConflict(msg % path_root) + + @utils.mutating + def delete(self, req, image_id): + image_repo = self.gateway.get_repo(req.context) + try: + image = image_repo.get(image_id) + image.delete() + image_repo.remove(image) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + msg = (_("Failed to find image %(image_id)s to delete") % + {'image_id': image_id}) + LOG.warn(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except exception.InUseByStore as e: + msg = (_("Image %(id)s could not be deleted " + 
"because it is in use: %(exc)s") % + {"id": image_id, + "exc": e.msg}) + LOG.warn(msg) + raise webob.exc.HTTPConflict(explanation=msg) + + def _get_locations_op_pos(self, path_pos, max_pos, allow_max): + if path_pos is None or max_pos is None: + return None + pos = max_pos if allow_max else max_pos - 1 + if path_pos.isdigit(): + pos = int(path_pos) + elif path_pos != '-': + return None + if not (allow_max or 0 <= pos < max_pos): + return None + return pos + + def _do_replace_locations(self, image, value): + if len(image.locations) > 0 and len(value) > 0: + msg = _("Cannot replace locations from a non-empty " + "list to a non-empty list.") + raise webob.exc.HTTPBadRequest(explanation=msg) + if len(value) == 0: + # NOTE(zhiyan): this actually deletes the location + # from the backend store. + del image.locations[:] + if image.status == 'active': + image.status = 'queued' + else: # NOTE(zhiyan): len(image.locations) == 0 + try: + image.locations = value + if image.status == 'queued': + image.status = 'active' + except (exception.BadStoreUri, exception.DuplicateLocation) as bse: + raise webob.exc.HTTPBadRequest(explanation=bse.msg) + except ValueError as ve: # update image status failed. + raise webob.exc.HTTPBadRequest( + explanation=utils.exception_to_str(ve)) + + def _do_add_locations(self, image, path_pos, value): + pos = self._get_locations_op_pos(path_pos, + len(image.locations), True) + if pos is None: + msg = _("Invalid position for adding a location.") + raise webob.exc.HTTPBadRequest(explanation=msg) + try: + image.locations.insert(pos, value) + if image.status == 'queued': + image.status = 'active' + except (exception.BadStoreUri, exception.DuplicateLocation) as bse: + raise webob.exc.HTTPBadRequest(explanation=bse.msg) + except ValueError as ve: # update image status failed. + raise webob.exc.HTTPBadRequest( + explanation=utils.exception_to_str(ve)) + + def _do_remove_locations(self, image, path_pos): + pos = self._get_locations_op_pos(path_pos, + len(image.locations), False) + if pos is None: + msg = _("Invalid position for removing a location.") + raise webob.exc.HTTPBadRequest(explanation=msg) + try: + # NOTE(zhiyan): this actually deletes the location + # from the backend store. 
+ image.locations.pop(pos) + except Exception as e: + raise webob.exc.HTTPInternalServerError( + explanation=utils.exception_to_str(e)) + if len(image.locations) == 0 and image.status == 'active': + image.status = 'queued' + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + + _disallowed_properties = ('direct_url', 'self', 'file', 'schema') + _readonly_properties = ('created_at', 'updated_at', 'status', 'checksum', + 'size', 'virtual_size', 'direct_url', 'self', + 'file', 'schema') + _reserved_properties = ('owner', 'location', 'deleted', 'deleted_at') + _base_properties = ('checksum', 'created_at', 'container_format', + 'disk_format', 'id', 'min_disk', 'min_ram', 'name', + 'size', 'virtual_size', 'status', 'tags', + 'updated_at', 'visibility', 'protected') + _available_sort_keys = ('name', 'status', 'container_format', + 'disk_format', 'size', 'id', 'created_at', + 'updated_at') + + _default_sort_key = 'created_at' + + _default_sort_dir = 'desc' + + _path_depth_limits = {'locations': {'add': 2, 'remove': 2, 'replace': 1}} + + _default_sort_dir = 'desc' + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden( + explanation=utils.exception_to_str(msg)) + + def create(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + image = {} + properties = body + tags = properties.pop('tags', []) + for key in self._base_properties: + try: + # NOTE(flwang): Instead of changing the _check_unexpected + # of ImageFactory. It would be better to do the mapping + # at here. + if key == 'id': + image['image_id'] = properties.pop(key) + else: + image[key] = properties.pop(key) + except KeyError: + pass + return dict(image=image, extra_properties=properties, tags=tags) + + def _get_change_operation_d10(self, raw_change): + try: + return raw_change['op'] + except KeyError: + msg = _("Unable to find '%s' in JSON Schema change") % 'op' + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_change_operation_d4(self, raw_change): + op = None + for key in ['replace', 'add', 'remove']: + if key in raw_change: + if op is not None: + msg = _('Operation objects must contain only one member' + ' named "add", "remove", or "replace".') + raise webob.exc.HTTPBadRequest(explanation=msg) + op = key + if op is None: + msg = _('Operation objects must contain exactly one member' + ' named "add", "remove", or "replace".') + raise webob.exc.HTTPBadRequest(explanation=msg) + return op + + def _get_change_path_d10(self, raw_change): + try: + return raw_change['path'] + except KeyError: + msg = _("Unable to find '%s' in JSON Schema change") % 'path' + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_change_path_d4(self, raw_change, op): + return raw_change[op] + + def _decode_json_pointer(self, pointer): + """Parse a json pointer. + + Json Pointers are defined in + http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . 
+ The pointers use '/' for separation between object attributes, such + that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character + in an attribute name is encoded as "~1" and a '~' character is encoded + as "~0". + """ + self._validate_json_pointer(pointer) + ret = [] + for part in pointer.lstrip('/').split('/'): + ret.append(part.replace('~1', '/').replace('~0', '~').strip()) + return ret + + def _validate_json_pointer(self, pointer): + """Validate a json pointer. + + We only accept a limited form of json pointers. + """ + if not pointer.startswith('/'): + msg = _('Pointer `%s` does not start with "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if re.search('/\s*?/', pointer[1:]): + msg = _('Pointer `%s` contains adjacent "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if len(pointer) > 1 and pointer.endswith('/'): + msg = _('Pointer `%s` end with "/".') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if pointer[1:].strip() == '/': + msg = _('Pointer `%s` does not contains valid token.') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + if re.search('~[^01]', pointer) or pointer.endswith('~'): + msg = _('Pointer `%s` contains "~" not part of' + ' a recognized escape sequence.') % pointer + raise webob.exc.HTTPBadRequest(explanation=msg) + + def _get_change_value(self, raw_change, op): + if 'value' not in raw_change: + msg = _('Operation "%s" requires a member named "value".') + raise webob.exc.HTTPBadRequest(explanation=msg % op) + return raw_change['value'] + + def _validate_change(self, change): + path_root = change['path'][0] + if path_root in self._readonly_properties: + msg = _("Attribute '%s' is read-only.") % path_root + raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) + if path_root in self._reserved_properties: + msg = _("Attribute '%s' is reserved.") % path_root + raise webob.exc.HTTPForbidden(explanation=six.text_type(msg)) + + if change['op'] == 'delete': + return + + partial_image = None + if len(change['path']) == 1: + partial_image = {path_root: change['value']} + elif ((path_root in get_base_properties().keys()) and + (get_base_properties()[path_root].get('type', '') == 'array')): + # NOTE(zhiyan): cient can use PATCH API to adding element to + # the image's existing set property directly. + # Such as: 1. using '/locations/N' path to adding a location + # to the image's 'locations' list at N position. + # (implemented) + # 2. using '/tags/-' path to appending a tag to the + # image's 'tags' list at last. 
(Not implemented) + partial_image = {path_root: [change['value']]} + + if partial_image: + try: + self.schema.validate(partial_image) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + + def _validate_path(self, op, path): + path_root = path[0] + limits = self._path_depth_limits.get(path_root, {}) + if len(path) != limits.get(op, 1): + msg = _("Invalid JSON pointer for this resource: " + "'/%s'") % '/'.join(path) + raise webob.exc.HTTPBadRequest(explanation=six.text_type(msg)) + + def _parse_json_schema_change(self, raw_change, draft_version): + if draft_version == 10: + op = self._get_change_operation_d10(raw_change) + path = self._get_change_path_d10(raw_change) + elif draft_version == 4: + op = self._get_change_operation_d4(raw_change) + path = self._get_change_path_d4(raw_change, op) + else: + msg = _('Unrecognized JSON Schema draft version') + raise webob.exc.HTTPBadRequest(explanation=msg) + + path_list = self._decode_json_pointer(path) + return op, path_list + + def update(self, request): + changes = [] + content_types = { + 'application/openstack-images-v2.0-json-patch': 4, + 'application/openstack-images-v2.1-json-patch': 10, + } + if request.content_type not in content_types: + headers = {'Accept-Patch': + ', '.join(sorted(content_types.keys()))} + raise webob.exc.HTTPUnsupportedMediaType(headers=headers) + + json_schema_version = content_types[request.content_type] + + body = self._get_request_body(request) + + if not isinstance(body, list): + msg = _('Request body must be a JSON array of operation objects.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + for raw_change in body: + if not isinstance(raw_change, dict): + msg = _('Operations must be JSON objects.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + (op, path) = self._parse_json_schema_change(raw_change, + json_schema_version) + + # NOTE(zhiyan): the 'path' is a list. + self._validate_path(op, path) + change = {'op': op, 'path': path, + 'json_schema_version': json_schema_version} + + if not op == 'remove': + change['value'] = self._get_change_value(raw_change, op) + self._validate_change(change) + + changes.append(change) + + return {'changes': changes} + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def _validate_sort_key(self, sort_key): + if sort_key not in self._available_sort_keys: + msg = _('Invalid sort key: %(sort_key)s. 
' + 'It must be one of the following: %(available)s.') % \ + {'sort_key': sort_key, + 'available': ', '.join(self._available_sort_keys)} + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_key + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _validate_member_status(self, member_status): + if member_status not in ['pending', 'accepted', 'rejected', 'all']: + msg = _('Invalid status: %s') % member_status + raise webob.exc.HTTPBadRequest(explanation=msg) + + return member_status + + def _get_filters(self, filters): + visibility = filters.get('visibility') + if visibility: + if visibility not in ['public', 'private', 'shared']: + msg = _('Invalid visibility value: %s') % visibility + raise webob.exc.HTTPBadRequest(explanation=msg) + changes_since = filters.get('changes-since', None) + if changes_since: + msg = _('The "changes-since" filter is no longer available on v2.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + def _get_sorting_params(self, params): + """ + Process sorting params. + Currently glance supports two sorting syntax: classic and new one, + that is uniform for all Openstack projects. + Classic syntax: sort_key=name&sort_dir=asc&sort_key=size&sort_dir=desc + New syntax: sort=name:asc,size:desc + """ + sort_keys = [] + sort_dirs = [] + + if 'sort' in params: + # use new sorting syntax here + if 'sort_key' in params or 'sort_dir' in params: + msg = _('Old and new sorting syntax cannot be combined') + raise webob.exc.HTTPBadRequest(explanation=msg) + for sort_param in params.pop('sort').strip().split(','): + key, _sep, dir = sort_param.partition(':') + if not dir: + dir = self._default_sort_dir + sort_keys.append(self._validate_sort_key(key.strip())) + sort_dirs.append(self._validate_sort_dir(dir.strip())) + else: + # continue with classic syntax + # NOTE(mfedosin): we have 3 options here: + # 1. sort_dir wasn't passed: we use default one - 'desc'. + # 2. Only one sort_dir was passed: use it for every sort_key + # in the list. + # 3. Multiple sort_dirs were passed: consistently apply each one to + # the corresponding sort_key. + # If number of sort_dirs and sort_keys doesn't match then raise an + # exception. + while 'sort_key' in params: + sort_keys.append(self._validate_sort_key( + params.pop('sort_key').strip())) + + while 'sort_dir' in params: + sort_dirs.append(self._validate_sort_dir( + params.pop('sort_dir').strip())) + + if sort_dirs: + dir_len = len(sort_dirs) + key_len = len(sort_keys) + + if dir_len > 1 and dir_len != key_len: + msg = _('Number of sort dirs does not match the number ' + 'of sort keys') + raise webob.exc.HTTPBadRequest(explanation=msg) + + if not sort_keys: + sort_keys = [self._default_sort_key] + + if not sort_dirs: + sort_dirs = [self._default_sort_dir] + + return sort_keys, sort_dirs + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + member_status = params.pop('member_status', 'accepted') + + # NOTE (flwang) To avoid using comma or any predefined chars to split + # multiple tags, now we allow user specify multiple 'tag' parameters + # in URL, such as v2/images?tag=x86&tag=64bit. 
+ tags = [] + while 'tag' in params: + tags.append(params.pop('tag').strip()) + + query_params = { + 'filters': self._get_filters(params), + 'member_status': self._validate_member_status(member_status), + } + + if marker is not None: + query_params['marker'] = marker + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + + if tags: + query_params['filters']['tags'] = tags + + # NOTE(mfedosin): param is still called sort_key and sort_dir, + # instead of sort_keys and sort_dirs respectively. + # It's done because in v1 it's still a single value. + + query_params['sort_key'], query_params['sort_dir'] = \ + self._get_sorting_params(params) + + return query_params + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema or get_schema() + + def _get_image_href(self, image, subcollection=''): + base_href = '/v2/images/%s' % image.image_id + if subcollection: + base_href = '%s/%s' % (base_href, subcollection) + return base_href + + def _format_image(self, image): + image_view = dict() + try: + image_view = dict(image.extra_properties) + attributes = ['name', 'disk_format', 'container_format', + 'visibility', 'size', 'virtual_size', 'status', + 'checksum', 'protected', 'min_ram', 'min_disk', + 'owner'] + for key in attributes: + image_view[key] = getattr(image, key) + image_view['id'] = image.image_id + image_view['created_at'] = timeutils.isotime(image.created_at) + image_view['updated_at'] = timeutils.isotime(image.updated_at) + + if CONF.show_multiple_locations: + locations = list(image.locations) + if locations: + image_view['locations'] = [] + for loc in locations: + tmp = dict(loc) + tmp.pop('id', None) + tmp.pop('status', None) + image_view['locations'].append(tmp) + else: + # NOTE (flwang): We will still show "locations": [] if + # image.locations is None to indicate it's allowed to show + # locations but it's just non-existent. 
+ image_view['locations'] = [] + LOG.debug("There is not available location " + "for image %s" % image.image_id) + + if CONF.show_image_direct_url: + if image.locations: + # Choose best location configured strategy + l = location_strategy.choose_best_location(image.locations) + image_view['direct_url'] = l['url'] + else: + LOG.debug("There is not available location " + "for image %s" % image.image_id) + + image_view['tags'] = list(image.tags) + image_view['self'] = self._get_image_href(image) + image_view['file'] = self._get_image_href(image, 'file') + image_view['schema'] = '/v2/schemas/image' + image_view = self.schema.filter(image_view) # domain + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + return image_view + + def create(self, response, image): + response.status_int = 201 + self.show(response, image) + response.location = self._get_image_href(image) + + def show(self, response, image): + image_view = self._format_image(image) + body = json.dumps(image_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def update(self, response, image): + image_view = self._format_image(image) + body = json.dumps(image_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, result): + params = dict(response.request.params) + params.pop('marker', None) + query = urlparse.urlencode(params) + body = { + 'images': [self._format_image(i) for i in result['images']], + 'first': '/v2/images', + 'schema': '/v2/schemas/images', + } + if query: + body['first'] = '%s?%s' % (body['first'], query) + if 'next_marker' in result: + params['marker'] = result['next_marker'] + next_query = urlparse.urlencode(params) + body['next'] = '/v2/images?%s' % next_query + response.unicode_body = six.text_type(json.dumps(body, + ensure_ascii=False)) + response.content_type = 'application/json' + + def delete(self, response, result): + response.status_int = 204 + + +def get_base_properties(): + return { + 'id': { + 'type': 'string', + 'description': _('An identifier for the image'), + 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' + '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), + }, + 'name': { + 'type': ['null', 'string'], + 'description': _('Descriptive name for the image'), + 'maxLength': 255, + }, + 'status': { + 'type': 'string', + 'description': _('Status of the image (READ-ONLY)'), + 'enum': ['queued', 'saving', 'active', 'killed', + 'deleted', 'pending_delete'], + }, + 'visibility': { + 'type': 'string', + 'description': _('Scope of image accessibility'), + 'enum': ['public', 'private'], + }, + 'protected': { + 'type': 'boolean', + 'description': _('If true, image will not be deletable.'), + }, + 'checksum': { + 'type': ['null', 'string'], + 'description': _('md5 hash of image contents. 
(READ-ONLY)'), + 'maxLength': 32, + }, + 'owner': { + 'type': ['null', 'string'], + 'description': _('Owner of the image'), + 'maxLength': 255, + }, + 'size': { + 'type': ['null', 'integer'], + 'description': _('Size of image file in bytes (READ-ONLY)'), + }, + 'virtual_size': { + 'type': ['null', 'integer'], + 'description': _('Virtual size of image in bytes (READ-ONLY)'), + }, + 'container_format': { + 'type': ['null', 'string'], + 'description': _('Format of the container'), + 'enum': [None] + CONF.image_format.container_formats, + }, + 'disk_format': { + 'type': ['null', 'string'], + 'description': _('Format of the disk'), + 'enum': [None] + CONF.image_format.disk_formats, + }, + 'created_at': { + 'type': 'string', + 'description': _('Date and time of image registration' + ' (READ-ONLY)'), + # TODO(bcwaldon): our jsonschema library doesn't seem to like the + # format attribute, figure out why! + # 'format': 'date-time', + }, + 'updated_at': { + 'type': 'string', + 'description': _('Date and time of the last image modification' + ' (READ-ONLY)'), + # 'format': 'date-time', + }, + 'tags': { + 'type': 'array', + 'description': _('List of strings related to the image'), + 'items': { + 'type': 'string', + 'maxLength': 255, + }, + }, + 'direct_url': { + 'type': 'string', + 'description': _('URL to access the image file kept in external ' + 'store (READ-ONLY)'), + }, + 'min_ram': { + 'type': 'integer', + 'description': _('Amount of ram (in MB) required to boot image.'), + }, + 'min_disk': { + 'type': 'integer', + 'description': _('Amount of disk space (in GB) required to boot ' + 'image.'), + }, + 'self': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'file': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'schema': { + 'type': 'string', + 'description': '(READ-ONLY)' + }, + 'locations': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'url': { + 'type': 'string', + 'maxLength': 255, + }, + 'metadata': { + 'type': 'object', + }, + }, + 'required': ['url', 'metadata'], + }, + 'description': _('A set of URLs to access the image file kept in ' + 'external store'), + }, + } + + +def _get_base_links(): + return [ + {'rel': 'self', 'href': '{self}'}, + {'rel': 'enclosure', 'href': '{file}'}, + {'rel': 'describedby', 'href': '{schema}'}, + ] + + +def get_schema(custom_properties=None): + properties = get_base_properties() + links = _get_base_links() + if CONF.allow_additional_image_properties: + schema = daisy.schema.PermissiveSchema('image', properties, links) + else: + schema = daisy.schema.Schema('image', properties) + + if custom_properties: + for property_value in custom_properties.values(): + property_value['is_base'] = False + schema.merge_properties(custom_properties) + return schema + + +def get_collection_schema(custom_properties=None): + image_schema = get_schema(custom_properties) + return daisy.schema.CollectionSchema('images', image_schema) + + +def load_custom_properties(): + """Find the schema properties files and load them into a dict.""" + filename = 'schema-image.json' + match = CONF.find_file(filename) + if match: + with open(match, 'r') as schema_file: + schema_data = schema_file.read() + return json.loads(schema_data) + else: + msg = (_LW('Could not find schema properties file %s. 
Continuing ' + 'without custom properties') % filename) + LOG.warn(msg) + return {} + + +def create_resource(custom_properties=None): + """Images resource factory method""" + schema = get_schema(custom_properties) + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = ImagesController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/metadef_namespaces.py b/code/daisy/daisy/api/v2/metadef_namespaces.py new file mode 100755 index 00000000..b95980e5 --- /dev/null +++ b/code/daisy/daisy/api/v2/metadef_namespaces.py @@ -0,0 +1,806 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.serialization import jsonutils +from oslo_config import cfg +from oslo_log import log as logging +import six +import six.moves.urllib.parse as urlparse +import webob.exc +from wsme.rest import json + +from daisy.api import policy +from daisy.api.v2.model.metadef_namespace import Namespace +from daisy.api.v2.model.metadef_namespace import Namespaces +from daisy.api.v2.model.metadef_object import MetadefObject +from daisy.api.v2.model.metadef_property_type import PropertyType +from daisy.api.v2.model.metadef_resource_type import ResourceTypeAssociation +from daisy.api.v2.model.metadef_tag import MetadefTag +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +from daisy.common import wsme_utils +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LW = i18n._LW +_LI = i18n._LI + +CONF = cfg.CONF + + +class NamespaceController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.gateway = daisy.gateway.Gateway(db_api=self.db_api, + notifier=self.notifier, + policy_enforcer=self.policy) + self.ns_schema_link = '/v2/schemas/metadefs/namespace' + self.obj_schema_link = '/v2/schemas/metadefs/object' + self.tag_schema_link = '/v2/schemas/metadefs/tag' + + def index(self, req, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + try: + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + + # Get namespace id + if marker: + namespace_obj = ns_repo.get(marker) + marker = namespace_obj.namespace_id + + database_ns_list = ns_repo.list( + marker=marker, limit=limit, sort_key=sort_key, + sort_dir=sort_dir, filters=filters) + for db_namespace in database_ns_list: + # Get resource type associations + filters = dict() + filters['namespace'] = db_namespace.namespace + rs_repo = ( + self.gateway.get_metadef_resource_type_repo(req.context)) + repo_rs_type_list = rs_repo.list(filters=filters) + resource_type_list = [ResourceTypeAssociation.to_wsme_model( + resource_type) for 
resource_type in repo_rs_type_list] + if resource_type_list: + db_namespace.resource_type_associations = ( + resource_type_list) + + namespace_list = [Namespace.to_wsme_model( + db_namespace, + get_namespace_href(db_namespace), + self.ns_schema_link) for db_namespace in database_ns_list] + namespaces = Namespaces() + namespaces.namespaces = namespace_list + if len(namespace_list) != 0 and len(namespace_list) == limit: + namespaces.next = namespace_list[-1].namespace + + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return namespaces + + @utils.mutating + def create(self, req, namespace): + try: + namespace_created = False + # Create Namespace + ns_factory = self.gateway.get_metadef_namespace_factory( + req.context) + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + new_namespace = ns_factory.new_namespace(**namespace.to_dict()) + ns_repo.add(new_namespace) + namespace_created = True + + # Create Resource Types + if namespace.resource_type_associations: + rs_factory = (self.gateway.get_metadef_resource_type_factory( + req.context)) + rs_repo = self.gateway.get_metadef_resource_type_repo( + req.context) + for resource_type in namespace.resource_type_associations: + new_resource = rs_factory.new_resource_type( + namespace=namespace.namespace, + **resource_type.to_dict()) + rs_repo.add(new_resource) + + # Create Objects + if namespace.objects: + object_factory = self.gateway.get_metadef_object_factory( + req.context) + object_repo = self.gateway.get_metadef_object_repo( + req.context) + for metadata_object in namespace.objects: + new_meta_object = object_factory.new_object( + namespace=namespace.namespace, + **metadata_object.to_dict()) + object_repo.add(new_meta_object) + + # Create Tags + if namespace.tags: + tag_factory = self.gateway.get_metadef_tag_factory( + req.context) + tag_repo = self.gateway.get_metadef_tag_repo(req.context) + for metadata_tag in namespace.tags: + new_meta_tag = tag_factory.new_tag( + namespace=namespace.namespace, + **metadata_tag.to_dict()) + tag_repo.add(new_meta_tag) + + # Create Namespace Properties + if namespace.properties: + prop_factory = (self.gateway.get_metadef_property_factory( + req.context)) + prop_repo = self.gateway.get_metadef_property_repo( + req.context) + for (name, value) in namespace.properties.items(): + new_property_type = ( + prop_factory.new_namespace_property( + namespace=namespace.namespace, + **self._to_property_dict(name, value) + )) + prop_repo.add(new_property_type) + + except exception.Forbidden as e: + self._cleanup_namespace(ns_repo, namespace, namespace_created) + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + self._cleanup_namespace(ns_repo, namespace, namespace_created) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + self._cleanup_namespace(ns_repo, namespace, namespace_created) + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + # Return the user namespace as we don't expose the id to user + new_namespace.properties = namespace.properties + new_namespace.objects = namespace.objects + new_namespace.resource_type_associations = ( + namespace.resource_type_associations) + new_namespace.tags = 
namespace.tags + return Namespace.to_wsme_model(new_namespace, + get_namespace_href(new_namespace), + self.ns_schema_link) + + def _to_property_dict(self, name, value): + # Convert the model PropertyTypes dict to a JSON string + db_property_type_dict = dict() + db_property_type_dict['schema'] = json.tojson(PropertyType, value) + db_property_type_dict['name'] = name + return db_property_type_dict + + def _cleanup_namespace(self, namespace_repo, namespace, namespace_created): + if namespace_created: + try: + namespace_obj = namespace_repo.get(namespace.namespace) + namespace_obj.delete() + namespace_repo.remove(namespace_obj) + msg = ("Cleaned up namespace %(namespace)s " + % {'namespace': namespace.namespace}) + LOG.debug(msg) + except exception: + msg = (_LE("Failed to delete namespace %(namespace)s ") % + {'namespace': namespace.namespace}) + LOG.error(msg) + + def show(self, req, namespace, filters=None): + try: + # Get namespace + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + namespace_obj = ns_repo.get(namespace) + namespace_detail = Namespace.to_wsme_model( + namespace_obj, + get_namespace_href(namespace_obj), + self.ns_schema_link) + ns_filters = dict() + ns_filters['namespace'] = namespace + + # Get objects + object_repo = self.gateway.get_metadef_object_repo(req.context) + db_metaobject_list = object_repo.list(filters=ns_filters) + object_list = [MetadefObject.to_wsme_model( + db_metaobject, + get_object_href(namespace, db_metaobject), + self.obj_schema_link) for db_metaobject in db_metaobject_list] + if object_list: + namespace_detail.objects = object_list + + # Get resource type associations + rs_repo = self.gateway.get_metadef_resource_type_repo(req.context) + db_resource_type_list = rs_repo.list(filters=ns_filters) + resource_type_list = [ResourceTypeAssociation.to_wsme_model( + resource_type) for resource_type in db_resource_type_list] + if resource_type_list: + namespace_detail.resource_type_associations = ( + resource_type_list) + + # Get properties + prop_repo = self.gateway.get_metadef_property_repo(req.context) + db_properties = prop_repo.list(filters=ns_filters) + property_list = Namespace.to_model_properties(db_properties) + if property_list: + namespace_detail.properties = property_list + + if filters and filters['resource_type']: + namespace_detail = self._prefix_property_name( + namespace_detail, filters['resource_type']) + + # Get tags + tag_repo = self.gateway.get_metadef_tag_repo(req.context) + db_metatag_list = tag_repo.list(filters=ns_filters) + tag_list = [MetadefTag(**{'name': db_metatag.name}) + for db_metatag in db_metatag_list] + if tag_list: + namespace_detail.tags = tag_list + + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return namespace_detail + + def update(self, req, user_ns, namespace): + namespace_repo = self.gateway.get_metadef_namespace_repo(req.context) + try: + ns_obj = namespace_repo.get(namespace) + ns_obj._old_namespace = ns_obj.namespace + ns_obj.namespace = wsme_utils._get_value(user_ns.namespace) + ns_obj.display_name = wsme_utils._get_value(user_ns.display_name) + ns_obj.description = wsme_utils._get_value(user_ns.description) + # Following optional fields will default to same values as in + # create namespace if not specified + ns_obj.visibility = ( + 
wsme_utils._get_value(user_ns.visibility) or 'private') + ns_obj.protected = ( + wsme_utils._get_value(user_ns.protected) or False) + ns_obj.owner = ( + wsme_utils._get_value(user_ns.owner) or req.context.owner) + updated_namespace = namespace_repo.save(ns_obj) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + return Namespace.to_wsme_model(updated_namespace, + get_namespace_href(updated_namespace), + self.ns_schema_link) + + def delete(self, req, namespace): + namespace_repo = self.gateway.get_metadef_namespace_repo(req.context) + try: + namespace_obj = namespace_repo.get(namespace) + namespace_obj.delete() + namespace_repo.remove(namespace_obj) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def delete_objects(self, req, namespace): + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + try: + namespace_obj = ns_repo.get(namespace) + namespace_obj.delete() + ns_repo.remove_objects(namespace_obj) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def delete_tags(self, req, namespace): + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + try: + namespace_obj = ns_repo.get(namespace) + namespace_obj.delete() + ns_repo.remove_tags(namespace_obj) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def delete_properties(self, req, namespace): + ns_repo = self.gateway.get_metadef_namespace_repo(req.context) + try: + namespace_obj = ns_repo.get(namespace) + namespace_obj.delete() + ns_repo.remove_properties(namespace_obj) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def _prefix_property_name(self, namespace_detail, user_resource_type): + prefix = None + if user_resource_type and namespace_detail.resource_type_associations: + for resource_type in namespace_detail.resource_type_associations: + if resource_type.name == user_resource_type: + prefix = resource_type.prefix + break + + if prefix: + if namespace_detail.properties: + new_property_dict = dict() + for (key, value) in namespace_detail.properties.items(): + new_property_dict[prefix + key] = value + namespace_detail.properties = new_property_dict + + if namespace_detail.objects: + for object in namespace_detail.objects: + new_object_property_dict = dict() + for (key, value) in object.properties.items(): + new_object_property_dict[prefix + key] = value + object.properties = 
new_object_property_dict + + if object.required and len(object.required) > 0: + required = [prefix + name for name in object.required] + object.required = required + + return namespace_detail + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + sort_dir = params.pop('sort_dir', 'desc') + + if limit is None: + limit = CONF.limit_param_default + limit = min(CONF.api_limit_max, int(limit)) + + query_params = { + 'sort_key': params.pop('sort_key', 'created_at'), + 'sort_dir': self._validate_sort_dir(sort_dir), + 'filters': self._get_filters(params) + } + + if marker is not None: + query_params['marker'] = marker + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + + return query_params + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _get_filters(self, filters): + visibility = filters.get('visibility') + if visibility: + if visibility not in ['public', 'private']: + msg = _('Invalid visibility value: %s') % visibility + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def show(self, request): + params = request.params.copy() + query_params = { + 'filters': self._get_filters(params) + } + return query_params + + def create(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + namespace = json.fromjson(Namespace, body) + return dict(namespace=namespace) + + def update(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + namespace = json.fromjson(Namespace, body) + return dict(user_ns=namespace) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema + + def create(self, response, namespace): + ns_json = json.tojson(Namespace, namespace) + response = self.__render(ns_json, response, 201) + response.location = get_namespace_href(namespace) + + def show(self, response, namespace): + ns_json = json.tojson(Namespace, namespace) + response = self.__render(ns_json, response) + + def 
index(self, response, result): + params = dict(response.request.params) + params.pop('marker', None) + query = urlparse.urlencode(params) + result.first = "/v2/metadefs/namespaces" + result.schema = "/v2/schemas/metadefs/namespaces" + if query: + result.first = '%s?%s' % (result.first, query) + if result.next: + params['marker'] = result.next + next_query = urlparse.urlencode(params) + result.next = '/v2/metadefs/namespaces?%s' % next_query + + ns_json = json.tojson(Namespaces, result) + response = self.__render(ns_json, response) + + def update(self, response, namespace): + ns_json = json.tojson(Namespace, namespace) + response = self.__render(ns_json, response, 200) + + def delete(self, response, result): + response.status_int = 204 + + def delete_objects(self, response, result): + response.status_int = 204 + + def delete_properties(self, response, result): + response.status_int = 204 + + def __render(self, json_data, response, response_status=None): + body = jsonutils.dumps(json_data, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + if response_status: + response.status_int = response_status + return response + + +def _get_base_definitions(): + return get_schema_definitions() + + +def get_schema_definitions(): + return { + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ + {"$ref": "#/definitions/positiveInteger"}, + {"default": 0} + ] + }, + "stringArray": { + "type": "array", + "items": {"type": "string"}, + # "minItems": 1, + "uniqueItems": True + }, + "property": { + "type": "object", + "additionalProperties": { + "type": "object", + "required": ["title", "type"], + "properties": { + "name": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "operators": { + "type": "array", + "items": { + "type": "string" + } + }, + "type": { + "type": "string", + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + None + ] + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "minimum": { + "type": "number" + }, + "maximum": { + "type": "number" + }, + "maxLength": { + "$ref": "#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "type": "string", + "format": "regex" + }, + "enum": { + "type": "array" + }, + "readonly": { + "type": "boolean" + }, + "default": {}, + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "array", + "boolean", + "integer", + "number", + "object", + "string", + None + ] + }, + "enum": { + "type": "array" + } + } + }, + "maxItems": { + "$ref": "#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "type": "boolean", + "default": False + }, + "additionalItems": { + "type": "boolean" + }, + } + } + } + } + + +def _get_base_properties(): + return { + "namespace": { + "type": "string", + "description": _("The unique namespace text."), + "maxLength": 80, + }, + "display_name": { + "type": "string", + "description": _("The user friendly name for the namespace. 
Used " + "by UI if available."), + "maxLength": 80, + }, + "description": { + "type": "string", + "description": _("Provides a user friendly description of the " + "namespace."), + "maxLength": 500, + }, + "visibility": { + "type": "string", + "description": _("Scope of namespace accessibility."), + "enum": ["public", "private"], + }, + "protected": { + "type": "boolean", + "description": _("If true, namespace will not be deletable."), + }, + "owner": { + "type": "string", + "description": _("Owner of the namespace."), + "maxLength": 255, + }, + "created_at": { + "type": "string", + "description": _("Date and time of namespace creation" + " (READ-ONLY)"), + "format": "date-time" + }, + "updated_at": { + "type": "string", + "description": _("Date and time of the last namespace modification" + " (READ-ONLY)"), + "format": "date-time" + }, + "schema": { + "type": "string" + }, + "self": { + "type": "string" + }, + "resource_type_associations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "prefix": { + "type": "string" + }, + "properties_target": { + "type": "string" + } + } + } + }, + "properties": { + "$ref": "#/definitions/property" + }, + "objects": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "properties": { + "$ref": "#/definitions/property" + }, + } + } + }, + "tags": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + } + }, + } + + +def get_schema(): + properties = _get_base_properties() + definitions = _get_base_definitions() + mandatory_attrs = Namespace.get_mandatory_attrs() + schema = daisy.schema.Schema( + 'namespace', + properties, + required=mandatory_attrs, + definitions=definitions + ) + return schema + + +def get_collection_schema(): + namespace_schema = get_schema() + return daisy.schema.CollectionSchema('namespaces', namespace_schema) + + +def get_namespace_href(namespace): + base_href = '/v2/metadefs/namespaces/%s' % namespace.namespace + return base_href + + +def get_object_href(namespace_name, metadef_object): + base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % + (namespace_name, metadef_object.name)) + return base_href + + +def get_tag_href(namespace_name, metadef_tag): + base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % + (namespace_name, metadef_tag.name)) + return base_href + + +def create_resource(): + """Namespaces resource factory method""" + schema = get_schema() + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = NamespaceController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/metadef_objects.py b/code/daisy/daisy/api/v2/metadef_objects.py new file mode 100755 index 00000000..aed3d8c1 --- /dev/null +++ b/code/daisy/daisy/api/v2/metadef_objects.py @@ -0,0 +1,338 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.serialization import jsonutils +from oslo_config import cfg +from oslo_log import log as logging +import six +import webob.exc +from wsme.rest import json + +from daisy.api import policy +from daisy.api.v2 import metadef_namespaces as namespaces +from daisy.api.v2.model.metadef_object import MetadefObject +from daisy.api.v2.model.metadef_object import MetadefObjects +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +from daisy.common import wsme_utils +import daisy.db +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI + +CONF = cfg.CONF + + +class MetadefObjectsController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.gateway = daisy.gateway.Gateway(db_api=self.db_api, + notifier=self.notifier, + policy_enforcer=self.policy) + self.obj_schema_link = '/v2/schemas/metadefs/object' + + def create(self, req, metadata_object, namespace): + object_factory = self.gateway.get_metadef_object_factory(req.context) + object_repo = self.gateway.get_metadef_object_repo(req.context) + try: + new_meta_object = object_factory.new_object( + namespace=namespace, + **metadata_object.to_dict()) + object_repo.add(new_meta_object) + + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return MetadefObject.to_wsme_model( + new_meta_object, + get_object_href(namespace, new_meta_object), + self.obj_schema_link) + + def index(self, req, namespace, marker=None, limit=None, + sort_key='created_at', sort_dir='desc', filters=None): + try: + filters = filters or dict() + filters['namespace'] = namespace + object_repo = self.gateway.get_metadef_object_repo(req.context) + db_metaobject_list = object_repo.list( + marker=marker, limit=limit, sort_key=sort_key, + sort_dir=sort_dir, filters=filters) + object_list = [MetadefObject.to_wsme_model( + db_metaobject, + get_object_href(namespace, db_metaobject), + self.obj_schema_link) for db_metaobject in db_metaobject_list] + metadef_objects = MetadefObjects() + metadef_objects.objects = object_list + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return metadef_objects + + def show(self, req, namespace, object_name): + meta_object_repo = self.gateway.get_metadef_object_repo( + req.context) + try: + metadef_object = meta_object_repo.get(namespace, object_name) + return 
MetadefObject.to_wsme_model( + metadef_object, + get_object_href(namespace, metadef_object), + self.obj_schema_link) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def update(self, req, metadata_object, namespace, object_name): + meta_repo = self.gateway.get_metadef_object_repo(req.context) + try: + metadef_object = meta_repo.get(namespace, object_name) + metadef_object._old_name = metadef_object.name + metadef_object.name = wsme_utils._get_value( + metadata_object.name) + metadef_object.description = wsme_utils._get_value( + metadata_object.description) + metadef_object.required = wsme_utils._get_value( + metadata_object.required) + metadef_object.properties = wsme_utils._get_value( + metadata_object.properties) + updated_metadata_obj = meta_repo.save(metadef_object) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return MetadefObject.to_wsme_model( + updated_metadata_obj, + get_object_href(namespace, updated_metadata_obj), + self.obj_schema_link) + + def delete(self, req, namespace, object_name): + meta_repo = self.gateway.get_metadef_object_repo(req.context) + try: + metadef_object = meta_repo.get(namespace, object_name) + metadef_object.delete() + meta_repo.remove(metadef_object) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + +def _get_base_definitions(): + return namespaces.get_schema_definitions() + + +def _get_base_properties(): + return { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "required": { + "$ref": "#/definitions/stringArray" + }, + "properties": { + "$ref": "#/definitions/property" + }, + "schema": { + "type": "string" + }, + "self": { + "type": "string" + }, + "created_at": { + "type": "string", + "description": _("Date and time of object creation" + " (READ-ONLY)"), + "format": "date-time" + }, + "updated_at": { + "type": "string", + "description": _("Date and time of the last object modification" + " (READ-ONLY)"), + "format": "date-time" + } + } + + +def get_schema(): + definitions = _get_base_definitions() + properties = _get_base_properties() + mandatory_attrs = MetadefObject.get_mandatory_attrs() + schema = daisy.schema.Schema( + 'object', + properties, + required=mandatory_attrs, + definitions=definitions, + ) + return schema + + +def get_collection_schema(): + object_schema = get_schema() + return daisy.schema.CollectionSchema('objects', object_schema) + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['self', 'schema', 'created_at', 'updated_at'] + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected 
in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + def create(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + metadata_object = json.fromjson(MetadefObject, body) + return dict(metadata_object=metadata_object) + + def update(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + metadata_object = json.fromjson(MetadefObject, body) + return dict(metadata_object=metadata_object) + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + sort_dir = params.pop('sort_dir', 'desc') + + query_params = { + 'sort_key': params.pop('sort_key', 'created_at'), + 'sort_dir': self._validate_sort_dir(sort_dir), + 'filters': self._get_filters(params) + } + + if marker is not None: + query_params['marker'] = marker + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + + return query_params + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _get_filters(self, filters): + visibility = filters.get('visibility') + if visibility: + if visibility not in ['public', 'private', 'shared']: + msg = _('Invalid visibility value: %s') % visibility + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema or get_schema() + + def create(self, response, metadata_object): + response.status_int = 201 + self.show(response, metadata_object) + + def show(self, response, metadata_object): + metadata_object_json = json.tojson(MetadefObject, metadata_object) + body = jsonutils.dumps(metadata_object_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def update(self, response, metadata_object): + response.status_int = 200 + self.show(response, metadata_object) + + def index(self, response, result): + result.schema = "v2/schemas/metadefs/objects" + metadata_objects_json = json.tojson(MetadefObjects, result) + body = jsonutils.dumps(metadata_objects_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def delete(self, response, result): + response.status_int = 204 + + +def get_object_href(namespace_name, metadef_object): + base_href = ('/v2/metadefs/namespaces/%s/objects/%s' % + (namespace_name, metadef_object.name)) + return base_href + + +def create_resource(): + """Metadef objects resource factory method""" + schema = get_schema() + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = MetadefObjectsController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/metadef_properties.py 
b/code/daisy/daisy/api/v2/metadef_properties.py new file mode 100755 index 00000000..19883abf --- /dev/null +++ b/code/daisy/daisy/api/v2/metadef_properties.py @@ -0,0 +1,300 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.serialization import jsonutils +from oslo_log import log as logging +import six +import webob.exc +from wsme.rest import json + +from daisy.api import policy +from daisy.api.v2 import metadef_namespaces as namespaces +from daisy.api.v2.model.metadef_namespace import Namespace +from daisy.api.v2.model.metadef_property_type import PropertyType +from daisy.api.v2.model.metadef_property_type import PropertyTypes +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI + + +class NamespacePropertiesController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.gateway = daisy.gateway.Gateway(db_api=self.db_api, + notifier=self.notifier, + policy_enforcer=self.policy) + + def _to_dict(self, model_property_type): + # Convert the model PropertyTypes dict to a JSON encoding + db_property_type_dict = dict() + db_property_type_dict['schema'] = json.tojson( + PropertyType, model_property_type) + db_property_type_dict['name'] = model_property_type.name + return db_property_type_dict + + def _to_model(self, db_property_type): + # Convert the persisted json schema to a dict of PropertyTypes + property_type = json.fromjson( + PropertyType, db_property_type.schema) + property_type.name = db_property_type.name + return property_type + + def index(self, req, namespace): + try: + filters = dict() + filters['namespace'] = namespace + prop_repo = self.gateway.get_metadef_property_repo(req.context) + db_properties = prop_repo.list(filters=filters) + property_list = Namespace.to_model_properties(db_properties) + namespace_properties = PropertyTypes() + namespace_properties.properties = property_list + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return namespace_properties + + def show(self, req, namespace, property_name, filters=None): + try: + if filters and filters['resource_type']: + rs_repo = self.gateway.get_metadef_resource_type_repo( + req.context) + db_resource_type = rs_repo.get(filters['resource_type'], + namespace) + prefix = db_resource_type.prefix + if prefix and property_name.startswith(prefix): + property_name = property_name[len(prefix):] + else: + msg = 
(_("Property %(property_name)s does not start " + "with the expected resource type association " + "prefix of '%(prefix)s'.") + % {'property_name': property_name, + 'prefix': prefix}) + raise exception.NotFound(msg) + + prop_repo = self.gateway.get_metadef_property_repo(req.context) + db_property = prop_repo.get(namespace, property_name) + property = self._to_model(db_property) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return property + + def create(self, req, namespace, property_type): + prop_factory = self.gateway.get_metadef_property_factory(req.context) + prop_repo = self.gateway.get_metadef_property_repo(req.context) + try: + new_property_type = prop_factory.new_namespace_property( + namespace=namespace, **self._to_dict(property_type)) + prop_repo.add(new_property_type) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return self._to_model(new_property_type) + + def update(self, req, namespace, property_name, property_type): + prop_repo = self.gateway.get_metadef_property_repo(req.context) + try: + db_property_type = prop_repo.get(namespace, property_name) + db_property_type._old_name = db_property_type.name + db_property_type.name = property_type.name + db_property_type.schema = (self._to_dict(property_type))['schema'] + updated_property_type = prop_repo.save(db_property_type) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return self._to_model(updated_property_type) + + def delete(self, req, namespace, property_name): + prop_repo = self.gateway.get_metadef_property_repo(req.context) + try: + property_type = prop_repo.get(namespace, property_name) + property_type.delete() + prop_repo.remove(property_type) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['created_at', 'updated_at'] + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + def create(self, request): + body = self._get_request_body(request) + 
self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + property_type = json.fromjson(PropertyType, body) + return dict(property_type=property_type) + + def update(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + property_type = json.fromjson(PropertyType, body) + return dict(property_type=property_type) + + def show(self, request): + params = request.params.copy() + query_params = { + 'filters': params + } + return query_params + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema + + def show(self, response, result): + property_type_json = json.tojson(PropertyType, result) + body = jsonutils.dumps(property_type_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, result): + property_type_json = json.tojson(PropertyTypes, result) + body = jsonutils.dumps(property_type_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def create(self, response, result): + response.status_int = 201 + self.show(response, result) + + def update(self, response, result): + response.status_int = 200 + self.show(response, result) + + def delete(self, response, result): + response.status_int = 204 + + +def _get_base_definitions(): + return { + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ + {"$ref": "#/definitions/positiveInteger"}, + {"default": 0} + ] + }, + "stringArray": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, + "uniqueItems": True + } + } + + +def _get_base_properties(): + base_def = namespaces.get_schema_definitions() + return base_def['property']['additionalProperties']['properties'] + + +def get_schema(): + definitions = _get_base_definitions() + properties = _get_base_properties() + mandatory_attrs = PropertyType.get_mandatory_attrs() + # name is required attribute when use as single property type + mandatory_attrs.append('name') + schema = daisy.schema.Schema( + 'property', + properties, + required=mandatory_attrs, + definitions=definitions + ) + return schema + + +def get_collection_schema(): + namespace_properties_schema = get_schema() + # Property name is a dict key and not a required attribute in + # individual property schema inside property collections + namespace_properties_schema.required.remove('name') + return daisy.schema.DictCollectionSchema('properties', + namespace_properties_schema) + + +def create_resource(): + """NamespaceProperties resource factory method""" + schema = get_schema() + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = NamespacePropertiesController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/metadef_resource_types.py b/code/daisy/daisy/api/v2/metadef_resource_types.py new file mode 100755 index 00000000..983ea5ea --- /dev/null +++ b/code/daisy/daisy/api/v2/metadef_resource_types.py @@ -0,0 +1,265 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.serialization import jsonutils +from oslo_log import log as logging +import six +import webob.exc +from wsme.rest import json + +from daisy.api import policy +from daisy.api.v2.model.metadef_resource_type import ResourceType +from daisy.api.v2.model.metadef_resource_type import ResourceTypeAssociation +from daisy.api.v2.model.metadef_resource_type import ResourceTypeAssociations +from daisy.api.v2.model.metadef_resource_type import ResourceTypes +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI + + +class ResourceTypeController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.gateway = daisy.gateway.Gateway(db_api=self.db_api, + notifier=self.notifier, + policy_enforcer=self.policy) + + def index(self, req): + try: + filters = {} + filters['namespace'] = None + rs_type_repo = self.gateway.get_metadef_resource_type_repo( + req.context) + db_resource_type_list = rs_type_repo.list(filters=filters) + resource_type_list = [ResourceType.to_wsme_model( + resource_type) for resource_type in db_resource_type_list] + resource_types = ResourceTypes() + resource_types.resource_types = resource_type_list + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError(e) + return resource_types + + def show(self, req, namespace): + try: + filters = {} + filters['namespace'] = namespace + rs_type_repo = self.gateway.get_metadef_resource_type_repo( + req.context) + db_resource_type_list = rs_type_repo.list(filters=filters) + resource_type_list = [ResourceTypeAssociation.to_wsme_model( + resource_type) for resource_type in db_resource_type_list] + resource_types = ResourceTypeAssociations() + resource_types.resource_type_associations = resource_type_list + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError(e) + return resource_types + + def create(self, req, resource_type, namespace): + rs_type_factory = self.gateway.get_metadef_resource_type_factory( + req.context) + rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) + try: + new_resource_type = rs_type_factory.new_resource_type( + namespace=namespace, **resource_type.to_dict()) + rs_type_repo.add(new_resource_type) + + except 
exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + return ResourceTypeAssociation.to_wsme_model(new_resource_type) + + def delete(self, req, namespace, resource_type): + rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) + try: + filters = {} + found = False + filters['namespace'] = namespace + db_resource_type_list = rs_type_repo.list(filters=filters) + for db_resource_type in db_resource_type_list: + if db_resource_type.name == resource_type: + db_resource_type.delete() + rs_type_repo.remove(db_resource_type) + found = True + if not found: + raise exception.NotFound() + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + msg = (_("Failed to find resource type %(resourcetype)s to " + "delete") % {'resourcetype': resource_type}) + LOG.error(msg) + raise webob.exc.HTTPNotFound(explanation=msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['created_at', 'updated_at'] + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + def create(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + resource_type = json.fromjson(ResourceTypeAssociation, body) + return dict(resource_type=resource_type) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema + + def show(self, response, result): + resource_type_json = json.tojson(ResourceTypeAssociations, result) + body = jsonutils.dumps(resource_type_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, result): + resource_type_json = json.tojson(ResourceTypes, result) + body = jsonutils.dumps(resource_type_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def create(self, response, result): + resource_type_json = json.tojson(ResourceTypeAssociation, result) + response.status_int = 201 + body = jsonutils.dumps(resource_type_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def delete(self, response, result): + response.status_int = 204 + + +def _get_base_properties(): + return { + 'name': { + 'type': 'string', + 'description': _('Resource type names should be aligned with Heat ' + 'resource types whenever possible: ' 
+ 'http://docs.openstack.org/developer/heat/' + 'template_guide/openstack.html'), + 'maxLength': 80, + }, + 'prefix': { + 'type': 'string', + 'description': _('Specifies the prefix to use for the given ' + 'resource type. Any properties in the namespace ' + 'should be prefixed with this prefix when being ' + 'applied to the specified resource type. Must ' + 'include prefix separator (e.g. a colon :).'), + 'maxLength': 80, + }, + 'properties_target': { + 'type': 'string', + 'description': _('Some resource types allow more than one key / ' + 'value pair per instance. For example, Cinder ' + 'allows user and image metadata on volumes. Only ' + 'the image properties metadata is evaluated by ' + 'Nova (scheduling or drivers). This property ' + 'allows a namespace target to remove the ' + 'ambiguity.'), + 'maxLength': 80, + }, + "created_at": { + "type": "string", + "description": _("Date and time of resource type association" + " (READ-ONLY)"), + "format": "date-time" + }, + "updated_at": { + "type": "string", + "description": _("Date and time of the last resource type " + "association modification (READ-ONLY)"), + "format": "date-time" + } + } + + +def get_schema(): + properties = _get_base_properties() + mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() + schema = daisy.schema.Schema( + 'resource_type_association', + properties, + required=mandatory_attrs, + ) + return schema + + +def get_collection_schema(): + resource_type_schema = get_schema() + return daisy.schema.CollectionSchema('resource_type_associations', + resource_type_schema) + + +def create_resource(): + """ResourceTypeAssociation resource factory method""" + schema = get_schema() + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = ResourceTypeController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/metadef_tags.py b/code/daisy/daisy/api/v2/metadef_tags.py new file mode 100755 index 00000000..49db0d9b --- /dev/null +++ b/code/daisy/daisy/api/v2/metadef_tags.py @@ -0,0 +1,389 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
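# Illustrative sketch only, assuming the jsonschema library and hypothetical
# names (TAGS_LIST_SCHEMA, body): the metadef_tags module that follows accepts
# a request body holding a "tags" array whose items each carry a required
# "name" string and nothing else (see _get_base_properties_for_list() and
# get_schema_for_list()); daisy.schema.Schema wraps equivalent checks, and
# json.fromjson(MetadefTags, body) then builds the WSME model in create_tags().
import jsonschema

TAGS_LIST_SCHEMA = {
    "type": "object",
    "properties": {
        "tags": {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {"name": {"type": "string"}},
                "required": ["name"],
                "additionalProperties": False,
            },
        }
    },
}

body = {"tags": [{"name": "high_iops"}, {"name": "low_latency"}]}
# Raises jsonschema.ValidationError if a tag lacks "name" or has extra keys.
jsonschema.validate(body, TAGS_LIST_SCHEMA)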
+ +from oslo.serialization import jsonutils +from oslo_config import cfg +from oslo_log import log as logging +import six +import webob.exc +from wsme.rest import json + +from daisy.api import policy +from daisy.api.v2.model.metadef_tag import MetadefTag +from daisy.api.v2.model.metadef_tag import MetadefTags +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +from daisy.common import wsme_utils +import daisy.db +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI + +CONF = cfg.CONF + + +class TagsController(object): + def __init__(self, db_api=None, policy_enforcer=None, notifier=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.gateway = daisy.gateway.Gateway(db_api=self.db_api, + notifier=self.notifier, + policy_enforcer=self.policy) + self.tag_schema_link = '/v2/schemas/metadefs/tag' + + def create(self, req, namespace, tag_name): + tag_factory = self.gateway.get_metadef_tag_factory(req.context) + tag_repo = self.gateway.get_metadef_tag_repo(req.context) + tag_name_as_dict = {'name': tag_name} + try: + new_meta_tag = tag_factory.new_tag( + namespace=namespace, + **tag_name_as_dict) + tag_repo.add(new_meta_tag) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + return MetadefTag.to_wsme_model(new_meta_tag) + + def create_tags(self, req, metadata_tags, namespace): + tag_factory = self.gateway.get_metadef_tag_factory(req.context) + tag_repo = self.gateway.get_metadef_tag_repo(req.context) + try: + tag_list = [] + for metadata_tag in metadata_tags.tags: + tag_list.append(tag_factory.new_tag( + namespace=namespace, **metadata_tag.to_dict())) + tag_repo.add_tags(tag_list) + tag_list_out = [MetadefTag(**{'name': db_metatag.name}) + for db_metatag in tag_list] + metadef_tags = MetadefTags() + metadef_tags.tags = tag_list_out + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + return metadef_tags + + def index(self, req, namespace, marker=None, limit=None, + sort_key='created_at', sort_dir='desc', filters=None): + try: + filters = filters or dict() + filters['namespace'] = namespace + + tag_repo = self.gateway.get_metadef_tag_repo(req.context) + if marker: + metadef_tag = tag_repo.get(namespace, marker) + marker = metadef_tag.tag_id + + db_metatag_list = tag_repo.list( + marker=marker, limit=limit, sort_key=sort_key, + sort_dir=sort_dir, filters=filters) + + tag_list = [MetadefTag(**{'name': db_metatag.name}) + for db_metatag in db_metatag_list] + + metadef_tags = MetadefTags() + metadef_tags.tags = tag_list + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + 
LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + return metadef_tags + + def show(self, req, namespace, tag_name): + meta_tag_repo = self.gateway.get_metadef_tag_repo(req.context) + try: + metadef_tag = meta_tag_repo.get(namespace, tag_name) + return MetadefTag.to_wsme_model(metadef_tag) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def update(self, req, metadata_tag, namespace, tag_name): + meta_repo = self.gateway.get_metadef_tag_repo(req.context) + try: + metadef_tag = meta_repo.get(namespace, tag_name) + metadef_tag._old_name = metadef_tag.name + metadef_tag.name = wsme_utils._get_value( + metadata_tag.name) + updated_metadata_tag = meta_repo.save(metadef_tag) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + return MetadefTag.to_wsme_model(updated_metadata_tag) + + def delete(self, req, namespace, tag_name): + meta_repo = self.gateway.get_metadef_tag_repo(req.context) + try: + metadef_tag = meta_repo.get(namespace, tag_name) + metadef_tag.delete() + meta_repo.remove(metadef_tag) + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + +def _get_base_definitions(): + return None + + +def _get_base_properties(): + return { + "name": { + "type": "string" + }, + "created_at": { + "type": "string", + "description": _("Date and time of tag creation" + " (READ-ONLY)"), + "format": "date-time" + }, + "updated_at": { + "type": "string", + "description": _("Date and time of the last tag modification" + " (READ-ONLY)"), + "format": "date-time" + } + } + + +def _get_base_properties_for_list(): + return { + "tags": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + 'required': ['name'], + "additionalProperties": False + } + }, + } + + +def get_schema(): + definitions = _get_base_definitions() + properties = _get_base_properties() + mandatory_attrs = MetadefTag.get_mandatory_attrs() + schema = daisy.schema.Schema( + 'tag', + properties, + required=mandatory_attrs, + definitions=definitions, + ) + return schema + + +def get_schema_for_list(): + definitions = _get_base_definitions() + properties = _get_base_properties_for_list() + schema = daisy.schema.Schema( + 'tags', + properties, + required=None, + definitions=definitions, + ) + return schema + + +def get_collection_schema(): + tag_schema = get_schema() + return daisy.schema.CollectionSchema('tags', tag_schema) + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['created_at', 'updated_at'] + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_schema() + self.schema_for_list = get_schema_for_list() + + def _get_request_body(self, request): + output = super(RequestDeserializer, 
self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _get_filters(self, filters): + visibility = filters.get('visibility') + if visibility: + if visibility not in ['public', 'private', 'shared']: + msg = _('Invalid visibility value: %s') % visibility + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def update(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + metadata_tag = json.fromjson(MetadefTag, body) + return dict(metadata_tag=metadata_tag) + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + sort_dir = params.pop('sort_dir', 'desc') + + query_params = { + 'sort_key': params.pop('sort_key', 'created_at'), + 'sort_dir': self._validate_sort_dir(sort_dir), + 'filters': self._get_filters(params) + } + + if marker: + query_params['marker'] = marker + + if limit: + query_params['limit'] = self._validate_limit(limit) + + return query_params + + def create_tags(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + try: + self.schema_for_list.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + metadata_tags = json.fromjson(MetadefTags, body) + return dict(metadata_tags=metadata_tags) + + @classmethod + def _check_allowed(cls, image): + for key in cls._disallowed_properties: + if key in image: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema or get_schema() + + def create(self, response, metadata_tag): + response.status_int = 201 + self.show(response, metadata_tag) + + def create_tags(self, response, result): + response.status_int = 201 + metadata_tags_json = json.tojson(MetadefTags, result) + body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def show(self, response, metadata_tag): + metadata_tag_json = json.tojson(MetadefTag, metadata_tag) + body = jsonutils.dumps(metadata_tag_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def update(self, response, metadata_tag): + response.status_int = 200 + self.show(response, metadata_tag) + + def index(self, response, result): + metadata_tags_json = json.tojson(MetadefTags, result) + body = jsonutils.dumps(metadata_tags_json, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def delete(self, response, result): + response.status_int = 
204 + + +def get_tag_href(namespace_name, metadef_tag): + base_href = ('/v2/metadefs/namespaces/%s/tags/%s' % + (namespace_name, metadef_tag.name)) + return base_href + + +def create_resource(): + """Metadef tags resource factory method""" + schema = get_schema() + deserializer = RequestDeserializer(schema) + serializer = ResponseSerializer(schema) + controller = TagsController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/v2/model/__init__.py b/code/daisy/daisy/api/v2/model/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/api/v2/model/metadef_namespace.py b/code/daisy/daisy/api/v2/model/metadef_namespace.py new file mode 100755 index 00000000..f9685264 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_namespace.py @@ -0,0 +1,79 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import wsme +from wsme.rest import json +from wsme import types + +from daisy.api.v2.model.metadef_object import MetadefObject +from daisy.api.v2.model.metadef_property_type import PropertyType +from daisy.api.v2.model.metadef_resource_type import ResourceTypeAssociation +from daisy.api.v2.model.metadef_tag import MetadefTag +from daisy.common.wsme_utils import WSMEModelTransformer + + +class Namespace(types.Base, WSMEModelTransformer): + + # Base fields + namespace = wsme.wsattr(types.text, mandatory=True) + display_name = wsme.wsattr(types.text, mandatory=False) + description = wsme.wsattr(types.text, mandatory=False) + visibility = wsme.wsattr(types.text, mandatory=False) + protected = wsme.wsattr(bool, mandatory=False) + owner = wsme.wsattr(types.text, mandatory=False) + + # Not using datetime since time format has to be + # in oslo_utils.timeutils.isotime() format + created_at = wsme.wsattr(types.text, mandatory=False) + updated_at = wsme.wsattr(types.text, mandatory=False) + + # Contained fields + resource_type_associations = wsme.wsattr([ResourceTypeAssociation], + mandatory=False) + properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) + objects = wsme.wsattr([MetadefObject], mandatory=False) + tags = wsme.wsattr([MetadefTag], mandatory=False) + + # Generated fields + self = wsme.wsattr(types.text, mandatory=False) + schema = wsme.wsattr(types.text, mandatory=False) + + def __init__(cls, **kwargs): + super(Namespace, cls).__init__(**kwargs) + + @staticmethod + def to_model_properties(db_property_types): + property_types = {} + for db_property_type in db_property_types: + # Convert the persisted json schema to a dict of PropertyTypes + property_type = json.fromjson( + PropertyType, db_property_type.schema) + property_type_name = db_property_type.name + property_types[property_type_name] = property_type + + return property_types + + +class Namespaces(types.Base, WSMEModelTransformer): + + namespaces = wsme.wsattr([Namespace], mandatory=False) + + # Pagination + next = wsme.wsattr(types.text, mandatory=False) + schema = 
wsme.wsattr(types.text, mandatory=True) + first = wsme.wsattr(types.text, mandatory=True) + + def __init__(self, **kwargs): + super(Namespaces, self).__init__(**kwargs) diff --git a/code/daisy/daisy/api/v2/model/metadef_object.py b/code/daisy/daisy/api/v2/model/metadef_object.py new file mode 100755 index 00000000..cb84cf72 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_object.py @@ -0,0 +1,49 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import wsme +from wsme import types + +from daisy.api.v2.model.metadef_property_type import PropertyType +from daisy.common.wsme_utils import WSMEModelTransformer + + +class MetadefObject(types.Base, WSMEModelTransformer): + + name = wsme.wsattr(types.text, mandatory=True) + required = wsme.wsattr([types.text], mandatory=False) + description = wsme.wsattr(types.text, mandatory=False) + properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) + + # Not using datetime since time format has to be + # in oslo_utils.timeutils.isotime() format + created_at = wsme.wsattr(types.text, mandatory=False) + updated_at = wsme.wsattr(types.text, mandatory=False) + + # Generated fields + self = wsme.wsattr(types.text, mandatory=False) + schema = wsme.wsattr(types.text, mandatory=False) + + def __init__(cls, **kwargs): + super(MetadefObject, cls).__init__(**kwargs) + + +class MetadefObjects(types.Base, WSMEModelTransformer): + + objects = wsme.wsattr([MetadefObject], mandatory=False) + schema = wsme.wsattr(types.text, mandatory=True) + + def __init__(self, **kwargs): + super(MetadefObjects, self).__init__(**kwargs) diff --git a/code/daisy/daisy/api/v2/model/metadef_property_item_type.py b/code/daisy/daisy/api/v2/model/metadef_property_item_type.py new file mode 100755 index 00000000..228147a1 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_property_item_type.py @@ -0,0 +1,27 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
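(Editorial sketch, not part of the patch.) The WSME model classes in this patch, Namespace and MetadefObject above and ItemType/PropertyType below, are plain declarative types; the v2 controllers shown earlier convert them to and from JSON with wsme.rest.json plus the to_dict()/to_wsme_model() helpers from WSMEModelTransformer. A minimal round trip under those assumptions, with an invented payload used purely for illustration:

from wsme.rest import json as wsme_json

from daisy.api.v2.model.metadef_object import MetadefObject

# Hypothetical request body; the keys mirror the wsattr names declared above.
payload = {
    "name": "example-object",
    "description": "illustration only",
    "required": ["example-property"],
}

# Deserialize the dict into the typed model, as the controllers do with
# wsme_json.fromjson(...), then serialize it back out.
obj = wsme_json.fromjson(MetadefObject, payload)
assert obj.name == "example-object"

print(wsme_json.tojson(MetadefObject, obj))   # back to a JSON-able dict
print(obj.to_dict())                          # helper from WSMEModelTransformer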
+ +import wsme +from wsme import types + + +class ItemType(types.Base): + type = wsme.wsattr(types.text, mandatory=True) + enum = wsme.wsattr([types.text], mandatory=False) + + _wsme_attr_order = ('type', 'enum') + + def __init__(self, **kwargs): + super(ItemType, self).__init__(**kwargs) diff --git a/code/daisy/daisy/api/v2/model/metadef_property_type.py b/code/daisy/daisy/api/v2/model/metadef_property_type.py new file mode 100755 index 00000000..cf68cbe1 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_property_type.py @@ -0,0 +1,61 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import wsme +from wsme import types + +from daisy.api.v2.model.metadef_property_item_type import ItemType +from daisy.common.wsme_utils import WSMEModelTransformer + + +class PropertyType(types.Base, WSMEModelTransformer): + # When used in collection of PropertyTypes, name is a dictionary key + # and not included as separate field. + name = wsme.wsattr(types.text, mandatory=False) + + type = wsme.wsattr(types.text, mandatory=True) + title = wsme.wsattr(types.text, mandatory=True) + description = wsme.wsattr(types.text, mandatory=False) + operators = wsme.wsattr([types.text], mandatory=False) + default = wsme.wsattr(types.bytes, mandatory=False) + readonly = wsme.wsattr(bool, mandatory=False) + + # fields for type = string + minimum = wsme.wsattr(int, mandatory=False) + maximum = wsme.wsattr(int, mandatory=False) + enum = wsme.wsattr([types.text], mandatory=False) + pattern = wsme.wsattr(types.text, mandatory=False) + + # fields for type = integer, number + minLength = wsme.wsattr(int, mandatory=False) + maxLength = wsme.wsattr(int, mandatory=False) + confidential = wsme.wsattr(bool, mandatory=False) + + # fields for type = array + items = wsme.wsattr(ItemType, mandatory=False) + uniqueItems = wsme.wsattr(bool, mandatory=False) + minItems = wsme.wsattr(int, mandatory=False) + maxItems = wsme.wsattr(int, mandatory=False) + additionalItems = wsme.wsattr(bool, mandatory=False) + + def __init__(self, **kwargs): + super(PropertyType, self).__init__(**kwargs) + + +class PropertyTypes(types.Base, WSMEModelTransformer): + properties = wsme.wsattr({types.text: PropertyType}, mandatory=False) + + def __init__(self, **kwargs): + super(PropertyTypes, self).__init__(**kwargs) diff --git a/code/daisy/daisy/api/v2/model/metadef_resource_type.py b/code/daisy/daisy/api/v2/model/metadef_resource_type.py new file mode 100755 index 00000000..bd4a32e7 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_resource_type.py @@ -0,0 +1,62 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import wsme +from wsme import types + +from daisy.common.wsme_utils import WSMEModelTransformer + + +class ResourceTypeAssociation(types.Base, WSMEModelTransformer): + name = wsme.wsattr(types.text, mandatory=True) + prefix = wsme.wsattr(types.text, mandatory=False) + properties_target = wsme.wsattr(types.text, mandatory=False) + + # Not using datetime since time format has to be + # in oslo_utils.timeutils.isotime() format + created_at = wsme.wsattr(types.text, mandatory=False) + updated_at = wsme.wsattr(types.text, mandatory=False) + + def __init__(self, **kwargs): + super(ResourceTypeAssociation, self).__init__(**kwargs) + + +class ResourceTypeAssociations(types.Base, WSMEModelTransformer): + + resource_type_associations = wsme.wsattr([ResourceTypeAssociation], + mandatory=False) + + def __init__(self, **kwargs): + super(ResourceTypeAssociations, self).__init__(**kwargs) + + +class ResourceType(types.Base, WSMEModelTransformer): + name = wsme.wsattr(types.text, mandatory=True) + + # Not using datetime since time format has to be + # in oslo_utils.timeutils.isotime() format + created_at = wsme.wsattr(types.text, mandatory=False) + updated_at = wsme.wsattr(types.text, mandatory=False) + + def __init__(self, **kwargs): + super(ResourceType, self).__init__(**kwargs) + + +class ResourceTypes(types.Base, WSMEModelTransformer): + + resource_types = wsme.wsattr([ResourceType], mandatory=False) + + def __init__(self, **kwargs): + super(ResourceTypes, self).__init__(**kwargs) diff --git a/code/daisy/daisy/api/v2/model/metadef_tag.py b/code/daisy/daisy/api/v2/model/metadef_tag.py new file mode 100755 index 00000000..63ef8c25 --- /dev/null +++ b/code/daisy/daisy/api/v2/model/metadef_tag.py @@ -0,0 +1,34 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import wsme +from wsme import types + +from daisy.common import wsme_utils + + +class MetadefTag(types.Base, wsme_utils.WSMEModelTransformer): + + name = wsme.wsattr(types.text, mandatory=True) + + # Not using datetime since time format has to be + # in oslo_utils.timeutils.isotime() format + created_at = wsme.wsattr(types.text, mandatory=False) + updated_at = wsme.wsattr(types.text, mandatory=False) + + +class MetadefTags(types.Base, wsme_utils.WSMEModelTransformer): + + tags = wsme.wsattr([MetadefTag], mandatory=False) diff --git a/code/daisy/daisy/api/v2/router.py b/code/daisy/daisy/api/v2/router.py new file mode 100755 index 00000000..9d8c166d --- /dev/null +++ b/code/daisy/daisy/api/v2/router.py @@ -0,0 +1,570 @@ +# Copyright 2012 OpenStack Foundation. 
+# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.api.v2 import image_actions +from daisy.api.v2 import image_data +from daisy.api.v2 import image_members +from daisy.api.v2 import image_tags +from daisy.api.v2 import images +from daisy.api.v2 import metadef_namespaces +from daisy.api.v2 import metadef_objects +from daisy.api.v2 import metadef_properties +from daisy.api.v2 import metadef_resource_types +from daisy.api.v2 import metadef_tags +from daisy.api.v2 import schemas +from daisy.api.v2 import tasks +from daisy.common import wsgi + + +class API(wsgi.Router): + + """WSGI router for Glance v2 API requests.""" + + def __init__(self, mapper): + custom_image_properties = images.load_custom_properties() + reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) + + schemas_resource = schemas.create_resource(custom_image_properties) + mapper.connect('/schemas/image', + controller=schemas_resource, + action='image', + conditions={'method': ['GET']}) + mapper.connect('/schemas/image', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + mapper.connect('/schemas/images', + controller=schemas_resource, + action='images', + conditions={'method': ['GET']}) + mapper.connect('/schemas/images', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + mapper.connect('/schemas/member', + controller=schemas_resource, + action='member', + conditions={'method': ['GET']}) + mapper.connect('/schemas/member', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/members', + controller=schemas_resource, + action='members', + conditions={'method': ['GET']}) + mapper.connect('/schemas/members', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/task', + controller=schemas_resource, + action='task', + conditions={'method': ['GET']}) + mapper.connect('/schemas/task', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + mapper.connect('/schemas/tasks', + controller=schemas_resource, + action='tasks', + conditions={'method': ['GET']}) + mapper.connect('/schemas/tasks', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/namespace', + controller=schemas_resource, + action='metadef_namespace', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/namespace', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 
'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/namespaces', + controller=schemas_resource, + action='metadef_namespaces', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/namespaces', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/resource_type', + controller=schemas_resource, + action='metadef_resource_type', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/resource_type', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/resource_types', + controller=schemas_resource, + action='metadef_resource_types', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/resource_types', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/property', + controller=schemas_resource, + action='metadef_property', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/property', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/properties', + controller=schemas_resource, + action='metadef_properties', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/properties', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/object', + controller=schemas_resource, + action='metadef_object', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/object', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/objects', + controller=schemas_resource, + action='metadef_objects', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/objects', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/tag', + controller=schemas_resource, + action='metadef_tag', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/tag', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/schemas/metadefs/tags', + controller=schemas_resource, + action='metadef_tags', + conditions={'method': ['GET']}) + mapper.connect('/schemas/metadefs/tags', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + # Metadef resource types + metadef_resource_types_resource = ( + metadef_resource_types.create_resource()) + + mapper.connect('/metadefs/resource_types', + controller=metadef_resource_types_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/resource_types', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 
'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}/resource_types', + controller=metadef_resource_types_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/resource_types', + controller=metadef_resource_types_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces/{namespace}/resource_types', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' + '{resource_type}', + controller=metadef_resource_types_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/resource_types/' + '{resource_type}', + controller=reject_method_resource, + action='reject', + allowed_methods='DELETE', + conditions={'method': ['GET', 'POST', 'PUT', + 'PATCH', 'HEAD']}) + + # Metadef Namespaces + metadef_namespace_resource = metadef_namespaces.create_resource() + mapper.connect('/metadefs/namespaces', + controller=metadef_namespace_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces', + controller=metadef_namespace_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}', + controller=metadef_namespace_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}', + controller=metadef_namespace_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/metadefs/namespaces/{namespace}', + controller=metadef_namespace_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT, DELETE', + conditions={'method': ['POST', 'PATCH', 'HEAD']}) + + # Metadef namespace properties + metadef_properties_resource = metadef_properties.create_resource() + mapper.connect('/metadefs/namespaces/{namespace}/properties', + controller=metadef_properties_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties', + controller=metadef_properties_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties', + controller=metadef_namespace_resource, + action='delete_properties', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST, DELETE', + conditions={'method': ['PUT', 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}/properties/{' + 'property_name}', + controller=metadef_properties_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties/{' + 'property_name}', + controller=metadef_properties_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties/{' + 'property_name}', + controller=metadef_properties_resource, + action='delete', + conditions={'method': 
['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/properties/{' + 'property_name}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT, DELETE', + conditions={'method': ['POST', 'PATCH', 'HEAD']}) + + # Metadef objects + metadef_objects_resource = metadef_objects.create_resource() + mapper.connect('/metadefs/namespaces/{namespace}/objects', + controller=metadef_objects_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects', + controller=metadef_objects_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects', + controller=metadef_namespace_resource, + action='delete_objects', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST, DELETE', + conditions={'method': ['PUT', 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}/objects/{' + 'object_name}', + controller=metadef_objects_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects/{' + 'object_name}', + controller=metadef_objects_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects/{' + 'object_name}', + controller=metadef_objects_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/objects/{' + 'object_name}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT, DELETE', + conditions={'method': ['POST', 'PATCH', 'HEAD']}) + + # Metadef tags + metadef_tags_resource = metadef_tags.create_resource() + mapper.connect('/metadefs/namespaces/{namespace}/tags', + controller=metadef_tags_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags', + controller=metadef_tags_resource, + action='create_tags', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags', + controller=metadef_namespace_resource, + action='delete_tags', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST, DELETE', + conditions={'method': ['PUT', 'PATCH', 'HEAD']}) + + mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', + controller=metadef_tags_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', + controller=metadef_tags_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', + controller=metadef_tags_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', + controller=metadef_tags_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/metadefs/namespaces/{namespace}/tags/{tag_name}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST, PUT, DELETE', + conditions={'method': ['PATCH', 'HEAD']}) + + images_resource = images.create_resource(custom_image_properties) + mapper.connect('/images', + controller=images_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/images', + 
controller=images_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/images', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', 'PATCH', + 'HEAD']}) + + mapper.connect('/images/{image_id}', + controller=images_resource, + action='update', + conditions={'method': ['PATCH']}) + mapper.connect('/images/{image_id}', + controller=images_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/images/{image_id}', + controller=images_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/images/{image_id}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PATCH, DELETE', + conditions={'method': ['POST', 'PUT', 'HEAD']}) + + image_actions_resource = image_actions.create_resource() + mapper.connect('/images/{image_id}/actions/deactivate', + controller=image_actions_resource, + action='deactivate', + conditions={'method': ['POST']}) + mapper.connect('/images/{image_id}/actions/reactivate', + controller=image_actions_resource, + action='reactivate', + conditions={'method': ['POST']}) + mapper.connect('/images/{image_id}/actions/deactivate', + controller=reject_method_resource, + action='reject', + allowed_methods='POST', + conditions={'method': ['GET', 'PUT', 'DELETE', 'PATCH', + 'HEAD']}) + mapper.connect('/images/{image_id}/actions/reactivate', + controller=reject_method_resource, + action='reject', + allowed_methods='POST', + conditions={'method': ['GET', 'PUT', 'DELETE', 'PATCH', + 'HEAD']}) + + image_data_resource = image_data.create_resource() + mapper.connect('/images/{image_id}/file', + controller=image_data_resource, + action='download', + conditions={'method': ['GET']}) + mapper.connect('/images/{image_id}/file', + controller=image_data_resource, + action='upload', + conditions={'method': ['PUT']}) + mapper.connect('/images/{image_id}/file', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT', + conditions={'method': ['POST', 'DELETE', 'PATCH', + 'HEAD']}) + + image_tags_resource = image_tags.create_resource() + mapper.connect('/images/{image_id}/tags/{tag_value}', + controller=image_tags_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/images/{image_id}/tags/{tag_value}', + controller=image_tags_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/images/{image_id}/tags/{tag_value}', + controller=reject_method_resource, + action='reject', + allowed_methods='PUT, DELETE', + conditions={'method': ['GET', 'POST', 'PATCH', + 'HEAD']}) + + image_members_resource = image_members.create_resource() + mapper.connect('/images/{image_id}/members', + controller=image_members_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/images/{image_id}/members', + controller=image_members_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/images/{image_id}/members', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', 'PATCH', + 'HEAD']}) + + mapper.connect('/images/{image_id}/members/{member_id}', + controller=image_members_resource, + action='show', + conditions={'method': ['GET']}) + mapper.connect('/images/{image_id}/members/{member_id}', + controller=image_members_resource, + action='update', + conditions={'method': ['PUT']}) + mapper.connect('/images/{image_id}/members/{member_id}', 
+ controller=image_members_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/images/{image_id}/members/{member_id}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, PUT, DELETE', + conditions={'method': ['POST', 'PATCH', 'HEAD']}) + + tasks_resource = tasks.create_resource() + mapper.connect('/tasks', + controller=tasks_resource, + action='create', + conditions={'method': ['POST']}) + mapper.connect('/tasks', + controller=tasks_resource, + action='index', + conditions={'method': ['GET']}) + mapper.connect('/tasks', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', 'PATCH', + 'HEAD']}) + + mapper.connect('/tasks/{task_id}', + controller=tasks_resource, + action='get', + conditions={'method': ['GET']}) + mapper.connect('/tasks/{task_id}', + controller=tasks_resource, + action='delete', + conditions={'method': ['DELETE']}) + mapper.connect('/tasks/{task_id}', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, DELETE', + conditions={'method': ['POST', 'PUT', 'PATCH', + 'HEAD']}) + + super(API, self).__init__(mapper) diff --git a/code/daisy/daisy/api/v2/schemas.py b/code/daisy/daisy/api/v2/schemas.py new file mode 100755 index 00000000..832ffa02 --- /dev/null +++ b/code/daisy/daisy/api/v2/schemas.py @@ -0,0 +1,109 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
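(Editorial sketch, not part of the patch.) router.API above is an ordinary routes-based WSGI application; in a real deployment it is wired up through paste.deploy behind auth and cache middleware, but it can also be instantiated directly, which is occasionally handy for ad-hoc testing. A rough illustration, where the bare Mapper, the port, and serving it without any middleware or prior config setup are all assumptions rather than anything defined in this patch:

from routes import Mapper
from wsgiref.simple_server import make_server

from daisy.api.v2 import router

# API.__init__ populates the mapper with all of the mapper.connect()
# routes listed above (schemas, metadefs, images, tasks, ...).
app = router.API(Mapper())

# Serve it directly for a quick manual check; note the controllers expect
# oslo.config and glance_store to be initialised first.
make_server('127.0.0.1', 9292, app).serve_forever()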
+ +from daisy.api.v2 import image_members +from daisy.api.v2 import images +from daisy.api.v2 import metadef_namespaces +from daisy.api.v2 import metadef_objects +from daisy.api.v2 import metadef_properties +from daisy.api.v2 import metadef_resource_types +from daisy.api.v2 import metadef_tags +from daisy.api.v2 import tasks +from daisy.common import wsgi + + +class Controller(object): + def __init__(self, custom_image_properties=None): + self.image_schema = images.get_schema(custom_image_properties) + self.image_collection_schema = images.get_collection_schema( + custom_image_properties) + self.member_schema = image_members.get_schema() + self.member_collection_schema = image_members.get_collection_schema() + self.task_schema = tasks.get_task_schema() + self.task_collection_schema = tasks.get_collection_schema() + + # Metadef schemas + self.metadef_namespace_schema = metadef_namespaces.get_schema() + self.metadef_namespace_collection_schema = \ + metadef_namespaces.get_collection_schema() + + self.metadef_resource_type_schema = metadef_resource_types.get_schema() + self.metadef_resource_type_collection_schema = \ + metadef_resource_types.get_collection_schema() + + self.metadef_property_schema = metadef_properties.get_schema() + self.metadef_property_collection_schema = \ + metadef_properties.get_collection_schema() + + self.metadef_object_schema = metadef_objects.get_schema() + self.metadef_object_collection_schema = \ + metadef_objects.get_collection_schema() + + self.metadef_tag_schema = metadef_tags.get_schema() + self.metadef_tag_collection_schema = ( + metadef_tags.get_collection_schema()) + + def image(self, req): + return self.image_schema.raw() + + def images(self, req): + return self.image_collection_schema.raw() + + def member(self, req): + return self.member_schema.minimal() + + def members(self, req): + return self.member_collection_schema.minimal() + + def task(self, req): + return self.task_schema.minimal() + + def tasks(self, req): + return self.task_collection_schema.minimal() + + def metadef_namespace(self, req): + return self.metadef_namespace_schema.raw() + + def metadef_namespaces(self, req): + return self.metadef_namespace_collection_schema.raw() + + def metadef_resource_type(self, req): + return self.metadef_resource_type_schema.raw() + + def metadef_resource_types(self, req): + return self.metadef_resource_type_collection_schema.raw() + + def metadef_property(self, req): + return self.metadef_property_schema.raw() + + def metadef_properties(self, req): + return self.metadef_property_collection_schema.raw() + + def metadef_object(self, req): + return self.metadef_object_schema.raw() + + def metadef_objects(self, req): + return self.metadef_object_collection_schema.raw() + + def metadef_tag(self, req): + return self.metadef_tag_schema.raw() + + def metadef_tags(self, req): + return self.metadef_tag_collection_schema.raw() + + +def create_resource(custom_image_properties=None): + controller = Controller(custom_image_properties) + return wsgi.Resource(controller) diff --git a/code/daisy/daisy/api/v2/tasks.py b/code/daisy/daisy/api/v2/tasks.py new file mode 100755 index 00000000..9b770062 --- /dev/null +++ b/code/daisy/daisy/api/v2/tasks.py @@ -0,0 +1,393 @@ +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import glance_store +import oslo.serialization.jsonutils as json +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import timeutils +import six +import six.moves.urllib.parse as urlparse +import webob.exc + +from daisy.api import policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + +CONF = cfg.CONF +CONF.import_opt('task_time_to_live', 'daisy.common.config', group='task') + + +class TasksController(object): + """Manages operations on tasks.""" + + def __init__(self, db_api=None, policy_enforcer=None, notifier=None, + store_api=None): + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.notifier = notifier or daisy.notifier.Notifier() + self.store_api = store_api or glance_store + self.gateway = daisy.gateway.Gateway(self.db_api, self.store_api, + self.notifier, self.policy) + + def create(self, req, task): + task_factory = self.gateway.get_task_factory(req.context) + executor_factory = self.gateway.get_task_executor_factory(req.context) + task_repo = self.gateway.get_task_repo(req.context) + live_time = CONF.task.task_time_to_live + try: + new_task = task_factory.new_task(task_type=task['type'], + owner=req.context.owner, + task_time_to_live=live_time, + task_input=task['input']) + task_repo.add(new_task) + task_executor = executor_factory.new_task_executor(req.context) + new_task.run(task_executor) + except exception.Forbidden as e: + msg = (_LW("Forbidden to create task. Reason: %(reason)s") + % {'reason': utils.exception_to_str(e)}) + LOG.warn(msg) + raise webob.exc.HTTPForbidden(explanation=e.msg) + return new_task + + def index(self, req, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + result = {} + if filters is None: + filters = {} + filters['deleted'] = False + + if limit is None: + limit = CONF.limit_param_default + limit = min(CONF.api_limit_max, limit) + + task_repo = self.gateway.get_task_stub_repo(req.context) + try: + tasks = task_repo.list(marker, limit, sort_key, + sort_dir, filters) + if len(tasks) != 0 and len(tasks) == limit: + result['next_marker'] = tasks[-1].task_id + except (exception.NotFound, exception.InvalidSortKey, + exception.InvalidFilterRangeValue) as e: + LOG.warn(utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=e.msg) + except exception.Forbidden as e: + LOG.warn(utils.exception_to_str(e)) + raise webob.exc.HTTPForbidden(explanation=e.msg) + result['tasks'] = tasks + return result + + def get(self, req, task_id): + try: + task_repo = self.gateway.get_task_repo(req.context) + task = task_repo.get(task_id) + except exception.NotFound as e: + msg = (_LW("Failed to find task %(task_id)s. 
Reason: %(reason)s") % + {'task_id': task_id, 'reason': utils.exception_to_str(e)}) + LOG.warn(msg) + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Forbidden as e: + msg = (_LW("Forbidden to get task %(task_id)s. Reason:" + " %(reason)s") % + {'task_id': task_id, 'reason': utils.exception_to_str(e)}) + LOG.warn(msg) + raise webob.exc.HTTPForbidden(explanation=e.msg) + return task + + def delete(self, req, task_id): + msg = (_("This operation is currently not permitted on Glance Tasks. " + "They are auto deleted after reaching the time based on " + "their expires_at property.")) + raise webob.exc.HTTPMethodNotAllowed(explanation=msg, + headers={'Allow': 'GET'}, + body_template='${explanation}') + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _required_properties = ['type', 'input'] + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + def _validate_sort_dir(self, sort_dir): + if sort_dir not in ['asc', 'desc']: + msg = _('Invalid sort direction: %s') % sort_dir + raise webob.exc.HTTPBadRequest(explanation=msg) + + return sort_dir + + def _get_filters(self, filters): + status = filters.get('status') + if status: + if status not in ['pending', 'processing', 'success', 'failure']: + msg = _('Invalid status value: %s') % status + raise webob.exc.HTTPBadRequest(explanation=msg) + + type = filters.get('type') + if type: + if type not in ['import']: + msg = _('Invalid type value: %s') % type + raise webob.exc.HTTPBadRequest(explanation=msg) + + return filters + + def _validate_marker(self, marker): + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise webob.exc.HTTPBadRequest(explanation=msg) + return marker + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 0: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def _validate_create_body(self, body): + """Validate the body of task creating request""" + for param in self._required_properties: + if param not in body: + msg = _("Task '%s' is required") % param + raise webob.exc.HTTPBadRequest(explanation=msg) + + def __init__(self, schema=None): + super(RequestDeserializer, self).__init__() + self.schema = schema or get_task_schema() + + def create(self, request): + body = self._get_request_body(request) + self._validate_create_body(body) + try: + self.schema.validate(body) + except exception.InvalidObject as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + task = {} + properties = body + for key in self._required_properties: + try: + task[key] = properties.pop(key) + except KeyError: + pass + return dict(task=task) + + def index(self, request): + params = request.params.copy() + limit = params.pop('limit', None) + marker = params.pop('marker', None) + sort_dir = params.pop('sort_dir', 'desc') + query_params = { + 'sort_key': params.pop('sort_key', 'created_at'), + 'sort_dir': self._validate_sort_dir(sort_dir), + 'filters': self._get_filters(params) + } + + if marker is not None: + query_params['marker'] = self._validate_marker(marker) + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + return query_params + + +class 
ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, task_schema=None, partial_task_schema=None): + super(ResponseSerializer, self).__init__() + self.task_schema = task_schema or get_task_schema() + self.partial_task_schema = (partial_task_schema + or _get_partial_task_schema()) + + def _inject_location_header(self, response, task): + location = self._get_task_location(task) + response.headers['Location'] = location.encode('utf-8') + + def _get_task_location(self, task): + return '/v2/tasks/%s' % task.task_id + + def _format_task(self, schema, task): + task_view = {} + task_view['id'] = task.task_id + task_view['input'] = task.task_input + task_view['type'] = task.type + task_view['status'] = task.status + task_view['owner'] = task.owner + task_view['message'] = task.message + task_view['result'] = task.result + if task.expires_at: + task_view['expires_at'] = timeutils.isotime(task.expires_at) + task_view['created_at'] = timeutils.isotime(task.created_at) + task_view['updated_at'] = timeutils.isotime(task.updated_at) + task_view['self'] = self._get_task_location(task) + task_view['schema'] = '/v2/schemas/task' + task_view = schema.filter(task_view) # domain + return task_view + + def _format_task_stub(self, schema, task): + task_view = {} + task_view['id'] = task.task_id + task_view['type'] = task.type + task_view['status'] = task.status + task_view['owner'] = task.owner + if task.expires_at: + task_view['expires_at'] = timeutils.isotime(task.expires_at) + task_view['created_at'] = timeutils.isotime(task.created_at) + task_view['updated_at'] = timeutils.isotime(task.updated_at) + task_view['self'] = self._get_task_location(task) + task_view['schema'] = '/v2/schemas/task' + task_view = schema.filter(task_view) # domain + return task_view + + def create(self, response, task): + response.status_int = 201 + self._inject_location_header(response, task) + self.get(response, task) + + def get(self, response, task): + task_view = self._format_task(self.task_schema, task) + body = json.dumps(task_view, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, result): + params = dict(response.request.params) + params.pop('marker', None) + query = urlparse.urlencode(params) + body = { + 'tasks': [self._format_task_stub(self.partial_task_schema, task) + for task in result['tasks']], + 'first': '/v2/tasks', + 'schema': '/v2/schemas/tasks', + } + if query: + body['first'] = '%s?%s' % (body['first'], query) + if 'next_marker' in result: + params['marker'] = result['next_marker'] + next_query = urlparse.urlencode(params) + body['next'] = '/v2/tasks?%s' % next_query + response.unicode_body = six.text_type(json.dumps(body, + ensure_ascii=False)) + response.content_type = 'application/json' + + +_TASK_SCHEMA = { + "id": { + "description": _("An identifier for the task"), + "pattern": _('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}' + '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'), + "type": "string" + }, + "type": { + "description": _("The type of task represented by this content"), + "enum": [ + "import", + ], + "type": "string" + }, + "status": { + "description": _("The current status of this task"), + "enum": [ + "pending", + "processing", + "success", + "failure" + ], + "type": "string" + }, + "input": { + "description": _("The parameters required by task, JSON blob"), + "type": ["null", "object"], + }, + "result": { + "description": _("The result of current task, JSON blob"), + "type": ["null", 
"object"], + }, + "owner": { + "description": _("An identifier for the owner of this task"), + "type": "string" + }, + "message": { + "description": _("Human-readable informative message only included" + " when appropriate (usually on failure)"), + "type": "string", + }, + "expires_at": { + "description": _("Datetime when this resource would be" + " subject to removal"), + "type": ["null", "string"] + }, + "created_at": { + "description": _("Datetime when this resource was created"), + "type": "string" + }, + "updated_at": { + "description": _("Datetime when this resource was updated"), + "type": "string" + }, + 'self': {'type': 'string'}, + 'schema': {'type': 'string'} +} + + +def get_task_schema(): + properties = copy.deepcopy(_TASK_SCHEMA) + schema = daisy.schema.Schema('task', properties) + return schema + + +def _get_partial_task_schema(): + properties = copy.deepcopy(_TASK_SCHEMA) + hide_properties = ['input', 'result', 'message'] + for key in hide_properties: + del properties[key] + schema = daisy.schema.Schema('task', properties) + return schema + + +def get_collection_schema(): + task_schema = _get_partial_task_schema() + return daisy.schema.CollectionSchema('tasks', task_schema) + + +def create_resource(): + """Task resource factory method""" + task_schema = get_task_schema() + partial_task_schema = _get_partial_task_schema() + deserializer = RequestDeserializer(task_schema) + serializer = ResponseSerializer(task_schema, partial_task_schema) + controller = TasksController() + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/api/versions.py b/code/daisy/daisy/api/versions.py new file mode 100755 index 00000000..658e7405 --- /dev/null +++ b/code/daisy/daisy/api/versions.py @@ -0,0 +1,85 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib + +from oslo.serialization import jsonutils +from oslo_config import cfg +import webob.dec + +from daisy.common import wsgi +from daisy import i18n + +_ = i18n._ + +versions_opts = [ + cfg.StrOpt('public_endpoint', default=None, + help=_('Public url to use for versions endpoint. The default ' + 'is None, which will use the request\'s host_url ' + 'attribute to populate the URL base. 
If Glance is ' + 'operating behind a proxy, you will want to change ' + 'this to represent the proxy\'s URL.')), +] + +CONF = cfg.CONF +CONF.register_opts(versions_opts) + + +class Controller(object): + + """A wsgi controller that reports which API versions are supported.""" + + def index(self, req): + """Respond to a request for all OpenStack API versions.""" + def build_version_object(version, path, status): + url = CONF.public_endpoint or req.host_url + return { + 'id': 'v%s' % version, + 'status': status, + 'links': [ + { + 'rel': 'self', + 'href': '%s/%s/' % (url, path), + }, + ], + } + + version_objs = [] + if CONF.enable_v2_api: + version_objs.extend([ + build_version_object(2.3, 'v2', 'CURRENT'), + build_version_object(2.2, 'v2', 'SUPPORTED'), + build_version_object(2.1, 'v2', 'SUPPORTED'), + build_version_object(2.0, 'v2', 'SUPPORTED'), + ]) + if CONF.enable_v1_api: + version_objs.extend([ + build_version_object(1.1, 'v1', 'SUPPORTED'), + build_version_object(1.0, 'v1', 'SUPPORTED'), + ]) + + response = webob.Response(request=req, + status=httplib.MULTIPLE_CHOICES, + content_type='application/json') + response.body = jsonutils.dumps(dict(versions=version_objs)) + return response + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + return self.index(req) + + +def create_resource(conf): + return wsgi.Resource(Controller()) diff --git a/code/daisy/daisy/artifacts/__init__.py b/code/daisy/daisy/artifacts/__init__.py new file mode 100755 index 00000000..96c00f23 --- /dev/null +++ b/code/daisy/daisy/artifacts/__init__.py @@ -0,0 +1,46 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from daisy.common import exception + + +class Showlevel(object): + # None - do not show additional properties and blobs with locations; + # Basic - show all artifact fields except dependencies; + # Direct - show all artifact fields with only direct dependencies; + # Transitive - show all artifact fields with all of dependencies. + NONE = 0 + BASIC = 1 + DIRECT = 2 + TRANSITIVE = 3 + + _level_map = {'none': NONE, 'basic': BASIC, 'direct': DIRECT, + 'transitive': TRANSITIVE} + _inverted_level_map = {v: k for k, v in six.iteritems(_level_map)} + + @staticmethod + def to_str(n): + try: + return Showlevel._inverted_level_map[n] + except KeyError: + raise exception.ArtifactUnsupportedShowLevel() + + @staticmethod + def from_str(str_value): + try: + return Showlevel._level_map[str_value] + except KeyError: + raise exception.ArtifactUnsupportedShowLevel() diff --git a/code/daisy/daisy/async/__init__.py b/code/daisy/daisy/async/__init__.py new file mode 100755 index 00000000..e011786d --- /dev/null +++ b/code/daisy/daisy/async/__init__.py @@ -0,0 +1,73 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_LE = i18n._LE + + +class TaskExecutor(object): + """Base class for Asynchronous task executors. It does not support the + execution mechanism. + + Provisions the extensible classes with necessary variables to utilize + important Glance modules like, context, task_repo, image_repo, + image_factory. + + Note: + It also gives abstraction for the standard pre-processing and + post-processing operations to be executed by a task. These may include + validation checks, security checks, introspection, error handling etc. + The aim is to give developers an abstract sense of the execution + pipeline logic. + + Args: + context: daisy.context.RequestContext object for AuthZ and AuthN + checks + task_repo: daisy.db.TaskRepo object which acts as a translator for + daisy.domain.Task and daisy.domain.TaskStub objects + into ORM semantics + image_repo: daisy.db.ImageRepo object which acts as a translator for + daisy.domain.Image object into ORM semantics + image_factory: daisy.domain.ImageFactory object to be used for + creating new images for certain types of tasks viz. import, cloning + """ + + def __init__(self, context, task_repo, image_repo, image_factory): + self.context = context + self.task_repo = task_repo + self.image_repo = image_repo + self.image_factory = image_factory + + def begin_processing(self, task_id): + task = self.task_repo.get(task_id) + task.begin_processing() + self.task_repo.save(task) + + # start running + self._run(task_id, task.type) + + def _run(self, task_id, task_type): + task = self.task_repo.get(task_id) + msg = _LE("This execution of Tasks is not setup. Please consult the " + "project documentation for more information on the " + "executors available.") + LOG.error(msg) + task.fail(_LE("Internal error occurred while trying to process task.")) + self.task_repo.save(task) diff --git a/code/daisy/daisy/async/flows/__init__.py b/code/daisy/daisy/async/flows/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/async/flows/base_import.py b/code/daisy/daisy/async/flows/base_import.py new file mode 100755 index 00000000..6d22f335 --- /dev/null +++ b/code/daisy/daisy/async/flows/base_import.py @@ -0,0 +1,441 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
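+
+# NOTE: an illustrative sketch of the flow assembled by get_flow() below.
+# With the optional 'convert' and 'introspect' plugins loaded it runs
+# linearly as
+#   _CreateImage -> _ImportToFS -> convert -> introspect
+#     -> _ImportToStore -> _DeleteFromFS -> _SaveImage -> _CompleteTask
+# and it degrades to
+#   _CreateImage -> _ImportToStore -> _SaveImage -> _CompleteTask
+# when no optional plugin registers itself or the work dir is not
+# configured (BadTaskConfiguration).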
+ +import logging +import os + +import glance_store as store_api +from glance_store import backend +from oslo_config import cfg +import six +from stevedore import named +from taskflow.patterns import linear_flow as lf +from taskflow import retry +from taskflow import task + +from daisy.common import exception +from daisy.common.scripts.image_import import main as image_import +from daisy.common.scripts import utils as script_utils +from daisy.common import utils as common_utils +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI + + +CONF = cfg.CONF + + +class _CreateImage(task.Task): + + default_provides = 'image_id' + + def __init__(self, task_id, task_type, task_repo, image_repo, + image_factory): + self.task_id = task_id + self.task_type = task_type + self.task_repo = task_repo + self.image_repo = image_repo + self.image_factory = image_factory + super(_CreateImage, self).__init__( + name='%s-CreateImage-%s' % (task_type, task_id)) + + def execute(self): + task = script_utils.get_task(self.task_repo, self.task_id) + if task is None: + return + task_input = script_utils.unpack_task_input(task) + image = image_import.create_image( + self.image_repo, self.image_factory, + task_input.get('image_properties'), self.task_id) + + LOG.debug("Task %(task_id)s created image %(image_id)s" % + {'task_id': task.task_id, 'image_id': image.image_id}) + return image.image_id + + def revert(self, *args, **kwargs): + # TODO(flaper87): Define the revert rules for images on failures. + # Deleting the image may not be what we want since users could upload + # the image data in a separate step. However, it really depends on + # when the failure happened. I guess we should check if data has been + # written, although at that point failures are (should be) unexpected, + # at least image-workflow wise. + pass + + +class _ImportToFS(task.Task): + + default_provides = 'file_path' + + def __init__(self, task_id, task_type, task_repo, uri): + self.task_id = task_id + self.task_type = task_type + self.task_repo = task_repo + self.uri = uri + super(_ImportToFS, self).__init__( + name='%s-ImportToFS-%s' % (task_type, task_id)) + + if CONF.task.work_dir is None: + msg = (_("%(task_id)s of %(task_type)s not configured " + "properly. Missing work dir: %(work_dir)s") % + {'task_id': self.task_id, + 'task_type': self.task_type, + 'work_dir': CONF.task.work_dir}) + raise exception.BadTaskConfiguration(msg) + + self.store = self._build_store() + + def _build_store(self): + # NOTE(flaper87): Due to the nice glance_store api (#sarcasm), we're + # forced to build our own config object, register the required options + # (and by required I mean *ALL* of them, even the ones we don't want), + # and create our own store instance by calling a private function. + # This is certainly unfortunate but it's the best we can do until the + # glance_store refactor is done. A good thing is that glance_store is + # under our team's management and it gates on Glance so changes to + # this API will (should?) break task's tests. + conf = cfg.ConfigOpts() + backend.register_opts(conf) + conf.set_override('filesystem_store_datadir', + CONF.task.work_dir, + group='glance_store') + + # NOTE(flaper87): Do not even try to judge me for this... :( + # With the glance_store refactor, this code will change, until + # that happens, we don't have a better option and this is the + # least worst one, IMHO. 
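+        # _load_store() hands back a filesystem store instance bound to
+        # the private config object built above, or None when the 'file'
+        # driver cannot be loaded; configure() below finishes its setup.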
+ store = backend._load_store(conf, 'file') + + if store is None: + msg = (_("%(task_id)s of %(task_type)s not configured " + "properly. Could not load the filesystem store") % + {'task_id': self.task_id, 'task_type': self.task_type}) + raise exception.BadTaskConfiguration(msg) + + store.configure() + return store + + def execute(self, image_id): + """Create temp file into store and return path to it + + :param image_id: Glance Image ID + """ + # NOTE(flaper87): We've decided to use a separate `work_dir` for + # this task - and tasks coming after this one - as a way to expect + # users to configure a local store for pre-import works on the image + # to happen. + # + # While using any path should be "technically" fine, it's not what + # we recommend as the best solution. For more details on this, please + # refer to the comment in the `_ImportToStore.execute` method. + data = script_utils.get_image_data_iter(self.uri) + + # NOTE(jokke): Using .tasks_import to ease debugging. The file name + # is specific so we know exactly where it's coming from. + tmp_id = "%s.tasks_import" % image_id + path = self.store.add(tmp_id, data, 0, context=None)[0] + return path + + def revert(self, image_id, result=None, **kwargs): + # NOTE(flaper87): If result is None, it probably + # means this task failed. Otherwise, we would have + # a result from its execution. + if result is None: + return + + if os.path.exists(result.split("file://")[-1]): + store_api.delete_from_backend(result) + + +class _DeleteFromFS(task.Task): + + def __init__(self, task_id, task_type): + self.task_id = task_id + self.task_type = task_type + super(_DeleteFromFS, self).__init__( + name='%s-DeleteFromFS-%s' % (task_type, task_id)) + + def execute(self, file_path): + """Remove file from the backend + + :param file_path: path to the file being deleted + """ + store_api.delete_from_backend(file_path) + + +class _ImportToStore(task.Task): + + def __init__(self, task_id, task_type, image_repo, uri): + self.task_id = task_id + self.task_type = task_type + self.image_repo = image_repo + self.uri = uri + super(_ImportToStore, self).__init__( + name='%s-ImportToStore-%s' % (task_type, task_id)) + + def execute(self, image_id, file_path=None): + """Bringing the introspected image to back end store + + :param image_id: Glance Image ID + :param file_path: path to the image file + """ + # NOTE(flaper87): There are a couple of interesting bits in the + # interaction between this task and the `_ImportToFS` one. I'll try + # to cover them in this comment. + # + # NOTE(flaper87): + # `_ImportToFS` downloads the image to a dedicated `work_dir` which + # needs to be configured in advance (please refer to the config option + # docs for more info). The motivation behind this is also explained in + # the `_ImportToFS.execute` method. + # + # Due to the fact that we have an `_ImportToFS` task which downloads + # the image data already, we need to be as smart as we can in this task + # to avoid downloading the data several times and reducing the copy or + # write times. There are several scenarios where the interaction + # between this task and `_ImportToFS` could be improved. All these + # scenarios assume the `_ImportToFS` task has been executed before + # and/or in a more abstract scenario, that `file_path` is being + # provided. + # + # Scenario 1: FS Store is Remote, introspection enabled, + # conversion disabled + # + # In this scenario, the user would benefit from having the scratch path + # being the same path as the fs store. 
Only one write would happen and + # an extra read will happen in order to introspect the image. Note that + # this read is just for the image headers and not the entire file. + # + # Scenario 2: FS Store is remote, introspection enabled, + # conversion enabled + # + # In this scenario, the user would benefit from having a *local* store + # into which the image can be converted. This will require downloading + # the image locally, converting it and then copying the converted image + # to the remote store. + # + # Scenario 3: FS Store is local, introspection enabled, + # conversion disabled + # Scenario 4: FS Store is local, introspection enabled, + # conversion enabled + # + # In both these scenarios the user shouldn't care if the FS + # store path and the work dir are the same, therefore probably + # benefit, about the scratch path and the FS store being the + # same from a performance perspective. Space wise, regardless + # of the scenario, the user will have to account for it in + # advance. + # + # Lets get to it and identify the different scenarios in the + # implementation + image = self.image_repo.get(image_id) + image.status = 'saving' + self.image_repo.save(image) + + # NOTE(flaper87): Let's dance... and fall + # + # Unfortunatelly, because of the way our domain layers work and + # the checks done in the FS store, we can't simply rename the file + # and set the location. To do that, we'd have to duplicate the logic + # of every and each of the domain factories (quota, location, etc) + # and we'd also need to hack the FS store to prevent it from raising + # a "duplication path" error. I'd rather have this task copying the + # image bits one more time than duplicating all that logic. + # + # Since I don't think this should be the definitive solution, I'm + # leaving the code below as a reference for what should happen here + # once the FS store and domain code will be able to handle this case. + # + # if file_path is None: + # image_import.set_image_data(image, self.uri, None) + # return + + # NOTE(flaper87): Don't assume the image was stored in the + # work_dir. Think in the case this path was provided by another task. + # Also, lets try to neither assume things nor create "logic" + # dependencies between this task and `_ImportToFS` + # + # base_path = os.path.dirname(file_path.split("file://")[-1]) + + # NOTE(flaper87): Hopefully just scenarios #3 and #4. I say + # hopefully because nothing prevents the user to use the same + # FS store path as a work dir + # + # image_path = os.path.join(base_path, image_id) + # + # if (base_path == CONF.glance_store.filesystem_store_datadir or + # base_path in CONF.glance_store.filesystem_store_datadirs): + # os.rename(file_path, image_path) + # + # image_import.set_image_data(image, image_path, None) + + image_import.set_image_data(image, file_path or self.uri, None) + + +class _SaveImage(task.Task): + + def __init__(self, task_id, task_type, image_repo): + self.task_id = task_id + self.task_type = task_type + self.image_repo = image_repo + super(_SaveImage, self).__init__( + name='%s-SaveImage-%s' % (task_type, task_id)) + + def execute(self, image_id): + """Transition image status to active + + :param image_id: Glance Image ID + """ + new_image = self.image_repo.get(image_id) + if new_image.status == 'saving': + # NOTE(flaper87): THIS IS WRONG! + # we should be doing atomic updates to avoid + # race conditions. This happens in other places + # too. 
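+        # Only images still in 'saving' are promoted; anything that has
+        # already moved to another state (e.g. deleted) is left untouched.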
+ new_image.status = 'active' + self.image_repo.save(new_image) + + +class _CompleteTask(task.Task): + + def __init__(self, task_id, task_type, task_repo): + self.task_id = task_id + self.task_type = task_type + self.task_repo = task_repo + super(_CompleteTask, self).__init__( + name='%s-CompleteTask-%s' % (task_type, task_id)) + + def execute(self, image_id): + """Finishing the task flow + + :param image_id: Glance Image ID + """ + task = script_utils.get_task(self.task_repo, self.task_id) + if task is None: + return + try: + task.succeed({'image_id': image_id}) + except Exception as e: + # Note: The message string contains Error in it to indicate + # in the task.message that it's a error message for the user. + + # TODO(nikhil): need to bring back save_and_reraise_exception when + # necessary + err_msg = ("Error: " + six.text_type(type(e)) + ': ' + + common_utils.exception_to_str(e)) + log_msg = err_msg + _LE("Task ID %s") % task.task_id + LOG.exception(log_msg) + + task.fail(err_msg) + finally: + self.task_repo.save(task) + + LOG.info(_LI("%(task_id)s of %(task_type)s completed") % + {'task_id': self.task_id, 'task_type': self.task_type}) + + +def _get_import_flows(**kwargs): + # NOTE(flaper87): Until we have a better infrastructure to enable + # and disable tasks plugins, hard-code the tasks we know exist, + # instead of loading everything from the namespace. This guarantees + # both, the load order of these plugins and the fact that no random + # plugins will be added/loaded until we feel comfortable with this. + # Future patches will keep using NamedExtensionManager but they'll + # rely on a config option to control this process. + extensions = named.NamedExtensionManager('daisy.flows.import', + names=['convert', + 'introspect'], + name_order=True, + invoke_on_load=True, + invoke_kwds=kwargs) + + for ext in extensions.extensions: + yield ext.obj + + +def get_flow(**kwargs): + """Return task flow + + :param task_id: Task ID + :param task_type: Type of the task + :param task_repo: Task repo + :param image_repo: Image repository used + :param image_factory: Glance Image Factory + :param uri: uri for the image file + """ + task_id = kwargs.get('task_id') + task_type = kwargs.get('task_type') + task_repo = kwargs.get('task_repo') + image_repo = kwargs.get('image_repo') + image_factory = kwargs.get('image_factory') + uri = kwargs.get('uri') + + flow = lf.Flow(task_type, retry=retry.AlwaysRevert()).add( + _CreateImage(task_id, task_type, task_repo, image_repo, image_factory)) + + import_to_store = _ImportToStore(task_id, task_type, image_repo, uri) + + try: + # NOTE(flaper87): ImportToLocal and DeleteFromLocal shouldn't be here. + # Ideally, we should have the different import flows doing this for us + # and this function should clean up duplicated tasks. For example, say + # 2 flows need to have a local copy of the image - ImportToLocal - in + # order to be able to complete the task - i.e Introspect-. In that + # case, the introspect.get_flow call should add both, ImportToLocal and + # DeleteFromLocal, to the flow and this function will reduce the + # duplicated calls to those tasks by creating a linear flow that + # ensures those are called before the other tasks. For now, I'm + # keeping them here, though. 
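+        # 'limbo' bundles _ImportToFS with whatever optional plugins
+        # register themselves below; the len(limbo) > 1 check further down
+        # decides whether the subflow is added to the main flow at all.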
+ limbo = lf.Flow(task_type).add(_ImportToFS(task_id, + task_type, + task_repo, + uri)) + + for subflow in _get_import_flows(**kwargs): + limbo.add(subflow) + + # NOTE(flaper87): We have hard-coded 2 tasks, + # if there aren't more than 2, it means that + # no subtask has been registered. + if len(limbo) > 1: + flow.add(limbo) + + # NOTE(flaper87): Until this implementation gets smarter, + # make sure ImportToStore is called *after* the imported + # flow stages. If not, the image will be set to saving state + # invalidating tasks like Introspection or Convert. + flow.add(import_to_store) + + # NOTE(flaper87): Since this is an "optional" task but required + # when `limbo` is executed, we're adding it in its own subflow + # to isolat it from the rest of the flow. + delete_flow = lf.Flow(task_type).add(_DeleteFromFS(task_id, + task_type)) + flow.add(delete_flow) + else: + flow.add(import_to_store) + except exception.BadTaskConfiguration: + # NOTE(flaper87): If something goes wrong with the load of + # import tasks, make sure we go on. + flow.add(import_to_store) + + flow.add( + _SaveImage(task_id, task_type, image_repo), + _CompleteTask(task_id, task_type, task_repo) + ) + return flow diff --git a/code/daisy/daisy/async/flows/convert.py b/code/daisy/daisy/async/flows/convert.py new file mode 100755 index 00000000..476c7124 --- /dev/null +++ b/code/daisy/daisy/async/flows/convert.py @@ -0,0 +1,94 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from oslo_concurrency import processutils as putils +from oslo_config import cfg +from taskflow.patterns import linear_flow as lf +from taskflow import task + +from daisy import i18n + +_ = i18n._ +_LI = i18n._LI +_LE = i18n._LE +_LW = i18n._LW +LOG = logging.getLogger(__name__) + +convert_task_opts = [ + cfg.StrOpt('conversion_format', + default=None, + choices=('qcow2', 'raw'), + help=_("The format to which images will be automatically " + "converted. " "Can be 'qcow2' or 'raw'.")), +] + +CONF = cfg.CONF + +# NOTE(flaper87): Registering under the taskflow_executor section +# for now. It seems a waste to have a whole section dedidcated to a +# single task with a single option. +CONF.register_opts(convert_task_opts, group='taskflow_executor') + + +class _Convert(task.Task): + + conversion_missing_warned = False + + def __init__(self, task_id, task_type, image_repo): + self.task_id = task_id + self.task_type = task_type + self.image_repo = image_repo + super(_Convert, self).__init__( + name='%s-Convert-%s' % (task_type, task_id)) + + def execute(self, image_id, file_path): + + # NOTE(flaper87): A format must be explicitly + # specified. There's no "sane" default for this + # because the dest format may work differently depending + # on the environment OpenStack is running in. 
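+        # The target format is read from the [taskflow_executor] section
+        # ('qcow2' or 'raw'); when it is unset the task warns once and
+        # becomes a no-op. Illustratively, with conversion_format=qcow2
+        # the command issued below is roughly
+        #   qemu-img convert -O qcow2 <file_path> <dest_path>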
+ conversion_format = CONF.taskflow_executor.conversion_format + if conversion_format is None: + if not _Convert.conversion_missing_warned: + msg = (_LW('The conversion format is None, please add a value ' + 'for it in the config file for this task to ' + 'work: %s') % + self.task_id) + LOG.warn(msg) + _Convert.conversion_missing_warned = True + return + + # TODO(flaper87): Check whether the image is in the desired + # format already. Probably using `qemu-img` just like the + # `Introspection` task. + dest_path = "%s.converted" + stdout, stderr = putils.trycmd('qemu-img', 'convert', '-O', + conversion_format, file_path, dest_path, + log_errors=putils.LOG_ALL_ERRORS) + + if stderr: + raise RuntimeError(stderr) + + +def get_flow(**kwargs): + task_id = kwargs.get('task_id') + task_type = kwargs.get('task_type') + image_repo = kwargs.get('image_repo') + + return lf.Flow(task_type).add( + _Convert(task_id, task_type, image_repo), + ) diff --git a/code/daisy/daisy/async/flows/introspect.py b/code/daisy/daisy/async/flows/introspect.py new file mode 100755 index 00000000..8157f7f5 --- /dev/null +++ b/code/daisy/daisy/async/flows/introspect.py @@ -0,0 +1,89 @@ +# Copyright 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import logging + +from oslo_concurrency import processutils as putils +from oslo_utils import excutils +from taskflow.patterns import linear_flow as lf + +from daisy.async import utils +from daisy import i18n + + +_LE = i18n._LE +_LI = i18n._LI +LOG = logging.getLogger(__name__) + + +class _Introspect(utils.OptionalTask): + """Taskflow to pull the embedded metadata out of image file""" + + def __init__(self, task_id, task_type, image_repo): + self.task_id = task_id + self.task_type = task_type + self.image_repo = image_repo + super(_Introspect, self).__init__( + name='%s-Introspect-%s' % (task_type, task_id)) + + def execute(self, image_id, file_path): + """Does the actual introspection + + :param image_id: Glance image ID + :param file_path: Path to the file being introspected + """ + + try: + stdout, stderr = putils.trycmd('qemu-img', 'info', + '--output=json', file_path, + log_errors=putils.LOG_ALL_ERRORS) + except OSError as exc: + # NOTE(flaper87): errno == 2 means the executable file + # was not found. For now, log an error and move forward + # until we have a better way to enable/disable optional + # tasks. 
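+            # errno 2 is ENOENT, i.e. the 'qemu-img' executable is not
+            # available on this host; only other OSErrors are logged and
+            # re-raised.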
+ if exc.errno != 2: + with excutils.save_and_reraise_exception(): + msg = (_LE('Failed to execute introspection ' + '%(task_id)s: %(exc)s') % + {'task_id': self.task_id, 'exc': exc.message}) + LOG.error(msg) + return + + if stderr: + raise RuntimeError(stderr) + + metadata = json.loads(stdout) + new_image = self.image_repo.get(image_id) + new_image.virtual_size = metadata.get('virtual-size', 0) + new_image.disk_format = metadata.get('format') + self.image_repo.save(new_image) + LOG.debug("%(task_id)s: Introspection successful: %(file)s" % + {'task_id': self.task_id, 'file': file_path}) + return new_image + + +def get_flow(**kwargs): + task_id = kwargs.get('task_id') + task_type = kwargs.get('task_type') + image_repo = kwargs.get('image_repo') + + LOG.debug("Flow: %(task_type)s with ID %(id)s on %(repo)s" % + {'task_type': task_type, 'id': task_id, 'repo': image_repo}) + + return lf.Flow(task_type).add( + _Introspect(task_id, task_type, image_repo), + ) diff --git a/code/daisy/daisy/async/taskflow_executor.py b/code/daisy/daisy/async/taskflow_executor.py new file mode 100755 index 00000000..2edadc09 --- /dev/null +++ b/code/daisy/daisy/async/taskflow_executor.py @@ -0,0 +1,131 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from stevedore import driver +from taskflow import engines +from taskflow.listeners import logging as llistener +from taskflow.types import futures +from taskflow.utils import eventlet_utils + +import daisy.async +from daisy.common.scripts import utils as script_utils +from daisy import i18n + +_ = i18n._ +_LE = i18n._LE +LOG = logging.getLogger(__name__) + +_deprecated_opt = cfg.DeprecatedOpt('eventlet_executor_pool_size', + group='task') + +taskflow_executor_opts = [ + cfg.StrOpt('engine_mode', + default='parallel', + choices=('serial', 'parallel'), + help=_("The mode in which the engine will run. " + "Can be 'serial' or 'parallel'.")), + cfg.IntOpt('max_workers', + default=10, + help=_("The number of parallel activities executed at the " + "same time by the engine. 
The value can be greater " + "than one when the engine mode is 'parallel'."), + deprecated_opts=[_deprecated_opt]) +] + + +CONF = cfg.CONF +CONF.register_opts(taskflow_executor_opts, group='taskflow_executor') + + +class TaskExecutor(daisy.async.TaskExecutor): + + def __init__(self, context, task_repo, image_repo, image_factory): + self.context = context + self.task_repo = task_repo + self.image_repo = image_repo + self.image_factory = image_factory + self.engine_conf = { + 'engine': CONF.taskflow_executor.engine_mode, + } + self.engine_kwargs = {} + if CONF.taskflow_executor.engine_mode == 'parallel': + self.engine_kwargs['max_workers'] = ( + CONF.taskflow_executor.max_workers) + super(TaskExecutor, self).__init__(context, task_repo, image_repo, + image_factory) + + @contextlib.contextmanager + def _executor(self): + if CONF.taskflow_executor.engine_mode != 'parallel': + yield None + else: + max_workers = CONF.taskflow_executor.max_workers + if eventlet_utils.EVENTLET_AVAILABLE: + yield futures.GreenThreadPoolExecutor(max_workers=max_workers) + else: + yield futures.ThreadPoolExecutor(max_workers=max_workers) + + def _get_flow(self, task): + try: + task_input = script_utils.unpack_task_input(task) + uri = script_utils.validate_location_uri( + task_input.get('import_from')) + + kwds = { + 'uri': uri, + 'task_id': task.task_id, + 'task_type': task.type, + 'context': self.context, + 'task_repo': self.task_repo, + 'image_repo': self.image_repo, + 'image_factory': self.image_factory + } + + return driver.DriverManager('daisy.flows', task.type, + invoke_on_load=True, + invoke_kwds=kwds).driver + except RuntimeError: + raise NotImplementedError() + + def _run(self, task_id, task_type): + LOG.debug('Taskflow executor picked up the execution of task ID ' + '%(task_id)s of task type ' + '%(task_type)s' % {'task_id': task_id, + 'task_type': task_type}) + + task = script_utils.get_task(self.task_repo, task_id) + if task is None: + # NOTE: This happens if task is not found in the database. In + # such cases, there is no way to update the task status so, + # it's ignored here. + return + + flow = self._get_flow(task) + + try: + with self._executor() as executor: + engine = engines.load(flow, self.engine_conf, + executor=executor, **self.engine_kwargs) + with llistener.DynamicLoggingListener(engine, log=LOG): + engine.run() + except Exception as exc: + with excutils.save_and_reraise_exception(): + LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') % + {'task_id': task_id, 'exc': exc.message}) diff --git a/code/daisy/daisy/async/utils.py b/code/daisy/daisy/async/utils.py new file mode 100755 index 00000000..d973f7d1 --- /dev/null +++ b/code/daisy/daisy/async/utils.py @@ -0,0 +1,66 @@ +# Copyright 2015 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
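+
+# NOTE: OptionalTask wraps execute() so that a failure is logged as a
+# warning instead of failing the whole flow; _Introspect in
+# daisy/async/flows/introspect.py is an in-tree user of this class.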
+ +from oslo_log import log as logging +from taskflow import task + +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_LW = i18n._LW + + +class OptionalTask(task.Task): + + def __init__(self, *args, **kwargs): + super(OptionalTask, self).__init__(*args, **kwargs) + self.execute = self._catch_all(self.execute) + + def _catch_all(self, func): + # NOTE(flaper87): Read this comment before calling the MI6 + # Here's the thing, there's no nice way to define "optional" + # tasks. That is, tasks whose failure shouldn't affect the execution + # of the flow. The only current "sane" way to do this, is by catching + # everything and logging. This seems harmless from a taskflow + # perspective but it is not. There are some issues related to this + # "workaround": + # + # - Task's states will shamelessly lie to us saying the task succeeded. + # + # - No revert procedure will be triggered, which means optional tasks, + # for now, mustn't cause any side-effects because they won't be able to + # clean them up. If these tasks depend on other task that do cause side + # effects, a task that cleans those side effects most be registered as + # well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS. + # + # - Ideally, optional tasks shouldn't `provide` new values unless they + # are part of an optional flow. Due to the decoration of the execute + # method, these tasks will need to define the provided methods at + # class level using `default_provides`. + # + # + # The taskflow team is working on improving this and on something that + # will provide the ability of defining optional tasks. For now, to lie + # ourselves we must. + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as exc: + msg = (_LW("An optional task has failed, " + "the failure was: %s") % + exc.message) + LOG.warn(msg) + return wrapper diff --git a/code/daisy/daisy/cmd/__init__.py b/code/daisy/daisy/cmd/__init__.py new file mode 100755 index 00000000..fe2ea735 --- /dev/null +++ b/code/daisy/daisy/cmd/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import sys + +import oslo_utils.strutils as strutils + +from daisy import i18n + +try: + import dns # NOQA +except ImportError: + dnspython_installed = False +else: + dnspython_installed = True + + +def fix_greendns_ipv6(): + if dnspython_installed: + # All of this is because if dnspython is present in your environment + # then eventlet monkeypatches socket.getaddrinfo() with an + # implementation which doesn't work for IPv6. What we're checking here + # is that the magic environment variable was set when the import + # happened. + nogreendns = 'EVENTLET_NO_GREENDNS' + flag = os.environ.get(nogreendns, '') + if 'eventlet' in sys.modules and not strutils.bool_from_string(flag): + msg = i18n._("It appears that the eventlet module has been " + "imported prior to setting %s='yes'. 
It is currently " + "necessary to disable eventlet.greendns " + "if using ipv6 since eventlet.greendns currently " + "breaks with ipv6 addresses. Please ensure that " + "eventlet is not imported prior to this being set.") + raise ImportError(msg % (nogreendns)) + + os.environ[nogreendns] = 'yes' + + +i18n.enable_lazy() +fix_greendns_ipv6() diff --git a/code/daisy/daisy/cmd/agent_notification.py b/code/daisy/daisy/cmd/agent_notification.py new file mode 100755 index 00000000..5575a662 --- /dev/null +++ b/code/daisy/daisy/cmd/agent_notification.py @@ -0,0 +1,30 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from daisy import listener +from daisy.openstack.common import service as os_service +from daisy import service + + +def main(): + service.prepare_service() + launcher = os_service.ProcessLauncher() + launcher.launch_service( + listener.ListenerService(), + workers=service.get_workers('listener')) + launcher.wait() + +if __name__ == "__main__": + main() diff --git a/code/daisy/daisy/cmd/api.py b/code/daisy/daisy/cmd/api.py new file mode 100755 index 00000000..fd6a8746 --- /dev/null +++ b/code/daisy/daisy/cmd/api.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance API Server +""" + +import os +import sys + +import eventlet + +from daisy.common import utils + +# Monkey patch socket, time, select, threads +eventlet.patcher.monkey_patch(all=False, socket=True, time=True, + select=True, thread=True, os=True) + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
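+# Illustratively (the path is a made-up example): when the script is run
+# from a source checkout as /opt/daisy/bin/daisy-api, possible_topdir
+# resolves to /opt/daisy and is prepended to sys.path only if
+# /opt/daisy/glance/__init__.py exists there.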
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +import glance_store +from oslo_config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import exception +from daisy.common import wsgi +from daisy import notifier +from daisy.openstack.common import systemd + +CONF = cfg.CONF +CONF.import_group("profiler", "daisy.common.wsgi") +logging.register_options(CONF) + +KNOWN_EXCEPTIONS = (RuntimeError, + exception.WorkerCreationFailure, + glance_store.exceptions.BadStoreConfiguration) + + +def fail(e): + global KNOWN_EXCEPTIONS + return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 + sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e)) + sys.exit(return_code) + + +def main(): + try: + config.parse_args() + wsgi.set_eventlet_hub() + logging.setup(CONF, 'daisy') + + if cfg.CONF.profiler.enabled: + _notifier = osprofiler.notifier.create("Messaging", + notifier.messaging, {}, + notifier.get_transport(), + "daisy", "api", + cfg.CONF.bind_host) + osprofiler.notifier.set(_notifier) + else: + osprofiler.web.disable() + + server = wsgi.Server(initialize_glance_store=True) + server.start(config.load_paste_app('daisy-api'), default_port=9292) + systemd.notify_once() + server.wait() + except KNOWN_EXCEPTIONS as e: + fail(e) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/cache_cleaner.py b/code/daisy/daisy/cmd/cache_cleaner.py new file mode 100755 index 00000000..dce0d99e --- /dev/null +++ b/code/daisy/daisy/cmd/cache_cleaner.py @@ -0,0 +1,60 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance Image Cache Invalid Cache Entry and Stalled Image cleaner + +This is meant to be run as a periodic task from cron. + +If something goes wrong while we're caching an image (for example the fetch +times out, or an exception is raised), we create an 'invalid' entry. These +entires are left around for debugging purposes. However, after some period of +time, we want to clean these up. + +Also, if an incomplete image hangs around past the image_cache_stall_time +period, we automatically sweep it up. +""" + +import os +import sys + +from oslo_log import log as logging + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from daisy.common import config +from daisy.image_cache import cleaner + +CONF = config.CONF +logging.register_options(CONF) + + +def main(): + try: + config.parse_cache_args() + logging.setup(CONF, 'glance') + + app = cleaner.Cleaner() + app.run() + except RuntimeError as e: + sys.exit("ERROR: %s" % e) diff --git a/code/daisy/daisy/cmd/cache_manage.py b/code/daisy/daisy/cmd/cache_manage.py new file mode 100755 index 00000000..8f3eb7ed --- /dev/null +++ b/code/daisy/daisy/cmd/cache_manage.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A simple cache management utility for daisy. +""" +from __future__ import print_function + +import functools +import optparse +import os +import sys +import time + +from oslo_utils import timeutils + +from daisy.common import utils + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from daisy.common import exception +import daisy.image_cache.client +from daisy.version import version_info as version + + +SUCCESS = 0 +FAILURE = 1 + + +def catch_error(action): + """Decorator to provide sensible default error handling for actions.""" + def wrap(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + ret = func(*args, **kwargs) + return SUCCESS if ret is None else ret + except exception.NotFound: + options = args[0] + print("Cache management middleware not enabled on host %s" % + options.host) + return FAILURE + except exception.Forbidden: + print("Not authorized to make this request.") + return FAILURE + except Exception as e: + options = args[0] + if options.debug: + raise + print("Failed to %s. Got error:" % action) + pieces = utils.exception_to_str(e).split('\n') + for piece in pieces: + print(piece) + return FAILURE + + return wrapper + return wrap + + +@catch_error('show cached images') +def list_cached(options, args): + """%(prog)s list-cached [options] + +List all images currently cached. + """ + client = get_client(options) + images = client.get_cached_images() + if not images: + print("No cached images.") + return SUCCESS + + print("Found %d cached images..." 
% len(images)) + + pretty_table = utils.PrettyTable() + pretty_table.add_column(36, label="ID") + pretty_table.add_column(19, label="Last Accessed (UTC)") + pretty_table.add_column(19, label="Last Modified (UTC)") + # 1 TB takes 13 characters to display: len(str(2**40)) == 13 + pretty_table.add_column(14, label="Size", just="r") + pretty_table.add_column(10, label="Hits", just="r") + + print(pretty_table.make_header()) + + for image in images: + last_modified = image['last_modified'] + last_modified = timeutils.iso8601_from_timestamp(last_modified) + + last_accessed = image['last_accessed'] + if last_accessed == 0: + last_accessed = "N/A" + else: + last_accessed = timeutils.iso8601_from_timestamp(last_accessed) + + print(pretty_table.make_row( + image['image_id'], + last_accessed, + last_modified, + image['size'], + image['hits'])) + + +@catch_error('show queued images') +def list_queued(options, args): + """%(prog)s list-queued [options] + +List all images currently queued for caching. + """ + client = get_client(options) + images = client.get_queued_images() + if not images: + print("No queued images.") + return SUCCESS + + print("Found %d queued images..." % len(images)) + + pretty_table = utils.PrettyTable() + pretty_table.add_column(36, label="ID") + + print(pretty_table.make_header()) + + for image in images: + print(pretty_table.make_row(image)) + + +@catch_error('queue the specified image for caching') +def queue_image(options, args): + """%(prog)s queue-image [options] + +Queues an image for caching +""" + if len(args) == 1: + image_id = args.pop() + else: + print("Please specify one and only ID of the image you wish to ") + print("queue from the cache as the first argument") + return FAILURE + + if (not options.force and + not user_confirm("Queue image %(image_id)s for caching?" % + {'image_id': image_id}, default=False)): + return SUCCESS + + client = get_client(options) + client.queue_image_for_caching(image_id) + + if options.verbose: + print("Queued image %(image_id)s for caching" % + {'image_id': image_id}) + + return SUCCESS + + +@catch_error('delete the specified cached image') +def delete_cached_image(options, args): + """ +%(prog)s delete-cached-image [options] + +Deletes an image from the cache + """ + if len(args) == 1: + image_id = args.pop() + else: + print("Please specify one and only ID of the image you wish to ") + print("delete from the cache as the first argument") + return FAILURE + + if (not options.force and + not user_confirm("Delete cached image %(image_id)s?" % + {'image_id': image_id}, default=False)): + return SUCCESS + + client = get_client(options) + client.delete_cached_image(image_id) + + if options.verbose: + print("Deleted cached image %(image_id)s" % {'image_id': image_id}) + + return SUCCESS + + +@catch_error('Delete all cached images') +def delete_all_cached_images(options, args): + """%(prog)s delete-all-cached-images [options] + +Remove all images from the cache. 
+ """ + if (not options.force and + not user_confirm("Delete all cached images?", default=False)): + return SUCCESS + + client = get_client(options) + num_deleted = client.delete_all_cached_images() + + if options.verbose: + print("Deleted %(num_deleted)s cached images" % + {'num_deleted': num_deleted}) + + return SUCCESS + + +@catch_error('delete the specified queued image') +def delete_queued_image(options, args): + """ +%(prog)s delete-queued-image [options] + +Deletes an image from the cache + """ + if len(args) == 1: + image_id = args.pop() + else: + print("Please specify one and only ID of the image you wish to ") + print("delete from the cache as the first argument") + return FAILURE + + if (not options.force and + not user_confirm("Delete queued image %(image_id)s?" % + {'image_id': image_id}, default=False)): + return SUCCESS + + client = get_client(options) + client.delete_queued_image(image_id) + + if options.verbose: + print("Deleted queued image %(image_id)s" % {'image_id': image_id}) + + return SUCCESS + + +@catch_error('Delete all queued images') +def delete_all_queued_images(options, args): + """%(prog)s delete-all-queued-images [options] + +Remove all images from the cache queue. + """ + if (not options.force and + not user_confirm("Delete all queued images?", default=False)): + return SUCCESS + + client = get_client(options) + num_deleted = client.delete_all_queued_images() + + if options.verbose: + print("Deleted %(num_deleted)s queued images" % + {'num_deleted': num_deleted}) + + return SUCCESS + + +def get_client(options): + """Return a new client object to a Glance server. + + specified by the --host and --port options + supplied to the CLI + """ + return daisy.image_cache.client.get_client( + host=options.host, + port=options.port, + username=options.os_username, + password=options.os_password, + tenant=options.os_tenant_name, + auth_url=options.os_auth_url, + auth_strategy=options.os_auth_strategy, + auth_token=options.os_auth_token, + region=options.os_region_name, + insecure=options.insecure) + + +def env(*vars, **kwargs): + """Search for the first defined of possibly many env vars. + + Returns the first environment variable defined in vars, or + returns the default defined in kwargs. + """ + for v in vars: + value = os.environ.get(v, None) + if value: + return value + return kwargs.get('default', '') + + +def create_options(parser): + """Set up the CLI and config-file options that may be + parsed and program commands. + + :param parser: The option parser + """ + parser.add_option('-v', '--verbose', default=False, action="store_true", + help="Print more verbose output.") + parser.add_option('-d', '--debug', default=False, action="store_true", + help="Print debugging output.") + parser.add_option('-H', '--host', metavar="ADDRESS", default="0.0.0.0", + help="Address of Glance API host. " + "Default: %default.") + parser.add_option('-p', '--port', dest="port", metavar="PORT", + type=int, default=9292, + help="Port the Glance API host listens on. " + "Default: %default.") + parser.add_option('-k', '--insecure', dest="insecure", + default=False, action="store_true", + help="Explicitly allow glance to perform \"insecure\" " + "SSL (https) requests. The server's certificate will " + "not be verified against any certificate authorities. 
" + "This option should be used with caution.") + parser.add_option('-f', '--force', dest="force", metavar="FORCE", + default=False, action="store_true", + help="Prevent select actions from requesting " + "user confirmation.") + + parser.add_option('--os-auth-token', + dest='os_auth_token', + default=env('OS_AUTH_TOKEN'), + help='Defaults to env[OS_AUTH_TOKEN].') + parser.add_option('-A', '--os_auth_token', '--auth_token', + dest='os_auth_token', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-username', + dest='os_username', + default=env('OS_USERNAME'), + help='Defaults to env[OS_USERNAME].') + parser.add_option('-I', '--os_username', + dest='os_username', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-password', + dest='os_password', + default=env('OS_PASSWORD'), + help='Defaults to env[OS_PASSWORD].') + parser.add_option('-K', '--os_password', + dest='os_password', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-region-name', + dest='os_region_name', + default=env('OS_REGION_NAME'), + help='Defaults to env[OS_REGION_NAME].') + parser.add_option('-R', '--os_region_name', + dest='os_region_name', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-tenant-id', + dest='os_tenant_id', + default=env('OS_TENANT_ID'), + help='Defaults to env[OS_TENANT_ID].') + parser.add_option('--os_tenant_id', + dest='os_tenant_id', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-tenant-name', + dest='os_tenant_name', + default=env('OS_TENANT_NAME'), + help='Defaults to env[OS_TENANT_NAME].') + parser.add_option('-T', '--os_tenant_name', + dest='os_tenant_name', + help=optparse.SUPPRESS_HELP) + + parser.add_option('--os-auth-url', + default=env('OS_AUTH_URL'), + help='Defaults to env[OS_AUTH_URL].') + parser.add_option('-N', '--os_auth_url', + dest='os_auth_url', + help=optparse.SUPPRESS_HELP) + + parser.add_option('-S', '--os_auth_strategy', dest="os_auth_strategy", + metavar="STRATEGY", + help="Authentication strategy (keystone or noauth).") + + +def parse_options(parser, cli_args): + """ + Returns the parsed CLI options, command to run and its arguments, merged + with any same-named options found in a configuration file + + :param parser: The option parser + """ + if not cli_args: + cli_args.append('-h') # Show options in usage output... 
+ + (options, args) = parser.parse_args(cli_args) + + # HACK(sirp): Make the parser available to the print_help method + # print_help is a command, so it only accepts (options, args); we could + # one-off have it take (parser, options, args), however, for now, I think + # this little hack will suffice + options.__parser = parser + + if not args: + parser.print_usage() + sys.exit(0) + + command_name = args.pop(0) + command = lookup_command(parser, command_name) + + return (options, command, args) + + +def print_help(options, args): + """ + Print help specific to a command + """ + if len(args) != 1: + sys.exit("Please specify a command") + + parser = options.__parser + command_name = args.pop() + command = lookup_command(parser, command_name) + + print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])}) + + +def lookup_command(parser, command_name): + BASE_COMMANDS = {'help': print_help} + + CACHE_COMMANDS = { + 'list-cached': list_cached, + 'list-queued': list_queued, + 'queue-image': queue_image, + 'delete-cached-image': delete_cached_image, + 'delete-all-cached-images': delete_all_cached_images, + 'delete-queued-image': delete_queued_image, + 'delete-all-queued-images': delete_all_queued_images, + } + + commands = {} + for command_set in (BASE_COMMANDS, CACHE_COMMANDS): + commands.update(command_set) + + try: + command = commands[command_name] + except KeyError: + parser.print_usage() + sys.exit("Unknown command: %(cmd_name)s" % {'cmd_name': command_name}) + + return command + + +def user_confirm(prompt, default=False): + """Yes/No question dialog with user. + + :param prompt: question/statement to present to user (string) + :param default: boolean value to return if empty string + is received as response to prompt + + """ + if default: + prompt_default = "[Y/n]" + else: + prompt_default = "[y/N]" + + answer = raw_input("%s %s " % (prompt, prompt_default)) + + if answer == "": + return default + else: + return answer.lower() in ("yes", "y") + + +def main(): + usage = """ +%prog [options] [args] + +Commands: + + help Output help for one of the commands below + + list-cached List all images currently cached + + list-queued List all images currently queued for caching + + queue-image Queue an image for caching + + delete-cached-image Purges an image from the cache + + delete-all-cached-images Removes all images from the cache + + delete-queued-image Deletes an image from the cache queue + + delete-all-queued-images Deletes all images from the cache queue +""" + + version_string = version.cached_version_string() + oparser = optparse.OptionParser(version=version_string, + usage=usage.strip()) + create_options(oparser) + (options, command, args) = parse_options(oparser, sys.argv[1:]) + + try: + start_time = time.time() + result = command(options, args) + end_time = time.time() + if options.verbose: + print("Completed in %-0.4f sec." % (end_time - start_time)) + sys.exit(result) + except (RuntimeError, NotImplementedError) as e: + print("ERROR: ", e) + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/cache_prefetcher.py b/code/daisy/daisy/cmd/cache_prefetcher.py new file mode 100755 index 00000000..a8aad985 --- /dev/null +++ b/code/daisy/daisy/cmd/cache_prefetcher.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance Image Cache Pre-fetcher + +This is meant to be run from the command line after queueing +images to be pretched. +""" + +import os +import sys + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +import glance_store +from oslo_log import log as logging + +from daisy.common import config +from daisy.image_cache import prefetcher + +CONF = config.CONF +logging.register_options(CONF) + + +def main(): + try: + config.parse_cache_args() + logging.setup(CONF, 'glance') + + glance_store.register_opts(config.CONF) + glance_store.create_stores(config.CONF) + glance_store.verify_default_store() + + app = prefetcher.Prefetcher() + app.run() + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/cache_pruner.py b/code/daisy/daisy/cmd/cache_pruner.py new file mode 100755 index 00000000..b2457323 --- /dev/null +++ b/code/daisy/daisy/cmd/cache_pruner.py @@ -0,0 +1,52 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance Image Cache Pruner + +This is meant to be run as a periodic task, perhaps every half-hour. +""" + +import os +import sys + +from oslo_log import log as logging + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from daisy.common import config +from daisy.image_cache import pruner + +CONF = config.CONF +logging.register_options(CONF) + + +def main(): + try: + config.parse_cache_args() + logging.setup(CONF, 'glance') + + app = pruner.Pruner() + app.run() + except RuntimeError as e: + sys.exit("ERROR: %s" % e) diff --git a/code/daisy/daisy/cmd/control.py b/code/daisy/daisy/cmd/control.py new file mode 100755 index 00000000..e4c8bed6 --- /dev/null +++ b/code/daisy/daisy/cmd/control.py @@ -0,0 +1,412 @@ +# Copyright (c) 2011 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Helper script for starting/stopping/reloading Glance server programs. +Thanks for some of the code, Swifties ;) +""" + +from __future__ import print_function +from __future__ import with_statement + +import argparse +import fcntl +import os +import resource +import signal +import subprocess +import sys +import tempfile +import time + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from oslo_config import cfg +from oslo_utils import units +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range + +from daisy.common import config +from daisy import i18n + +_ = i18n._ + +CONF = cfg.CONF + +ALL_COMMANDS = ['start', 'status', 'stop', 'shutdown', 'restart', + 'reload', 'force-reload'] +ALL_SERVERS = ['api', 'registry', 'scrubber'] +RELOAD_SERVERS = ['glance-api', 'glance-registry'] +GRACEFUL_SHUTDOWN_SERVERS = ['glance-api', 'glance-registry', + 'glance-scrubber'] +MAX_DESCRIPTORS = 32768 +MAX_MEMORY = 2 * units.Gi # 2 GB +USAGE = """%(prog)s [options] [CONFPATH] + +Where is one of: + + all, {0} + +And command is one of: + + {1} + +And CONFPATH is the optional configuration file to use.""".format( + ', '.join(ALL_SERVERS), ', '.join(ALL_COMMANDS)) + +exitcode = 0 + + +def gated_by(predicate): + def wrap(f): + def wrapped_f(*args): + if predicate: + return f(*args) + else: + return None + return wrapped_f + return wrap + + +def pid_files(server, pid_file): + pid_files = [] + if pid_file: + if os.path.exists(os.path.abspath(pid_file)): + pid_files = [os.path.abspath(pid_file)] + else: + if os.path.exists('/var/run/glance/%s.pid' % server): + pid_files = ['/var/run/glance/%s.pid' % server] + for pid_file in pid_files: + pid = int(open(pid_file).read().strip()) + yield pid_file, pid + + +def do_start(verb, pid_file, server, args): + if verb != 'Respawn' and pid_file == CONF.pid_file: + for pid_file, pid in pid_files(server, pid_file): + if os.path.exists('/proc/%s' % pid): + print(_("%(serv)s appears to already be running: %(pid)s") % + {'serv': server, 'pid': pid_file}) + return + else: + print(_("Removing stale pid file %s") % pid_file) + os.unlink(pid_file) + + try: + resource.setrlimit(resource.RLIMIT_NOFILE, + (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) + resource.setrlimit(resource.RLIMIT_DATA, + (MAX_MEMORY, MAX_MEMORY)) + except ValueError: + print(_('Unable to increase file descriptor limit. 
' + 'Running as non-root?')) + os.environ['PYTHON_EGG_CACHE'] = '/tmp' + + def write_pid_file(pid_file, pid): + with open(pid_file, 'w') as fp: + fp.write('%d\n' % pid) + + def redirect_to_null(fds): + with open(os.devnull, 'r+b') as nullfile: + for desc in fds: # close fds + try: + os.dup2(nullfile.fileno(), desc) + except OSError: + pass + + def redirect_to_syslog(fds, server): + log_cmd = 'logger' + log_cmd_params = '-t "%s[%d]"' % (server, os.getpid()) + process = subprocess.Popen([log_cmd, log_cmd_params], + stdin=subprocess.PIPE) + for desc in fds: # pipe to logger command + try: + os.dup2(process.stdin.fileno(), desc) + except OSError: + pass + + def redirect_stdio(server, capture_output): + input = [sys.stdin.fileno()] + output = [sys.stdout.fileno(), sys.stderr.fileno()] + + redirect_to_null(input) + if capture_output: + redirect_to_syslog(output, server) + else: + redirect_to_null(output) + + @gated_by(CONF.capture_output) + def close_stdio_on_exec(): + fds = [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()] + for desc in fds: # set close on exec flag + fcntl.fcntl(desc, fcntl.F_SETFD, fcntl.FD_CLOEXEC) + + def launch(pid_file, conf_file=None, capture_output=False, await_time=0): + args = [server] + if conf_file: + args += ['--config-file', conf_file] + msg = (_('%(verb)sing %(serv)s with %(conf)s') % + {'verb': verb, 'serv': server, 'conf': conf_file}) + else: + msg = (_('%(verb)sing %(serv)s') % {'verb': verb, 'serv': server}) + print(msg) + + close_stdio_on_exec() + + pid = os.fork() + if pid == 0: + os.setsid() + redirect_stdio(server, capture_output) + try: + os.execlp('%s' % server, *args) + except OSError as e: + msg = (_('unable to launch %(serv)s. Got error: %(e)s') % + {'serv': server, 'e': e}) + sys.exit(msg) + sys.exit(0) + else: + write_pid_file(pid_file, pid) + await_child(pid, await_time) + return pid + + @gated_by(CONF.await_child) + def await_child(pid, await_time): + bail_time = time.time() + await_time + while time.time() < bail_time: + reported_pid, status = os.waitpid(pid, os.WNOHANG) + if reported_pid == pid: + global exitcode + exitcode = os.WEXITSTATUS(status) + break + time.sleep(0.05) + + conf_file = None + if args and os.path.exists(args[0]): + conf_file = os.path.abspath(os.path.expanduser(args[0])) + + return launch(pid_file, conf_file, CONF.capture_output, CONF.await_child) + + +def do_check_status(pid_file, server): + if os.path.exists(pid_file): + with open(pid_file, 'r') as pidfile: + pid = pidfile.read().strip() + print(_("%(serv)s (pid %(pid)s) is running...") % + {'serv': server, 'pid': pid}) + else: + print(_("%s is stopped") % server) + + +def get_pid_file(server, pid_file): + pid_file = (os.path.abspath(pid_file) if pid_file else + '/var/run/glance/%s.pid' % server) + dir, file = os.path.split(pid_file) + + if not os.path.exists(dir): + try: + os.makedirs(dir) + except OSError: + pass + + if not os.access(dir, os.W_OK): + fallback = os.path.join(tempfile.mkdtemp(), '%s.pid' % server) + msg = (_('Unable to create pid file %(pid)s. 
Running as non-root?\n' + 'Falling back to a temp file, you can stop %(service)s ' + 'service using:\n' + ' %(file)s %(server)s stop --pid-file %(fb)s') % + {'pid': pid_file, + 'service': server, + 'file': __file__, + 'server': server, + 'fb': fallback}) + print(msg) + pid_file = fallback + + return pid_file + + +def do_reload(pid_file, server): + if server not in RELOAD_SERVERS: + msg = (_('Reload of %(serv)s not supported') % {'serv': server}) + sys.exit(msg) + + pid = None + if os.path.exists(pid_file): + with open(pid_file, 'r') as pidfile: + pid = int(pidfile.read().strip()) + else: + msg = (_('Server %(serv)s is stopped') % {'serv': server}) + sys.exit(msg) + + sig = signal.SIGHUP + try: + print(_('Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)') + % {'serv': server, 'pid': pid, 'sig': sig}) + os.kill(pid, sig) + except OSError: + print(_("Process %d not running") % pid) + + +def do_stop(server, args, graceful=False): + if graceful and server in GRACEFUL_SHUTDOWN_SERVERS: + sig = signal.SIGHUP + else: + sig = signal.SIGTERM + + did_anything = False + pfiles = pid_files(server, CONF.pid_file) + for pid_file, pid in pfiles: + did_anything = True + try: + os.unlink(pid_file) + except OSError: + pass + try: + print(_('Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)') + % {'serv': server, 'pid': pid, 'sig': sig}) + os.kill(pid, sig) + except OSError: + print(_("Process %d not running") % pid) + for pid_file, pid in pfiles: + for _junk in range(150): # 15 seconds + if not os.path.exists('/proc/%s' % pid): + break + time.sleep(0.1) + else: + print(_('Waited 15 seconds for pid %(pid)s (%(file)s) to die;' + ' giving up') % {'pid': pid, 'file': pid_file}) + if not did_anything: + print(_('%s is already stopped') % server) + + +def add_command_parsers(subparsers): + cmd_parser = argparse.ArgumentParser(add_help=False) + cmd_subparsers = cmd_parser.add_subparsers(dest='command') + for cmd in ALL_COMMANDS: + parser = cmd_subparsers.add_parser(cmd) + parser.add_argument('args', nargs=argparse.REMAINDER) + + for server in ALL_SERVERS: + full_name = 'glance-' + server + + parser = subparsers.add_parser(server, parents=[cmd_parser]) + parser.set_defaults(servers=[full_name]) + + parser = subparsers.add_parser(full_name, parents=[cmd_parser]) + parser.set_defaults(servers=[full_name]) + + parser = subparsers.add_parser('all', parents=[cmd_parser]) + parser.set_defaults(servers=['glance-' + s for s in ALL_SERVERS]) + + +def main(): + global exitcode + + opts = [ + cfg.SubCommandOpt('server', + title='Server types', + help='Available server types', + handler=add_command_parsers), + cfg.StrOpt('pid-file', + metavar='PATH', + help='File to use as pid file. 
Default: ' + '/var/run/glance/$server.pid.'), + cfg.IntOpt('await-child', + metavar='DELAY', + default=0, + help='Period to wait for service death ' + 'in order to report exit code ' + '(default is to not wait at all).'), + cfg.BoolOpt('capture-output', + default=False, + help='Capture stdout/err in syslog ' + 'instead of discarding it.'), + cfg.BoolOpt('respawn', + default=False, + help='Restart service on unexpected death.'), + ] + CONF.register_cli_opts(opts) + + config.parse_args(usage=USAGE) + + @gated_by(CONF.await_child) + @gated_by(CONF.respawn) + def mutually_exclusive(): + sys.stderr.write('--await-child and --respawn are mutually exclusive') + sys.exit(1) + + mutually_exclusive() + + @gated_by(CONF.respawn) + def anticipate_respawn(children): + while children: + pid, status = os.wait() + if pid in children: + (pid_file, server, args) = children.pop(pid) + running = os.path.exists(pid_file) + one_second_ago = time.time() - 1 + bouncing = (running and + os.path.getmtime(pid_file) >= one_second_ago) + if running and not bouncing: + args = (pid_file, server, args) + new_pid = do_start('Respawn', *args) + children[new_pid] = args + else: + rsn = 'bouncing' if bouncing else 'deliberately stopped' + print(_('Suppressed respawn as %(serv)s was %(rsn)s.') + % {'serv': server, 'rsn': rsn}) + + if CONF.server.command == 'start': + children = {} + for server in CONF.server.servers: + pid_file = get_pid_file(server, CONF.pid_file) + args = (pid_file, server, CONF.server.args) + pid = do_start('Start', *args) + children[pid] = args + + anticipate_respawn(children) + + if CONF.server.command == 'status': + for server in CONF.server.servers: + pid_file = get_pid_file(server, CONF.pid_file) + do_check_status(pid_file, server) + + if CONF.server.command == 'stop': + for server in CONF.server.servers: + do_stop(server, CONF.server.args) + + if CONF.server.command == 'shutdown': + for server in CONF.server.servers: + do_stop(server, CONF.server.args, graceful=True) + + if CONF.server.command == 'restart': + for server in CONF.server.servers: + do_stop(server, CONF.server.args) + for server in CONF.server.servers: + pid_file = get_pid_file(server, CONF.pid_file) + do_start('Restart', pid_file, server, CONF.server.args) + + if CONF.server.command in ('reload', 'force-reload'): + for server in CONF.server.servers: + pid_file = get_pid_file(server, CONF.pid_file) + do_reload(pid_file, server) + + sys.exit(exitcode) diff --git a/code/daisy/daisy/cmd/index.py b/code/daisy/daisy/cmd/index.py new file mode 100755 index 00000000..02dee658 --- /dev/null +++ b/code/daisy/daisy/cmd/index.py @@ -0,0 +1,52 @@ +# Copyright 2015 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
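+
+# NOTE: main() below simply walks every plugin registered under the
+# 'daisy.search.index_backend' stevedore namespace and calls its setup()
+# hook.  A minimal, purely illustrative entry-point registration for such a
+# plugin (the plugin name and module path are assumptions, not part of this
+# change) would look like:
+#
+#     [entry_points]
+#     daisy.search.index_backend =
+#         image = daisy.search.plugins.image:ImageIndex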
+ +import sys + +from oslo_config import cfg +from oslo_log import log as logging +import stevedore + +from daisy.common import config +from daisy import i18n + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) +_LE = i18n._LE + + +def main(): + try: + logging.register_options(CONF) + cfg_files = cfg.find_config_files(project='glance', + prog='glance-api') + cfg_files.extend(cfg.find_config_files(project='glance', + prog='glance-search')) + config.parse_args(default_config_files=cfg_files) + logging.setup(CONF, 'glance') + + namespace = 'daisy.search.index_backend' + ext_manager = stevedore.extension.ExtensionManager( + namespace, invoke_on_load=True) + for ext in ext_manager.extensions: + try: + ext.obj.setup() + except Exception as e: + LOG.error(_LE("Failed to setup index extension " + "%(ext)s: %(e)s") % {'ext': ext.name, + 'e': e}) + except RuntimeError as e: + sys.exit("ERROR: %s" % e) diff --git a/code/daisy/daisy/cmd/manage.py b/code/daisy/daisy/cmd/manage.py new file mode 100755 index 00000000..f630fb9d --- /dev/null +++ b/code/daisy/daisy/cmd/manage.py @@ -0,0 +1,324 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance Management Utility +""" + +from __future__ import print_function + +# FIXME(sirp): When we have glance-admin we can consider merging this into it +# Perhaps for consistency with Nova, we would then rename glance-admin -> +# glance-manage (or the other way around) + +import os +import sys + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from oslo_config import cfg +from oslo_db.sqlalchemy import migration +from oslo_log import log as logging +from oslo_utils import encodeutils +import six + +from daisy.common import config +from daisy.common import exception +from daisy.common import utils +from daisy.db import migration as db_migration +from daisy.db.sqlalchemy import api as db_api +from daisy.db.sqlalchemy import metadata +from daisy import i18n + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) +_LW = i18n._LW + + +# Decorators for actions +def args(*args, **kwargs): + def _decorator(func): + func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) + return func + return _decorator + + +class DbCommands(object): + """Class for managing the db""" + + def __init__(self): + pass + + def version(self): + """Print database's current migration level""" + print(migration.db_version(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + db_migration.INIT_VERSION)) + + @args('--version', metavar='', help='Database version') + def upgrade(self, version=None): + """Upgrade the database's migration level""" + migration.db_sync(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + version) + + @args('--version', metavar='', help='Database version') + def downgrade(self, version=None): + """Downgrade the database's migration level""" + migration.db_sync(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + version) + + @args('--version', metavar='', help='Database version') + def version_control(self, version=None): + """Place a database under migration control""" + migration.db_version_control(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + version) + + @args('--version', metavar='', help='Database version') + @args('--current_version', metavar='', + help='Current Database version') + def sync(self, version=None, current_version=None): + """ + Place a database under migration control and upgrade/downgrade it, + creating first if necessary. + """ + if current_version not in (None, 'None'): + migration.db_version_control(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + version=current_version) + + migration.db_sync(db_api.get_engine(), + db_migration.MIGRATE_REPO_PATH, + version) + + @args('--path', metavar='', help='Path to the directory or file ' + 'where json metadata is stored') + @args('--merge', action='store_true', + help='Merge files with data that is in the database. By default it ' + 'prefers existing data over new. This logic can be changed by ' + 'combining --merge option with one of these two options: ' + '--prefer_new or --overwrite.') + @args('--prefer_new', action='store_true', + help='Prefer new metadata over existing. Existing metadata ' + 'might be overwritten. Needs to be combined with --merge ' + 'option.') + @args('--overwrite', action='store_true', + help='Drop and rewrite metadata. 
Needs to be combined with --merge ' + 'option') + def load_metadefs(self, path=None, merge=False, + prefer_new=False, overwrite=False): + """Load metadefinition json files to database""" + metadata.db_load_metadefs(db_api.get_engine(), path, merge, + prefer_new, overwrite) + + def unload_metadefs(self): + """Unload metadefinitions from database""" + metadata.db_unload_metadefs(db_api.get_engine()) + + @args('--path', metavar='', help='Path to the directory where ' + 'json metadata files should be ' + 'saved.') + def export_metadefs(self, path=None): + """Export metadefinitions data from database to files""" + metadata.db_export_metadefs(db_api.get_engine(), + path) + + +class DbLegacyCommands(object): + """Class for managing the db using legacy commands""" + + def __init__(self, command_object): + self.command_object = command_object + + def version(self): + self.command_object.version() + + def upgrade(self, version=None): + self.command_object.upgrade(CONF.command.version) + + def downgrade(self, version=None): + self.command_object.downgrade(CONF.command.version) + + def version_control(self, version=None): + self.command_object.version_control(CONF.command.version) + + def sync(self, version=None, current_version=None): + self.command_object.sync(CONF.command.version, + CONF.command.current_version) + + def load_metadefs(self, path=None, merge=False, + prefer_new=False, overwrite=False): + self.command_object.load_metadefs(CONF.command.path, + CONF.command.merge, + CONF.command.prefer_new, + CONF.command.overwrite) + + def unload_metadefs(self): + self.command_object.unload_metadefs() + + def export_metadefs(self, path=None): + self.command_object.export_metadefs(CONF.command.path) + + +def add_legacy_command_parsers(command_object, subparsers): + + legacy_command_object = DbLegacyCommands(command_object) + + parser = subparsers.add_parser('db_version') + parser.set_defaults(action_fn=legacy_command_object.version) + parser.set_defaults(action='db_version') + + parser = subparsers.add_parser('db_upgrade') + parser.set_defaults(action_fn=legacy_command_object.upgrade) + parser.add_argument('version', nargs='?') + parser.set_defaults(action='db_upgrade') + + parser = subparsers.add_parser('db_downgrade') + parser.set_defaults(action_fn=legacy_command_object.downgrade) + parser.add_argument('version') + parser.set_defaults(action='db_downgrade') + + parser = subparsers.add_parser('db_version_control') + parser.set_defaults(action_fn=legacy_command_object.version_control) + parser.add_argument('version', nargs='?') + parser.set_defaults(action='db_version_control') + + parser = subparsers.add_parser('db_sync') + parser.set_defaults(action_fn=legacy_command_object.sync) + parser.add_argument('version', nargs='?') + parser.add_argument('current_version', nargs='?') + parser.set_defaults(action='db_sync') + + parser = subparsers.add_parser('db_load_metadefs') + parser.set_defaults(action_fn=legacy_command_object.load_metadefs) + parser.add_argument('path', nargs='?') + parser.add_argument('merge', nargs='?') + parser.add_argument('prefer_new', nargs='?') + parser.add_argument('overwrite', nargs='?') + parser.set_defaults(action='db_load_metadefs') + + parser = subparsers.add_parser('db_unload_metadefs') + parser.set_defaults(action_fn=legacy_command_object.unload_metadefs) + parser.set_defaults(action='db_unload_metadefs') + + parser = subparsers.add_parser('db_export_metadefs') + parser.set_defaults(action_fn=legacy_command_object.export_metadefs) + parser.add_argument('path', nargs='?') + 
parser.set_defaults(action='db_export_metadefs') + + +def add_command_parsers(subparsers): + command_object = DbCommands() + + parser = subparsers.add_parser('db') + parser.set_defaults(command_object=command_object) + + category_subparsers = parser.add_subparsers(dest='action') + + for (action, action_fn) in methods_of(command_object): + parser = category_subparsers.add_parser(action) + + action_kwargs = [] + for args, kwargs in getattr(action_fn, 'args', []): + # FIXME(basha): hack to assume dest is the arg name without + # the leading hyphens if no dest is supplied + kwargs.setdefault('dest', args[0][2:]) + if kwargs['dest'].startswith('action_kwarg_'): + action_kwargs.append( + kwargs['dest'][len('action_kwarg_'):]) + else: + action_kwargs.append(kwargs['dest']) + kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] + + parser.add_argument(*args, **kwargs) + + parser.set_defaults(action_fn=action_fn) + parser.set_defaults(action_kwargs=action_kwargs) + + parser.add_argument('action_args', nargs='*') + + add_legacy_command_parsers(command_object, subparsers) + + +command_opt = cfg.SubCommandOpt('command', + title='Commands', + help='Available commands', + handler=add_command_parsers) + + +def methods_of(obj): + """Get all callable methods of an object that don't start with underscore + + returns a list of tuples of the form (method_name, method) + """ + result = [] + for i in dir(obj): + if callable(getattr(obj, i)) and not i.startswith('_'): + result.append((i, getattr(obj, i))) + return result + + +def main(): + CONF.register_cli_opt(command_opt) + try: + logging.register_options(CONF) + cfg_files = cfg.find_config_files(project='daisy', + prog='daisy-registry') + cfg_files.extend(cfg.find_config_files(project='daisy', + prog='daisy-api')) + cfg_files.extend(cfg.find_config_files(project='daisy', + prog='daisy-manage')) + cfg_files.extend(cfg.find_config_files(project='daisy', + prog='daisy-orchestration')) + config.parse_args(default_config_files=cfg_files, + usage="%(prog)s [options] ") + logging.setup(CONF, 'daisy') + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + try: + if CONF.command.action.startswith('db'): + return CONF.command.action_fn() + else: + func_kwargs = {} + for k in CONF.command.action_kwargs: + v = getattr(CONF.command, 'action_kwarg_' + k) + if v is None: + continue + if isinstance(v, six.string_types): + v = encodeutils.safe_decode(v) + func_kwargs[k] = v + func_args = [encodeutils.safe_decode(arg) + for arg in CONF.command.action_args] + return CONF.command.action_fn(*func_args, **func_kwargs) + except exception.DaisyException as e: + sys.exit("ERROR: %s" % utils.exception_to_str(e)) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/orchestration.py b/code/daisy/daisy/cmd/orchestration.py new file mode 100755 index 00000000..3cdd9a67 --- /dev/null +++ b/code/daisy/daisy/cmd/orchestration.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Reference implementation server for Daisy orchestration
+"""
+
+import os
+import sys
+
+import eventlet
+
+# Monkey patch socket and time
+eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
+
+# If ../glance/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                   os.pardir,
+                                   os.pardir))
+if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')):
+    sys.path.insert(0, possible_topdir)
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import osprofiler.notifier
+import osprofiler.web
+from daisy.common import exception
+from daisy.common import config
+from daisy.common import utils
+from daisy.common import wsgi
+from daisy import notifier
+from daisy.openstack.common import systemd
+from daisy.openstack.common import loopingcall
+from daisy.orchestration.manager import OrchestrationManager
+
+CONF = cfg.CONF
+scale_opts = [
+    cfg.IntOpt('auto_scale_interval', default=60,
+               help='Interval in seconds between two successive checks of '
+                    'the auto-scale status.'),
+]
+CONF.register_opts(scale_opts, group='orchestration')
+logging.register_options(CONF)
+
+
+def fail(returncode, e):
+    sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e))
+    sys.exit(returncode)
+
+
+def main():
+    try:
+        config.parse_args()
+        logging.setup(CONF, 'daisy')
+        timer = loopingcall.FixedIntervalLoopingCall(
+            OrchestrationManager.find_auto_scale_cluster)
+        timer.start(float(CONF.orchestration.auto_scale_interval)).wait()
+    except exception.WorkerCreationFailure as e:
+        fail(2, e)
+    except RuntimeError as e:
+        fail(1, e)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/code/daisy/daisy/cmd/registry.py b/code/daisy/daisy/cmd/registry.py
new file mode 100755
index 00000000..4cfec19d
--- /dev/null
+++ b/code/daisy/daisy/cmd/registry.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Reference implementation server for Daisy Registry
+"""
+
+import os
+import sys
+
+import eventlet
+
+# Monkey patch socket and time
+eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True)
+
+# If ../glance/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'daisy', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from oslo_config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import utils +from daisy.common import wsgi +from daisy import notifier +from daisy.openstack.common import systemd + +CONF = cfg.CONF +CONF.import_group("profiler", "daisy.common.wsgi") +logging.register_options(CONF) + + +def main(): + try: + config.parse_args() + wsgi.set_eventlet_hub() + logging.setup(CONF, 'daisy') + + if cfg.CONF.profiler.enabled: + _notifier = osprofiler.notifier.create("Messaging", + notifier.messaging, {}, + notifier.get_transport(), + "daisy", "registry", + cfg.CONF.bind_host) + osprofiler.notifier.set(_notifier) + + else: + osprofiler.web.disable() + + server = wsgi.Server() + server.start(config.load_paste_app('daisy-registry'), + default_port=9191) + systemd.notify_once() + server.wait() + except RuntimeError as e: + sys.exit("ERROR: %s" % utils.exception_to_str(e)) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/replicator.py b/code/daisy/daisy/cmd/replicator.py new file mode 100755 index 00000000..19cb2323 --- /dev/null +++ b/code/daisy/daisy/cmd/replicator.py @@ -0,0 +1,725 @@ +#!/usr/bin/env python + +# Copyright 2012 Michael Still and Canonical Inc +# Copyright 2014 SoftLayer Technologies, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import httplib +import os +import sys + +from oslo.config import cfg +from oslo.serialization import jsonutils +from oslo_log import log as logging +import six.moves.urllib.parse as urlparse +from webob import exc + +from daisy.common import config +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI +_LE = i18n._LE +_LW = i18n._LW + + +# NOTE: positional arguments will be parsed before until +# this bug is corrected https://bugs.launchpad.net/oslo.config/+bug/1392428 +cli_opts = [ + cfg.IntOpt('chunksize', + short='c', + default=65536, + help="Amount of data to transfer per HTTP write."), + cfg.StrOpt('dontreplicate', + short='D', + default=('created_at date deleted_at location updated_at'), + help="List of fields to not replicate."), + cfg.BoolOpt('metaonly', + short='m', + default=False, + help="Only replicate metadata, not images."), + cfg.StrOpt('token', + short='t', + default='', + help=("Pass in your authentication token if you have " + "one. If you use this option the same token is " + "used for both the master and the slave.")), + cfg.StrOpt('mastertoken', + short='M', + default='', + help=("Pass in your authentication token if you have " + "one. 
This is the token used for the master.")), + cfg.StrOpt('slavetoken', + short='S', + default='', + help=("Pass in your authentication token if you have " + "one. This is the token used for the slave.")), + cfg.StrOpt('command', + positional=True, + help="Command to be given to replicator"), + cfg.ListOpt('args', + positional=True, + help="Arguments for the command"), +] + +CONF = cfg.CONF +CONF.register_cli_opts(cli_opts) +logging.register_options(CONF) + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + + +COMMANDS = """Commands: + + help Output help for one of the commands below + + compare What is missing from the slave glance? + dump Dump the contents of a glance instance to local disk. + livecopy Load the contents of one glance instance into another. + load Load the contents of a local directory into daisy. + size Determine the size of a glance instance if dumped to disk. +""" + + +IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on ' + 'the slave, but our check for it did ' + 'not find it. This indicates that we ' + 'do not have permissions to see all ' + 'the images on the slave server.') + + +class ImageService(object): + def __init__(self, conn, auth_token): + """Initialize the ImageService. + + conn: a httplib.HTTPConnection to the glance server + auth_token: authentication token to pass in the x-auth-token header + """ + self.auth_token = auth_token + self.conn = conn + + def _http_request(self, method, url, headers, body, + ignore_result_body=False): + """Perform an HTTP request against the server. + + method: the HTTP method to use + url: the URL to request (not including server portion) + headers: headers for the request + body: body to send with the request + ignore_result_body: the body of the result will be ignored + + Returns: a httplib response object + """ + if self.auth_token: + headers.setdefault('x-auth-token', self.auth_token) + + LOG.debug('Request: %(method)s http://%(server)s:%(port)s' + '%(url)s with headers %(headers)s' + % {'method': method, + 'server': self.conn.host, + 'port': self.conn.port, + 'url': url, + 'headers': repr(headers)}) + self.conn.request(method, url, body, headers) + + response = self.conn.getresponse() + headers = self._header_list_to_dict(response.getheaders()) + code = response.status + code_description = httplib.responses[code] + LOG.debug('Response: %(code)s %(status)s %(headers)s' + % {'code': code, + 'status': code_description, + 'headers': repr(headers)}) + + if code == 400: + raise exc.HTTPBadRequest( + explanation=response.read()) + + if code == 500: + raise exc.HTTPInternalServerError( + explanation=response.read()) + + if code == 401: + raise exc.HTTPUnauthorized( + explanation=response.read()) + + if code == 403: + raise exc.HTTPForbidden( + explanation=response.read()) + + if code == 409: + raise exc.HTTPConflict( + explanation=response.read()) + + if ignore_result_body: + # NOTE: because we are pipelining requests through a single HTTP + # connection, httplib requires that we read the response body + # before we can make another request. If the caller knows they + # don't care about the body, they can ask us to do that for them. 
+ response.read() + return response + + def get_images(self): + """Return a detailed list of images. + + Yields a series of images as dicts containing metadata. + """ + params = {'is_public': None} + + while True: + url = '/v1/images/detail' + query = urlparse.urlencode(params) + if query: + url += '?%s' % query + + response = self._http_request('GET', url, {}, '') + result = jsonutils.loads(response.read()) + + if not result or 'images' not in result or not result['images']: + return + for image in result.get('images', []): + params['marker'] = image['id'] + yield image + + def get_image(self, image_uuid): + """Fetch image data from daisy. + + image_uuid: the id of an image + + Returns: a httplib Response object where the body is the image. + """ + url = '/v1/images/%s' % image_uuid + return self._http_request('GET', url, {}, '') + + @staticmethod + def _header_list_to_dict(headers): + """Expand a list of headers into a dictionary. + + headers: a list of [(key, value), (key, value), (key, value)] + + Returns: a dictionary representation of the list + """ + d = {} + for (header, value) in headers: + if header.startswith('x-image-meta-property-'): + prop = header.replace('x-image-meta-property-', '') + d.setdefault('properties', {}) + d['properties'][prop] = value + else: + d[header.replace('x-image-meta-', '')] = value + return d + + def get_image_meta(self, image_uuid): + """Return the metadata for a single image. + + image_uuid: the id of an image + + Returns: image metadata as a dictionary + """ + url = '/v1/images/%s' % image_uuid + response = self._http_request('HEAD', url, {}, '', + ignore_result_body=True) + return self._header_list_to_dict(response.getheaders()) + + @staticmethod + def _dict_to_headers(d): + """Convert a dictionary into one suitable for a HTTP request. + + d: a dictionary + + Returns: the same dictionary, with x-image-meta added to every key + """ + h = {} + for key in d: + if key == 'properties': + for subkey in d[key]: + if d[key][subkey] is None: + h['x-image-meta-property-%s' % subkey] = '' + else: + h['x-image-meta-property-%s' % subkey] = d[key][subkey] + + else: + h['x-image-meta-%s' % key] = d[key] + return h + + def add_image(self, image_meta, image_data): + """Upload an image. + + image_meta: image metadata as a dictionary + image_data: image data as a object with a read() method + + Returns: a tuple of (http response headers, http response body) + """ + + url = '/v1/images' + headers = self._dict_to_headers(image_meta) + headers['Content-Type'] = 'application/octet-stream' + headers['Content-Length'] = int(image_meta['size']) + + response = self._http_request('POST', url, headers, image_data) + headers = self._header_list_to_dict(response.getheaders()) + + LOG.debug('Image post done') + body = response.read() + return headers, body + + def add_image_meta(self, image_meta): + """Update image metadata. + + image_meta: image metadata as a dictionary + + Returns: a tuple of (http response headers, http response body) + """ + + url = '/v1/images/%s' % image_meta['id'] + headers = self._dict_to_headers(image_meta) + headers['Content-Type'] = 'application/octet-stream' + + response = self._http_request('PUT', url, headers, '') + headers = self._header_list_to_dict(response.getheaders()) + + LOG.debug('Image post done') + body = response.read() + return headers, body + + +def get_image_service(): + """Get a copy of the image service. + + This is done like this to make it easier to mock out ImageService. 
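+
+    A caller (for example a test) can therefore swap the whole client out by
+    rebinding this module-level hook; FakeImageService below is purely
+    illustrative:
+
+        replicator.get_image_service = lambda: FakeImageService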
+ """ + return ImageService + + +def replication_size(options, args): + """%(prog)s size + + Determine the size of a glance instance if dumped to disk. + + server:port: the location of the glance instance. + """ + + # Make sure server info is provided + if len(args) < 1: + raise TypeError(_("Too few arguments.")) + + server, port = utils.parse_valid_host_port(args.pop()) + + total_size = 0 + count = 0 + + imageservice = get_image_service() + client = imageservice(httplib.HTTPConnection(server, port), + options.slavetoken) + for image in client.get_images(): + LOG.debug('Considering image: %(image)s' % {'image': image}) + if image['status'] == 'active': + total_size += int(image['size']) + count += 1 + + print(_('Total size is %(size)d bytes across %(img_count)d images') % + {'size': total_size, + 'img_count': count}) + + +def replication_dump(options, args): + """%(prog)s dump + + Dump the contents of a glance instance to local disk. + + server:port: the location of the glance instance. + path: a directory on disk to contain the data. + """ + + # Make sure server and path are provided + if len(args) < 2: + raise TypeError(_("Too few arguments.")) + + path = args.pop() + server, port = utils.parse_valid_host_port(args.pop()) + + imageservice = get_image_service() + client = imageservice(httplib.HTTPConnection(server, port), + options.mastertoken) + for image in client.get_images(): + LOG.debug('Considering: %s' % image['id']) + + data_path = os.path.join(path, image['id']) + if not os.path.exists(data_path): + LOG.info(_LI('Storing: %s') % image['id']) + + # Dump glance information + with open(data_path, 'w') as f: + f.write(jsonutils.dumps(image)) + + if image['status'] == 'active' and not options.metaonly: + # Now fetch the image. The metadata returned in headers here + # is the same as that which we got from the detailed images + # request earlier, so we can ignore it here. Note that we also + # only dump active images. + LOG.debug('Image %s is active' % image['id']) + image_response = client.get_image(image['id']) + with open(data_path + '.img', 'wb') as f: + while True: + chunk = image_response.read(options.chunksize) + if not chunk: + break + f.write(chunk) + + +def _dict_diff(a, b): + """A one way dictionary diff. + + a: a dictionary + b: a dictionary + + Returns: True if the dictionaries are different + """ + # Only things the master has which the slave lacks matter + if set(a.keys()) - set(b.keys()): + LOG.debug('metadata diff -- master has extra keys: %(keys)s' + % {'keys': ' '.join(set(a.keys()) - set(b.keys()))}) + return True + + for key in a: + if str(a[key]) != str(b[key]): + LOG.debug('metadata diff -- value differs for key ' + '%(key)s: master "%(master_value)s" vs ' + 'slave "%(slave_value)s"' % + {'key': key, + 'master_value': a[key], + 'slave_value': b[key]}) + return True + + return False + + +def replication_load(options, args): + """%(prog)s load + + Load the contents of a local directory into daisy. + + server:port: the location of the glance instance. + path: a directory on disk containing the data. 
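+
+    Illustrative example (the address and path are placeholders):
+
+        %(prog)s load 192.0.2.10:9292 /var/lib/glance/dump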
+ """ + + # Make sure server and path are provided + if len(args) < 2: + raise TypeError(_("Too few arguments.")) + + path = args.pop() + server, port = utils.parse_valid_host_port(args.pop()) + + imageservice = get_image_service() + client = imageservice(httplib.HTTPConnection(server, port), + options.slavetoken) + + updated = [] + + for ent in os.listdir(path): + if utils.is_uuid_like(ent): + image_uuid = ent + LOG.info(_LI('Considering: %s') % image_uuid) + + meta_file_name = os.path.join(path, image_uuid) + with open(meta_file_name) as meta_file: + meta = jsonutils.loads(meta_file.read()) + + # Remove keys which don't make sense for replication + for key in options.dontreplicate.split(' '): + if key in meta: + LOG.debug('Stripping %(header)s from saved ' + 'metadata', {'header': key}) + del meta[key] + + if _image_present(client, image_uuid): + # NOTE(mikal): Perhaps we just need to update the metadata? + # Note that we don't attempt to change an image file once it + # has been uploaded. + LOG.debug('Image %s already present', image_uuid) + headers = client.get_image_meta(image_uuid) + for key in options.dontreplicate.split(' '): + if key in headers: + LOG.debug('Stripping %(header)s from slave ' + 'metadata', {'header': key}) + del headers[key] + + if _dict_diff(meta, headers): + LOG.info(_LI('Image %s metadata has changed') % + image_uuid) + headers, body = client.add_image_meta(meta) + _check_upload_response_headers(headers, body) + updated.append(meta['id']) + + else: + if not os.path.exists(os.path.join(path, image_uuid + '.img')): + LOG.debug('%s dump is missing image data, skipping' % + image_uuid) + continue + + # Upload the image itself + with open(os.path.join(path, image_uuid + '.img')) as img_file: + try: + headers, body = client.add_image(meta, img_file) + _check_upload_response_headers(headers, body) + updated.append(meta['id']) + except exc.HTTPConflict: + LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) + % image_uuid) # noqa + + return updated + + +def replication_livecopy(options, args): + """%(prog)s livecopy + + Load the contents of one glance instance into another. + + fromserver:port: the location of the master glance instance. + toserver:port: the location of the slave glance instance. + """ + + # Make sure from-server and to-server are provided + if len(args) < 2: + raise TypeError(_("Too few arguments.")) + + imageservice = get_image_service() + + slave_server, slave_port = utils.parse_valid_host_port(args.pop()) + slave_conn = httplib.HTTPConnection(slave_server, slave_port) + slave_client = imageservice(slave_conn, options.slavetoken) + + master_server, master_port = utils.parse_valid_host_port(args.pop()) + master_conn = httplib.HTTPConnection(master_server, master_port) + master_client = imageservice(master_conn, options.mastertoken) + + updated = [] + + for image in master_client.get_images(): + LOG.debug('Considering %(id)s' % {'id': image['id']}) + for key in options.dontreplicate.split(' '): + if key in image: + LOG.debug('Stripping %(header)s from master metadata', + {'header': key}) + del image[key] + + if _image_present(slave_client, image['id']): + # NOTE(mikal): Perhaps we just need to update the metadata? + # Note that we don't attempt to change an image file once it + # has been uploaded. 
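+            # In other words, for an image that already exists on the slave
+            # only the metadata is compared and re-sent below; the image
+            # bytes themselves are never transferred again.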
+ headers = slave_client.get_image_meta(image['id']) + if headers['status'] == 'active': + for key in options.dontreplicate.split(' '): + if key in image: + LOG.debug('Stripping %(header)s from master ' + 'metadata', {'header': key}) + del image[key] + if key in headers: + LOG.debug('Stripping %(header)s from slave ' + 'metadata', {'header': key}) + del headers[key] + + if _dict_diff(image, headers): + LOG.info(_LI('Image %s metadata has changed') % + image['id']) + headers, body = slave_client.add_image_meta(image) + _check_upload_response_headers(headers, body) + updated.append(image['id']) + + elif image['status'] == 'active': + LOG.info(_LI('Image %s is being synced') % image['id']) + if not options.metaonly: + image_response = master_client.get_image(image['id']) + try: + headers, body = slave_client.add_image(image, + image_response) + _check_upload_response_headers(headers, body) + updated.append(image['id']) + except exc.HTTPConflict: + LOG.error(_LE(IMAGE_ALREADY_PRESENT_MESSAGE) % image['id']) # noqa + + return updated + + +def replication_compare(options, args): + """%(prog)s compare + + Compare the contents of fromserver with those of toserver. + + fromserver:port: the location of the master glance instance. + toserver:port: the location of the slave glance instance. + """ + + # Make sure from-server and to-server are provided + if len(args) < 2: + raise TypeError(_("Too few arguments.")) + + imageservice = get_image_service() + + slave_server, slave_port = utils.parse_valid_host_port(args.pop()) + slave_conn = httplib.HTTPConnection(slave_server, slave_port) + slave_client = imageservice(slave_conn, options.slavetoken) + + master_server, master_port = utils.parse_valid_host_port(args.pop()) + master_conn = httplib.HTTPConnection(master_server, master_port) + master_client = imageservice(master_conn, options.mastertoken) + + differences = {} + + for image in master_client.get_images(): + if _image_present(slave_client, image['id']): + headers = slave_client.get_image_meta(image['id']) + for key in options.dontreplicate.split(' '): + if key in image: + LOG.debug('Stripping %(header)s from master metadata', + {'header': key}) + del image[key] + if key in headers: + LOG.debug('Stripping %(header)s from slave metadata', + {'header': key}) + del headers[key] + + for key in image: + if image[key] != headers.get(key, None): + LOG.warn(_LW('%(image_id)s: field %(key)s differs ' + '(source is %(master_value)s, destination ' + 'is %(slave_value)s)') + % {'image_id': image['id'], + 'key': key, + 'master_value': image[key], + 'slave_value': headers.get(key, 'undefined')}) + differences[image['id']] = 'diff' + else: + LOG.debug('%(image_id)s is identical' + % {'image_id': image['id']}) + + elif image['status'] == 'active': + LOG.warn(_LW('Image %s entirely missing from the destination') + % image['id']) + differences[image['id']] = 'missing' + + return differences + + +def _check_upload_response_headers(headers, body): + """Check that the headers of an upload are reasonable. + + headers: the headers from the upload + body: the body from the upload + """ + + if 'status' not in headers: + try: + d = jsonutils.loads(body) + if 'image' in d and 'status' in d['image']: + return + + except Exception: + raise exception.UploadException(body) + + +def _image_present(client, image_uuid): + """Check if an image is present in daisy. 
+
+    client: the ImageService
+    image_uuid: the image uuid to check
+
+    Returns: True if the image is present
+    """
+    headers = client.get_image_meta(image_uuid)
+    return 'status' in headers
+
+
+def print_help(options, args):
+    """Print help specific to a command.
+
+    options: the parsed command line options
+    args: the command line
+    """
+    if len(args) != 1:
+        print(COMMANDS)
+        sys.exit(1)
+    command_name = args.pop()
+    command = lookup_command(command_name)
+    print(command.__doc__ % {'prog': os.path.basename(sys.argv[0])})
+
+
+def lookup_command(command_name):
+    """Lookup a command.
+
+    command_name: the command name
+
+    Returns: a method which implements that command
+    """
+    BASE_COMMANDS = {'help': print_help}
+
+    REPLICATION_COMMANDS = {'compare': replication_compare,
+                            'dump': replication_dump,
+                            'livecopy': replication_livecopy,
+                            'load': replication_load,
+                            'size': replication_size}
+
+    commands = {}
+    for command_set in (BASE_COMMANDS, REPLICATION_COMMANDS):
+        commands.update(command_set)
+
+    try:
+        command = commands[command_name]
+    except KeyError:
+        sys.exit(_("Unknown command: %s") % command_name)
+
+    return command
+
+
+def main():
+    """The main function."""
+
+    try:
+        config.parse_args()
+    except RuntimeError as e:
+        sys.exit("ERROR: %s" % utils.exception_to_str(e))
+
+    # Setup logging
+    logging.setup(CONF, 'glance')
+
+    if CONF.token:
+        CONF.slavetoken = CONF.token
+        CONF.mastertoken = CONF.token
+
+    command = lookup_command(CONF.command)
+
+    try:
+        command(CONF, CONF.args)
+    except TypeError as e:
+        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
+        sys.exit("ERROR: %s" % utils.exception_to_str(e))
+    except ValueError as e:
+        LOG.error(_LE(command.__doc__) % {'prog': command.__name__})  # noqa
+        sys.exit("ERROR: %s" % utils.exception_to_str(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/code/daisy/daisy/cmd/scrubber.py b/code/daisy/daisy/cmd/scrubber.py
new file mode 100755
index 00000000..d27f6fa3
--- /dev/null
+++ b/code/daisy/daisy/cmd/scrubber.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Glance Scrub Service
+"""
+
+import os
+import sys
+
+# If ../glance/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +import glance_store +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import config +from daisy.openstack.common import systemd +from daisy import scrubber + + +CONF = cfg.CONF +logging.register_options(CONF) + + +def main(): + CONF.register_cli_opts(scrubber.scrubber_cmd_cli_opts) + CONF.register_opts(scrubber.scrubber_cmd_opts) + + try: + config.parse_args() + logging.setup(CONF, 'glance') + + glance_store.register_opts(config.CONF) + glance_store.create_stores(config.CONF) + glance_store.verify_default_store() + + app = scrubber.Scrubber(glance_store) + + if CONF.daemon: + server = scrubber.Daemon(CONF.wakeup_time) + server.start(app) + systemd.notify_once() + server.wait() + else: + import eventlet + pool = eventlet.greenpool.GreenPool(1000) + app.run(pool) + except RuntimeError as e: + sys.exit("ERROR: %s" % e) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/cmd/search.py b/code/daisy/daisy/cmd/search.py new file mode 100755 index 00000000..552551f6 --- /dev/null +++ b/code/daisy/daisy/cmd/search.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Glance Catalog Search Server +""" + +import os +import sys + +import eventlet + +from daisy.common import utils + +# Monkey patch socket, time, select, threads +eventlet.patcher.monkey_patch(socket=True, time=True, select=True, + thread=True, os=True) + +# If ../glance/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): + sys.path.insert(0, possible_topdir) + +from oslo.config import cfg +from oslo_log import log as logging +import osprofiler.notifier +import osprofiler.web + +from daisy.common import config +from daisy.common import exception +from daisy.common import wsgi +from daisy import notifier + +CONF = cfg.CONF +CONF.import_group("profiler", "daisy.common.wsgi") +logging.register_options(CONF) + +KNOWN_EXCEPTIONS = (RuntimeError, + exception.WorkerCreationFailure) + + +def fail(e): + global KNOWN_EXCEPTIONS + return_code = KNOWN_EXCEPTIONS.index(type(e)) + 1 + sys.stderr.write("ERROR: %s\n" % utils.exception_to_str(e)) + sys.exit(return_code) + + +def main(): + try: + config.parse_args() + wsgi.set_eventlet_hub() + logging.setup(CONF, 'glance') + + if cfg.CONF.profiler.enabled: + _notifier = osprofiler.notifier.create("Messaging", + notifier.messaging, {}, + notifier.get_transport(), + "glance", "search", + cfg.CONF.bind_host) + osprofiler.notifier.set(_notifier) + else: + osprofiler.web.disable() + + server = wsgi.Server() + server.start(config.load_paste_app('glance-search'), + default_port=9393) + server.wait() + except KNOWN_EXCEPTIONS as e: + fail(e) + + +if __name__ == '__main__': + main() diff --git a/code/daisy/daisy/common/.config.py.swp b/code/daisy/daisy/common/.config.py.swp new file mode 100755 index 00000000..6d91803f Binary files /dev/null and b/code/daisy/daisy/common/.config.py.swp differ diff --git a/code/daisy/daisy/common/__init__.py b/code/daisy/daisy/common/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/common/artifacts/__init__.py b/code/daisy/daisy/common/artifacts/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/common/artifacts/declarative.py b/code/daisy/daisy/common/artifacts/declarative.py new file mode 100755 index 00000000..4d07527d --- /dev/null +++ b/code/daisy/daisy/common/artifacts/declarative.py @@ -0,0 +1,747 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
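+
+# A minimal, illustrative sketch of how the attribute definitions below are
+# used (the 'Priority' attribute is invented for the example):
+#
+#     attr = AttributeDefinition(display_name='Priority', required=True)
+#     attr.validate(None)      # raises InvalidArtifactPropertyValue,
+#                              # because a required value is missing
+#     attr.validate('high')    # passes the base (object,) type check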
+
+import copy
+import re
+import types
+
+import semantic_version
+import six
+
+from daisy.common import exception as exc
+from daisy import i18n
+
+
+_ = i18n._
+
+
+class AttributeDefinition(object):
+    """A base class for the attribute definitions which may be added to
+    declaratively defined artifact types
+    """
+
+    ALLOWED_TYPES = (object,)
+
+    def __init__(self,
+                 display_name=None,
+                 description=None,
+                 readonly=False,
+                 mutable=True,
+                 required=False,
+                 default=None):
+        """Initializes attribute definition
+
+        :param display_name: Display name of the attribute
+        :param description: Description of the attribute
+        :param readonly: Flag indicating if the value of attribute may not be
+                         changed once an artifact is created
+        :param mutable: Flag indicating if the value of attribute may not be
+                        changed once an artifact is published
+        :param required: Flag indicating if the value of attribute is required
+        :param default: default value of the attribute
+        """
+        self.name = None
+        self.display_name = display_name
+        self.description = description
+        self.readonly = readonly
+        self.required = required
+        self.mutable = mutable
+        self.default = default
+        self._add_validator('type',
+                            lambda v: isinstance(v, self.ALLOWED_TYPES),
+                            _("Not a valid value type"))
+        self._validate_default()
+
+    def _set_name(self, value):
+        self.name = value
+        if self.display_name is None:
+            self.display_name = value
+
+    def _add_validator(self, name, func, message):
+        if not hasattr(self, '_validators'):
+            self._validators = []
+            self._validators_index = {}
+        pair = (func, message)
+        self._validators.append(pair)
+        self._validators_index[name] = pair
+
+    def _get_validator(self, name):
+        return self._validators_index.get(name)
+
+    def _remove_validator(self, name):
+        pair = self._validators_index.pop(name, None)
+        if pair is not None:
+            self._validators.remove(pair)
+
+    def _check_definition(self):
+        self._validate_default()
+
+    def _validate_default(self):
+        if self.default:
+            try:
+                self.validate(self.default, 'default')
+            except exc.InvalidArtifactPropertyValue:
+                raise exc.InvalidArtifactTypePropertyDefinition(
+                    _("Default value is invalid"))
+
+    def get_value(self, obj):
+        return getattr(obj, self.name)
+
+    def set_value(self, obj, value):
+        return setattr(obj, self.name, value)
+
+    def validate(self, value, name=None):
+        if value is None:
+            if self.required:
+                raise exc.InvalidArtifactPropertyValue(
+                    name=name or self.name,
+                    val=value,
+                    msg=_('Value is required'))
+            else:
+                return
+
+        first_error = next((msg for v_func, msg in self._validators
+                            if not v_func(value)), None)
+        if first_error:
+            raise exc.InvalidArtifactPropertyValue(name=name or self.name,
+                                                   val=value,
+                                                   msg=first_error)
+
+
+class ListAttributeDefinition(AttributeDefinition):
+    """A base class for Attribute definitions having List-semantics
+
+    Is inherited by Array, ArtifactReferenceList and BinaryObjectList
+    """
+    ALLOWED_TYPES = (types.ListType,)
+    ALLOWED_ITEM_TYPES = (AttributeDefinition, )
+
+    def _check_item_type(self, item):
+        if not isinstance(item, self.ALLOWED_ITEM_TYPES):
+            raise exc.InvalidArtifactTypePropertyDefinition(
+                _('Invalid item type specification'))
+        if item.default is not None:
+            raise exc.InvalidArtifactTypePropertyDefinition(
+                _('List definitions may not have defaults'))
+
+    def __init__(self, item_type, min_size=0, max_size=None, unique=False,
+                 **kwargs):
+
+        super(ListAttributeDefinition, self).__init__(**kwargs)
+        if isinstance(item_type, types.ListType):
+            for it in item_type:
+                self._check_item_type(it)
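+            # A list of item definitions gives the attribute tuple semantics:
+            # the element at each position must match the definition declared
+            # for that position (see the min_size/max_size handling below).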
+ + # we need to copy the item_type collection + self.item_type = item_type[:] + + if min_size != 0: + raise exc.InvalidArtifactTypePropertyDefinition( + _("Cannot specify 'min_size' explicitly") + ) + + if max_size is not None: + raise exc.InvalidArtifactTypePropertyDefinition( + _("Cannot specify 'max_size' explicitly") + ) + + # setting max_size and min_size to the length of item_type, + # as tuple-semantic assumes that the number of elements is set + # by the type spec + min_size = max_size = len(item_type) + else: + self._check_item_type(item_type) + self.item_type = item_type + + if min_size: + self.min_size(min_size) + + if max_size: + self.max_size(max_size) + + if unique: + self.unique() + + def min_size(self, value): + self._min_size = value + if value is not None: + self._add_validator('min_size', + lambda v: len(v) >= self._min_size, + _('List size is less than minimum')) + else: + self._remove_validator('min_size') + + def max_size(self, value): + self._max_size = value + if value is not None: + self._add_validator('max_size', + lambda v: len(v) <= self._max_size, + _('List size is greater than maximum')) + else: + self._remove_validator('max_size') + + def unique(self, value=True): + self._unique = value + if value: + def _unique(items): + seen = set() + for item in items: + if item in seen: + return False + seen.add(item) + return True + self._add_validator('unique', + _unique, _('Items have to be unique')) + else: + self._remove_validator('unique') + + def _set_name(self, value): + super(ListAttributeDefinition, self)._set_name(value) + if isinstance(self.item_type, types.ListType): + for i, item in enumerate(self.item_type): + item._set_name("%s[%i]" % (value, i)) + else: + self.item_type._set_name("%s[*]" % value) + + def validate(self, value, name=None): + super(ListAttributeDefinition, self).validate(value, name) + if value is not None: + for i, item in enumerate(value): + self._validate_item_at(item, i) + + def get_item_definition_at_index(self, index): + if isinstance(self.item_type, types.ListType): + if index < len(self.item_type): + return self.item_type[index] + else: + return None + return self.item_type + + def _validate_item_at(self, item, index): + item_type = self.get_item_definition_at_index(index) + # set name if none has been given to the list element at given index + if (isinstance(self.item_type, types.ListType) and item_type and + not item_type.name): + item_type.name = "%s[%i]" % (self.name, index) + if item_type: + item_type.validate(item) + + +class DictAttributeDefinition(AttributeDefinition): + """A base class for Attribute definitions having Map-semantics + + Is inherited by Dict + """ + ALLOWED_TYPES = (types.DictionaryType,) + ALLOWED_PROPERTY_TYPES = (AttributeDefinition,) + + def _check_prop(self, key, item): + if (not isinstance(item, self.ALLOWED_PROPERTY_TYPES) or + (key is not None and not isinstance(key, types.StringTypes))): + raise exc.InvalidArtifactTypePropertyDefinition( + _('Invalid dict property type specification')) + + @staticmethod + def _validate_key(key): + if not isinstance(key, types.StringTypes): + raise exc.InvalidArtifactPropertyValue( + _('Invalid dict property type')) + + def __init__(self, properties, min_properties=0, max_properties=0, + **kwargs): + super(DictAttributeDefinition, self).__init__(**kwargs) + if isinstance(properties, types.DictionaryType): + for key, value in six.iteritems(properties): + self._check_prop(key, value) + # copy the properties dict + self.properties = properties.copy() + + 
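+            # With a fixed per-key schema, values may only use the declared keys: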
self._add_validator('keys', + lambda v: set(v.keys()) <= set( + self.properties.keys()), + _('Dictionary contains unexpected key(s)')) + else: + self._check_prop(None, properties) + self.properties = properties + + if min_properties: + self.min_properties(min_properties) + + if max_properties: + self.max_properties(max_properties) + + def min_properties(self, value): + self._min_properties = value + if value is not None: + self._add_validator('min_properties', + lambda v: len(v) >= self._min_properties, + _('Dictionary size is less than ' + 'minimum')) + else: + self._remove_validator('min_properties') + + def max_properties(self, value): + self._max_properties = value + if value is not None: + self._add_validator('max_properties', + lambda v: len(v) <= self._max_properties, + _('Dictionary size is ' + 'greater than maximum')) + else: + self._remove_validator('max_properties') + + def _set_name(self, value): + super(DictAttributeDefinition, self)._set_name(value) + if isinstance(self.properties, types.DictionaryType): + for k, v in six.iteritems(self.properties): + v._set_name(value) + else: + self.properties._set_name(value) + + def validate(self, value, name=None): + super(DictAttributeDefinition, self).validate(value, name) + if value is not None: + for k, v in six.iteritems(value): + self._validate_item_with_key(v, k) + + def _validate_item_with_key(self, value, key): + self._validate_key(key) + if isinstance(self.properties, types.DictionaryType): + prop_def = self.properties.get(key) + if prop_def is not None: + name = "%s[%s]" % (prop_def.name, key) + prop_def.validate(value, name=name) + else: + name = "%s[%s]" % (self.properties.name, key) + self.properties.validate(value, name=name) + + def get_prop_definition_at_key(self, key): + if isinstance(self.properties, types.DictionaryType): + return self.properties.get(key) + else: + return self.properties + + +class PropertyDefinition(AttributeDefinition): + """A base class for Attributes defining generic or type-specific metadata + properties + """ + DB_TYPE = None + + def __init__(self, + internal=False, + allowed_values=None, + validators=None, + **kwargs): + """Defines a metadata property + + :param internal: a flag indicating that the property is internal, i.e. 
+ not returned to client + :param allowed_values: specifies a list of values allowed for the + property + :param validators: specifies a list of custom validators for the + property + """ + super(PropertyDefinition, self).__init__(**kwargs) + self.internal = internal + self._allowed_values = None + + if validators is not None: + try: + for i, (f, m) in enumerate(validators): + self._add_validator("custom_%i" % i, f, m) + except ValueError: + raise exc.InvalidArtifactTypePropertyDefinition( + _("Custom validators list should contain tuples " + "'(function, message)'")) + + if allowed_values is not None: + # copy the allowed_values, as this is going to create a + # closure, and we need to make sure that external modification of + # this list does not affect the created validator + self.allowed_values(allowed_values) + self._check_definition() + + def _validate_allowed_values(self): + if self._allowed_values: + try: + for allowed_value in self._allowed_values: + self.validate(allowed_value, 'allowed_value') + except exc.InvalidArtifactPropertyValue: + raise exc.InvalidArtifactTypePropertyDefinition( + _("Allowed values %s are invalid under given validators") % + self._allowed_values) + + def allowed_values(self, values): + self._allowed_values = values[:] + if values is not None: + self._add_validator('allowed', lambda v: v in self._allowed_values, + _("Is not allowed value")) + else: + self._remove_validator('allowed') + self._check_definition() + + def _check_definition(self): + self._validate_allowed_values() + super(PropertyDefinition, self)._check_definition() + + +class RelationDefinition(AttributeDefinition): + """A base class for Attributes defining cross-artifact relations""" + def __init__(self, internal=False, **kwargs): + self.internal = internal + kwargs.setdefault('mutable', False) + # if mutable=True has been passed -> raise an exception + if kwargs['mutable'] is True: + raise exc.InvalidArtifactTypePropertyDefinition( + _("Dependency relations cannot be mutable")) + super(RelationDefinition, self).__init__(**kwargs) + + +class BlobDefinition(AttributeDefinition): + """A base class for Attributes defining binary objects""" + pass + + +class ArtifactTypeMetaclass(type): + """A metaclass to build Artifact Types. 
Not intended to be used directly + + Use `get_declarative_base` to get the base class instead + """ + def __init__(cls, class_name, bases, attributes): + if '_declarative_artifact_type' not in cls.__dict__: + _build_declarative_meta(cls) + super(ArtifactTypeMetaclass, cls).__init__(class_name, bases, + attributes) + + +class ArtifactPropertyDescriptor(object): + """A descriptor object for working with artifact attributes""" + + def __init__(self, prop, collection_wrapper_class=None): + self.prop = prop + self.collection_wrapper_class = collection_wrapper_class + + def __get__(self, instance, owner): + if instance is None: + # accessed via owner class + return self.prop + else: + v = getattr(instance, '_' + self.prop.name, None) + if v is None and self.prop.default is not None: + v = copy.copy(self.prop.default) + self.__set__(instance, v, ignore_mutability=True) + return self.__get__(instance, owner) + else: + if v is not None and self.collection_wrapper_class: + if self.prop.readonly: + readonly = True + elif (not self.prop.mutable and + hasattr(instance, '__is_mutable__') and + not hasattr(instance, + '__suspend_mutability_checks__')): + + readonly = not instance.__is_mutable__() + else: + readonly = False + if readonly: + v = v.__make_immutable__() + return v + + def __set__(self, instance, value, ignore_mutability=False): + if instance: + if self.prop.readonly: + if hasattr(instance, '_' + self.prop.name): + raise exc.InvalidArtifactPropertyValue( + _('Attempt to set readonly property')) + if not self.prop.mutable: + if (hasattr(instance, '__is_mutable__') and + not hasattr(instance, + '__suspend_mutability_checks__')): + mutable = instance.__is_mutable__() or ignore_mutability + if not mutable: + raise exc.InvalidArtifactPropertyValue( + _('Attempt to set value of immutable property')) + if value is not None and self.collection_wrapper_class: + value = self.collection_wrapper_class(value) + value.property = self.prop + self.prop.validate(value) + setattr(instance, '_' + self.prop.name, value) + + +class ArtifactAttributes(object): + """A container class storing description of Artifact Type attributes""" + def __init__(self): + self.properties = {} + self.dependencies = {} + self.blobs = {} + self.all = {} + + @property + def default_dependency(self): + """Returns the default dependency relation for an artifact type""" + if len(self.dependencies) == 1: + return self.dependencies.values()[0] + + @property + def default_blob(self): + """Returns the default blob object for an artifact type""" + if len(self.blobs) == 1: + return self.blobs.values()[0] + + @property + def default_properties_dict(self): + """Returns a default properties dict for an artifact type""" + dict_props = [v for v in self.properties.values() if + isinstance(v, DictAttributeDefinition)] + if len(dict_props) == 1: + return dict_props[0] + + @property + def tags(self): + """Returns tags property for an artifact type""" + return self.properties.get('tags') + + def add(self, attribute): + self.all[attribute.name] = attribute + if isinstance(attribute, PropertyDefinition): + self.properties[attribute.name] = attribute + elif isinstance(attribute, BlobDefinition): + self.blobs[attribute.name] = attribute + elif isinstance(attribute, RelationDefinition): + self.dependencies[attribute.name] = attribute + + +class ArtifactTypeMetadata(object): + """A container to store the meta-information about an artifact type""" + + def __init__(self, type_name, type_display_name, type_version, + type_description, endpoint): + 
"""Initializes the Artifact Type metadata + + :param type_name: name of the artifact type + :param type_display_name: display name of the artifact type + :param type_version: version of the artifact type + :param type_description: description of the artifact type + :param endpoint: REST API URI suffix to call the artifacts of this type + """ + + self.attributes = ArtifactAttributes() + + # These are going to be defined by third-party plugin + # developers, so we need to do some validations on these values and + # raise InvalidArtifactTypeDefinition if they are violated + self.type_name = type_name + self.type_display_name = type_display_name or type_name + self.type_version = type_version or '1.0' + self.type_description = type_description + self.endpoint = endpoint or type_name.lower() + + self._validate_string(self.type_name, 'Type name', min_length=1, + max_length=255) + self._validate_string(self.type_display_name, 'Type display name', + max_length=255) + self._validate_string(self.type_description, 'Type description') + self._validate_string(self.endpoint, 'endpoint', min_length=1) + try: + semantic_version.Version(self.type_version, partial=True) + except ValueError: + raise exc.InvalidArtifactTypeDefinition( + message=_("Type version has to be a valid semver string")) + + @staticmethod + def _validate_string(value, name, min_length=0, max_length=None, + pattern=None): + if value is None: + if min_length > 0: + raise exc.InvalidArtifactTypeDefinition( + message=_("%(attribute)s is required"), attribute=name) + else: + return + if not isinstance(value, six.string_types): + raise exc.InvalidArtifactTypeDefinition( + message=_("%(attribute)s have to be string"), attribute=name) + if max_length and len(value) > max_length: + raise exc.InvalidArtifactTypeDefinition( + message=_("%(attribute)s may not be longer than %(length)i"), + attribute=name, length=max_length) + if min_length and len(value) < min_length: + raise exc.InvalidArtifactTypeDefinition( + message=_("%(attribute)s may not be shorter than %(length)i"), + attribute=name, length=min_length) + if pattern and not re.match(pattern, value): + raise exc.InvalidArtifactTypeDefinition( + message=_("%(attribute)s should match pattern %(pattern)s"), + attribute=name, pattern=pattern.pattern) + + +def _build_declarative_meta(cls): + attrs = dict(cls.__dict__) + type_name = None + type_display_name = None + type_version = None + type_description = None + endpoint = None + + for base in cls.__mro__: + for name, value in six.iteritems(vars(base)): + if name == '__type_name__': + if not type_name: + type_name = cls.__type_name__ + elif name == '__type_version__': + if not type_version: + type_version = cls.__type_version__ + elif name == '__type_description__': + if not type_description: + type_description = cls.__type_description__ + elif name == '__endpoint__': + if not endpoint: + endpoint = cls.__endpoint__ + elif name == '__type_display_name__': + if not type_display_name: + type_display_name = cls.__type_display_name__ + elif base is not cls and name not in attrs: + if isinstance(value, AttributeDefinition): + attrs[name] = value + elif isinstance(value, ArtifactPropertyDescriptor): + attrs[name] = value.prop + + meta = ArtifactTypeMetadata(type_name=type_name or cls.__name__, + type_display_name=type_display_name, + type_version=type_version, + type_description=type_description, + endpoint=endpoint) + setattr(cls, 'metadata', meta) + for k, v in attrs.items(): + if k == 'metadata': + raise exc.InvalidArtifactTypePropertyDefinition( + 
_("Cannot declare artifact property with reserved name " + "'metadata'")) + if isinstance(v, AttributeDefinition): + v._set_name(k) + wrapper_class = None + if isinstance(v, ListAttributeDefinition): + wrapper_class = type("ValidatedList", (list,), {}) + _add_validation_to_list(wrapper_class) + if isinstance(v, DictAttributeDefinition): + wrapper_class = type("ValidatedDict", (dict,), {}) + _add_validation_to_dict(wrapper_class) + prop_descr = ArtifactPropertyDescriptor(v, wrapper_class) + setattr(cls, k, prop_descr) + meta.attributes.add(v) + + +def _validating_method(method, klass): + def wrapper(self, *args, **kwargs): + instance_copy = klass(self) + method(instance_copy, *args, **kwargs) + self.property.validate(instance_copy) + method(self, *args, **kwargs) + + return wrapper + + +def _immutable_method(method): + def substitution(*args, **kwargs): + raise exc.InvalidArtifactPropertyValue( + _("Unable to modify collection in " + "immutable or readonly property")) + + return substitution + + +def _add_immutable_wrappers(class_to_add, wrapped_methods): + for method_name in wrapped_methods: + method = getattr(class_to_add, method_name, None) + if method: + setattr(class_to_add, method_name, _immutable_method(method)) + + +def _add_validation_wrappers(class_to_validate, base_class, validated_methods): + for method_name in validated_methods: + method = getattr(class_to_validate, method_name, None) + if method: + setattr(class_to_validate, method_name, + _validating_method(method, base_class)) + readonly_class = type("Readonly" + class_to_validate.__name__, + (class_to_validate,), {}) + _add_immutable_wrappers(readonly_class, validated_methods) + + def __make_immutable__(self): + return readonly_class(self) + + class_to_validate.__make_immutable__ = __make_immutable__ + + +def _add_validation_to_list(list_based_class): + validated_methods = ['append', 'extend', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__setitem__', '__delitem__', + '__delslice__'] + _add_validation_wrappers(list_based_class, list, validated_methods) + + +def _add_validation_to_dict(dict_based_class): + validated_methods = ['pop', 'popitem', 'setdefault', 'update', + '__delitem__', '__setitem__', 'clear'] + _add_validation_wrappers(dict_based_class, dict, validated_methods) + + +def _kwarg_init_constructor(self, **kwargs): + self.__suspend_mutability_checks__ = True + try: + for k in kwargs: + if not hasattr(type(self), k): + raise exc.ArtifactInvalidProperty(prop=k) + setattr(self, k, kwargs[k]) + self._validate_required(self.metadata.attributes.properties) + finally: + del self.__suspend_mutability_checks__ + + +def _validate_required(self, attribute_dict): + for k, v in six.iteritems(attribute_dict): + if v.required and (not hasattr(self, k) or getattr(self, k) is None): + raise exc.InvalidArtifactPropertyValue(name=k, val=None, + msg=_('Value is required')) + + +def _update(self, values): + for k in values: + if hasattr(type(self), k): + setattr(self, k, values[k]) + else: + raise exc.ArtifactInvalidProperty(prop=k) + + +def _pre_publish_validator(self, *args, **kwargs): + self._validate_required(self.metadata.attributes.blobs) + self._validate_required(self.metadata.attributes.dependencies) + + +_kwarg_init_constructor.__name__ = '__init__' +_pre_publish_validator.__name__ = '__pre_publish__' +_update.__name__ = 'update' + + +def get_declarative_base(name='base', base_class=object): + """Returns a base class which should be inherited to construct Artifact + Type object using the declarative syntax of attribute 
definition + """ + bases = not isinstance(base_class, tuple) and (base_class,) or base_class + class_dict = {'__init__': _kwarg_init_constructor, + '_validate_required': _validate_required, + '__pre_publish__': _pre_publish_validator, + '_declarative_artifact_type': True, + 'update': _update} + return ArtifactTypeMetaclass(name, bases, class_dict) diff --git a/code/daisy/daisy/common/artifacts/definitions.py b/code/daisy/daisy/common/artifacts/definitions.py new file mode 100755 index 00000000..d74d57a1 --- /dev/null +++ b/code/daisy/daisy/common/artifacts/definitions.py @@ -0,0 +1,571 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import numbers +import re +import types + +import semantic_version +import six + +from daisy.common.artifacts import declarative +import daisy.common.exception as exc +from daisy import i18n + + +_ = i18n._ + + +class Text(declarative.PropertyDefinition): + """A text metadata property of arbitrary length + + Maps to TEXT columns in database, does not support sorting or filtering + """ + ALLOWED_TYPES = (six.string_types,) + DB_TYPE = 'text' + + +# noinspection PyAttributeOutsideInit +class String(Text): + """A string metadata property of limited length + + Maps to VARCHAR columns in database, supports filtering and sorting. + May have constrains on length and regexp patterns. + + The maximum length is limited to 255 characters + """ + + DB_TYPE = 'string' + + def __init__(self, max_length=255, min_length=0, pattern=None, **kwargs): + """Defines a String metadata property. 
+ + :param max_length: maximum value length + :param min_length: minimum value length + :param pattern: regexp pattern to match + """ + super(String, self).__init__(**kwargs) + + self.max_length(max_length) + self.min_length(min_length) + if pattern: + self.pattern(pattern) + # if default and/or allowed_values are specified (in base classes) + # then we need to validate them against the newly added validators + self._check_definition() + + def max_length(self, value): + """Sets the maximum value length""" + self._max_length = value + if value is not None: + if value > 255: + raise exc.InvalidArtifactTypePropertyDefinition( + _('Max string length may not exceed 255 characters')) + self._add_validator('max_length', + lambda v: len(v) <= self._max_length, + _('Length is greater than maximum')) + else: + self._remove_validator('max_length') + self._check_definition() + + def min_length(self, value): + """Sets the minimum value length""" + self._min_length = value + if value is not None: + if value < 0: + raise exc.InvalidArtifactTypePropertyDefinition( + _('Min string length may not be negative')) + + self._add_validator('min_length', + lambda v: len(v) >= self._min_length, + _('Length is less than minimum')) + else: + self._remove_validator('min_length') + self._check_definition() + + def pattern(self, value): + """Sets the regexp pattern to match""" + self._pattern = value + if value is not None: + self._add_validator('pattern', + lambda v: re.match(self._pattern, + v) is not None, + _('Does not match pattern')) + else: + self._remove_validator('pattern') + self._check_definition() + + +class SemVerString(String): + """A String metadata property matching semver pattern""" + + def __init__(self, **kwargs): + def validate(value): + try: + semantic_version.Version(value, partial=True) + except ValueError: + return False + return True + + super(SemVerString, + self).__init__(validators=[(validate, + "Invalid semver string")], + **kwargs) + + +# noinspection PyAttributeOutsideInit +class Integer(declarative.PropertyDefinition): + """An Integer metadata property + + Maps to INT columns in Database, supports filtering and sorting. + May have constraints on value + """ + + ALLOWED_TYPES = (six.integer_types,) + DB_TYPE = 'int' + + def __init__(self, min_value=None, max_value=None, **kwargs): + """Defines an Integer metadata property + + :param min_value: minimum allowed value + :param max_value: maximum allowed value + """ + super(Integer, self).__init__(**kwargs) + if min_value is not None: + self.min_value(min_value) + + if max_value is not None: + self.max_value(max_value) + + # if default and/or allowed_values are specified (in base classes) + # then we need to validate them against the newly added validators + self._check_definition() + + def min_value(self, value): + """Sets the minimum allowed value""" + self._min_value = value + if value is not None: + self._add_validator('min_value', + lambda v: v >= self._min_value, + _('Value is less than minimum')) + else: + self._remove_validator('min_value') + self._check_definition() + + def max_value(self, value): + """Sets the maximum allowed value""" + self._max_value = value + if value is not None: + self._add_validator('max_value', + lambda v: v <= self._max_value, + _('Value is greater than maximum')) + else: + self._remove_validator('max_value') + self._check_definition() + + +# noinspection PyAttributeOutsideInit +class DateTime(declarative.PropertyDefinition): + """A DateTime metadata property + + Maps to a DATETIME columns in database. 
+ Is not supported as Type Specific property, may be used only as Generic one + + May have constraints on value + """ + ALLOWED_TYPES = (datetime.datetime,) + DB_TYPE = 'datetime' + + def __init__(self, min_value=None, max_value=None, **kwargs): + """Defines a DateTime metadata property + + :param min_value: minimum allowed value + :param max_value: maximum allowed value + """ + super(DateTime, self).__init__(**kwargs) + if min_value is not None: + self.min_value(min_value) + + if max_value is not None: + self.max_value(max_value) + + # if default and/or allowed_values are specified (in base classes) + # then we need to validate them against the newly added validators + self._check_definition() + + def min_value(self, value): + """Sets the minimum allowed value""" + self._min_value = value + if value is not None: + self._add_validator('min_value', + lambda v: v >= self._min_value, + _('Value is less than minimum')) + else: + self._remove_validator('min_value') + self._check_definition() + + def max_value(self, value): + """Sets the maximum allowed value""" + self._max_value = value + if value is not None: + self._add_validator('max_value', + lambda v: v <= self._max_value, + _('Value is greater than maximum')) + else: + self._remove_validator('max_value') + self._check_definition() + + +# noinspection PyAttributeOutsideInit +class Numeric(declarative.PropertyDefinition): + """A Numeric metadata property + + Maps to floating point number columns in Database, supports filtering and + sorting. May have constraints on value + """ + ALLOWED_TYPES = numbers.Number + DB_TYPE = 'numeric' + + def __init__(self, min_value=None, max_value=None, **kwargs): + """Defines a Numeric metadata property + + :param min_value: minimum allowed value + :param max_value: maximum allowed value + """ + super(Numeric, self).__init__(**kwargs) + if min_value is not None: + self.min_value(min_value) + + if max_value is not None: + self.max_value(max_value) + + # if default and/or allowed_values are specified (in base classes) + # then we need to validate them against the newly added validators + self._check_definition() + + def min_value(self, value): + """Sets the minimum allowed value""" + self._min_value = value + if value is not None: + self._add_validator('min_value', + lambda v: v >= self._min_value, + _('Value is less than minimum')) + else: + self._remove_validator('min_value') + self._check_definition() + + def max_value(self, value): + """Sets the maximum allowed value""" + self._max_value = value + if value is not None: + self._add_validator('max_value', + lambda v: v <= self._max_value, + _('Value is greater than maximum')) + else: + self._remove_validator('max_value') + self._check_definition() + + +class Boolean(declarative.PropertyDefinition): + """A Boolean metadata property + + Maps to Boolean columns in database. Supports filtering and sorting. + """ + ALLOWED_TYPES = (types.BooleanType,) + DB_TYPE = 'bool' + + +class Array(declarative.ListAttributeDefinition, + declarative.PropertyDefinition, list): + """An array metadata property + + May contain elements of any other PropertyDefinition types except Dict and + Array. Each elements maps to appropriate type of columns in database. + Preserves order. Allows filtering based on "Array contains Value" semantics + + May specify constrains on types of elements, their amount and uniqueness. 
+ """ + ALLOWED_ITEM_TYPES = (declarative.PropertyDefinition,) + + def __init__(self, item_type=String(), min_size=0, max_size=None, + unique=False, extra_items=True, **kwargs): + """Defines an Array metadata property + + :param item_type: defines the types of elements in Array. If set to an + instance of PropertyDefinition then all the elements have to be of that + type. If set to list of such instances, then the elements on the + corresponding positions have to be of the appropriate type. + :param min_size: minimum size of the Array + :param max_size: maximum size of the Array + :param unique: if set to true, all the elements in the Array have to be + unique + """ + if isinstance(item_type, Array): + msg = _("Array property can't have item_type=Array") + raise exc.InvalidArtifactTypePropertyDefinition(msg) + declarative.ListAttributeDefinition.__init__(self, + item_type=item_type, + min_size=min_size, + max_size=max_size, + unique=unique) + declarative.PropertyDefinition.__init__(self, **kwargs) + + +class Dict(declarative.DictAttributeDefinition, + declarative.PropertyDefinition, dict): + """A dictionary metadata property + + May contain elements of any other PropertyDefinition types except Dict. + Each elements maps to appropriate type of columns in database. Allows + filtering and sorting by values of each key except the ones mapping the + Text fields. + + May specify constrains on types of elements and their amount. + """ + ALLOWED_PROPERTY_TYPES = (declarative.PropertyDefinition,) + + def __init__(self, properties=String(), min_properties=0, + max_properties=None, **kwargs): + """Defines a dictionary metadata property + + :param properties: defines the types of dictionary values. If set to an + instance of PropertyDefinition then all the value have to be of that + type. If set to a dictionary with string keys and values of + PropertyDefinition type, then the elements mapped by the corresponding + have have to be of the appropriate type. + :param min_properties: minimum allowed amount of properties in the dict + :param max_properties: maximum allowed amount of properties in the dict + """ + declarative.DictAttributeDefinition. \ + __init__(self, + properties=properties, + min_properties=min_properties, + max_properties=max_properties) + declarative.PropertyDefinition.__init__(self, **kwargs) + + +class ArtifactType(declarative.get_declarative_base()): # noqa + """A base class for all the Artifact Type definitions + + Defines the Generic metadata properties as attributes. 
+ """ + id = String(required=True, readonly=True) + type_name = String(required=True, readonly=True) + type_version = SemVerString(required=True, readonly=True) + name = String(required=True, mutable=False) + version = SemVerString(required=True, mutable=False) + description = Text() + tags = Array(unique=True, default=[]) + visibility = String(required=True, + allowed_values=["private", "public", "shared", + "community"], + default="private") + state = String(required=True, readonly=True, allowed_values=["creating", + "active", + "deactivated", + "deleted"]) + owner = String(required=True, readonly=True) + created_at = DateTime(required=True, readonly=True) + updated_at = DateTime(required=True, readonly=True) + published_at = DateTime(readonly=True) + deleted_at = DateTime(readonly=True) + + def __init__(self, **kwargs): + if "type_name" in kwargs: + raise exc.InvalidArtifactPropertyValue( + _("Unable to specify artifact type explicitly")) + if "type_version" in kwargs: + raise exc.InvalidArtifactPropertyValue( + _("Unable to specify artifact type version explicitly")) + super(ArtifactType, + self).__init__(type_name=self.metadata.type_name, + type_version=self.metadata.type_version, **kwargs) + + def __eq__(self, other): + if not isinstance(other, ArtifactType): + return False + return self.id == other.id + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self.id) + + def __is_mutable__(self): + return self.state == "creating" + + +class ArtifactReference(declarative.RelationDefinition): + """An artifact reference definition + + Allows to define constraints by the name and version of target artifact + """ + ALLOWED_TYPES = ArtifactType + + def __init__(self, type_name=None, type_version=None, **kwargs): + """Defines an artifact reference + + :param type_name: type name of the target artifact + :param type_version: type version of the target artifact + """ + super(ArtifactReference, self).__init__(**kwargs) + if type_name is not None: + if isinstance(type_name, types.ListType): + type_names = list(type_name) + if type_version is not None: + raise exc.InvalidArtifactTypePropertyDefinition( + _('Unable to specify version ' + 'if multiple types are possible')) + else: + type_names = [type_name] + + def validate_reference(artifact): + if artifact.type_name not in type_names: + return False + if (type_version is not None and + artifact.type_version != type_version): + return False + return True + + self._add_validator('referenced_type', + validate_reference, + _("Invalid referenced type")) + elif type_version is not None: + raise exc.InvalidArtifactTypePropertyDefinition( + _('Unable to specify version ' + 'if type is not specified')) + self._check_definition() + + +class ArtifactReferenceList(declarative.ListAttributeDefinition, + declarative.RelationDefinition, list): + """A list of Artifact References + + Allows to define a collection of references to other artifacts, each + optionally constrained by type name and type version + """ + ALLOWED_ITEM_TYPES = (ArtifactReference,) + + def __init__(self, references=ArtifactReference(), min_size=0, + max_size=None, **kwargs): + if isinstance(references, types.ListType): + raise exc.InvalidArtifactTypePropertyDefinition( + _("Invalid reference list specification")) + declarative.RelationDefinition.__init__(self, **kwargs) + declarative.ListAttributeDefinition.__init__(self, + item_type=references, + min_size=min_size, + max_size=max_size, + unique=True, + default=[] + if min_size == 0 else + None) + 
+ +class Blob(object): + """A Binary object being part of the Artifact""" + def __init__(self, size=0, locations=None, checksum=None, item_key=None): + """Initializes a new Binary Object for an Artifact + + :param size: the size of Binary Data + :param locations: a list of data locations in backing stores + :param checksum: a checksum for the data + """ + if locations is None: + locations = [] + self.size = size + self.checksum = checksum + self.locations = locations + self.item_key = item_key + + def to_dict(self): + return { + "size": self.size, + "checksum": self.checksum, + } + + +class BinaryObject(declarative.BlobDefinition, Blob): + """A definition of BinaryObject binding + + Adds a BinaryObject to an Artifact Type, optionally constrained by file + size and amount of locations + """ + ALLOWED_TYPES = (Blob,) + + def __init__(self, + max_file_size=None, + min_file_size=None, + min_locations=None, + max_locations=None, + **kwargs): + """Defines a binary object as part of Artifact Type + :param max_file_size: maximum size of the associate Blob + :param min_file_size: minimum size of the associated Blob + :param min_locations: minimum number of locations in the associated + Blob + :param max_locations: maximum number of locations in the associated + Blob + """ + super(BinaryObject, self).__init__(default=None, readonly=False, + mutable=False, **kwargs) + self._max_file_size = max_file_size + self._min_file_size = min_file_size + self._min_locations = min_locations + self._max_locations = max_locations + + self._add_validator('size_not_empty', + lambda v: v.size is not None, + _('Blob size is not set')) + if max_file_size: + self._add_validator('max_size', + lambda v: v.size <= self._max_file_size, + _("File too large")) + if min_file_size: + self._add_validator('min_size', + lambda v: v.size >= self._min_file_size, + _("File too small")) + if min_locations: + self._add_validator('min_locations', + lambda v: len( + v.locations) >= self._min_locations, + _("Too few locations")) + if max_locations: + self._add_validator( + 'max_locations', + lambda v: len(v.locations) <= self._max_locations, + _("Too many locations")) + + +class BinaryObjectList(declarative.ListAttributeDefinition, + declarative.BlobDefinition, list): + """A definition of binding to the list of BinaryObject + + Adds a list of BinaryObject's to an artifact type, optionally constrained + by the number of objects in the list and their uniqueness + + """ + ALLOWED_ITEM_TYPES = (BinaryObject,) + + def __init__(self, objects=BinaryObject(), min_count=0, max_count=None, + **kwargs): + declarative.BlobDefinition.__init__(self, **kwargs) + declarative.ListAttributeDefinition.__init__(self, + item_type=objects, + min_size=min_count, + max_size=max_count, + unique=True) + self.default = [] if min_count == 0 else None diff --git a/code/daisy/daisy/common/artifacts/loader.py b/code/daisy/daisy/common/artifacts/loader.py new file mode 100755 index 00000000..62154e91 --- /dev/null +++ b/code/daisy/daisy/common/artifacts/loader.py @@ -0,0 +1,195 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo.config import cfg +import semantic_version +from stevedore import enabled + +from daisy.common.artifacts import definitions +from daisy.common import exception +from daisy import i18n +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LW = i18n._LW +_LI = i18n._LI + + +plugins_opts = [ + cfg.BoolOpt('load_enabled', default=True, + help=_('When false, no artifacts can be loaded regardless of' + ' available_plugins. When true, artifacts can be' + ' loaded.')), + cfg.ListOpt('available_plugins', default=[], + help=_('A list of artifacts that are allowed in the' + ' format name or name-version. Empty list means that' + ' any artifact can be loaded.')) +] + + +CONF = cfg.CONF +CONF.register_opts(plugins_opts) + + +class ArtifactsPluginLoader(object): + def __init__(self, namespace): + self.mgr = enabled.EnabledExtensionManager( + check_func=self._gen_check_func(), + namespace=namespace, + propagate_map_exceptions=True, + on_load_failure_callback=self._on_load_failure) + self.plugin_map = {'by_typename': {}, + 'by_endpoint': {}} + + def _add_extention(ext): + """ + Plugins can be loaded as entry_point=single plugin and + entry_point=PLUGIN_LIST, where PLUGIN_LIST is a python variable + holding a list of plugins + """ + def _load_one(plugin): + if issubclass(plugin, definitions.ArtifactType): + # make sure that have correct plugin name + art_name = plugin.metadata.type_name + if art_name != ext.name: + raise exception.ArtifactNonMatchingTypeName( + name=art_name, plugin=ext.name) + # make sure that no plugin with the same name and version + # already exists + exists = self._get_plugins(ext.name) + new_tv = plugin.metadata.type_version + if any(e.metadata.type_version == new_tv for e in exists): + raise exception.ArtifactDuplicateNameTypeVersion() + self._add_plugin("by_endpoint", plugin.metadata.endpoint, + plugin) + self._add_plugin("by_typename", plugin.metadata.type_name, + plugin) + + if isinstance(ext.plugin, list): + for p in ext.plugin: + _load_one(p) + else: + _load_one(ext.plugin) + + # (ivasilevskaya) that looks pretty bad as RuntimeError is too general, + # but stevedore has awful exception wrapping with no specific class + # for this very case (no extensions for given namespace found) + try: + self.mgr.map(_add_extention) + except RuntimeError as re: + LOG.error(_LE("Unable to load artifacts: %s") % re.message) + + def _version(self, artifact): + return semantic_version.Version.coerce(artifact.metadata.type_version) + + def _add_plugin(self, spec, name, plugin): + """ + Inserts a new plugin into a sorted by desc type_version list + of existing plugins in order to retrieve the latest by next() + """ + def _add(name, value): + self.plugin_map[spec][name] = value + + old_order = copy.copy(self._get_plugins(name, spec=spec)) + for i, p in enumerate(old_order): + if self._version(p) < self._version(plugin): + _add(name, old_order[0:i] + [plugin] + old_order[i:]) + return + _add(name, old_order + [plugin]) + + def _get_plugins(self, name, spec="by_typename"): + if spec not in self.plugin_map.keys(): + 
return [] + return self.plugin_map[spec].get(name, []) + + def _gen_check_func(self): + """generates check_func for EnabledExtensionManager""" + + def _all_forbidden(ext): + LOG.warn(_LW("Can't load artifact %s: load disabled in config") % + ext.name) + raise exception.ArtifactLoadError(name=ext.name) + + def _all_allowed(ext): + LOG.info( + _LI("Artifact %s has been successfully loaded") % ext.name) + return True + + if not CONF.load_enabled: + return _all_forbidden + if len(CONF.available_plugins) == 0: + return _all_allowed + + available = [] + for name in CONF.available_plugins: + type_name, version = (name.split('-', 1) + if '-' in name else (name, None)) + available.append((type_name, version)) + + def _check_ext(ext): + try: + next(n for n, v in available + if n == ext.plugin.metadata.type_name and + (v is None or v == ext.plugin.metadata.type_version)) + except StopIteration: + LOG.warn(_LW("Can't load artifact %s: not in" + " available_plugins list") % ext.name) + raise exception.ArtifactLoadError(name=ext.name) + LOG.info( + _LI("Artifact %s has been successfully loaded") % ext.name) + return True + + return _check_ext + + # this has to be done explicitly as stevedore is pretty ignorant when + # face to face with an Exception and tries to swallow it and print sth + # irrelevant instead of expected error message + def _on_load_failure(self, manager, ep, exc): + msg = (_LE("Could not load plugin from %(module)s: %(msg)s") % + {"module": ep.module_name, "msg": exc}) + LOG.error(msg) + raise exc + + def _find_class_in_collection(self, collection, name, version=None): + try: + def _cmp_version(plugin, version): + ver = semantic_version.Version.coerce + return (ver(plugin.metadata.type_version) == + ver(version)) + + if version: + return next((p for p in collection + if _cmp_version(p, version))) + return next((p for p in collection)) + except StopIteration: + raise exception.ArtifactPluginNotFound( + name="%s %s" % (name, "v %s" % version if version else "")) + + def get_class_by_endpoint(self, name, version=None): + if version is None: + classlist = self._get_plugins(name, spec="by_endpoint") + if not classlist: + raise exception.ArtifactPluginNotFound(name=name) + return self._find_class_in_collection(classlist, name) + return self._find_class_in_collection( + self._get_plugins(name, spec="by_endpoint"), name, version) + + def get_class_by_typename(self, name, version=None): + return self._find_class_in_collection( + self._get_plugins(name, spec="by_typename"), name, version) diff --git a/code/daisy/daisy/common/artifacts/serialization.py b/code/daisy/daisy/common/artifacts/serialization.py new file mode 100755 index 00000000..2b791443 --- /dev/null +++ b/code/daisy/daisy/common/artifacts/serialization.py @@ -0,0 +1,264 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
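As a quick orientation for the loader module above, here is a hypothetical usage sketch (not part of the patch). The entry-point namespace and type names are made up, but the calls match the ArtifactsPluginLoader API defined in loader.py, and the available_plugins entries follow the 'name' / 'name-version' format parsed by _gen_check_func().

    from daisy.common.artifacts import loader

    # Stevedore scans the given entry-point namespace (hypothetical here) and
    # registers every ArtifactType plugin that passes the available_plugins
    # filter, e.g. available_plugins = ['MyAppImage', 'MyAppImage-1.1'].
    plugins = loader.ArtifactsPluginLoader('daisy.artifacts.types')

    # newest registered version for the REST endpoint "myappimage"
    cls = plugins.get_class_by_endpoint('myappimage')

    # a specific version, looked up by type name
    cls_v11 = plugins.get_class_by_typename('MyAppImage', version='1.1')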
+ +import six + +from daisy.common.artifacts import declarative +from daisy.common.artifacts import definitions +from daisy.common import exception +from daisy import i18n + + +_ = i18n._ + +COMMON_ARTIFACT_PROPERTIES = ['id', + 'type_name', + 'type_version', + 'name', + 'version', + 'description', + 'visibility', + 'state', + 'tags', + 'owner', + 'created_at', + 'updated_at', + 'published_at', + 'deleted_at'] + + +def _serialize_list_prop(prop, values): + """ + A helper func called to correctly serialize an Array property. + + Returns a dict {'type': some_supported_db_type, 'value': serialized_data} + """ + # FIXME(Due to a potential bug in declarative framework, for Arrays, that + # are values to some dict items (Dict(properties={"foo": Array()})), + # prop.get_value(artifact) returns not the real list of items, but the + # whole dict). So we can't rely on prop.get_value(artifact) and will pass + # correctly retrieved values to this function + serialized_value = [] + for i, val in enumerate(values or []): + db_type = prop.get_item_definition_at_index(i).DB_TYPE + if db_type is None: + continue + serialized_value.append({ + 'type': db_type, + 'value': val + }) + return serialized_value + + +def _serialize_dict_prop(artifact, prop, key, value, save_prop_func): + key_to_save = prop.name + '.' + key + dict_key_prop = prop.get_prop_definition_at_key(key) + db_type = dict_key_prop.DB_TYPE + if (db_type is None and + not isinstance(dict_key_prop, + declarative.ListAttributeDefinition)): + # nothing to do here, don't know how to deal with this type + return + elif isinstance(dict_key_prop, + declarative.ListAttributeDefinition): + serialized = _serialize_list_prop( + dict_key_prop, + # FIXME(see comment for _serialize_list_prop func) + values=(dict_key_prop.get_value(artifact) or {}).get(key, [])) + save_prop_func(key_to_save, 'array', serialized) + else: + save_prop_func(key_to_save, db_type, value) + + +def _serialize_dependencies(artifact): + """Returns a dict of serialized dependencies for given artifact""" + dependencies = {} + for relation in artifact.metadata.attributes.dependencies.values(): + serialized_dependency = [] + if isinstance(relation, declarative.ListAttributeDefinition): + for dep in relation.get_value(artifact): + serialized_dependency.append(dep.id) + else: + relation_data = relation.get_value(artifact) + if relation_data: + serialized_dependency.append(relation.get_value(artifact).id) + dependencies[relation.name] = serialized_dependency + return dependencies + + +def _serialize_blobs(artifact): + """Return a dict of serialized blobs for given artifact""" + blobs = {} + for blob in artifact.metadata.attributes.blobs.values(): + serialized_blob = [] + if isinstance(blob, declarative.ListAttributeDefinition): + for b in blob.get_value(artifact) or []: + serialized_blob.append({ + 'size': b.size, + 'locations': b.locations, + 'checksum': b.checksum, + 'item_key': b.item_key + }) + else: + b = blob.get_value(artifact) + # if no value for blob has been set -> continue + if not b: + continue + serialized_blob.append({ + 'size': b.size, + 'locations': b.locations, + 'checksum': b.checksum, + 'item_key': b.item_key + }) + blobs[blob.name] = serialized_blob + return blobs + + +def serialize_for_db(artifact): + result = {} + custom_properties = {} + + def _save_prop(prop_key, prop_type, value): + custom_properties[prop_key] = { + 'type': prop_type, + 'value': value + } + + for prop in artifact.metadata.attributes.properties.values(): + if prop.name in COMMON_ARTIFACT_PROPERTIES: + 
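+            # Generic (common) properties become top-level fields of the result;
+            # everything else is collected into the custom 'properties' dict below.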
result[prop.name] = prop.get_value(artifact) + continue + if isinstance(prop, declarative.ListAttributeDefinition): + serialized_value = _serialize_list_prop(prop, + prop.get_value(artifact)) + _save_prop(prop.name, 'array', serialized_value) + elif isinstance(prop, declarative.DictAttributeDefinition): + fields_to_set = prop.get_value(artifact) or {} + # if some keys are not present (like in prop == {}), then have to + # set their values to None. + # XXX FIXME prop.properties may be a dict ({'foo': '', 'bar': ''}) + # or String\Integer\whatsoever, limiting the possible dict values. + # In the latter case have no idea how to remove old values during + # serialization process. + if isinstance(prop.properties, dict): + for key in [k for k in prop.properties + if k not in fields_to_set.keys()]: + _serialize_dict_prop(artifact, prop, key, None, _save_prop) + # serialize values of properties present + for key, value in six.iteritems(fields_to_set): + _serialize_dict_prop(artifact, prop, key, value, _save_prop) + elif prop.DB_TYPE is not None: + _save_prop(prop.name, prop.DB_TYPE, prop.get_value(artifact)) + + result['properties'] = custom_properties + result['dependencies'] = _serialize_dependencies(artifact) + result['blobs'] = _serialize_blobs(artifact) + return result + + +def _deserialize_blobs(artifact_type, blobs_from_db, artifact_properties): + """Retrieves blobs from database""" + for blob_name, blob_value in six.iteritems(blobs_from_db): + if not blob_value: + continue + if isinstance(artifact_type.metadata.attributes.blobs.get(blob_name), + declarative.ListAttributeDefinition): + val = [] + for v in blob_value: + b = definitions.Blob(size=v['size'], + locations=v['locations'], + checksum=v['checksum'], + item_key=v['item_key']) + val.append(b) + elif len(blob_value) == 1: + val = definitions.Blob(size=blob_value[0]['size'], + locations=blob_value[0]['locations'], + checksum=blob_value[0]['checksum'], + item_key=blob_value[0]['item_key']) + else: + raise exception.InvalidArtifactPropertyValue( + message=_('Blob %(name)s may not have multiple values'), + name=blob_name) + artifact_properties[blob_name] = val + + +def _deserialize_dependencies(artifact_type, deps_from_db, + artifact_properties, plugins): + """Retrieves dependencies from database""" + for dep_name, dep_value in six.iteritems(deps_from_db): + if not dep_value: + continue + if isinstance( + artifact_type.metadata.attributes.dependencies.get(dep_name), + declarative.ListAttributeDefinition): + val = [] + for v in dep_value: + val.append(deserialize_from_db(v, plugins)) + elif len(dep_value) == 1: + val = deserialize_from_db(dep_value[0], plugins) + else: + raise exception.InvalidArtifactPropertyValue( + message=_('Relation %(name)s may not have multiple values'), + name=dep_name) + artifact_properties[dep_name] = val + + +def deserialize_from_db(db_dict, plugins): + artifact_properties = {} + type_name = None + type_version = None + + for prop_name in COMMON_ARTIFACT_PROPERTIES: + prop_value = db_dict.pop(prop_name, None) + if prop_name == 'type_name': + type_name = prop_value + elif prop_name == 'type_version': + type_version = prop_value + else: + artifact_properties[prop_name] = prop_value + + try: + artifact_type = plugins.get_class_by_typename(type_name, type_version) + except exception.ArtifactPluginNotFound: + raise exception.UnknownArtifactType(name=type_name, + version=type_version) + + type_specific_properties = db_dict.pop('properties', {}) + for prop_name, prop_value in six.iteritems(type_specific_properties): + 
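+        # Each serialized property is a {'type': <db type>, 'value': ...} pair;
+        # dict-based properties were flattened to dotted 'name.key' entries
+        # during serialization.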
prop_type = prop_value.get('type') + prop_value = prop_value.get('value') + if prop_value is None: + continue + if '.' in prop_name: # dict-based property + name, key = prop_name.split('.', 1) + artifact_properties.setdefault(name, {}) + if prop_type == 'array': + artifact_properties[name][key] = [item.get('value') for item in + prop_value] + else: + artifact_properties[name][key] = prop_value + elif prop_type == 'array': # list-based property + artifact_properties[prop_name] = [item.get('value') for item in + prop_value] + else: + artifact_properties[prop_name] = prop_value + + blobs = db_dict.pop('blobs', {}) + _deserialize_blobs(artifact_type, blobs, artifact_properties) + + dependencies = db_dict.pop('dependencies', {}) + _deserialize_dependencies(artifact_type, dependencies, + artifact_properties, plugins) + + return artifact_type(**artifact_properties) diff --git a/code/daisy/daisy/common/auth.py b/code/daisy/daisy/common/auth.py new file mode 100755 index 00000000..b09ee2d5 --- /dev/null +++ b/code/daisy/daisy/common/auth.py @@ -0,0 +1,292 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This auth module is intended to allow OpenStack client-tools to select from a +variety of authentication strategies, including NoAuth (the default), and +Keystone (an identity management system). + + > auth_plugin = AuthPlugin(creds) + + > auth_plugin.authenticate() + + > auth_plugin.auth_token + abcdefg + + > auth_plugin.management_url + http://service_endpoint/ +""" +import httplib2 +from oslo.serialization import jsonutils +from oslo_log import log as logging +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range +import six.moves.urllib.parse as urlparse + +from daisy.common import exception +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ + + +class BaseStrategy(object): + def __init__(self): + self.auth_token = None + # TODO(sirp): Should expose selecting public/internal/admin URL. 
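+        # Filled in by authenticating strategies (e.g. KeystoneStrategy takes it
+        # from the service catalog when configure_via_auth is enabled).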
+ self.management_url = None + + def authenticate(self): + raise NotImplementedError + + @property + def is_authenticated(self): + raise NotImplementedError + + @property + def strategy(self): + raise NotImplementedError + + +class NoAuthStrategy(BaseStrategy): + def authenticate(self): + pass + + @property + def is_authenticated(self): + return True + + @property + def strategy(self): + return 'noauth' + + +class KeystoneStrategy(BaseStrategy): + MAX_REDIRECTS = 10 + + def __init__(self, creds, insecure=False, configure_via_auth=True): + self.creds = creds + self.insecure = insecure + self.configure_via_auth = configure_via_auth + super(KeystoneStrategy, self).__init__() + + def check_auth_params(self): + # Ensure that supplied credential parameters are as required + for required in ('username', 'password', 'auth_url', + 'strategy'): + if self.creds.get(required) is None: + raise exception.MissingCredentialError(required=required) + if self.creds['strategy'] != 'keystone': + raise exception.BadAuthStrategy(expected='keystone', + received=self.creds['strategy']) + # For v2.0 also check tenant is present + if self.creds['auth_url'].rstrip('/').endswith('v2.0'): + if self.creds.get("tenant") is None: + raise exception.MissingCredentialError(required='tenant') + + def authenticate(self): + """Authenticate with the Keystone service. + + There are a few scenarios to consider here: + + 1. Which version of Keystone are we using? v1 which uses headers to + pass the credentials, or v2 which uses a JSON encoded request body? + + 2. Keystone may respond back with a redirection using a 305 status + code. + + 3. We may attempt a v1 auth when v2 is what's called for. In this + case, we rewrite the url to contain /v2.0/ and retry using the v2 + protocol. + """ + def _authenticate(auth_url): + # If OS_AUTH_URL is missing a trailing slash add one + if not auth_url.endswith('/'): + auth_url += '/' + token_url = urlparse.urljoin(auth_url, "tokens") + # 1. Check Keystone version + is_v2 = auth_url.rstrip('/').endswith('v2.0') + if is_v2: + self._v2_auth(token_url) + else: + self._v1_auth(token_url) + + self.check_auth_params() + auth_url = self.creds['auth_url'] + for _ in range(self.MAX_REDIRECTS): + try: + _authenticate(auth_url) + except exception.AuthorizationRedirect as e: + # 2. Keystone may redirect us + auth_url = e.url + except exception.AuthorizationFailure: + # 3. In some configurations nova makes redirection to + # v2.0 keystone endpoint. Also, new location does not + # contain real endpoint, only hostname and port. + if 'v2.0' not in auth_url: + auth_url = urlparse.urljoin(auth_url, 'v2.0/') + else: + # If we successfully auth'd, then memorize the correct auth_url + # for future use. 
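# --- Illustrative usage sketch (editor's note, not part of this patch) ------
# How a caller drives KeystoneStrategy above: for a v2.0 auth_url the tenant
# is required, authenticate() follows 305 redirects / v2.0 rewrites up to
# MAX_REDIRECTS, and afterwards the token and catalog-derived endpoint are
# available. Values below are placeholders and need a live Keystone to run.
# from daisy.common import auth
#
# creds = {'strategy': 'keystone',
#          'username': 'admin',
#          'password': 'secret',
#          'tenant': 'admin',
#          'auth_url': 'http://127.0.0.1:5000/v2.0/'}
# plugin = auth.KeystoneStrategy(creds)
# plugin.authenticate()
# token = plugin.auth_token          # sent later as 'x-auth-token'
# endpoint = plugin.management_url   # image endpoint from the service catalog
# -----------------------------------------------------------------------------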
+ self.creds['auth_url'] = auth_url + break + else: + # Guard against a redirection loop + raise exception.MaxRedirectsExceeded(redirects=self.MAX_REDIRECTS) + + def _v1_auth(self, token_url): + creds = self.creds + + headers = {} + headers['X-Auth-User'] = creds['username'] + headers['X-Auth-Key'] = creds['password'] + + tenant = creds.get('tenant') + if tenant: + headers['X-Auth-Tenant'] = tenant + + resp, resp_body = self._do_request(token_url, 'GET', headers=headers) + + def _management_url(self, resp): + for url_header in ('x-image-management-url', + 'x-server-management-url', + 'x-glance'): + try: + return resp[url_header] + except KeyError as e: + not_found = e + raise not_found + + if resp.status in (200, 204): + try: + if self.configure_via_auth: + self.management_url = _management_url(self, resp) + self.auth_token = resp['x-auth-token'] + except KeyError: + raise exception.AuthorizationFailure() + elif resp.status == 305: + raise exception.AuthorizationRedirect(uri=resp['location']) + elif resp.status == 400: + raise exception.AuthBadRequest(url=token_url) + elif resp.status == 401: + raise exception.NotAuthenticated() + elif resp.status == 404: + raise exception.AuthUrlNotFound(url=token_url) + else: + raise Exception(_('Unexpected response: %s') % resp.status) + + def _v2_auth(self, token_url): + + creds = self.creds + + creds = { + "auth": { + "tenantName": creds['tenant'], + "passwordCredentials": { + "username": creds['username'], + "password": creds['password'] + } + } + } + + headers = {} + headers['Content-Type'] = 'application/json' + req_body = jsonutils.dumps(creds) + + resp, resp_body = self._do_request( + token_url, 'POST', headers=headers, body=req_body) + + if resp.status == 200: + resp_auth = jsonutils.loads(resp_body)['access'] + creds_region = self.creds.get('region') + if self.configure_via_auth: + endpoint = get_endpoint(resp_auth['serviceCatalog'], + endpoint_region=creds_region) + self.management_url = endpoint + self.auth_token = resp_auth['token']['id'] + elif resp.status == 305: + raise exception.RedirectException(resp['location']) + elif resp.status == 400: + raise exception.AuthBadRequest(url=token_url) + elif resp.status == 401: + raise exception.NotAuthenticated() + elif resp.status == 404: + raise exception.AuthUrlNotFound(url=token_url) + else: + raise Exception(_('Unexpected response: %s') % resp.status) + + @property + def is_authenticated(self): + return self.auth_token is not None + + @property + def strategy(self): + return 'keystone' + + def _do_request(self, url, method, headers=None, body=None): + headers = headers or {} + conn = httplib2.Http() + conn.force_exception_to_status_code = True + conn.disable_ssl_certificate_validation = self.insecure + headers['User-Agent'] = 'glance-client' + resp, resp_body = conn.request(url, method, headers=headers, body=body) + return resp, resp_body + + +def get_plugin_from_strategy(strategy, creds=None, insecure=False, + configure_via_auth=True): + if strategy == 'noauth': + return NoAuthStrategy() + elif strategy == 'keystone': + return KeystoneStrategy(creds, insecure, + configure_via_auth=configure_via_auth) + else: + raise Exception(_("Unknown auth strategy '%s'") % strategy) + + +def get_endpoint(service_catalog, service_type='image', endpoint_region=None, + endpoint_type='publicURL'): + """ + Select an endpoint from the service catalog + + We search the full service catalog for services + matching both type and region. 
If the client + supplied no region then any 'image' endpoint + is considered a match. There must be one -- and + only one -- successful match in the catalog, + otherwise we will raise an exception. + """ + endpoint = None + for service in service_catalog: + s_type = None + try: + s_type = service['type'] + except KeyError: + msg = _('Encountered service with no "type": %s') % s_type + LOG.warn(msg) + continue + + if s_type == service_type: + for ep in service['endpoints']: + if endpoint_region is None or endpoint_region == ep['region']: + if endpoint is not None: + # This is a second match, abort + raise exception.RegionAmbiguity(region=endpoint_region) + endpoint = ep + if endpoint and endpoint.get(endpoint_type): + return endpoint[endpoint_type] + else: + raise exception.NoServiceEndpoint() diff --git a/code/daisy/daisy/common/client.py b/code/daisy/daisy/common/client.py new file mode 100755 index 00000000..1d18b0ca --- /dev/null +++ b/code/daisy/daisy/common/client.py @@ -0,0 +1,594 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# HTTPSClientAuthConnection code comes courtesy of ActiveState website: +# http://code.activestate.com/recipes/ +# 577548-https-httplib-client-connection-with-certificate-v/ + +import collections +import copy +import errno +import functools +import httplib +import os +import re + +try: + from eventlet.green import socket + from eventlet.green import ssl +except ImportError: + import socket + import ssl + +import osprofiler.web + +try: + import sendfile # noqa + SENDFILE_SUPPORTED = True +except ImportError: + SENDFILE_SUPPORTED = False + +from oslo_log import log as logging +from oslo_utils import encodeutils +import six +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range +import six.moves.urllib.parse as urlparse + +from daisy.common import auth +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ + +# common chunk size for get and put +CHUNKSIZE = 65536 + +VERSION_REGEX = re.compile(r"/?v[0-9\.]+") + + +def handle_unauthenticated(func): + """ + Wrap a function to re-authenticate and retry. + """ + @functools.wraps(func) + def wrapped(self, *args, **kwargs): + try: + return func(self, *args, **kwargs) + except exception.NotAuthenticated: + self._authenticate(force_reauth=True) + return func(self, *args, **kwargs) + return wrapped + + +def handle_redirects(func): + """ + Wrap the _do_request function to handle HTTP redirects. 
+ """ + MAX_REDIRECTS = 5 + + @functools.wraps(func) + def wrapped(self, method, url, body, headers): + for _ in range(MAX_REDIRECTS): + try: + return func(self, method, url, body, headers) + except exception.RedirectException as redirect: + if redirect.url is None: + raise exception.InvalidRedirect() + url = redirect.url + raise exception.MaxRedirectsExceeded(redirects=MAX_REDIRECTS) + return wrapped + + +class HTTPSClientAuthConnection(httplib.HTTPSConnection): + """ + Class to make a HTTPS connection, with support for + full client-based SSL Authentication + + :see http://code.activestate.com/recipes/ + 577548-https-httplib-client-connection-with-certificate-v/ + """ + + def __init__(self, host, port, key_file, cert_file, + ca_file, timeout=None, insecure=False): + httplib.HTTPSConnection.__init__(self, host, port, key_file=key_file, + cert_file=cert_file) + self.key_file = key_file + self.cert_file = cert_file + self.ca_file = ca_file + self.timeout = timeout + self.insecure = insecure + + def connect(self): + """ + Connect to a host on a given (SSL) port. + If ca_file is pointing somewhere, use it to check Server Certificate. + + Redefined/copied and extended from httplib.py:1105 (Python 2.6.x). + This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter to + ssl.wrap_socket(), which forces SSL to check server certificate against + our client certificate. + """ + sock = socket.create_connection((self.host, self.port), self.timeout) + if self._tunnel_host: + self.sock = sock + self._tunnel() + # Check CA file unless 'insecure' is specificed + if self.insecure is True: + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + cert_reqs=ssl.CERT_NONE) + else: + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, + ca_certs=self.ca_file, + cert_reqs=ssl.CERT_REQUIRED) + + +class BaseClient(object): + + """A base client class""" + + DEFAULT_PORT = 80 + DEFAULT_DOC_ROOT = None + # Standard CA file locations for Debian/Ubuntu, RedHat/Fedora, + # Suse, FreeBSD/OpenBSD + DEFAULT_CA_FILE_PATH = ('/etc/ssl/certs/ca-certificates.crt:' + '/etc/pki/tls/certs/ca-bundle.crt:' + '/etc/ssl/ca-bundle.pem:' + '/etc/ssl/cert.pem') + + OK_RESPONSE_CODES = ( + httplib.OK, + httplib.CREATED, + httplib.ACCEPTED, + httplib.NO_CONTENT, + ) + + REDIRECT_RESPONSE_CODES = ( + httplib.MOVED_PERMANENTLY, + httplib.FOUND, + httplib.SEE_OTHER, + httplib.USE_PROXY, + httplib.TEMPORARY_REDIRECT, + ) + + def __init__(self, host, port=None, timeout=None, use_ssl=False, + auth_token=None, creds=None, doc_root=None, key_file=None, + cert_file=None, ca_file=None, insecure=False, + configure_via_auth=True): + """ + Creates a new client to some service. + + :param host: The host where service resides + :param port: The port where service resides + :param timeout: Connection timeout. + :param use_ssl: Should we use HTTPS? + :param auth_token: The auth token to pass to the server + :param creds: The credentials to pass to the auth plugin + :param doc_root: Prefix for all URLs we request from host + :param key_file: Optional PEM-formatted file that contains the private + key. + If use_ssl is True, and this param is None (the + default), then an environ variable + GLANCE_CLIENT_KEY_FILE is looked for. If no such + environ variable is found, ClientConnectionError + will be raised. + :param cert_file: Optional PEM-formatted certificate chain file. + If use_ssl is True, and this param is None (the + default), then an environ variable + GLANCE_CLIENT_CERT_FILE is looked for. 
If no such + environ variable is found, ClientConnectionError + will be raised. + :param ca_file: Optional CA cert file to use in SSL connections + If use_ssl is True, and this param is None (the + default), then an environ variable + GLANCE_CLIENT_CA_FILE is looked for. + :param insecure: Optional. If set then the server's certificate + will not be verified. + :param configure_via_auth: Optional. Defaults to True. If set, the + URL returned from the service catalog for the image + endpoint will **override** the URL supplied to in + the host parameter. + """ + self.host = host + self.port = port or self.DEFAULT_PORT + self.timeout = timeout + # A value of '0' implies never timeout + if timeout == 0: + self.timeout = None + self.use_ssl = use_ssl + self.auth_token = auth_token + self.creds = creds or {} + self.connection = None + self.configure_via_auth = configure_via_auth + # doc_root can be a nullstring, which is valid, and why we + # cannot simply do doc_root or self.DEFAULT_DOC_ROOT below. + self.doc_root = (doc_root if doc_root is not None + else self.DEFAULT_DOC_ROOT) + + self.key_file = key_file + self.cert_file = cert_file + self.ca_file = ca_file + self.insecure = insecure + self.auth_plugin = self.make_auth_plugin(self.creds, self.insecure) + self.connect_kwargs = self.get_connect_kwargs() + + def get_connect_kwargs(self): + connect_kwargs = {} + + # Both secure and insecure connections have a timeout option + connect_kwargs['timeout'] = self.timeout + + if self.use_ssl: + if self.key_file is None: + self.key_file = os.environ.get('GLANCE_CLIENT_KEY_FILE') + if self.cert_file is None: + self.cert_file = os.environ.get('GLANCE_CLIENT_CERT_FILE') + if self.ca_file is None: + self.ca_file = os.environ.get('GLANCE_CLIENT_CA_FILE') + + # Check that key_file/cert_file are either both set or both unset + if self.cert_file is not None and self.key_file is None: + msg = _("You have selected to use SSL in connecting, " + "and you have supplied a cert, " + "however you have failed to supply either a " + "key_file parameter or set the " + "GLANCE_CLIENT_KEY_FILE environ variable") + raise exception.ClientConnectionError(msg) + + if self.key_file is not None and self.cert_file is None: + msg = _("You have selected to use SSL in connecting, " + "and you have supplied a key, " + "however you have failed to supply either a " + "cert_file parameter or set the " + "GLANCE_CLIENT_CERT_FILE environ variable") + raise exception.ClientConnectionError(msg) + + if (self.key_file is not None and + not os.path.exists(self.key_file)): + msg = _("The key file you specified %s does not " + "exist") % self.key_file + raise exception.ClientConnectionError(msg) + connect_kwargs['key_file'] = self.key_file + + if (self.cert_file is not None and + not os.path.exists(self.cert_file)): + msg = _("The cert file you specified %s does not " + "exist") % self.cert_file + raise exception.ClientConnectionError(msg) + connect_kwargs['cert_file'] = self.cert_file + + if (self.ca_file is not None and + not os.path.exists(self.ca_file)): + msg = _("The CA file you specified %s does not " + "exist") % self.ca_file + raise exception.ClientConnectionError(msg) + + if self.ca_file is None: + for ca in self.DEFAULT_CA_FILE_PATH.split(":"): + if os.path.exists(ca): + self.ca_file = ca + break + + connect_kwargs['ca_file'] = self.ca_file + connect_kwargs['insecure'] = self.insecure + + return connect_kwargs + + def configure_from_url(self, url): + """ + Setups the connection based on the given url. 
+ + The form is: + + ://:port/doc_root + """ + LOG.debug("Configuring from URL: %s", url) + parsed = urlparse.urlparse(url) + self.use_ssl = parsed.scheme == 'https' + self.host = parsed.hostname + self.port = parsed.port or 80 + self.doc_root = parsed.path.rstrip('/') + + # We need to ensure a version identifier is appended to the doc_root + if not VERSION_REGEX.match(self.doc_root): + if self.DEFAULT_DOC_ROOT: + doc_root = self.DEFAULT_DOC_ROOT.lstrip('/') + self.doc_root += '/' + doc_root + msg = ("Appending doc_root %(doc_root)s to URL %(url)s" % + {'doc_root': doc_root, 'url': url}) + LOG.debug(msg) + + # ensure connection kwargs are re-evaluated after the service catalog + # publicURL is parsed for potential SSL usage + self.connect_kwargs = self.get_connect_kwargs() + + def make_auth_plugin(self, creds, insecure): + """ + Returns an instantiated authentication plugin. + """ + strategy = creds.get('strategy', 'noauth') + plugin = auth.get_plugin_from_strategy(strategy, creds, insecure, + self.configure_via_auth) + return plugin + + def get_connection_type(self): + """ + Returns the proper connection type + """ + if self.use_ssl: + return HTTPSClientAuthConnection + else: + return httplib.HTTPConnection + + def _authenticate(self, force_reauth=False): + """ + Use the authentication plugin to authenticate and set the auth token. + + :param force_reauth: For re-authentication to bypass cache. + """ + auth_plugin = self.auth_plugin + + if not auth_plugin.is_authenticated or force_reauth: + auth_plugin.authenticate() + + self.auth_token = auth_plugin.auth_token + + management_url = auth_plugin.management_url + if management_url and self.configure_via_auth: + self.configure_from_url(management_url) + + @handle_unauthenticated + def do_request(self, method, action, body=None, headers=None, + params=None): + """ + Make a request, returning an HTTP response object. + + :param method: HTTP verb (GET, POST, PUT, etc.) + :param action: Requested path to append to self.doc_root + :param body: Data to send in the body of the request + :param headers: Headers to send with the request + :param params: Key/value pairs to use in query string + :returns: HTTP response object + """ + if not self.auth_token: + self._authenticate() + + url = self._construct_url(action, params) + # NOTE(ameade): We need to copy these kwargs since they can be altered + # in _do_request but we need the originals if handle_unauthenticated + # calls this function again. + return self._do_request(method=method, url=url, + body=copy.deepcopy(body), + headers=copy.deepcopy(headers)) + + def _construct_url(self, action, params=None): + """ + Create a URL object we can use to pass to _do_request(). + """ + action = urlparse.quote(action) + path = '/'.join([self.doc_root or '', action.lstrip('/')]) + scheme = "https" if self.use_ssl else "http" + netloc = "%s:%d" % (self.host, self.port) + + if isinstance(params, dict): + for (key, value) in params.items(): + if value is None: + del params[key] + continue + if not isinstance(value, six.string_types): + value = str(value) + params[key] = encodeutils.safe_encode(value) + query = urlparse.urlencode(params) + else: + query = None + + url = urlparse.ParseResult(scheme, netloc, path, '', query, '') + log_msg = _("Constructed URL: %s") + LOG.debug(log_msg, url.geturl()) + return url + + def _encode_headers(self, headers): + """ + Encodes headers. + + Note: This should be used right before + sending anything out. 
+ + :param headers: Headers to encode + :returns: Dictionary with encoded headers' + names and values + """ + to_str = encodeutils.safe_encode + return dict([(to_str(h), to_str(v)) for h, v in + six.iteritems(headers)]) + + @handle_redirects + def _do_request(self, method, url, body, headers): + """ + Connects to the server and issues a request. Handles converting + any returned HTTP error status codes to OpenStack/Glance exceptions + and closing the server connection. Returns the result data, or + raises an appropriate exception. + + :param method: HTTP method ("GET", "POST", "PUT", etc...) + :param url: urlparse.ParsedResult object with URL information + :param body: data to send (as string, filelike or iterable), + or None (default) + :param headers: mapping of key/value pairs to add as headers + + :note + + If the body param has a read attribute, and method is either + POST or PUT, this method will automatically conduct a chunked-transfer + encoding and use the body as a file object or iterable, transferring + chunks of data using the connection's send() method. This allows large + objects to be transferred efficiently without buffering the entire + body in memory. + """ + if url.query: + path = url.path + "?" + url.query + else: + path = url.path + + try: + connection_type = self.get_connection_type() + headers = self._encode_headers(headers or {}) + headers.update(osprofiler.web.get_trace_id_headers()) + + if 'x-auth-token' not in headers and self.auth_token: + headers['x-auth-token'] = self.auth_token + + c = connection_type(url.hostname, url.port, **self.connect_kwargs) + + def _pushing(method): + return method.lower() in ('post', 'put') + + def _simple(body): + return body is None or isinstance(body, six.string_types) + + def _filelike(body): + return hasattr(body, 'read') + + def _sendbody(connection, iter): + connection.endheaders() + for sent in iter: + # iterator has done the heavy lifting + pass + + def _chunkbody(connection, iter): + connection.putheader('Transfer-Encoding', 'chunked') + connection.endheaders() + for chunk in iter: + connection.send('%x\r\n%s\r\n' % (len(chunk), chunk)) + connection.send('0\r\n\r\n') + + # Do a simple request or a chunked request, depending + # on whether the body param is file-like or iterable and + # the method is PUT or POST + # + if not _pushing(method) or _simple(body): + # Simple request... + c.request(method, path, body, headers) + elif _filelike(body) or self._iterable(body): + c.putrequest(method, path) + + use_sendfile = self._sendable(body) + + # According to HTTP/1.1, Content-Length and Transfer-Encoding + # conflict. 
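# --- Illustrative sketch (editor's note, not part of this patch) ------------
# _chunkbody() above emits HTTP/1.1 chunked transfer-encoding: each chunk is
# sent as "<hex length>\r\n<data>\r\n" and the body is terminated by
# "0\r\n\r\n". A standalone rendition of that framing for a list of chunks:
def frame_chunks(chunks):
    wire = ''
    for chunk in chunks:
        wire += '%x\r\n%s\r\n' % (len(chunk), chunk)
    return wire + '0\r\n\r\n'

# frame_chunks(['hello', 'world!'])
# -> '5\r\nhello\r\n6\r\nworld!\r\n0\r\n\r\n'
# -----------------------------------------------------------------------------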
+ for header, value in headers.items(): + if use_sendfile or header.lower() != 'content-length': + c.putheader(header, str(value)) + + iter = utils.chunkreadable(body) + + if use_sendfile: + # send actual file without copying into userspace + _sendbody(c, iter) + else: + # otherwise iterate and chunk + _chunkbody(c, iter) + else: + raise TypeError('Unsupported image type: %s' % body.__class__) + + res = c.getresponse() + + def _retry(res): + return res.getheader('Retry-After') + + status_code = self.get_status_code(res) + if status_code in self.OK_RESPONSE_CODES: + return res + elif status_code in self.REDIRECT_RESPONSE_CODES: + raise exception.RedirectException(res.getheader('Location')) + elif status_code == httplib.UNAUTHORIZED: + raise exception.NotAuthenticated(res.read()) + elif status_code == httplib.FORBIDDEN: + raise exception.Forbidden(res.read()) + elif status_code == httplib.NOT_FOUND: + raise exception.NotFound(res.read()) + elif status_code == httplib.CONFLICT: + raise exception.Duplicate(res.read()) + elif status_code == httplib.BAD_REQUEST: + raise exception.Invalid(res.read()) + elif status_code == httplib.MULTIPLE_CHOICES: + raise exception.MultipleChoices(body=res.read()) + elif status_code == httplib.REQUEST_ENTITY_TOO_LARGE: + raise exception.LimitExceeded(retry=_retry(res), + body=res.read()) + elif status_code == httplib.INTERNAL_SERVER_ERROR: + raise exception.ServerError() + elif status_code == httplib.SERVICE_UNAVAILABLE: + raise exception.ServiceUnavailable(retry=_retry(res)) + else: + raise exception.UnexpectedStatus(status=status_code, + body=res.read()) + + except (socket.error, IOError) as e: + raise exception.ClientConnectionError(e) + + def _seekable(self, body): + # pipes are not seekable, avoids sendfile() failure on e.g. + # cat /path/to/image | glance add ... + # or where add command is launched via popen + try: + os.lseek(body.fileno(), 0, os.SEEK_CUR) + return True + except OSError as e: + return (e.errno != errno.ESPIPE) + + def _sendable(self, body): + return (SENDFILE_SUPPORTED and + hasattr(body, 'fileno') and + self._seekable(body) and + not self.use_ssl) + + def _iterable(self, body): + return isinstance(body, collections.Iterable) + + def get_status_code(self, response): + """ + Returns the integer status code from the response, which + can be either a Webob.Response (used in testing) or httplib.Response + """ + if hasattr(response, 'status_int'): + return response.status_int + else: + return response.status + + def _extract_params(self, actual_params, allowed_params): + """ + Extract a subset of keys from a dictionary. The filters key + will also be extracted, and each of its values will be returned + as an individual param. + + :param actual_params: dict of keys to filter + :param allowed_params: list of keys that 'actual_params' will be + reduced to + :retval subset of 'params' dict + """ + try: + # expect 'filters' param to be a dict here + result = dict(actual_params.get('filters')) + except TypeError: + result = {} + + for allowed_param in allowed_params: + if allowed_param in actual_params: + result[allowed_param] = actual_params[allowed_param] + + return result diff --git a/code/daisy/daisy/common/config.py b/code/daisy/daisy/common/config.py new file mode 100755 index 00000000..2b910be9 --- /dev/null +++ b/code/daisy/daisy/common/config.py @@ -0,0 +1,281 @@ + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for configuring Glance +""" + +import logging +import logging.config +import logging.handlers +import os +import tempfile + +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_policy import policy +from paste import deploy + +from daisy import i18n +from daisy.version import version_info as version + +_ = i18n._ + +paste_deploy_opts = [ + cfg.StrOpt('flavor', + help=_('Partial name of a pipeline in your paste configuration ' + 'file with the service name removed. For example, if ' + 'your paste section name is ' + '[pipeline:glance-api-keystone] use the value ' + '"keystone"')), + cfg.StrOpt('config_file', + help=_('Name of the paste configuration file.')), +] +image_format_opts = [ + cfg.ListOpt('container_formats', + default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'], + help=_("Supported values for the 'container_format' " + "image attribute"), + deprecated_opts=[cfg.DeprecatedOpt('container_formats', + group='DEFAULT')]), + cfg.ListOpt('disk_formats', + default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', + 'vdi', 'iso'], + help=_("Supported values for the 'disk_format' " + "image attribute"), + deprecated_opts=[cfg.DeprecatedOpt('disk_formats', + group='DEFAULT')]), +] +task_opts = [ + cfg.IntOpt('task_time_to_live', + default=48, + help=_("Time in hours for which a task lives after, either " + "succeeding or failing"), + deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live', + group='DEFAULT')]), + cfg.StrOpt('task_executor', + default='taskflow', + help=_("Specifies which task executor to be used to run the " + "task scripts.")), + cfg.StrOpt('work_dir', + default=None, + help=_('Work dir for asynchronous task operations. ' + 'The directory set here will be used to operate over ' + 'images - normally before they are imported in the ' + 'destination store. When providing work dir, make sure ' + 'enough space is provided for concurrent tasks to run ' + 'efficiently without running out of space. A rough ' + 'estimation can be done by multiplying the number of ' + '`max_workers` - or the N of workers running - by an ' + 'average image size (e.g 500MB). The image size ' + 'estimation should be done based on the average size in ' + 'your deployment. Note that depending on the tasks ' + 'running you may need to multiply this number by some ' + 'factor depending on what the task does. For example, ' + 'you may want to double the available size if image ' + 'conversion is enabled. All this being said, remember ' + 'these are just estimations and you should do them ' + 'based on the worst case scenario and be prepared to ' + 'act in case they were wrong.')), +] +common_opts = [ + cfg.BoolOpt('allow_additional_image_properties', default=True, + help=_('Whether to allow users to specify image properties ' + 'beyond what the image schema provides')), + cfg.IntOpt('image_member_quota', default=128, + help=_('Maximum number of image members per image. 
' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_property_quota', default=128, + help=_('Maximum number of properties allowed on an image. ' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_tag_quota', default=128, + help=_('Maximum number of tags allowed on an image. ' + 'Negative values evaluate to unlimited.')), + cfg.IntOpt('image_location_quota', default=10, + help=_('Maximum number of locations allowed on an image. ' + 'Negative values evaluate to unlimited.')), + cfg.StrOpt('data_api', default='daisy.db.sqlalchemy.api', + help=_('Python module path of data access API')), + cfg.IntOpt('limit_param_default', default=25, + help=_('Default value for the number of items returned by a ' + 'request if not specified explicitly in the request')), + cfg.IntOpt('api_limit_max', default=1000, + help=_('Maximum permissible number of items that could be ' + 'returned by a request')), + cfg.BoolOpt('show_image_direct_url', default=False, + help=_('Whether to include the backend image storage location ' + 'in image properties. Revealing storage location can ' + 'be a security risk, so use this setting with ' + 'caution!')), + cfg.BoolOpt('show_multiple_locations', default=False, + help=_('Whether to include the backend image locations ' + 'in image properties. ' + 'For example, if using the file system store a URL of ' + '"file:///path/to/image" will be returned to the user ' + 'in the \'direct_url\' meta-data field. ' + 'Revealing storage location can ' + 'be a security risk, so use this setting with ' + 'caution! The overrides show_image_direct_url.')), + cfg.IntOpt('image_size_cap', default=1099511627776, + help=_("Maximum size of image a user can upload in bytes. " + "Defaults to 1099511627776 bytes (1 TB)." + "WARNING: this value should only be increased after " + "careful consideration and must be set to a value under " + "8 EB (9223372036854775808).")), + cfg.StrOpt('user_storage_quota', default='0', + help=_("Set a system wide quota for every user. This value is " + "the total capacity that a user can use across " + "all storage systems. A value of 0 means unlimited." + "Optional unit can be specified for the value. Accepted " + "units are B, KB, MB, GB and TB representing " + "Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes " + "respectively. If no unit is specified then Bytes is " + "assumed. Note that there should not be any space " + "between value and unit and units are case sensitive.")), + cfg.BoolOpt('enable_v1_api', default=True, + help=_("Deploy the v1 OpenStack Images API.")), + cfg.BoolOpt('enable_v2_api', default=True, + help=_("Deploy the v2 OpenStack Images API.")), + cfg.BoolOpt('enable_v1_registry', default=True, + help=_("Deploy the v1 OpenStack Registry API.")), + cfg.BoolOpt('enable_v2_registry', default=True, + help=_("Deploy the v2 OpenStack Registry API.")), + cfg.StrOpt('pydev_worker_debug_host', + help=_('The hostname/IP of the pydev process listening for ' + 'debug connections')), + cfg.IntOpt('pydev_worker_debug_port', default=5678, + help=_('The port on which a pydev process is listening for ' + 'connections.')), + cfg.StrOpt('metadata_encryption_key', secret=True, + help=_('AES key for encrypting store \'location\' metadata. ' + 'This includes, if used, Swift or S3 credentials. 
' + 'Should be set to a random string of length 16, 24 or ' + '32 bytes')), + cfg.StrOpt('digest_algorithm', default='sha1', + help=_('Digest algorithm which will be used for digital ' + 'signature; the default is sha1 the default in Kilo ' + 'for a smooth upgrade process, and it will be updated ' + 'with sha256 in next release(L). Use the command ' + '"openssl list-message-digest-algorithms" to get the ' + 'available algorithms supported by the version of ' + 'OpenSSL on the platform. Examples are "sha1", ' + '"sha256", "sha512", etc.')), +] + +CONF = cfg.CONF +CONF.register_opts(paste_deploy_opts, group='paste_deploy') +CONF.register_opts(image_format_opts, group='image_format') +CONF.register_opts(task_opts, group='task') +CONF.register_opts(common_opts) +policy.Enforcer(CONF) + + +def parse_args(args=None, usage=None, default_config_files=None): + if "OSLO_LOCK_PATH" not in os.environ: + lockutils.set_defaults(tempfile.gettempdir()) + + CONF(args=args, + project='daisy', + version=version.cached_version_string(), + usage=usage, + default_config_files=default_config_files) + + +def parse_cache_args(args=None): + config_files = cfg.find_config_files(project='daisy', prog='daisy-cache') + parse_args(args=args, default_config_files=config_files) + + +def _get_deployment_flavor(flavor=None): + """ + Retrieve the paste_deploy.flavor config item, formatted appropriately + for appending to the application name. + + :param flavor: if specified, use this setting rather than the + paste_deploy.flavor configuration setting + """ + if not flavor: + flavor = CONF.paste_deploy.flavor + return '' if not flavor else ('-' + flavor) + + +def _get_paste_config_path(): + paste_suffix = '-paste.ini' + conf_suffix = '.conf' + if CONF.config_file: + # Assume paste config is in a paste.ini file corresponding + # to the last config file + path = CONF.config_file[-1].replace(conf_suffix, paste_suffix) + else: + path = CONF.prog + paste_suffix + return CONF.find_file(os.path.basename(path)) + + +def _get_deployment_config_file(): + """ + Retrieve the deployment_config_file config item, formatted as an + absolute pathname. + """ + path = CONF.paste_deploy.config_file + if not path: + path = _get_paste_config_path() + if not path: + msg = _("Unable to locate paste config file for %s.") % CONF.prog + raise RuntimeError(msg) + return os.path.abspath(path) + + +def load_paste_app(app_name, flavor=None, conf_file=None): + """ + Builds and returns a WSGI app from a paste config file. + + We assume the last config file specified in the supplied ConfigOpts + object is the paste config file, if conf_file is None. + + :param app_name: name of the application to load + :param flavor: name of the variant of the application to load + :param conf_file: path to the paste config file + + :raises RuntimeError when config file cannot be located or application + cannot be loaded from config file + """ + # append the deployment flavor to the application name, + # in order to identify the appropriate paste pipeline + app_name += _get_deployment_flavor(flavor) + + if not conf_file: + conf_file = _get_deployment_config_file() + + try: + logger = logging.getLogger(__name__) + logger.debug("Loading %(app_name)s from %(conf_file)s", + {'conf_file': conf_file, 'app_name': app_name}) + + app = deploy.loadapp("config:%s" % conf_file, name=app_name) + + # Log the options used when starting if we're in debug mode... 
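# --- Illustrative sketch (editor's note, not part of this patch) ------------
# _get_deployment_flavor() and _get_paste_config_path() above boil down to two
# naming rules: the flavor is appended to the paste app name, and the paste
# .ini file is derived from the last .conf file on the command line. A
# standalone rendition (file and app names below are examples, not defaults):
def deployment_app_name(app_name, flavor=None):
    return app_name + ('' if not flavor else '-' + flavor)

def paste_ini_for(conf_file):
    return conf_file.replace('.conf', '-paste.ini')

# deployment_app_name('daisy-api', 'keystone')  -> 'daisy-api-keystone'
# paste_ini_for('/etc/daisy/daisy-api.conf')    -> '/etc/daisy/daisy-api-paste.ini'
# -----------------------------------------------------------------------------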
+ if CONF.debug: + CONF.log_opt_values(logger, logging.DEBUG) + + return app + except (LookupError, ImportError) as e: + msg = (_("Unable to load %(app_name)s from " + "configuration file %(conf_file)s." + "\nGot: %(e)r") % {'app_name': app_name, + 'conf_file': conf_file, + 'e': e}) + logger.error(msg) + raise RuntimeError(msg) diff --git a/code/daisy/daisy/common/crypt.py b/code/daisy/daisy/common/crypt.py new file mode 100755 index 00000000..3638f110 --- /dev/null +++ b/code/daisy/daisy/common/crypt.py @@ -0,0 +1,68 @@ + +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Routines for URL-safe encrypting/decrypting +""" + +import base64 + +from Crypto.Cipher import AES +from Crypto import Random +from Crypto.Random import random +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range + + +def urlsafe_encrypt(key, plaintext, blocksize=16): + """ + Encrypts plaintext. Resulting ciphertext will contain URL-safe characters + :param key: AES secret key + :param plaintext: Input text to be encrypted + :param blocksize: Non-zero integer multiple of AES blocksize in bytes (16) + + :returns : Resulting ciphertext + """ + def pad(text): + """ + Pads text to be encrypted + """ + pad_length = (blocksize - len(text) % blocksize) + sr = random.StrongRandom() + pad = ''.join(chr(sr.randint(1, 0xFF)) for i in range(pad_length - 1)) + # We use chr(0) as a delimiter between text and padding + return text + chr(0) + pad + + # random initial 16 bytes for CBC + init_vector = Random.get_random_bytes(16) + cypher = AES.new(key, AES.MODE_CBC, init_vector) + padded = cypher.encrypt(pad(str(plaintext))) + return base64.urlsafe_b64encode(init_vector + padded) + + +def urlsafe_decrypt(key, ciphertext): + """ + Decrypts URL-safe base64 encoded ciphertext + :param key: AES secret key + :param ciphertext: The encrypted text to decrypt + + :returns : Resulting plaintext + """ + # Cast from unicode + ciphertext = base64.urlsafe_b64decode(str(ciphertext)) + cypher = AES.new(key, AES.MODE_CBC, ciphertext[:16]) + padded = cypher.decrypt(ciphertext[16:]) + return padded[:padded.rfind(chr(0))] diff --git a/code/daisy/daisy/common/exception.py b/code/daisy/daisy/common/exception.py new file mode 100755 index 00000000..4464d07d --- /dev/null +++ b/code/daisy/daisy/common/exception.py @@ -0,0 +1,588 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
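# --- Illustrative usage sketch (editor's note, not part of this patch) ------
# Round-tripping a value through daisy/common/crypt.py above: the key must be
# a valid AES key length (16, 24 or 32 bytes) and the ciphertext stays URL-safe
# base64, which is why it can be embedded in stored location metadata. The key
# and plaintext below are placeholders; requires the daisy package + pycrypto.
# from daisy.common import crypt
#
# key = '0123456789abcdef'   # 16-byte AES key (example only)
# token = crypt.urlsafe_encrypt(key, 'swift+http://user:pass@host/container')
# assert crypt.urlsafe_decrypt(key, token) == \
#     'swift+http://user:pass@host/container'
# -----------------------------------------------------------------------------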
See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Glance exception subclasses""" + +import six +import six.moves.urllib.parse as urlparse + +from daisy import i18n + +_ = i18n._ + +_FATAL_EXCEPTION_FORMAT_ERRORS = False + + +class RedirectException(Exception): + def __init__(self, url): + self.url = urlparse.urlparse(url) + + +class DaisyException(Exception): + """ + Base Glance Exception + + To correctly use this class, inherit from it and define + a 'message' property. That message will get printf'd + with the keyword arguments provided to the constructor. + """ + message = _("An unknown exception occurred") + + def __init__(self, message=None, *args, **kwargs): + if not message: + message = self.message + try: + if kwargs: + message = message % kwargs + except Exception: + if _FATAL_EXCEPTION_FORMAT_ERRORS: + raise + else: + # at least get the core message out if something happened + pass + self.msg = message + self.message = message + super(DaisyException, self).__init__(message) + + def __unicode__(self): + # NOTE(flwang): By default, self.msg is an instance of Message, which + # can't be converted by str(). Based on the definition of + # __unicode__, it should return unicode always. + return six.text_type(self.msg) + + +class MissingCredentialError(DaisyException): + message = _("Missing required credential: %(required)s") + + +class BadAuthStrategy(DaisyException): + message = _("Incorrect auth strategy, expected \"%(expected)s\" but " + "received \"%(received)s\"") + + +class NotFound(DaisyException): + message = _("An object with the specified identifier was not found.") + + +class BadStoreUri(DaisyException): + message = _("The Store URI was malformed.") + + +class Duplicate(DaisyException): + message = _("An object with the same identifier already exists.") + + +class Conflict(DaisyException): + message = _("An object with the same identifier is currently being " + "operated on.") + + +class StorageQuotaFull(DaisyException): + message = _("The size of the data %(image_size)s will exceed the limit. 
" + "%(remaining)s bytes remaining.") + + +class AuthBadRequest(DaisyException): + message = _("Connect error/bad request to Auth service at URL %(url)s.") + + +class AuthUrlNotFound(DaisyException): + message = _("Auth service at URL %(url)s not found.") + + +class AuthorizationFailure(DaisyException): + message = _("Authorization failed.") + + +class NotAuthenticated(DaisyException): + message = _("You are not authenticated.") + + +class UploadException(DaisyException): + message = _('Image upload problem: %s') + + +class Forbidden(DaisyException): + message = _("You are not authorized to complete this action.") + + +class ForbiddenPublicImage(Forbidden): + message = _("You are not authorized to complete this action.") + + +class ProtectedImageDelete(Forbidden): + message = _("Image %(image_id)s is protected and cannot be deleted.") + + +class ProtectedMetadefNamespaceDelete(Forbidden): + message = _("Metadata definition namespace %(namespace)s is protected" + " and cannot be deleted.") + + +class ProtectedMetadefNamespacePropDelete(Forbidden): + message = _("Metadata definition property %(property_name)s is protected" + " and cannot be deleted.") + + +class ProtectedMetadefObjectDelete(Forbidden): + message = _("Metadata definition object %(object_name)s is protected" + " and cannot be deleted.") + + +class ProtectedMetadefResourceTypeAssociationDelete(Forbidden): + message = _("Metadata definition resource-type-association" + " %(resource_type)s is protected and cannot be deleted.") + + +class ProtectedMetadefResourceTypeSystemDelete(Forbidden): + message = _("Metadata definition resource-type %(resource_type_name)s is" + " a seeded-system type and cannot be deleted.") + + +class ProtectedMetadefTagDelete(Forbidden): + message = _("Metadata definition tag %(tag_name)s is protected" + " and cannot be deleted.") + + +class Invalid(DaisyException): + message = _("Data supplied was not valid.") + + +class InvalidSortKey(Invalid): + message = _("Sort key supplied was not valid.") + + +class InvalidSortDir(Invalid): + message = _("Sort direction supplied was not valid.") + + +class InvalidPropertyProtectionConfiguration(Invalid): + message = _("Invalid configuration in property protection file.") + + +class InvalidSwiftStoreConfiguration(Invalid): + message = _("Invalid configuration in glance-swift conf file.") + + +class InvalidFilterRangeValue(Invalid): + message = _("Unable to filter using the specified range.") + + +class InvalidOptionValue(Invalid): + message = _("Invalid value for option %(option)s: %(value)s") + + +class ReadonlyProperty(Forbidden): + message = _("Attribute '%(property)s' is read-only.") + + +class ReservedProperty(Forbidden): + message = _("Attribute '%(property)s' is reserved.") + + +class AuthorizationRedirect(DaisyException): + message = _("Redirecting to %(uri)s for authorization.") + + +class ClientConnectionError(DaisyException): + message = _("There was an error connecting to a server") + + +class ClientConfigurationError(DaisyException): + message = _("There was an error configuring the client.") + + +class MultipleChoices(DaisyException): + message = _("The request returned a 302 Multiple Choices. This generally " + "means that you have not included a version indicator in a " + "request URI.\n\nThe body of response returned:\n%(body)s") + + +class LimitExceeded(DaisyException): + message = _("The request returned a 413 Request Entity Too Large. 
This " + "generally means that rate limiting or a quota threshold was " + "breached.\n\nThe response body:\n%(body)s") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(LimitExceeded, self).__init__(*args, **kwargs) + + +class ServiceUnavailable(DaisyException): + message = _("The request returned 503 Service Unavailable. This " + "generally occurs on service overload or other transient " + "outage.") + + def __init__(self, *args, **kwargs): + self.retry_after = (int(kwargs['retry']) if kwargs.get('retry') + else None) + super(ServiceUnavailable, self).__init__(*args, **kwargs) + + +class ServerError(DaisyException): + message = _("The request returned 500 Internal Server Error.") + + +class UnexpectedStatus(DaisyException): + message = _("The request returned an unexpected status: %(status)s." + "\n\nThe response body:\n%(body)s") + + +class InvalidContentType(DaisyException): + message = _("Invalid content type %(content_type)s") + + +class BadRegistryConnectionConfiguration(DaisyException): + message = _("Registry was not configured correctly on API server. " + "Reason: %(reason)s") + + +class BadDriverConfiguration(DaisyException): + message = _("Driver %(driver_name)s could not be configured correctly. " + "Reason: %(reason)s") + + +class MaxRedirectsExceeded(DaisyException): + message = _("Maximum redirects (%(redirects)s) was exceeded.") + + +class InvalidRedirect(DaisyException): + message = _("Received invalid HTTP redirect.") + + +class NoServiceEndpoint(DaisyException): + message = _("Response from Keystone does not contain a Glance endpoint.") + + +class RegionAmbiguity(DaisyException): + message = _("Multiple 'image' service matches for region %(region)s. This " + "generally means that a region is required and you have not " + "supplied one.") + + +class WorkerCreationFailure(DaisyException): + message = _("Server worker creation failed: %(reason)s.") + + +class SchemaLoadError(DaisyException): + message = _("Unable to load schema: %(reason)s") + + +class InvalidObject(DaisyException): + message = _("Provided object does not match schema " + "'%(schema)s': %(reason)s") + + +class UnsupportedHeaderFeature(DaisyException): + message = _("Provided header feature is unsupported: %(feature)s") + + +class InUseByStore(DaisyException): + message = _("The image cannot be deleted because it is in use through " + "the backend store outside of daisy.") + + +class ImageSizeLimitExceeded(DaisyException): + message = _("The provided image is too large.") + + +class ImageMemberLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "members for this image. Attempted: %(attempted)s, " + "Maximum: %(maximum)s") + + +class ImagePropertyLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "properties. Attempted: %(attempted)s, Maximum: %(maximum)s") + + +class ImageTagLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "tags. Attempted: %(attempted)s, Maximum: %(maximum)s") + + +class ImageLocationLimitExceeded(LimitExceeded): + message = _("The limit has been exceeded on the number of allowed image " + "locations. 
Attempted: %(attempted)s, Maximum: %(maximum)s")
+
+
+class SIGHUPInterrupt(DaisyException):
+    message = _("System SIGHUP signal received.")
+
+
+class RPCError(DaisyException):
+    message = _("%(cls)s exception was raised in the last rpc call: %(val)s")
+
+
+class TaskException(DaisyException):
+    message = _("An unknown task exception occurred")
+
+
+class BadTaskConfiguration(DaisyException):
+    message = _("Task was not configured properly")
+
+
+class InstallException(DaisyException):
+    message = _("Cluster installation raised an exception")
+
+
+class InstallTimeoutException(DaisyException):
+    message = _(
+        "Timed out while installing TECS components to cluster %(cluster_id)s")
+
+
+class TaskNotFound(TaskException, NotFound):
+    message = _("Task with the given id %(task_id)s was not found")
+
+
+class InvalidTaskStatus(TaskException, Invalid):
+    message = _("Provided status of task is unsupported: %(status)s")
+
+
+class InvalidTaskType(TaskException, Invalid):
+    message = _("Provided type of task is unsupported: %(type)s")
+
+
+class InvalidTaskStatusTransition(TaskException, Invalid):
+    message = _("Status transition from %(cur_status)s to"
+                " %(new_status)s is not allowed")
+
+
+class DuplicateLocation(Duplicate):
+    message = _("The location %(location)s already exists")
+
+
+class ImageDataNotFound(NotFound):
+    message = _("No image data could be found")
+
+
+class InvalidParameterValue(Invalid):
+    message = _("Invalid value '%(value)s' for parameter '%(param)s': "
+                "%(extra_msg)s")
+
+
+class InvalidImageStatusTransition(Invalid):
+    message = _("Image status transition from %(cur_status)s to"
+                " %(new_status)s is not allowed")
+
+
+class MetadefDuplicateNamespace(Duplicate):
+    message = _("The metadata definition namespace=%(namespace_name)s"
+                " already exists.")
+
+
+class MetadefDuplicateObject(Duplicate):
+    message = _("A metadata definition object with name=%(object_name)s"
+                " already exists in namespace=%(namespace_name)s.")
+
+
+class MetadefDuplicateProperty(Duplicate):
+    message = _("A metadata definition property with name=%(property_name)s"
+                " already exists in namespace=%(namespace_name)s.")
+
+
+class MetadefDuplicateResourceType(Duplicate):
+    message = _("A metadata definition resource-type with"
+                " name=%(resource_type_name)s already exists.")
+
+
+class MetadefDuplicateResourceTypeAssociation(Duplicate):
+    message = _("The metadata definition resource-type association of"
+                " resource-type=%(resource_type_name)s to"
+                " namespace=%(namespace_name)s"
+                " already exists.")
+
+
+class MetadefDuplicateTag(Duplicate):
+    message = _("A metadata tag with name=%(name)s"
+                " already exists in namespace=%(namespace_name)s.")
+
+
+class MetadefForbidden(Forbidden):
+    message = _("You are not authorized to complete this action.")
+
+
+class MetadefIntegrityError(Forbidden):
+    message = _("The metadata definition %(record_type)s with"
+                " name=%(record_name)s not deleted."
+ " Other records still refer to it.") + + +class MetadefNamespaceNotFound(NotFound): + message = _("Metadata definition namespace=%(namespace_name)s" + "was not found.") + + +class MetadefObjectNotFound(NotFound): + message = _("The metadata definition object with" + " name=%(object_name)s was not found in" + " namespace=%(namespace_name)s.") + + +class MetadefPropertyNotFound(NotFound): + message = _("The metadata definition property with" + " name=%(property_name)s was not found in" + " namespace=%(namespace_name)s.") + + +class MetadefResourceTypeNotFound(NotFound): + message = _("The metadata definition resource-type with" + " name=%(resource_type_name)s, was not found.") + + +class MetadefResourceTypeAssociationNotFound(NotFound): + message = _("The metadata definition resource-type association of" + " resource-type=%(resource_type_name)s to" + " namespace=%(namespace_name)s," + " was not found.") + + +class MetadefTagNotFound(NotFound): + message = _("The metadata definition tag with" + " name=%(name)s was not found in" + " namespace=%(namespace_name)s.") + + +class InvalidVersion(Invalid): + message = _("Version is invalid: %(reason)s") + + +class InvalidArtifactTypePropertyDefinition(Invalid): + message = _("Invalid property definition") + + +class InvalidArtifactTypeDefinition(Invalid): + message = _("Invalid type definition") + + +class InvalidArtifactPropertyValue(Invalid): + message = _("Property '%(name)s' may not have value '%(val)s': %(msg)s") + + def __init__(self, message=None, *args, **kwargs): + super(InvalidArtifactPropertyValue, self).__init__(message, *args, + **kwargs) + self.name = kwargs.get('name') + self.value = kwargs.get('val') + + +class ArtifactNotFound(NotFound): + message = _("Artifact with id=%(id)s was not found") + + +class ArtifactForbidden(Forbidden): + message = _("Artifact with id=%(id)s is not accessible") + + +class ArtifactDuplicateNameTypeVersion(Duplicate): + message = _("Artifact with the specified type, name and version" + " already exists") + + +class InvalidArtifactStateTransition(Invalid): + message = _("Artifact cannot change state from %(source)s to %(target)s") + + +class ArtifactDuplicateDirectDependency(Duplicate): + message = _("Artifact with the specified type, name and version" + " already has the direct dependency=%(dep)s") + + +class ArtifactDuplicateTransitiveDependency(Duplicate): + message = _("Artifact with the specified type, name and version" + " already has the transitive dependency=%(dep)s") + + +class ArtifactUnsupportedPropertyOperator(Invalid): + message = _("Operator %(op)s is not supported") + + +class ArtifactUnsupportedShowLevel(Invalid): + message = _("Show level %(shl)s is not supported in this operation") + + +class ArtifactPropertyValueNotFound(NotFound): + message = _("Property's %(prop)s value has not been found") + + +class ArtifactInvalidProperty(Invalid): + message = _("Artifact has no property %(prop)s") + + +class ArtifactInvalidPropertyParameter(Invalid): + message = _("Cannot use this parameter with the operator %(op)s") + + +class ArtifactLoadError(DaisyException): + message = _("Cannot load artifact '%(name)s'") + + +class ArtifactNonMatchingTypeName(ArtifactLoadError): + message = _( + "Plugin name '%(plugin)s' should match artifact typename '%(name)s'") + + +class ArtifactPluginNotFound(NotFound): + message = _("No plugin for '%(name)s' has been loaded") + + +class UnknownArtifactType(NotFound): + message = _("Artifact type with name '%(name)s' and version '%(version)s' " + "is not known") + + 
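# --- Illustrative sketch (editor's note, not part of this patch) ------------
# Every DaisyException subclass above is used the same way: the class-level
# 'message' template is %-formatted with the keyword arguments passed to the
# constructor. A self-contained rendition of that pattern:
class _DemoError(Exception):
    message = "An unknown exception occurred"

    def __init__(self, **kwargs):
        super(_DemoError, self).__init__(
            self.message % kwargs if kwargs else self.message)

class _DemoObjectNotFound(_DemoError):
    message = ("The metadata definition object with name=%(object_name)s "
               "was not found in namespace=%(namespace_name)s.")

# str(_DemoObjectNotFound(object_name='os_shutdown',
#                         namespace_name='OS::Compute'))
# -> 'The metadata definition object with name=os_shutdown was not found in
#     namespace=OS::Compute.'
# -----------------------------------------------------------------------------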
+class ArtifactInvalidStateTransition(Invalid):
+    message = _("Artifact state cannot be changed from %(curr)s to %(to)s")
+
+
+class JsonPatchException(DaisyException):
+    message = _("Invalid jsonpatch request")
+
+
+class InvalidJsonPatchBody(JsonPatchException):
+    message = _("The provided body %(body)s is invalid "
+                "under given schema: %(schema)s")
+
+
+class InvalidJsonPatchPath(JsonPatchException):
+    message = _("The provided path '%(path)s' is invalid: %(explanation)s")
+
+    def __init__(self, message=None, *args, **kwargs):
+        self.explanation = kwargs.get("explanation")
+        super(InvalidJsonPatchPath, self).__init__(message, *args, **kwargs)
+
+
+class InvalidNetworkConfig(DaisyException):
+    pass
+
+
+class InvalidIP(DaisyException):
+    pass
+
+
+class OSInstallFailed(DaisyException):
+    message = _("OS installation failed.")
+
+
+class IMPIOprationFailed(DaisyException):
+    message = _("IPMI command failed.")
+
+
+class ThreadBinException(DaisyException):
+    def __init__(self, *args):
+        super(ThreadBinException, self).__init__(*args)
+
+
+class SubprocessCmdFailed(DaisyException):
+    message = _("subprocess command failed.")
+
+
+class DeleteConstrainted(DaisyException):
+    message = _("Delete is not allowed.")
+
+
diff --git a/code/daisy/daisy/common/jsonpatchvalidator.py b/code/daisy/daisy/common/jsonpatchvalidator.py
new file mode 100755
index 00000000..0d92852c
--- /dev/null
+++ b/code/daisy/daisy/common/jsonpatchvalidator.py
@@ -0,0 +1,122 @@
+# Copyright 2015 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+A mixin that validates the given body for jsonpatch-compatibility.
+The methods supported are limited to listed in METHODS_ALLOWED +""" + +import re + +import jsonschema + +import daisy.common.exception as exc +from daisy.openstack.common._i18n import _ + + +class JsonPatchValidatorMixin(object): + # a list of allowed methods allowed according to RFC 6902 + ALLOWED = ["replace", "test", "remove", "add", "copy"] + PATH_REGEX_COMPILED = re.compile("^/[^/]+(/[^/]+)*$") + + def __init__(self, methods_allowed=["replace", "remove"]): + self.schema = self._gen_schema(methods_allowed) + self.methods_allowed = [m for m in methods_allowed + if m in self.ALLOWED] + + @staticmethod + def _gen_schema(methods_allowed): + """ + Generates a jsonschema for jsonpatch request based on methods_allowed + """ + # op replace needs no 'value' param, so needs a special schema if + # present in methods_allowed + basic_schema = { + "type": "array", + "items": {"properties": {"op": {"type": "string", + "enum": methods_allowed}, + "path": {"type": "string"}, + "value": {"type": ["string", + "object", + "integer", + "array", + "boolean"]} + }, + "required": ["op", "path", "value"], + "type": "object"}, + "$schema": "http://json-schema.org/draft-04/schema#" + } + if "remove" in methods_allowed: + methods_allowed.remove("remove") + no_remove_op_schema = { + "type": "object", + "properties": { + "op": {"type": "string", "enum": methods_allowed}, + "path": {"type": "string"}, + "value": {"type": ["string", "object", + "integer", "array", "boolean"]} + }, + "required": ["op", "path", "value"]} + op_remove_only_schema = { + "type": "object", + "properties": { + "op": {"type": "string", "enum": ["remove"]}, + "path": {"type": "string"} + }, + "required": ["op", "path"]} + + basic_schema = { + "type": "array", + "items": { + "oneOf": [no_remove_op_schema, op_remove_only_schema]}, + "$schema": "http://json-schema.org/draft-04/schema#" + } + return basic_schema + + def validate_body(self, body): + try: + jsonschema.validate(body, self.schema) + # now make sure everything is ok with path + return [{"path": self._decode_json_pointer(e["path"]), + "value": e.get("value", None), + "op": e["op"]} for e in body] + except jsonschema.ValidationError: + raise exc.InvalidJsonPatchBody(body=body, schema=self.schema) + + def _check_for_path_errors(self, pointer): + if not re.match(self.PATH_REGEX_COMPILED, pointer): + msg = _("Json path should start with a '/', " + "end with no '/', no 2 subsequent '/' are allowed.") + raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg) + if re.search('~[^01]', pointer) or pointer.endswith('~'): + msg = _("Pointer contains '~' which is not part of" + " a recognized escape sequence [~0, ~1].") + raise exc.InvalidJsonPatchPath(path=pointer, explanation=msg) + + def _decode_json_pointer(self, pointer): + """Parses a json pointer. Returns a pointer as a string. + + Json Pointers are defined in + http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . + The pointers use '/' for separation between object attributes. + A '/' character in an attribute name is encoded as "~1" and + a '~' character is encoded as "~0". 
+ """ + self._check_for_path_errors(pointer) + ret = [] + for part in pointer.lstrip('/').split('/'): + ret.append(part.replace('~1', '/').replace('~0', '~').strip()) + return '/'.join(ret) diff --git a/code/daisy/daisy/common/location_strategy/__init__.py b/code/daisy/daisy/common/location_strategy/__init__.py new file mode 100755 index 00000000..1f3952b1 --- /dev/null +++ b/code/daisy/daisy/common/location_strategy/__init__.py @@ -0,0 +1,116 @@ +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_config import cfg +from oslo_log import log as logging +import stevedore + +from daisy import i18n + +_ = i18n._ +_LE = i18n._LE + +location_strategy_opts = [ + cfg.StrOpt('location_strategy', default='location_order', + choices=('location_order', 'store_type'), + help=_("This value sets what strategy will be used to " + "determine the image location order. Currently " + "two strategies are packaged with Glance " + "'location_order' and 'store_type'.")) +] + +CONF = cfg.CONF +CONF.register_opts(location_strategy_opts) + +LOG = logging.getLogger(__name__) + + +def _load_strategies(): + """Load all strategy modules.""" + modules = {} + namespace = "daisy.common.image_location_strategy.modules" + ex = stevedore.extension.ExtensionManager(namespace) + for module_name in ex.names(): + try: + mgr = stevedore.driver.DriverManager( + namespace=namespace, + name=module_name, + invoke_on_load=False) + + # Obtain module name + strategy_name = str(mgr.driver.get_strategy_name()) + if strategy_name in modules: + msg = (_('%(strategy)s is registered as a module twice. ' + '%(module)s is not being used.') % + {'strategy': strategy_name, 'module': module_name}) + LOG.warn(msg) + else: + # Initialize strategy module + mgr.driver.init() + modules[strategy_name] = mgr.driver + except Exception as e: + LOG.error(_LE("Failed to load location strategy module " + "%(module)s: %(e)s") % {'module': module_name, + 'e': e}) + return modules + + +_available_strategies = _load_strategies() + + +# TODO(kadachi): Not used but don't remove this until glance_store +# development/migration stage. +def verify_location_strategy(conf=None, strategies=_available_strategies): + """Validate user configured 'location_strategy' option value.""" + if not conf: + conf = CONF.location_strategy + if conf not in strategies: + msg = (_('Invalid location_strategy option: %(name)s. ' + 'The valid strategy option(s) is(are): %(strategies)s') % + {'name': conf, 'strategies': ", ".join(strategies.keys())}) + LOG.error(msg) + raise RuntimeError(msg) + + +def get_ordered_locations(locations, **kwargs): + """ + Order image location list by configured strategy. + + :param locations: The original image location list. + :param kwargs: Strategy-specific arguments for under layer strategy module. + :return: The image location list with strategy-specific order. 
+ """ + if not locations: + return [] + strategy_module = _available_strategies[CONF.location_strategy] + return strategy_module.get_ordered_locations(copy.deepcopy(locations), + **kwargs) + + +def choose_best_location(locations, **kwargs): + """ + Choose best location from image location list by configured strategy. + + :param locations: The original image location list. + :param kwargs: Strategy-specific arguments for under layer strategy module. + :return: The best location from image location list. + """ + locations = get_ordered_locations(locations, **kwargs) + if locations: + return locations[0] + else: + return None diff --git a/code/daisy/daisy/common/location_strategy/location_order.py b/code/daisy/daisy/common/location_strategy/location_order.py new file mode 100755 index 00000000..022fe762 --- /dev/null +++ b/code/daisy/daisy/common/location_strategy/location_order.py @@ -0,0 +1,36 @@ +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Image location order based location strategy module""" + + +def get_strategy_name(): + """Return strategy module name.""" + return 'location_order' + + +def init(): + """Initialize strategy module.""" + pass + + +def get_ordered_locations(locations, **kwargs): + """ + Order image location list. + + :param locations: The original image location list. + :return: The image location list with original natural order. + """ + return locations diff --git a/code/daisy/daisy/common/location_strategy/store_type.py b/code/daisy/daisy/common/location_strategy/store_type.py new file mode 100755 index 00000000..4f04c0c1 --- /dev/null +++ b/code/daisy/daisy/common/location_strategy/store_type.py @@ -0,0 +1,120 @@ +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Storage preference based location strategy module""" + +from oslo_config import cfg +import six +import six.moves.urllib.parse as urlparse + +from daisy import i18n + +_ = i18n._ + +store_type_opts = [ + cfg.ListOpt("store_type_preference", + default=[], + help=_("The store names to use to get store preference order. " + "The name must be registered by one of the stores " + "defined by the 'stores' config option. 
" + "This option will be applied when you using " + "'store_type' option as image location strategy " + "defined by the 'location_strategy' config option.")) +] + +CONF = cfg.CONF +CONF.register_opts(store_type_opts, group='store_type_location_strategy') + +_STORE_TO_SCHEME_MAP = {} + + +def get_strategy_name(): + """Return strategy module name.""" + return 'store_type' + + +def init(): + """Initialize strategy module.""" + # NOTE(zhiyan): We have a plan to do a reusable glance client library for + # all clients like Nova and Cinder in near period, it would be able to + # contains common code to provide uniform image service interface for them, + # just like Brick in Cinder, this code can be moved to there and shared + # between Glance and client both side. So this implementation as far as + # possible to prevent make relationships with Glance(server)-specific code, + # for example: using functions within store module to validate + # 'store_type_preference' option. + mapping = {'filesystem': ['file', 'filesystem'], + 'http': ['http', 'https'], + 'rbd': ['rbd'], + 's3': ['s3', 's3+http', 's3+https'], + 'swift': ['swift', 'swift+https', 'swift+http'], + 'gridfs': ['gridfs'], + 'sheepdog': ['sheepdog'], + 'cinder': ['cinder'], + 'vmware_datastore': ['vsphere']} + _STORE_TO_SCHEME_MAP.clear() + _STORE_TO_SCHEME_MAP.update(mapping) + + +def get_ordered_locations(locations, uri_key='url', **kwargs): + """ + Order image location list. + + :param locations: The original image location list. + :param uri_key: The key name for location URI in image location dictionary. + :return: The image location list with preferred store type order. + """ + def _foreach_store_type_preference(): + store_types = CONF.store_type_location_strategy.store_type_preference + for preferred_store in store_types: + preferred_store = str(preferred_store).strip() + if not preferred_store: + continue + yield preferred_store + + if not locations: + return locations + + preferences = {} + others = [] + for preferred_store in _foreach_store_type_preference(): + preferences[preferred_store] = [] + + for location in locations: + uri = location.get(uri_key) + if not uri: + continue + pieces = urlparse.urlparse(uri.strip()) + + store_name = None + for store, schemes in six.iteritems(_STORE_TO_SCHEME_MAP): + if pieces.scheme.strip() in schemes: + store_name = store + break + + if store_name in preferences: + preferences[store_name].append(location) + else: + others.append(location) + + ret = [] + # NOTE(zhiyan): While configuration again since py26 does not support + # ordereddict container. + for preferred_store in _foreach_store_type_preference(): + ret.extend(preferences[preferred_store]) + + ret.extend(others) + + return ret diff --git a/code/daisy/daisy/common/property_utils.py b/code/daisy/daisy/common/property_utils.py new file mode 100755 index 00000000..802a7d1d --- /dev/null +++ b/code/daisy/daisy/common/property_utils.py @@ -0,0 +1,206 @@ +# Copyright 2013 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import ConfigParser +import re +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_policy import policy + +import daisy.api.policy +from daisy.common import exception +from daisy import i18n + +# NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but +# we must set manually for compatibility with py26 +CONFIG = ConfigParser.SafeConfigParser(dict_type=OrderedDict) +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + +property_opts = [ + cfg.StrOpt('property_protection_file', + help=_('The location of the property protection file.' + 'This file contains the rules for property protections ' + 'and the roles/policies associated with it. If this ' + 'config value is not specified, by default, property ' + 'protections won\'t be enforced. If a value is ' + 'specified and the file is not found, then the ' + 'daisy-api service will not start.')), + cfg.StrOpt('property_protection_rule_format', + default='roles', + choices=('roles', 'policies'), + help=_('This config value indicates whether "roles" or ' + '"policies" are used in the property protection file.')), +] + +CONF = cfg.CONF +CONF.register_opts(property_opts) + +# NOTE (spredzy): Due to the particularly lengthy name of the exception +# and the number of occurrence it is raise in this file, a variable is +# created +InvalidPropProtectConf = exception.InvalidPropertyProtectionConfiguration + + +def is_property_protection_enabled(): + if CONF.property_protection_file: + return True + return False + + +class PropertyRules(object): + + def __init__(self, policy_enforcer=None): + self.rules = [] + self.prop_exp_mapping = {} + self.policies = [] + self.policy_enforcer = policy_enforcer or daisy.api.policy.Enforcer() + self.prop_prot_rule_format = CONF.property_protection_rule_format + self.prop_prot_rule_format = self.prop_prot_rule_format.lower() + self._load_rules() + + def _load_rules(self): + try: + conf_file = CONF.find_file(CONF.property_protection_file) + CONFIG.read(conf_file) + except Exception as e: + msg = (_LE("Couldn't find property protection file %(file)s: " + "%(error)s.") % {'file': CONF.property_protection_file, + 'error': e}) + LOG.error(msg) + raise InvalidPropProtectConf() + + if self.prop_prot_rule_format not in ['policies', 'roles']: + msg = _LE("Invalid value '%s' for " + "'property_protection_rule_format'. " + "The permitted values are " + "'roles' and 'policies'") % self.prop_prot_rule_format + LOG.error(msg) + raise InvalidPropProtectConf() + + operations = ['create', 'read', 'update', 'delete'] + properties = CONFIG.sections() + for property_exp in properties: + property_dict = {} + compiled_rule = self._compile_rule(property_exp) + + for operation in operations: + permissions = CONFIG.get(property_exp, operation) + if permissions: + if self.prop_prot_rule_format == 'policies': + if ',' in permissions: + LOG.error( + _LE("Multiple policies '%s' not allowed " + "for a given operation. Policies can be " + "combined in the policy file"), + permissions) + raise InvalidPropProtectConf() + self.prop_exp_mapping[compiled_rule] = property_exp + self._add_policy_rules(property_exp, operation, + permissions) + permissions = [permissions] + else: + permissions = [permission.strip() for permission in + permissions.split(',')] + if '@' in permissions and '!' 
in permissions: + msg = (_LE( + "Malformed property protection rule in " + "[%(prop)s] %(op)s=%(perm)s: '@' and '!' " + "are mutually exclusive") % + dict(prop=property_exp, + op=operation, + perm=permissions)) + LOG.error(msg) + raise InvalidPropProtectConf() + property_dict[operation] = permissions + else: + property_dict[operation] = [] + LOG.warn( + _('Property protection on operation %(operation)s' + ' for rule %(rule)s is not found. No role will be' + ' allowed to perform this operation.') % + {'operation': operation, + 'rule': property_exp}) + + self.rules.append((compiled_rule, property_dict)) + + def _compile_rule(self, rule): + try: + return re.compile(rule) + except Exception as e: + msg = (_LE("Encountered a malformed property protection rule" + " %(rule)s: %(error)s.") % {'rule': rule, + 'error': e}) + LOG.error(msg) + raise InvalidPropProtectConf() + + def _add_policy_rules(self, property_exp, action, rule): + """Add policy rules to the policy enforcer. + + For example, if the file listed as property_protection_file has: + [prop_a] + create = glance_creator + then the corresponding policy rule would be: + "prop_a:create": "rule:glance_creator" + where glance_creator is defined in policy.json. For example: + "glance_creator": "role:admin or role:glance_create_user" + """ + rule = "rule:%s" % rule + rule_name = "%s:%s" % (property_exp, action) + rule_dict = policy.Rules.from_dict({ + rule_name: rule + }) + self.policy_enforcer.add_rules(rule_dict) + + def _check_policy(self, property_exp, action, context): + try: + action = ":".join([property_exp, action]) + self.policy_enforcer.enforce(context, action, {}) + except exception.Forbidden: + return False + return True + + def check_property_rules(self, property_name, action, context): + roles = context.roles + if not self.rules: + return True + + if action not in ['create', 'read', 'update', 'delete']: + return False + + for rule_exp, rule in self.rules: + if rule_exp.search(str(property_name)): + break + else: # no matching rules + return False + + rule_roles = rule.get(action) + if rule_roles: + if '!' in rule_roles: + return False + elif '@' in rule_roles: + return True + if self.prop_prot_rule_format == 'policies': + prop_exp_key = self.prop_exp_mapping[rule_exp] + return self._check_policy(prop_exp_key, action, + context) + if set(roles).intersection(set(rule_roles)): + return True + return False diff --git a/code/daisy/daisy/common/rpc.py b/code/daisy/daisy/common/rpc.py new file mode 100755 index 00000000..3d3cd6b8 --- /dev/null +++ b/code/daisy/daisy/common/rpc.py @@ -0,0 +1,279 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +RPC Controller +""" +import datetime +import traceback + +from oslo_config import cfg +from oslo_log import log as logging +import oslo_utils.importutils as imp +from oslo_utils import timeutils +import six +from webob import exc + +from daisy.common import client +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + + +rpc_opts = [ + # NOTE(flaper87): Shamelessly copied + # from oslo rpc. + cfg.ListOpt('allowed_rpc_exception_modules', + default=['openstack.common.exception', + 'daisy.common.exception', + 'exceptions', + ], + help='Modules of exceptions that are permitted to be recreated' + ' upon receiving exception data from an rpc call.'), +] + +CONF = cfg.CONF +CONF.register_opts(rpc_opts) + + +class RPCJSONSerializer(wsgi.JSONResponseSerializer): + + def _sanitizer(self, obj): + def to_primitive(_type, _value): + return {"_type": _type, "_value": _value} + + if isinstance(obj, datetime.datetime): + return to_primitive("datetime", timeutils.strtime(obj)) + + return super(RPCJSONSerializer, self)._sanitizer(obj) + + +class RPCJSONDeserializer(wsgi.JSONRequestDeserializer): + + def _to_datetime(self, obj): + return timeutils.parse_strtime(obj) + + def _sanitizer(self, obj): + try: + _type, _value = obj["_type"], obj["_value"] + return getattr(self, "_to_" + _type)(_value) + except (KeyError, AttributeError): + return obj + + +class Controller(object): + """ + Base RPCController. + + This is the base controller for RPC based APIs. Commands + handled by this controller respect the following form: + + [{ + 'command': 'method_name', + 'kwargs': {...} + }] + + The controller is capable of processing more than one command + per request and will always return a list of results. + + :params raise_exc: Boolean that specifies whether to raise + exceptions instead of "serializing" them. + """ + + def __init__(self, raise_exc=False): + self._registered = {} + self.raise_exc = raise_exc + + def register(self, resource, filtered=None, excluded=None, refiner=None): + """ + Exports methods through the RPC Api. + + :params resource: Resource's instance to register. + :params filtered: List of methods that *can* be registered. Read + as "Method must be in this list". + :params excluded: List of methods to exclude. + :params refiner: Callable to use as filter for methods. + + :raises AssertionError: If refiner is not callable. 
+ """ + + funcs = filter(lambda x: not x.startswith("_"), dir(resource)) + + if filtered: + funcs = [f for f in funcs if f in filtered] + + if excluded: + funcs = [f for f in funcs if f not in excluded] + + if refiner: + assert callable(refiner), "Refiner must be callable" + funcs = filter(refiner, funcs) + + for name in funcs: + meth = getattr(resource, name) + + if not callable(meth): + continue + + self._registered[name] = meth + + def __call__(self, req, body): + """ + Executes the command + """ + + if not isinstance(body, list): + msg = _("Request must be a list of commands") + raise exc.HTTPBadRequest(explanation=msg) + + def validate(cmd): + if not isinstance(cmd, dict): + msg = _("Bad Command: %s") % str(cmd) + raise exc.HTTPBadRequest(explanation=msg) + + command, kwargs = cmd.get("command"), cmd.get("kwargs") + + if (not command or not isinstance(command, six.string_types) or + (kwargs and not isinstance(kwargs, dict))): + msg = _("Wrong command structure: %s") % (str(cmd)) + raise exc.HTTPBadRequest(explanation=msg) + + method = self._registered.get(command) + if not method: + # Just raise 404 if the user tries to + # access a private method. No need for + # 403 here since logically the command + # is not registered to the rpc dispatcher + raise exc.HTTPNotFound(explanation=_("Command not found")) + + return True + + # If more than one command were sent then they might + # be intended to be executed sequentially, that for, + # lets first verify they're all valid before executing + # them. + commands = filter(validate, body) + + results = [] + for cmd in commands: + # kwargs is not required + command, kwargs = cmd["command"], cmd.get("kwargs", {}) + method = self._registered[command] + try: + result = method(req.context, **kwargs) + except Exception as e: + if self.raise_exc: + raise + + cls, val = e.__class__, utils.exception_to_str(e) + msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") % + dict(val=val, tb=traceback.format_exc())) + LOG.error(msg) + + # NOTE(flaper87): Don't propagate all exceptions + # but the ones allowed by the user. + module = cls.__module__ + if module not in CONF.allowed_rpc_exception_modules: + cls = exception.RPCError + val = six.text_type(exception.RPCError(cls=cls, val=val)) + + cls_path = "%s.%s" % (cls.__module__, cls.__name__) + result = {"_error": {"cls": cls_path, "val": val}} + results.append(result) + return results + + +class RPCClient(client.BaseClient): + + def __init__(self, *args, **kwargs): + self._serializer = RPCJSONSerializer() + self._deserializer = RPCJSONDeserializer() + + self.raise_exc = kwargs.pop("raise_exc", True) + self.base_path = kwargs.pop("base_path", '/rpc') + super(RPCClient, self).__init__(*args, **kwargs) + + @client.handle_unauthenticated + def bulk_request(self, commands): + """ + Execute multiple commands in a single request. + + :params commands: List of commands to send. Commands + must respect the following form: + + { + 'command': 'method_name', + 'kwargs': method_kwargs + } + """ + body = self._serializer.to_json(commands) + response = super(RPCClient, self).do_request('POST', + self.base_path, + body) + return self._deserializer.from_json(response.read()) + + def do_request(self, method, **kwargs): + """ + Simple do_request override. This method serializes + the outgoing body and builds the command that will + be sent. + + :params method: The remote python method to call + :params kwargs: Dynamic parameters that will be + passed to the remote method. 
+ """ + content = self.bulk_request([{'command': method, + 'kwargs': kwargs}]) + + # NOTE(flaper87): Return the first result if + # a single command was executed. + content = content[0] + + # NOTE(flaper87): Check if content is an error + # and re-raise it if raise_exc is True. Before + # checking if content contains the '_error' key, + # verify if it is an instance of dict - since the + # RPC call may have returned something different. + if self.raise_exc and (isinstance(content, dict) + and '_error' in content): + error = content['_error'] + try: + exc_cls = imp.import_class(error['cls']) + raise exc_cls(error['val']) + except ImportError: + # NOTE(flaper87): The exception + # class couldn't be imported, using + # a generic exception. + raise exception.RPCError(**error) + return content + + def __getattr__(self, item): + """ + This method returns a method_proxy that + will execute the rpc call in the registry + service. + """ + if item.startswith('_'): + raise AttributeError(item) + + def method_proxy(**kw): + return self.do_request(item, **kw) + + return method_proxy diff --git a/code/daisy/daisy/common/scripts/__init__.py b/code/daisy/daisy/common/scripts/__init__.py new file mode 100755 index 00000000..6d1dafd4 --- /dev/null +++ b/code/daisy/daisy/common/scripts/__init__.py @@ -0,0 +1,52 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging + +from daisy.common.scripts.image_import import main as image_import +from daisy import i18n + + +_LI = i18n._LI +_LE = i18n._LE +LOG = logging.getLogger(__name__) + + +def run_task(task_id, task_type, context, + task_repo=None, image_repo=None, image_factory=None): + # TODO(nikhil): if task_repo is None get new task repo + # TODO(nikhil): if image_repo is None get new image repo + # TODO(nikhil): if image_factory is None get new image factory + LOG.info(_LI("Loading known task scripts for task_id %(task_id)s " + "of type %(task_type)s"), {'task_id': task_id, + 'task_type': task_type}) + if task_type == 'import': + image_import.run(task_id, context, task_repo, + image_repo, image_factory) + + else: + msg = _LE("This task type %(task_type)s is not supported by the " + "current deployment of daisy. 
Please refer the " + "documentation provided by OpenStack or your operator " + "for more information.") % {'task_type': task_type} + LOG.error(msg) + task = task_repo.get(task_id) + task.fail(msg) + if task_repo: + task_repo.save(task) + else: + LOG.error(_LE("Failed to save task %(task_id)s in DB as task_repo " + "is %(task_repo)s"), {"task_id": task_id, + "task_repo": task_repo}) diff --git a/code/daisy/daisy/common/scripts/image_import/__init__.py b/code/daisy/daisy/common/scripts/image_import/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/common/scripts/image_import/main.py b/code/daisy/daisy/common/scripts/image_import/main.py new file mode 100755 index 00000000..718b88dd --- /dev/null +++ b/code/daisy/daisy/common/scripts/image_import/main.py @@ -0,0 +1,166 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +__all__ = [ + 'run', +] + +from oslo_concurrency import lockutils +from oslo_log import log as logging +from oslo_utils import excutils +import six + +from daisy.api.v2 import images as v2_api +from daisy.common import exception +from daisy.common.scripts import utils as script_utils +from daisy.common import store_utils +from daisy.common import utils as common_utils +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + + +def run(t_id, context, task_repo, image_repo, image_factory): + LOG.info(_LI('Task %(task_id)s beginning import ' + 'execution.') % {'task_id': t_id}) + _execute(t_id, task_repo, image_repo, image_factory) + + +# NOTE(nikhil): This lock prevents more than N number of threads to be spawn +# simultaneously. The number N represents the number of threads in the +# executor pool. The value is set to 10 in the eventlet executor. +@lockutils.synchronized("glance_import") +def _execute(t_id, task_repo, image_repo, image_factory): + task = script_utils.get_task(task_repo, t_id) + + if task is None: + # NOTE: This happens if task is not found in the database. In + # such cases, there is no way to update the task status so, + # it's ignored here. + return + + try: + task_input = script_utils.unpack_task_input(task) + + uri = script_utils.validate_location_uri(task_input.get('import_from')) + image_id = import_image(image_repo, image_factory, task_input, t_id, + uri) + + task.succeed({'image_id': image_id}) + except Exception as e: + # Note: The message string contains Error in it to indicate + # in the task.message that it's a error message for the user. 
+ + # TODO(nikhil): need to bring back save_and_reraise_exception when + # necessary + err_msg = ("Error: " + six.text_type(type(e)) + ': ' + + common_utils.exception_to_str(e)) + log_msg = _LE(err_msg + ("Task ID %s" % task.task_id)) # noqa + LOG.exception(log_msg) + + task.fail(_LE(err_msg)) # noqa + finally: + task_repo.save(task) + + +def import_image(image_repo, image_factory, task_input, task_id, uri): + original_image = create_image(image_repo, image_factory, + task_input.get('image_properties'), task_id) + # NOTE: set image status to saving just before setting data + original_image.status = 'saving' + image_repo.save(original_image) + image_id = original_image.image_id + + # NOTE: Retrieving image from the database because the Image object + # returned from create_image method does not have appropriate factories + # wrapped around it. + new_image = image_repo.get(image_id) + set_image_data(new_image, uri, None) + + try: + # NOTE: Check if the Image is not deleted after setting the data + # before saving the active image. Here if image status is + # saving, then new_image is saved as it contains updated location, + # size, virtual_size and checksum information and the status of + # new_image is already set to active in set_image_data() call. + image = image_repo.get(image_id) + if image.status == 'saving': + image_repo.save(new_image) + return image_id + else: + msg = _("The Image %(image_id)s object being created by this task " + "%(task_id)s, is no longer in valid status for further " + "processing.") % {"image_id": image_id, + "task_id": task_id} + raise exception.Conflict(msg) + except (exception.Conflict, exception.NotFound): + with excutils.save_and_reraise_exception(): + if new_image.locations: + for location in new_image.locations: + store_utils.delete_image_location_from_backend( + new_image.context, + image_id, + location) + + +def create_image(image_repo, image_factory, image_properties, task_id): + _base_properties = [] + for k, v in v2_api.get_base_properties().items(): + _base_properties.append(k) + + properties = {} + # NOTE: get the base properties + for key in _base_properties: + try: + properties[key] = image_properties.pop(key) + except KeyError: + msg = ("Task ID %(task_id)s: Ignoring property %(k)s for setting " + "base properties while creating " + "Image.") % {'task_id': task_id, 'k': key} + LOG.debug(msg) + + # NOTE: get the rest of the properties and pass them as + # extra_properties for Image to be created with them. 
+ properties['extra_properties'] = image_properties + script_utils.set_base_image_properties(properties=properties) + + image = image_factory.new_image(**properties) + image_repo.add(image) + return image + + +def set_image_data(image, uri, task_id): + data_iter = None + try: + LOG.info(_LI("Task %(task_id)s: Got image data uri %(data_uri)s to be " + "imported") % {"data_uri": uri, "task_id": task_id}) + data_iter = script_utils.get_image_data_iter(uri) + image.set_data(data_iter) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.warn(_LW("Task %(task_id)s failed with exception %(error)s") % + {"error": common_utils.exception_to_str(e), + "task_id": task_id}) + LOG.info(_LI("Task %(task_id)s: Could not import image file" + " %(image_data)s") % {"image_data": uri, + "task_id": task_id}) + finally: + if isinstance(data_iter, file): + data_iter.close() diff --git a/code/daisy/daisy/common/scripts/utils.py b/code/daisy/daisy/common/scripts/utils.py new file mode 100755 index 00000000..fa991dc6 --- /dev/null +++ b/code/daisy/daisy/common/scripts/utils.py @@ -0,0 +1,137 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +__all__ = [ + 'get_task', + 'unpack_task_input', + 'set_base_image_properties', + 'validate_location_uri', + 'get_image_data_iter', +] + + +import urllib2 + +from oslo_log import log as logging + +from daisy.common import exception +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + + +def get_task(task_repo, task_id): + """Gets a TaskProxy object. + + :param task_repo: TaskRepo object used to perform DB operations + :param task_id: ID of the Task + """ + task = None + try: + task = task_repo.get(task_id) + except exception.NotFound: + msg = _LE('Task not found for task_id %s') % task_id + LOG.exception(msg) + + return task + + +def unpack_task_input(task): + """Verifies and returns valid task input dictionary. + + :param task: Task domain object + """ + task_input = task.task_input + + # NOTE: until we support multiple task types, we just check for + # input fields related to 'import task'. + for key in ["import_from", "import_from_format", "image_properties"]: + if key not in task_input: + msg = _("Input does not contain '%(key)s' field") % {"key": key} + raise exception.Invalid(msg) + + return task_input + + +def set_base_image_properties(properties=None): + """Sets optional base properties for creating Image. + + :param properties: Input dict to set some base properties + """ + if isinstance(properties, dict) and len(properties) == 0: + # TODO(nikhil): We can make these properties configurable while + # implementing the pipeline logic for the scripts. The below shown + # are placeholders to show that the scripts work on 'devstack' + # environment. + properties['disk_format'] = 'qcow2' + properties['container_format'] = 'bare' + + +def validate_location_uri(location): + """Validate location uri into acceptable format. 
+ + :param location: Location uri to be validated + """ + if not location: + raise exception.BadStoreUri(_('Invalid location: %s') % location) + + elif location.startswith(('http://', 'https://')): + return location + + # NOTE: file type uri is being avoided for security reasons, + # see LP bug #942118 #1400966. + elif location.startswith(("file:///", "filesystem:///")): + msg = _("File based imports are not allowed. Please use a non-local " + "source of image data.") + # NOTE: raise Exception and let the encompassing block save + # the error msg in the task.message. + raise StandardError(msg) + + else: + # TODO(nikhil): add other supported uris + supported = ['http', ] + msg = _("The given uri is not valid. Please specify a " + "valid uri from the following list of supported uri " + "%(supported)s") % {'supported': supported} + raise urllib2.URLError(msg) + + +def get_image_data_iter(uri): + """Returns iterable object either for local file or uri + + :param uri: uri (remote or local) to the datasource we want to iterate + + Validation/sanitization of the uri is expected to happen before we get + here. + """ + # NOTE(flaper87): This is safe because the input uri is already + # verified before the task is created. + if uri.startswith("file://"): + uri = uri.split("file://")[-1] + # NOTE(flaper87): The caller of this function expects to have + # an iterable object. FileObjects in python are iterable, therefore + # we are returning it as is. + # The file descriptor will be eventually cleaned up by the garbage + # collector once its ref-count is dropped to 0. That is, when there + # wont be any references pointing to this file. + # + # We're not using StringIO or other tools to avoid reading everything + # into memory. Some images may be quite heavy. + return open(uri, "r") + + return urllib2.urlopen(uri) diff --git a/code/daisy/daisy/common/semver_db.py b/code/daisy/daisy/common/semver_db.py new file mode 100755 index 00000000..56972c98 --- /dev/null +++ b/code/daisy/daisy/common/semver_db.py @@ -0,0 +1,144 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import semantic_version + +from daisy.common import exception +from daisy import i18n + +MAX_COMPONENT_LENGTH = pow(2, 16) - 1 +MAX_NUMERIC_PRERELEASE_LENGTH = 6 + +_ = i18n._ + + +class DBVersion(object): + def __init__(self, components_long, prerelease, build): + """ + Creates a DBVersion object out of 3 component fields. This initializer + is supposed to be called from SQLAlchemy if 3 database columns are + mapped to this composite field. 
+ + :param components_long: a 64-bit long value, containing numeric + components of the version + :param prerelease: a prerelease label of the version, optionally + preformatted with leading zeroes in numeric-only parts of the label + :param build: a build label of the version + """ + version_string = '%s.%s.%s' % _long_to_components(components_long) + if prerelease: + version_string += '-' + _strip_leading_zeroes_from_prerelease( + prerelease) + + if build: + version_string += '+' + build + self.version = semantic_version.Version(version_string) + + def __repr__(self): + return str(self.version) + + def __eq__(self, other): + return (isinstance(other, DBVersion) and + other.version == self.version) + + def __ne__(self, other): + return (not isinstance(other, DBVersion) + or self.version != other.version) + + def __composite_values__(self): + long_version = _version_to_long(self.version) + prerelease = _add_leading_zeroes_to_prerelease(self.version.prerelease) + build = '.'.join(self.version.build) if self.version.build else None + return long_version, prerelease, build + + +def parse(version_string): + version = semantic_version.Version.coerce(version_string) + return DBVersion(_version_to_long(version), + '.'.join(version.prerelease), + '.'.join(version.build)) + + +def _check_limit(value): + if value > MAX_COMPONENT_LENGTH: + reason = _("Version component is too " + "large (%d max)") % MAX_COMPONENT_LENGTH + raise exception.InvalidVersion(reason=reason) + + +def _version_to_long(version): + """ + Converts the numeric part of the semver version into the 64-bit long value + using the following logic: + + * major version is stored in first 16 bits of the value + * minor version is stored in next 16 bits + * patch version is stored in following 16 bits + * next 2 bits are used to store the flag: if the version has pre-release + label then these bits are 00, otherwise they are 11. Intermediate values + of the flag (01 and 10) are reserved for future usage. + * last 14 bits of the value are reserved fo future usage + + The numeric components of version are checked so their value do not exceed + 16 bits. 
+ + :param version: a semantic_version.Version object + """ + _check_limit(version.major) + _check_limit(version.minor) + _check_limit(version.patch) + major = version.major << 48 + minor = version.minor << 32 + patch = version.patch << 16 + flag = 0 if version.prerelease else 2 + flag <<= 14 + return major | minor | patch | flag + + +def _long_to_components(value): + major = value >> 48 + minor = (value - (major << 48)) >> 32 + patch = (value - (major << 48) - (minor << 32)) >> 16 + return str(major), str(minor), str(patch) + + +def _add_leading_zeroes_to_prerelease(label_tuple): + if label_tuple is None: + return None + res = [] + for component in label_tuple: + if component.isdigit(): + if len(component) > MAX_NUMERIC_PRERELEASE_LENGTH: + reason = _("Prerelease numeric component is too large " + "(%d characters " + "max)") % MAX_NUMERIC_PRERELEASE_LENGTH + raise exception.InvalidVersion(reason=reason) + res.append(component.rjust(MAX_NUMERIC_PRERELEASE_LENGTH, '0')) + else: + res.append(component) + return '.'.join(res) + + +def _strip_leading_zeroes_from_prerelease(string_value): + res = [] + for component in string_value.split('.'): + if component.isdigit(): + val = component.lstrip('0') + if len(val) == 0: # Corner case: when the component is just '0' + val = '0' # it will be stripped completely, so restore it + res.append(val) + else: + res.append(component) + return '.'.join(res) diff --git a/code/daisy/daisy/common/store_utils.py b/code/daisy/daisy/common/store_utils.py new file mode 100755 index 00000000..3d7c7519 --- /dev/null +++ b/code/daisy/daisy/common/store_utils.py @@ -0,0 +1,144 @@ +# Copyright 2014 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +import glance_store as store_api +from oslo_config import cfg +from oslo_log import log as logging +import six.moves.urllib.parse as urlparse + +from daisy.common import utils +import daisy.db as db_api +from daisy import i18n +from daisy import scrubber + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LW = i18n._LW + +store_utils_opts = [ + cfg.BoolOpt('use_user_token', default=True, + help=_('Whether to pass through the user token when ' + 'making requests to the registry.')), +] + +CONF = cfg.CONF +CONF.register_opts(store_utils_opts) + +RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config']) + + +def safe_delete_from_backend(context, image_id, location): + """ + Given a location, delete an image from the store and + update location status to db. + + This function try to handle all known exceptions which might be raised + by those calls on store and DB modules in its implementation. 
+ + :param context: The request context + :param image_id: The image identifier + :param location: The image location entry + """ + + try: + ret = store_api.delete_from_backend(location['url'], context=context) + location['status'] = 'deleted' + if 'id' in location: + db_api.get_api().image_location_delete(context, image_id, + location['id'], 'deleted') + return ret + except store_api.NotFound: + msg = _LW('Failed to delete image %s in store from URI') % image_id + LOG.warn(msg) + except store_api.StoreDeleteNotSupported as e: + LOG.warn(utils.exception_to_str(e)) + except store_api.UnsupportedBackend: + exc_type = sys.exc_info()[0].__name__ + msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') % + dict(image_id=image_id, exc=exc_type)) + LOG.error(msg) + + +def schedule_delayed_delete_from_backend(context, image_id, location): + """ + Given a location, schedule the deletion of an image location and + update location status to db. + + :param context: The request context + :param image_id: The image identifier + :param location: The image location entry + """ + + __, db_queue = scrubber.get_scrub_queues() + + if not CONF.use_user_token: + context = None + + ret = db_queue.add_location(image_id, location, user_context=context) + if ret: + location['status'] = 'pending_delete' + if 'id' in location: + # NOTE(zhiyan): New added image location entry will has no 'id' + # field since it has not been saved to DB. + db_api.get_api().image_location_delete(context, image_id, + location['id'], + 'pending_delete') + else: + db_api.get_api().image_location_add(context, image_id, location) + + return ret + + +def delete_image_location_from_backend(context, image_id, location): + """ + Given a location, immediately or schedule the deletion of an image + location and update location status to db. + + :param context: The request context + :param image_id: The image identifier + :param location: The image location entry + """ + + deleted = False + if CONF.delayed_delete: + deleted = schedule_delayed_delete_from_backend(context, + image_id, location) + if not deleted: + # NOTE(zhiyan) If image metadata has not been saved to DB + # such as uploading process failure then we can't use + # location status mechanism to support image pending delete. + safe_delete_from_backend(context, image_id, location) + + +def validate_external_location(uri): + """ + Validate if URI of external location are supported. + + Only over non-local store types are OK, i.e. S3, Swift, + HTTP. Note the absence of 'file://' for security reasons, + see LP bug #942118, 1400966, 'swift+config://' is also + absent for security reasons, see LP bug #1334196. + + :param uri: The URI of external image location. + :return: Whether given URI of external image location are OK. + """ + + # TODO(zhiyan): This function could be moved to glance_store. + # TODO(gm): Use a whitelist of allowed schemes + scheme = urlparse.urlparse(uri).scheme + return (scheme in store_api.get_known_schemes() and + scheme not in RESTRICTED_URI_SCHEMAS) diff --git a/code/daisy/daisy/common/swift_store_utils.py b/code/daisy/daisy/common/swift_store_utils.py new file mode 100755 index 00000000..e1215482 --- /dev/null +++ b/code/daisy/daisy/common/swift_store_utils.py @@ -0,0 +1,103 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ConfigParser +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict + +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import exception +from daisy import i18n + +_ = i18n._ +_LE = i18n._LE + +swift_opts = [ + cfg.StrOpt('default_swift_reference', + default="ref1", + help=_('The reference to the default swift account/backing ' + 'store parameters to use for adding new images.')), + cfg.StrOpt('swift_store_auth_address', + help=_('The address where the Swift authentication service ' + 'is listening.(deprecated)')), + cfg.StrOpt('swift_store_user', secret=True, + help=_('The user to authenticate against the Swift ' + 'authentication service (deprecated)')), + cfg.StrOpt('swift_store_key', secret=True, + help=_('Auth key for the user authenticating against the ' + 'Swift authentication service. (deprecated)')), + cfg.StrOpt('swift_store_config_file', secret=True, + help=_('The config file that has the swift account(s)' + 'configs.')), +] + +# NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but +# we must set manually for compatibility with py26 +CONFIG = ConfigParser.SafeConfigParser(dict_type=OrderedDict) +LOG = logging.getLogger(__name__) + + +CONF = cfg.CONF +CONF.register_opts(swift_opts) + + +def is_multiple_swift_store_accounts_enabled(): + if CONF.swift_store_config_file is None: + return False + return True + + +class SwiftParams(object): + def __init__(self): + if is_multiple_swift_store_accounts_enabled(): + self.params = self._load_config() + else: + self.params = self._form_default_params() + + def _form_default_params(self): + default = {} + if (CONF.swift_store_user and CONF.swift_store_key + and CONF.swift_store_auth_address): + default['user'] = CONF.swift_store_user + default['key'] = CONF.swift_store_key + default['auth_address'] = CONF.swift_store_auth_address + return {CONF.default_swift_reference: default} + return {} + + def _load_config(self): + try: + conf_file = CONF.find_file(CONF.swift_store_config_file) + CONFIG.read(conf_file) + except Exception as e: + msg = (_LE("swift config file %(conf_file)s:%(exc)s not found") % + {'conf_file': CONF.swift_store_config_file, 'exc': e}) + LOG.error(msg) + raise exception.InvalidSwiftStoreConfiguration() + account_params = {} + account_references = CONFIG.sections() + for ref in account_references: + reference = {} + try: + reference['auth_address'] = CONFIG.get(ref, 'auth_address') + reference['user'] = CONFIG.get(ref, 'user') + reference['key'] = CONFIG.get(ref, 'key') + account_params[ref] = reference + except (ValueError, SyntaxError, ConfigParser.NoOptionError) as e: + LOG.exception(_LE("Invalid format of swift store config " + "cfg")) + return account_params diff --git a/code/daisy/daisy/common/utils.py b/code/daisy/daisy/common/utils.py new file mode 100755 index 00000000..a44badef --- /dev/null +++ b/code/daisy/daisy/common/utils.py @@ -0,0 +1,804 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. 
+# Copyright 2014 SoftLayer Technologies, Inc. +# Copyright 2015 Mirantis, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +System-level utilities and helper functions. +""" + +import errno + +try: + from eventlet import sleep +except ImportError: + from time import sleep +from eventlet.green import socket + +import functools +import os +import platform +import re +import stevedore +import subprocess +import sys +import uuid + +from OpenSSL import crypto +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import encodeutils +from oslo_utils import excutils +from oslo_utils import netutils +from oslo_utils import strutils +import six +from webob import exc + +from daisy.common import exception +from daisy import i18n + +CONF = cfg.CONF + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + +FEATURE_BLACKLIST = ['content-length', 'content-type', 'x-image-meta-size'] + +# Whitelist of v1 API headers of form x-image-meta-xxx +IMAGE_META_HEADERS = ['x-image-meta-location', 'x-image-meta-size', + 'x-image-meta-is_public', 'x-image-meta-disk_format', + 'x-image-meta-container_format', 'x-image-meta-name', + 'x-image-meta-status', 'x-image-meta-copy_from', + 'x-image-meta-uri', 'x-image-meta-checksum', + 'x-image-meta-created_at', 'x-image-meta-updated_at', + 'x-image-meta-deleted_at', 'x-image-meta-min_ram', + 'x-image-meta-min_disk', 'x-image-meta-owner', + 'x-image-meta-store', 'x-image-meta-id', + 'x-image-meta-protected', 'x-image-meta-deleted', + 'x-image-meta-virtual_size'] + +DAISY_TEST_SOCKET_FD_STR = 'DAISY_TEST_SOCKET_FD' + + +def chunkreadable(iter, chunk_size=65536): + """ + Wrap a readable iterator with a reader yielding chunks of + a preferred size, otherwise leave iterator unchanged. + + :param iter: an iter which may also be readable + :param chunk_size: maximum size of chunk + """ + return chunkiter(iter, chunk_size) if hasattr(iter, 'read') else iter + + +def chunkiter(fp, chunk_size=65536): + """ + Return an iterator to a file-like obj which yields fixed size chunks + + :param fp: a file-like object + :param chunk_size: maximum size of chunk + """ + while True: + chunk = fp.read(chunk_size) + if chunk: + yield chunk + else: + break + + +def cooperative_iter(iter): + """ + Return an iterator which schedules after each + iteration. This can prevent eventlet thread starvation. + + :param iter: an iterator to wrap + """ + try: + for chunk in iter: + sleep(0) + yield chunk + except Exception as err: + with excutils.save_and_reraise_exception(): + msg = _LE("Error: cooperative_iter exception %s") % err + LOG.error(msg) + + +def cooperative_read(fd): + """ + Wrap a file descriptor's read with a partial function which schedules + after each read. This can prevent eventlet thread starvation. 
+ + :param fd: a file descriptor to wrap + """ + def readfn(*args): + result = fd.read(*args) + sleep(0) + return result + return readfn + + +MAX_COOP_READER_BUFFER_SIZE = 134217728 # 128M seems like a sane buffer limit + + +class CooperativeReader(object): + """ + An eventlet thread friendly class for reading in image data. + + When accessing data either through the iterator or the read method + we perform a sleep to allow a co-operative yield. When there is more than + one image being uploaded/downloaded this prevents eventlet thread + starvation, ie allows all threads to be scheduled periodically rather than + having the same thread be continuously active. + """ + def __init__(self, fd): + """ + :param fd: Underlying image file object + """ + self.fd = fd + self.iterator = None + # NOTE(markwash): if the underlying supports read(), overwrite the + # default iterator-based implementation with cooperative_read which + # is more straightforward + if hasattr(fd, 'read'): + self.read = cooperative_read(fd) + else: + self.iterator = None + self.buffer = '' + self.position = 0 + + def read(self, length=None): + """Return the requested amount of bytes, fetching the next chunk of + the underlying iterator when needed. + + This is replaced with cooperative_read in __init__ if the underlying + fd already supports read(). + """ + if length is None: + if len(self.buffer) - self.position > 0: + # if no length specified but some data exists in buffer, + # return that data and clear the buffer + result = self.buffer[self.position:] + self.buffer = '' + self.position = 0 + return str(result) + else: + # otherwise read the next chunk from the underlying iterator + # and return it as a whole. Reset the buffer, as subsequent + # calls may specify the length + try: + if self.iterator is None: + self.iterator = self.__iter__() + return self.iterator.next() + except StopIteration: + return '' + finally: + self.buffer = '' + self.position = 0 + else: + result = bytearray() + while len(result) < length: + if self.position < len(self.buffer): + to_read = length - len(result) + chunk = self.buffer[self.position:self.position + to_read] + result.extend(chunk) + + # This check is here to prevent potential OOM issues if + # this code is called with unreasonably high values of read + # size. Currently it is only called from the HTTP clients + # of Glance backend stores, which use httplib for data + # streaming, which has readsize hardcoded to 8K, so this + # check should never fire. Regardless it still worths to + # make the check, as the code may be reused somewhere else. + if len(result) >= MAX_COOP_READER_BUFFER_SIZE: + raise exception.LimitExceeded() + self.position += len(chunk) + else: + try: + if self.iterator is None: + self.iterator = self.__iter__() + self.buffer = self.iterator.next() + self.position = 0 + except StopIteration: + self.buffer = '' + self.position = 0 + return str(result) + return str(result) + + def __iter__(self): + return cooperative_iter(self.fd.__iter__()) + + +class LimitingReader(object): + """ + Reader designed to fail when reading image data past the configured + allowable amount. 
+ """ + def __init__(self, data, limit): + """ + :param data: Underlying image data object + :param limit: maximum number of bytes the reader should allow + """ + self.data = data + self.limit = limit + self.bytes_read = 0 + + def __iter__(self): + for chunk in self.data: + self.bytes_read += len(chunk) + if self.bytes_read > self.limit: + raise exception.ImageSizeLimitExceeded() + else: + yield chunk + + def read(self, i): + result = self.data.read(i) + self.bytes_read += len(result) + if self.bytes_read > self.limit: + raise exception.ImageSizeLimitExceeded() + return result + + +def image_meta_to_http_headers(image_meta): + """ + Returns a set of image metadata into a dict + of HTTP headers that can be fed to either a Webob + Request object or an httplib.HTTP(S)Connection object + + :param image_meta: Mapping of image metadata + """ + headers = {} + for k, v in image_meta.items(): + if v is not None: + if k == 'properties': + for pk, pv in v.items(): + if pv is not None: + headers["x-image-meta-property-%s" + % pk.lower()] = six.text_type(pv) + else: + headers["x-image-meta-%s" % k.lower()] = six.text_type(v) + return headers + + +def get_image_meta_from_headers(response): + """ + Processes HTTP headers from a supplied response that + match the x-image-meta and x-image-meta-property and + returns a mapping of image metadata and properties + + :param response: Response to process + """ + result = {} + properties = {} + + if hasattr(response, 'getheaders'): # httplib.HTTPResponse + headers = response.getheaders() + else: # webob.Response + headers = response.headers.items() + + for key, value in headers: + key = str(key.lower()) + if key.startswith('x-image-meta-property-'): + field_name = key[len('x-image-meta-property-'):].replace('-', '_') + properties[field_name] = value or None + elif key.startswith('x-image-meta-'): + field_name = key[len('x-image-meta-'):].replace('-', '_') + if 'x-image-meta-' + field_name not in IMAGE_META_HEADERS: + msg = _("Bad header: %(header_name)s") % {'header_name': key} + raise exc.HTTPBadRequest(msg, content_type="text/plain") + result[field_name] = value or None + result['properties'] = properties + + for key, nullable in [('size', False), ('min_disk', False), + ('min_ram', False), ('virtual_size', True)]: + if key in result: + try: + result[key] = int(result[key]) + except ValueError: + if nullable and result[key] == str(None): + result[key] = None + else: + extra = (_("Cannot convert image %(key)s '%(value)s' " + "to an integer.") + % {'key': key, 'value': result[key]}) + raise exception.InvalidParameterValue(value=result[key], + param=key, + extra_msg=extra) + if result[key] < 0 and result[key] is not None: + extra = (_("Image %(key)s must be >= 0 " + "('%(value)s' specified).") + % {'key': key, 'value': result[key]}) + raise exception.InvalidParameterValue(value=result[key], + param=key, + extra_msg=extra) + + for key in ('is_public', 'deleted', 'protected'): + if key in result: + result[key] = strutils.bool_from_string(result[key]) + return result + +def get_host_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_cluster_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_component_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_service_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return 
result + +def get_template_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_role_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_config_file_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_config_set_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_config_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_network_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def get_dict_meta(response): + result = {} + for key,value in response.json.items(): + result[key] = value + return result + +def create_mashup_dict(image_meta): + """ + Returns a dictionary-like mashup of the image core properties + and the image custom properties from given image metadata. + + :param image_meta: metadata of image with core and custom properties + """ + + def get_items(): + for key, value in six.iteritems(image_meta): + if isinstance(value, dict): + for subkey, subvalue in six.iteritems( + create_mashup_dict(value)): + if subkey not in image_meta: + yield subkey, subvalue + else: + yield key, value + + return dict(get_items()) + + +def safe_mkdirs(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def safe_remove(path): + try: + os.remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +class PrettyTable(object): + """Creates an ASCII art table for use in bin/glance + + Example: + + ID Name Size Hits + --- ----------------- ------------ ----- + 122 image 22 0 + """ + def __init__(self): + self.columns = [] + + def add_column(self, width, label="", just='l'): + """Add a column to the table + + :param width: number of characters wide the column should be + :param label: column heading + :param just: justification for the column, 'l' for left, + 'r' for right + """ + self.columns.append((width, label, just)) + + def make_header(self): + label_parts = [] + break_parts = [] + for width, label, _ in self.columns: + # NOTE(sirp): headers are always left justified + label_part = self._clip_and_justify(label, width, 'l') + label_parts.append(label_part) + + break_part = '-' * width + break_parts.append(break_part) + + label_line = ' '.join(label_parts) + break_line = ' '.join(break_parts) + return '\n'.join([label_line, break_line]) + + def make_row(self, *args): + row = args + row_parts = [] + for data, (width, _, just) in zip(row, self.columns): + row_part = self._clip_and_justify(data, width, just) + row_parts.append(row_part) + + row_line = ' '.join(row_parts) + return row_line + + @staticmethod + def _clip_and_justify(data, width, just): + # clip field to column width + clipped_data = str(data)[:width] + + if just == 'r': + # right justify + justified = clipped_data.rjust(width) + else: + # left justify + justified = clipped_data.ljust(width) + + return justified + + +def get_terminal_size(): + + def _get_terminal_size_posix(): + import fcntl + import struct + import termios + + height_width = None + + try: + height_width = struct.unpack('hh', fcntl.ioctl(sys.stderr.fileno(), + termios.TIOCGWINSZ, + struct.pack('HH', 0, 0))) + except Exception: + pass + + if not height_width: + try: + p = subprocess.Popen(['stty', 'size'], + 
shell=False, + stdout=subprocess.PIPE, + stderr=open(os.devnull, 'w')) + result = p.communicate() + if p.returncode == 0: + return tuple(int(x) for x in result[0].split()) + except Exception: + pass + + return height_width + + def _get_terminal_size_win32(): + try: + from ctypes import create_string_buffer + from ctypes import windll + handle = windll.kernel32.GetStdHandle(-12) + csbi = create_string_buffer(22) + res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi) + except Exception: + return None + if res: + import struct + unpack_tmp = struct.unpack("hhhhHhhhhhh", csbi.raw) + (bufx, bufy, curx, cury, wattr, + left, top, right, bottom, maxx, maxy) = unpack_tmp + height = bottom - top + 1 + width = right - left + 1 + return (height, width) + else: + return None + + def _get_terminal_size_unknownOS(): + raise NotImplementedError + + func = {'posix': _get_terminal_size_posix, + 'win32': _get_terminal_size_win32} + + height_width = func.get(platform.os.name, _get_terminal_size_unknownOS)() + + if height_width is None: + raise exception.Invalid() + + for i in height_width: + if not isinstance(i, int) or i <= 0: + raise exception.Invalid() + + return height_width[0], height_width[1] + + +def mutating(func): + """Decorator to enforce read-only logic""" + @functools.wraps(func) + def wrapped(self, req, *args, **kwargs): + if req.context.read_only: + msg = "Read-only access" + LOG.debug(msg) + raise exc.HTTPForbidden(msg, request=req, + content_type="text/plain") + return func(self, req, *args, **kwargs) + return wrapped + + +def setup_remote_pydev_debug(host, port): + error_msg = _LE('Error setting up the debug environment. Verify that the' + ' option pydev_worker_debug_host is pointing to a valid ' + 'hostname or IP on which a pydev server is listening on' + ' the port indicated by pydev_worker_debug_port.') + + try: + try: + from pydev import pydevd + except ImportError: + import pydevd + + pydevd.settrace(host, + port=port, + stdoutToServer=True, + stderrToServer=True) + return True + except Exception: + with excutils.save_and_reraise_exception(): + LOG.exception(error_msg) + + +def validate_key_cert(key_file, cert_file): + try: + error_key_name = "private key" + error_filename = key_file + with open(key_file, 'r') as keyfile: + key_str = keyfile.read() + key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_str) + + error_key_name = "certificate" + error_filename = cert_file + with open(cert_file, 'r') as certfile: + cert_str = certfile.read() + cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str) + except IOError as ioe: + raise RuntimeError(_("There is a problem with your %(error_key_name)s " + "%(error_filename)s. Please verify it." + " Error: %(ioe)s") % + {'error_key_name': error_key_name, + 'error_filename': error_filename, + 'ioe': ioe}) + except crypto.Error as ce: + raise RuntimeError(_("There is a problem with your %(error_key_name)s " + "%(error_filename)s. Please verify it. OpenSSL" + " error: %(ce)s") % + {'error_key_name': error_key_name, + 'error_filename': error_filename, + 'ce': ce}) + + try: + data = str(uuid.uuid4()) + digest = CONF.digest_algorithm + if digest == 'sha1': + LOG.warn('The FIPS (FEDERAL INFORMATION PROCESSING STANDARDS)' + ' state that the SHA-1 is not suitable for' + ' general-purpose digital signature applications (as' + ' specified in FIPS 186-3) that require 112 bits of' + ' security. 
The default value is sha1 in Kilo for a' + ' smooth upgrade process, and it will be updated' + ' with sha256 in next release(L).') + out = crypto.sign(key, data, digest) + crypto.verify(cert, out, data, digest) + except crypto.Error as ce: + raise RuntimeError(_("There is a problem with your key pair. " + "Please verify that cert %(cert_file)s and " + "key %(key_file)s belong together. OpenSSL " + "error %(ce)s") % {'cert_file': cert_file, + 'key_file': key_file, + 'ce': ce}) + + +def get_test_suite_socket(): + global DAISY_TEST_SOCKET_FD_STR + if DAISY_TEST_SOCKET_FD_STR in os.environ: + fd = int(os.environ[DAISY_TEST_SOCKET_FD_STR]) + sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) + sock = socket.SocketType(_sock=sock) + sock.listen(CONF.backlog) + del os.environ[DAISY_TEST_SOCKET_FD_STR] + os.close(fd) + return sock + return None + + +def is_uuid_like(val): + """Returns validation of a value as a UUID. + + For our purposes, a UUID is a canonical form string: + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + """ + try: + return str(uuid.UUID(val)) == val + except (TypeError, ValueError, AttributeError): + return False + + +def is_valid_hostname(hostname): + """Verify whether a hostname (not an FQDN) is valid.""" + return re.match('^[a-zA-Z0-9-]+$', hostname) is not None + + +def is_valid_fqdn(fqdn): + """Verify whether a host is a valid FQDN.""" + return re.match('^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', fqdn) is not None + + +def parse_valid_host_port(host_port): + """ + Given a "host:port" string, attempts to parse it as intelligently as + possible to determine if it is valid. This includes IPv6 [host]:port form, + IPv4 ip:port form, and hostname:port or fqdn:port form. + + Invalid inputs will raise a ValueError, while valid inputs will return + a (host, port) tuple where the port will always be of type int. + """ + + try: + try: + host, port = netutils.parse_host_port(host_port) + except Exception: + raise ValueError(_('Host and port "%s" is not valid.') % host_port) + + if not netutils.is_valid_port(port): + raise ValueError(_('Port "%s" is not valid.') % port) + + # First check for valid IPv6 and IPv4 addresses, then a generic + # hostname. Failing those, if the host includes a period, then this + # should pass a very generic FQDN check. The FQDN check for letters at + # the tail end will weed out any hilariously absurd IPv4 addresses. + + if not (netutils.is_valid_ipv6(host) or netutils.is_valid_ipv4(host) or + is_valid_hostname(host) or is_valid_fqdn(host)): + raise ValueError(_('Host "%s" is not valid.') % host) + + except Exception as ex: + raise ValueError(_('%s ' + 'Please specify a host:port pair, where host is an ' + 'IPv4 address, IPv6 address, hostname, or FQDN. If ' + 'using an IPv6 address, enclose it in brackets ' + 'separately from the port (i.e., ' + '"[fe80::a:b:c]:9876").') % ex) + + return (host, int(port)) + + +def exception_to_str(exc): + try: + error = six.text_type(exc) + except UnicodeError: + try: + error = str(exc) + except UnicodeError: + error = ("Caught '%(exception)s' exception." 
% + {"exception": exc.__class__.__name__}) + return encodeutils.safe_encode(error, errors='ignore') + + +try: + REGEX_4BYTE_UNICODE = re.compile(u'[\U00010000-\U0010ffff]') +except re.error: + # UCS-2 build case + REGEX_4BYTE_UNICODE = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]') + + +def no_4byte_params(f): + """ + Checks that no 4 byte unicode characters are allowed + in dicts' keys/values and string's parameters + """ + def wrapper(*args, **kwargs): + + def _is_match(some_str): + return (isinstance(some_str, unicode) and + REGEX_4BYTE_UNICODE.findall(some_str) != []) + + def _check_dict(data_dict): + # a dict of dicts has to be checked recursively + for key, value in data_dict.iteritems(): + if isinstance(value, dict): + _check_dict(value) + else: + if _is_match(key): + msg = _("Property names can't contain 4 byte unicode.") + raise exception.Invalid(msg) + if _is_match(value): + msg = (_("%s can't contain 4 byte unicode characters.") + % key.title()) + raise exception.Invalid(msg) + + for data_dict in [arg for arg in args if isinstance(arg, dict)]: + _check_dict(data_dict) + # now check args for str values + for arg in args: + if _is_match(arg): + msg = _("Param values can't contain 4 byte unicode.") + raise exception.Invalid(msg) + # check kwargs as well, as params are passed as kwargs via + # registry calls + _check_dict(kwargs) + return f(*args, **kwargs) + return wrapper + + +def stash_conf_values(): + """ + Make a copy of some of the current global CONF's settings. + Allows determining if any of these values have changed + when the config is reloaded. + """ + conf = {} + conf['bind_host'] = CONF.bind_host + conf['bind_port'] = CONF.bind_port + conf['tcp_keepidle'] = CONF.cert_file + conf['backlog'] = CONF.backlog + conf['key_file'] = CONF.key_file + conf['cert_file'] = CONF.cert_file + + return conf + + +def get_search_plugins(): + namespace = 'daisy.search.index_backend' + ext_manager = stevedore.extension.ExtensionManager( + namespace, invoke_on_load=True) + return ext_manager.extensions diff --git a/code/daisy/daisy/common/wsgi.py b/code/daisy/daisy/common/wsgi.py new file mode 100755 index 00000000..6aed981e --- /dev/null +++ b/code/daisy/daisy/common/wsgi.py @@ -0,0 +1,916 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack Foundation +# Copyright 2014 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Utility methods for working with WSGI servers +""" +from __future__ import print_function + +import errno +import functools +import os +import signal +import sys +import time + +import eventlet +from eventlet.green import socket +from eventlet.green import ssl +import eventlet.greenio +import eventlet.wsgi +import glance_store +from oslo.serialization import jsonutils +from oslo_concurrency import processutils +from oslo_config import cfg +from oslo_log import log as logging +from oslo_log import loggers +import routes +import routes.middleware +import six +import webob.dec +import webob.exc +from webob import multidict + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + + +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +bind_opts = [ + cfg.StrOpt('bind_host', default='0.0.0.0', + help=_('Address to bind the server. Useful when ' + 'selecting a particular network interface.')), + cfg.IntOpt('bind_port', + help=_('The port on which the server will listen.')), +] + +socket_opts = [ + cfg.IntOpt('backlog', default=4096, + help=_('The backlog value that will be used when creating the ' + 'TCP listener socket.')), + cfg.IntOpt('tcp_keepidle', default=600, + help=_('The value for the socket option TCP_KEEPIDLE. This is ' + 'the time in seconds that the connection must be idle ' + 'before TCP starts sending keepalive probes.')), + cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify ' + 'connecting clients.')), + cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API ' + 'server securely.')), + cfg.StrOpt('key_file', help=_('Private key file to use when starting API ' + 'server securely.')), +] + +eventlet_opts = [ + cfg.IntOpt('workers', default=processutils.get_worker_count(), + help=_('The number of child process workers that will be ' + 'created to service requests. The default will be ' + 'equal to the number of CPUs available.')), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. ' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs')), + cfg.BoolOpt('http_keepalive', default=True, + help=_('If False, server will return the header ' + '"Connection: close", ' + 'If True, server will return "Connection: Keep-Alive" ' + 'in its responses. 
In order to close the client socket ' + 'connection explicitly after the response is sent and ' + 'read successfully by the client, you simply have to ' + 'set this option to False when you create a wsgi ' + 'server.')), +] + +profiler_opts = [ + cfg.BoolOpt("enabled", default=False, + help=_('If False fully disable profiling feature.')), + cfg.BoolOpt("trace_sqlalchemy", default=False, + help=_("If False doesn't trace SQL requests.")) +] + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF +CONF.register_opts(bind_opts) +CONF.register_opts(socket_opts) +CONF.register_opts(eventlet_opts) +CONF.register_opts(profiler_opts, group="profiler") + +ASYNC_EVENTLET_THREAD_POOL_LIST = [] + + +def get_bind_addr(default_port=None): + """Return the host and port to bind to.""" + return (CONF.bind_host, CONF.bind_port or default_port) + + +def ssl_wrap_socket(sock): + """ + Wrap an existing socket in SSL + + :param sock: non-SSL socket to wrap + + :returns: An SSL wrapped socket + """ + utils.validate_key_cert(CONF.key_file, CONF.cert_file) + + ssl_kwargs = { + 'server_side': True, + 'certfile': CONF.cert_file, + 'keyfile': CONF.key_file, + 'cert_reqs': ssl.CERT_NONE, + } + + if CONF.ca_file: + ssl_kwargs['ca_certs'] = CONF.ca_file + ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED + + return ssl.wrap_socket(sock, **ssl_kwargs) + + +def get_socket(default_port): + """ + Bind socket to bind ip:port in conf + + note: Mostly comes from Swift with a few small changes... + + :param default_port: port to bind to if none is specified in conf + + :returns : a socket object as returned from socket.listen or + ssl.wrap_socket if conf specifies cert_file + """ + bind_addr = get_bind_addr(default_port) + + # TODO(jaypipes): eventlet's greened socket module does not actually + # support IPv6 in getaddrinfo(). We need to get around this in the + # future or monitor upstream for a fix + address_family = [ + addr[0] for addr in socket.getaddrinfo(bind_addr[0], + bind_addr[1], + socket.AF_UNSPEC, + socket.SOCK_STREAM) + if addr[0] in (socket.AF_INET, socket.AF_INET6) + ][0] + + use_ssl = CONF.key_file or CONF.cert_file + if use_ssl and (not CONF.key_file or not CONF.cert_file): + raise RuntimeError(_("When running server in SSL mode, you must " + "specify both a cert_file and key_file " + "option value in your configuration file")) + + sock = utils.get_test_suite_socket() + retry_until = time.time() + 30 + + while not sock and time.time() < retry_until: + try: + sock = eventlet.listen(bind_addr, + backlog=CONF.backlog, + family=address_family) + except socket.error as err: + if err.args[0] != errno.EADDRINUSE: + raise + eventlet.sleep(0.1) + if not sock: + raise RuntimeError(_("Could not bind to %(host)s:%(port)s after" + " trying for 30 seconds") % + {'host': bind_addr[0], + 'port': bind_addr[1]}) + + return sock + + +def set_eventlet_hub(): + try: + eventlet.hubs.use_hub('poll') + except Exception: + try: + eventlet.hubs.use_hub('selects') + except Exception: + msg = _("eventlet 'poll' nor 'selects' hubs are available " + "on this platform") + raise exception.WorkerCreationFailure( + reason=msg) + + +def initialize_glance_store(): + """Initialize glance store.""" + glance_store.register_opts(CONF) + glance_store.create_stores(CONF) + glance_store.verify_default_store() + + +def get_asynchronous_eventlet_pool(size=1000): + """Return eventlet pool to caller. + + Also store pools created in global list, to wait on + it after getting signal for graceful shutdown. 
+ + :param size: eventlet pool size + :returns: eventlet pool + """ + global ASYNC_EVENTLET_THREAD_POOL_LIST + + pool = eventlet.GreenPool(size=size) + # Add pool to global ASYNC_EVENTLET_THREAD_POOL_LIST + ASYNC_EVENTLET_THREAD_POOL_LIST.append(pool) + + return pool + + +class Server(object): + """Server class to manage multiple WSGI sockets and applications. + + This class requires initialize_glance_store set to True if + glance store needs to be initialized. + """ + def __init__(self, threads=1000, initialize_glance_store=False): + os.umask(0o27) # ensure files are created with the correct privileges + self._logger = logging.getLogger("eventlet.wsgi.server") + self._wsgi_logger = loggers.WritableLogger(self._logger) + self.threads = threads + self.children = set() + self.stale_children = set() + self.running = True + # NOTE(abhishek): Allows us to only re-initialize glance_store when + # the API's configuration reloads. + self.initialize_glance_store = initialize_glance_store + self.pgid = os.getpid() + try: + # NOTE(flaper87): Make sure this process + # runs in its own process group. + os.setpgid(self.pgid, self.pgid) + except OSError: + # NOTE(flaper87): When running glance-control, + # (glance's functional tests, for example) + # setpgid fails with EPERM as glance-control + # creates a fresh session, of which the newly + # launched service becomes the leader (session + # leaders may not change process groups) + # + # Running glance-(api|registry) is safe and + # shouldn't raise any error here. + self.pgid = 0 + + def hup(self, *args): + """ + Reloads configuration files with zero down time + """ + signal.signal(signal.SIGHUP, signal.SIG_IGN) + raise exception.SIGHUPInterrupt + + def kill_children(self, *args): + """Kills the entire process group.""" + signal.signal(signal.SIGTERM, signal.SIG_IGN) + signal.signal(signal.SIGINT, signal.SIG_IGN) + self.running = False + os.killpg(self.pgid, signal.SIGTERM) + + def start(self, application, default_port): + """ + Run a WSGI server with the given application. + + :param application: The application to be run in the WSGI server + :param default_port: Port to bind to if none is specified in conf + """ + self.application = application + self.default_port = default_port + self.configure() + self.start_wsgi() + + def start_wsgi(self): + if CONF.workers == 0: + # Useful for profiling, test, debug etc. + self.pool = self.create_pool() + self.pool.spawn_n(self._single_run, self.application, self.sock) + return + else: + LOG.info(_LI("Starting %d workers") % CONF.workers) + signal.signal(signal.SIGTERM, self.kill_children) + signal.signal(signal.SIGINT, self.kill_children) + signal.signal(signal.SIGHUP, self.hup) + while len(self.children) < CONF.workers: + self.run_child() + + def create_pool(self): + return eventlet.GreenPool(size=self.threads) + + def _remove_children(self, pid): + if pid in self.children: + self.children.remove(pid) + LOG.info(_LI('Removed dead child %s') % pid) + elif pid in self.stale_children: + self.stale_children.remove(pid) + LOG.info(_LI('Removed stale child %s') % pid) + else: + LOG.warn(_LW('Unrecognised child %s') % pid) + + def _verify_and_respawn_children(self, pid, status): + if len(self.stale_children) == 0: + LOG.debug('No stale children') + if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: + LOG.error(_LE('Not respawning child %d, cannot ' + 'recover from termination') % pid) + if not self.children and not self.stale_children: + LOG.info( + _LI('All workers have terminated. 
Exiting')) + self.running = False + else: + if len(self.children) < CONF.workers: + self.run_child() + + def wait_on_children(self): + while self.running: + try: + pid, status = os.wait() + if os.WIFEXITED(status) or os.WIFSIGNALED(status): + self._remove_children(pid) + self._verify_and_respawn_children(pid, status) + except OSError as err: + if err.errno not in (errno.EINTR, errno.ECHILD): + raise + except KeyboardInterrupt: + LOG.info(_LI('Caught keyboard interrupt. Exiting.')) + break + except exception.SIGHUPInterrupt: + self.reload() + continue + eventlet.greenio.shutdown_safe(self.sock) + self.sock.close() + LOG.debug('Exited') + + def configure(self, old_conf=None, has_changed=None): + """ + Apply configuration settings + + :param old_conf: Cached old configuration settings (if any) + :param has changed: callable to determine if a parameter has changed + """ + eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line + self.configure_socket(old_conf, has_changed) + if self.initialize_glance_store: + initialize_glance_store() + + def reload(self): + """ + Reload and re-apply configuration settings + + Existing child processes are sent a SIGHUP signal + and will exit after completing existing requests. + New child processes, which will have the updated + configuration, are spawned. This allows preventing + interruption to the service. + """ + def _has_changed(old, new, param): + old = old.get(param) + new = getattr(new, param) + return (new != old) + + old_conf = utils.stash_conf_values() + has_changed = functools.partial(_has_changed, old_conf, CONF) + CONF.reload_config_files() + os.killpg(self.pgid, signal.SIGHUP) + self.stale_children = self.children + self.children = set() + + # Ensure any logging config changes are picked up + logging.setup(CONF, 'glance') + + self.configure(old_conf, has_changed) + self.start_wsgi() + + def wait(self): + """Wait until all servers have completed running.""" + try: + if self.children: + self.wait_on_children() + else: + self.pool.waitall() + except KeyboardInterrupt: + pass + + def run_child(self): + def child_hup(*args): + """Shuts down child processes, existing requests are handled.""" + signal.signal(signal.SIGHUP, signal.SIG_IGN) + eventlet.wsgi.is_accepting = False + self.sock.close() + + pid = os.fork() + if pid == 0: + signal.signal(signal.SIGHUP, child_hup) + signal.signal(signal.SIGTERM, signal.SIG_DFL) + # ignore the interrupt signal to avoid a race whereby + # a child worker receives the signal before the parent + # and is respawned unnecessarily as a result + signal.signal(signal.SIGINT, signal.SIG_IGN) + # The child has no need to stash the unwrapped + # socket, and the reference prevents a clean + # exit on sighup + self._sock = None + self.run_server() + LOG.info(_LI('Child %d exiting normally') % os.getpid()) + # self.pool.waitall() is now called in wsgi's server so + # it's safe to exit here + sys.exit(0) + else: + LOG.info(_LI('Started child %s') % pid) + self.children.add(pid) + + def run_server(self): + """Run a WSGI server.""" + if cfg.CONF.pydev_worker_debug_host: + utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host, + cfg.CONF.pydev_worker_debug_port) + + eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0" + self.pool = self.create_pool() + try: + eventlet.wsgi.server(self.sock, + self.application, + log=self._wsgi_logger, + custom_pool=self.pool, + debug=False, + keepalive=CONF.http_keepalive) + except socket.error as err: + if err[0] != errno.EINVAL: + raise + + # waiting on async pools + if 
ASYNC_EVENTLET_THREAD_POOL_LIST: + for pool in ASYNC_EVENTLET_THREAD_POOL_LIST: + pool.waitall() + + def _single_run(self, application, sock): + """Start a WSGI server in a new green thread.""" + LOG.info(_LI("Starting single process server")) + eventlet.wsgi.server(sock, application, custom_pool=self.pool, + log=self._wsgi_logger, + debug=False, + keepalive=CONF.http_keepalive) + + def configure_socket(self, old_conf=None, has_changed=None): + """ + Ensure a socket exists and is appropriately configured. + + This function is called on start up, and can also be + called in the event of a configuration reload. + + When called for the first time a new socket is created. + If reloading and either bind_host or bind port have been + changed the existing socket must be closed and a new + socket opened (laws of physics). + + In all other cases (bind_host/bind_port have not changed) + the existing socket is reused. + + :param old_conf: Cached old configuration settings (if any) + :param has changed: callable to determine if a parameter has changed + """ + # Do we need a fresh socket? + new_sock = (old_conf is None or ( + has_changed('bind_host') or + has_changed('bind_port'))) + # Will we be using https? + use_ssl = not (not CONF.cert_file or not CONF.key_file) + # Were we using https before? + old_use_ssl = (old_conf is not None and not ( + not old_conf.get('key_file') or + not old_conf.get('cert_file'))) + # Do we now need to perform an SSL wrap on the socket? + wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock) + # Do we now need to perform an SSL unwrap on the socket? + unwrap_sock = use_ssl is False and old_use_ssl is True + + if new_sock: + self._sock = None + if old_conf is not None: + self.sock.close() + _sock = get_socket(self.default_port) + _sock.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + # sockets can hang around forever without keepalive + _sock.setsockopt(socket.SOL_SOCKET, + socket.SO_KEEPALIVE, 1) + self._sock = _sock + + if wrap_sock: + self.sock = ssl_wrap_socket(self._sock) + + if unwrap_sock: + self.sock = self._sock + + if new_sock and not use_ssl: + self.sock = self._sock + + # Pick up newly deployed certs + if old_conf is not None and use_ssl is True and old_use_ssl is True: + if has_changed('cert_file') or has_changed('key_file'): + utils.validate_key_cert(CONF.key_file, CONF.cert_file) + if has_changed('cert_file'): + self.sock.certfile = CONF.cert_file + if has_changed('key_file'): + self.sock.keyfile = CONF.key_file + + if new_sock or (old_conf is not None and has_changed('tcp_keepidle')): + # This option isn't available in the OS X version of eventlet + if hasattr(socket, 'TCP_KEEPIDLE'): + self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, + CONF.tcp_keepidle) + + if old_conf is not None and has_changed('backlog'): + self.sock.listen(CONF.backlog) + + +class Middleware(object): + """ + Base WSGI middleware wrapper. These classes require an application to be + initialized that will be called next. By default the middleware will + simply call its wrapped app, or you can override __call__ to customize its + behavior. + """ + + def __init__(self, application): + self.application = application + + @classmethod + def factory(cls, global_conf, **local_conf): + def filter(app): + return cls(app) + return filter + + def process_request(self, req): + """ + Called on each request. + + If this returns None, the next application down the stack will be + executed. 
If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + + @webob.dec.wsgify + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + response.request = req + try: + return self.process_response(response) + except webob.exc.HTTPException as e: + return e + + +class Debug(Middleware): + """ + Helper class that can be inserted into any WSGI application chain + to get information about the request and response. + """ + + @webob.dec.wsgify + def __call__(self, req): + print(("*" * 40) + " REQUEST ENVIRON") + for key, value in req.environ.items(): + print(key, "=", value) + print('') + resp = req.get_response(self.application) + + print(("*" * 40) + " RESPONSE HEADERS") + for (key, value) in six.iteritems(resp.headers): + print(key, "=", value) + print('') + + resp.app_iter = self.print_generator(resp.app_iter) + + return resp + + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print(("*" * 40) + " BODY") + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print() + + +class APIMapper(routes.Mapper): + """ + Handle route matching when url is '' because routes.Mapper returns + an error in this case. + """ + + def routematch(self, url=None, environ=None): + if url is "": + result = self._match("", environ) + return result[0], result[1] + return routes.Mapper.routematch(self, url, environ) + + +class RejectMethodController(object): + def reject(self, req, allowed_methods, *args, **kwargs): + LOG.debug("The method %s is not allowed for this resource" % + req.environ['REQUEST_METHOD']) + raise webob.exc.HTTPMethodNotAllowed( + headers=[('Allow', allowed_methods)]) + + +class Router(object): + """ + WSGI middleware that maps incoming requests to WSGI apps. + """ + + def __init__(self, mapper): + """ + Create a router for the given routes.Mapper. + + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. + + Examples: + mapper = routes.Mapper() + sc = ServerController() + + # Explicit mapping of one route to a controller+action + mapper.connect(None, "/svrlist", controller=sc, action="list") + + # Actions are all implicitly defined + mapper.resource("server", "servers", controller=sc) + + # Pointing to an arbitrary WSGI app. You can specify the + # {path_info:.*} parameter so the target app can be handed just that + # section of the URL. + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) + """ + mapper.redirect("", "/") + self.map = mapper + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + self.map) + + @classmethod + def factory(cls, global_conf, **local_conf): + return cls(APIMapper()) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return either a 404(Not Found) or 501(Not Implemented). + """ + return self._router + + @staticmethod + @webob.dec.wsgify + def _dispatch(req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
Either returns 404, + 501, or the routed WSGI app's response. + """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + implemented_http_methods = ['GET', 'HEAD', 'POST', 'PUT', + 'DELETE', 'PATCH'] + if req.environ['REQUEST_METHOD'] not in implemented_http_methods: + return webob.exc.HTTPNotImplemented() + else: + return webob.exc.HTTPNotFound() + app = match['controller'] + return app + + +class Request(webob.Request): + """Add some OpenStack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type.""" + supported = ('application/json',) + bm = self.accept.best_match(supported) + return bm or 'application/json' + + def get_content_type(self, allowed_content_types): + """Determine content type of the request body.""" + if "Content-Type" not in self.headers: + raise exception.InvalidContentType(content_type=None) + + content_type = self.content_type + + if content_type not in allowed_content_types: + raise exception.InvalidContentType(content_type=content_type) + else: + return content_type + + def best_match_language(self): + """Determines best available locale from the Accept-Language header. + + :returns: the best language match or None if the 'Accept-Language' + header was not available in the request. + """ + if not self.accept_language: + return None + langs = i18n.get_available_languages('glance') + return self.accept_language.best_match(langs) + + def get_content_range(self): + """Return the `Range` in a request.""" + range_str = self.headers.get('Content-Range') + if range_str is not None: + range_ = webob.byterange.ContentRange.parse(range_str) + if range_ is None: + msg = _('Malformed Content-Range header: %s') % range_str + raise webob.exc.HTTPBadRequest(explanation=msg) + return range_ + + +class JSONRequestDeserializer(object): + valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate', + 'gzip', 'identity']) + + def has_body(self, request): + """ + Returns whether a Webob.Request object will possess an entity body. 
+ + :param request: Webob.Request object + """ + request_encoding = request.headers.get('transfer-encoding', '').lower() + is_valid_encoding = request_encoding in self.valid_transfer_encoding + if is_valid_encoding and request.is_body_readable: + return True + elif request.content_length > 0: + return True + + return False + + @staticmethod + def _sanitizer(obj): + """Sanitizer method that will be passed to jsonutils.loads.""" + return obj + + def from_json(self, datastring): + try: + return jsonutils.loads(datastring, object_hook=self._sanitizer) + except ValueError: + msg = _('Malformed JSON in request body.') + raise webob.exc.HTTPBadRequest(explanation=msg) + + def default(self, request): + if self.has_body(request): + return {'body': self.from_json(request.body)} + else: + return {} + + +class JSONResponseSerializer(object): + + def _sanitizer(self, obj): + """Sanitizer method that will be passed to jsonutils.dumps.""" + if hasattr(obj, "to_dict"): + return obj.to_dict() + if isinstance(obj, multidict.MultiDict): + return obj.mixed() + return jsonutils.to_primitive(obj) + + def to_json(self, data): + return jsonutils.dumps(data, default=self._sanitizer) + + def default(self, response, result): + response.content_type = 'application/json' + response.body = self.to_json(result) + + +def translate_exception(req, e): + """Translates all translatable elements of the given exception.""" + + # The RequestClass attribute in the webob.dec.wsgify decorator + # does not guarantee that the request object will be a particular + # type; this check is therefore necessary. + if not hasattr(req, "best_match_language"): + return e + + locale = req.best_match_language() + + if isinstance(e, webob.exc.HTTPError): + e.explanation = i18n.translate(e.explanation, locale) + e.detail = i18n.translate(e.detail, locale) + if getattr(e, 'body_template', None): + e.body_template = i18n.translate(e.body_template, locale) + return e + + +class Resource(object): + """ + WSGI app that handles (de)serialization and controller dispatch. + + Reads routing information supplied by RoutesMiddleware and calls + the requested action method upon its deserializer, controller, + and serializer. Those three objects may implement any of the basic + controller action methods (create, update, show, index, delete) + along with any that may be specified in the api router. A 'default' + method may also be implemented to be used in place of any + non-implemented actions. Deserializer methods must accept a request + argument and return a dictionary. Controller methods must accept a + request argument. Additionally, they must also accept keyword + arguments that represent the keys returned by the Deserializer. They + may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. 
+ """ + + def __init__(self, controller, deserializer=None, serializer=None): + """ + :param controller: object that implement methods created by routes lib + :param deserializer: object that supports webob request deserialization + through controller-like actions + :param serializer: object that supports webob response serialization + through controller-like actions + """ + self.controller = controller + self.serializer = serializer or JSONResponseSerializer() + self.deserializer = deserializer or JSONRequestDeserializer() + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + try: + deserialized_request = self.dispatch(self.deserializer, + action, request) + action_args.update(deserialized_request) + action_result = self.dispatch(self.controller, action, + request, **action_args) + except webob.exc.WSGIHTTPException as e: + exc_info = sys.exc_info() + raise translate_exception(request, e), None, exc_info[2] + + try: + response = webob.Response(request=request) + self.dispatch(self.serializer, action, response, action_result) + return response + except webob.exc.WSGIHTTPException as e: + return translate_exception(request, e) + except webob.exc.HTTPException as e: + return e + # return unserializable result (typically a webob exc) + except Exception: + return action_result + + def dispatch(self, obj, action, *args, **kwargs): + """Find action-specific method on self and call it.""" + try: + method = getattr(obj, action) + except AttributeError: + method = getattr(obj, 'default') + + return method(*args, **kwargs) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except Exception: + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args diff --git a/code/daisy/daisy/common/wsme_utils.py b/code/daisy/daisy/common/wsme_utils.py new file mode 100755 index 00000000..82f4cbd0 --- /dev/null +++ b/code/daisy/daisy/common/wsme_utils.py @@ -0,0 +1,70 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from datetime import datetime + +from oslo_utils import timeutils +from wsme import types as wsme_types + + +class WSMEModelTransformer(object): + + def to_dict(self): + # Return the wsme_attributes names:values as a dict + my_dict = {} + for attribute in self._wsme_attributes: + value = getattr(self, attribute.name) + if value is not wsme_types.Unset: + my_dict.update({attribute.name: value}) + return my_dict + + @classmethod + def to_wsme_model(model, db_entity, self_link=None, schema=None): + # Return the wsme_attributes names:values as a dict + names = [] + for attribute in model._wsme_attributes: + names.append(attribute.name) + + values = {} + for name in names: + value = getattr(db_entity, name, None) + if value is not None: + if type(value) == datetime: + iso_datetime_value = timeutils.isotime(value) + values.update({name: iso_datetime_value}) + else: + values.update({name: value}) + + if schema: + values['schema'] = schema + + model_object = model(**values) + + # 'self' kwarg is used in wsme.types.Base.__init__(self, ..) and + # conflicts during initialization. self_link is a proxy field to self. + if self_link: + model_object.self = self_link + + return model_object + + @classmethod + def get_mandatory_attrs(cls): + return [attr.name for attr in cls._wsme_attributes if attr.mandatory] + + +def _get_value(obj): + if obj is not wsme_types.Unset: + return obj + else: + return None diff --git a/code/daisy/daisy/context.py b/code/daisy/daisy/context.py new file mode 100755 index 00000000..eb57f3e3 --- /dev/null +++ b/code/daisy/daisy/context.py @@ -0,0 +1,60 @@ +# Copyright 2011-2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_context import context + +from daisy.api import policy + + +class RequestContext(context.RequestContext): + """Stores information about the security context. + + Stores how the user accesses the system, as well as additional request + information. 
+ + """ + + def __init__(self, roles=None, + owner_is_tenant=True, service_catalog=None, + policy_enforcer=None, **kwargs): + super(RequestContext, self).__init__(**kwargs) + self.roles = roles or [] + self.owner_is_tenant = owner_is_tenant + self.service_catalog = service_catalog + self.policy_enforcer = policy_enforcer or policy.Enforcer() + if not self.is_admin: + self.is_admin = self.policy_enforcer.check_is_admin(self) + + def to_dict(self): + d = super(RequestContext, self).to_dict() + d.update({ + 'roles': self.roles, + 'service_catalog': self.service_catalog, + }) + return d + + @classmethod + def from_dict(cls, values): + return cls(**values) + + @property + def owner(self): + """Return the owner to correlate with an image.""" + return self.tenant if self.owner_is_tenant else self.user + + @property + def can_see_deleted(self): + """Admins can see deleted by default""" + return self.show_deleted or self.is_admin diff --git a/code/daisy/daisy/contrib/__init__.py b/code/daisy/daisy/contrib/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/__init__.py b/code/daisy/daisy/contrib/plugins/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/__init__.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/__init__.py new file mode 100755 index 00000000..f730c439 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/__init__.py @@ -0,0 +1,5 @@ +from v1 import artifact as art1 +from v2 import artifact as art2 + + +MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact] diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/base.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/base.py new file mode 100755 index 00000000..6eb0b283 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/base.py @@ -0,0 +1,29 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from daisy.common.artifacts import definitions + + +class BaseArtifact(definitions.ArtifactType): + __type_version__ = "1.0" + prop1 = definitions.String() + prop2 = definitions.Integer() + int_list = definitions.Array(item_type=definitions.Integer(max_value=10, + min_value=1)) + depends_on = definitions.ArtifactReference(type_name='MyArtifact') + references = definitions.ArtifactReferenceList() + + image_file = definitions.BinaryObject() + screenshots = definitions.BinaryObjectList() diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.cfg b/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.cfg new file mode 100755 index 00000000..ad13e0ad --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.cfg @@ -0,0 +1,25 @@ +[metadata] +name = artifact +version = 0.0.1 +description = A sample plugin for artifact loading +author = Inessa Vasilevskaya +author-email = ivasilevskaya@mirantis.com +classifier = + Development Status :: 3 - Alpha + License :: OSI Approved :: Apache Software License + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.2 + Programming Language :: Python :: 3.3 + Intended Audience :: Developers + Environment :: Console + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +daisy.artifacts.types = + MyArtifact = daisy.contrib.plugins.artifacts_sample:MY_ARTIFACT diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.py new file mode 100755 index 00000000..2a3ea51e --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/setup.py @@ -0,0 +1,20 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import setuptools + +# all other params will be taken from setup.cfg +setuptools.setup(packages=setuptools.find_packages(), + setup_requires=['pbr'], pbr=True) diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/v1/__init__.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/v1/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/v1/artifact.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/v1/artifact.py new file mode 100755 index 00000000..e3ae21a4 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/v1/artifact.py @@ -0,0 +1,21 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +from daisy.contrib.plugins.artifacts_sample import base + + +class MyArtifact(base.BaseArtifact): + __type_version__ = "1.0.1" diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/v2/__init__.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/v2/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/artifacts_sample/v2/artifact.py b/code/daisy/daisy/contrib/plugins/artifacts_sample/v2/artifact.py new file mode 100755 index 00000000..6cd65936 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/artifacts_sample/v2/artifact.py @@ -0,0 +1,23 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from daisy.common.artifacts import definitions +from daisy.contrib.plugins.artifacts_sample import base + + +class MyArtifact(base.BaseArtifact): + __type_version__ = "2.0" + depends_on = definitions.ArtifactReference(type_name="MyArtifact") diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/__init__.py b/code/daisy/daisy/contrib/plugins/image_artifact/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/requirements.txt b/code/daisy/daisy/contrib/plugins/image_artifact/requirements.txt new file mode 100755 index 00000000..5cee777d --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/requirements.txt @@ -0,0 +1 @@ +python-glanceclient diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/setup.cfg b/code/daisy/daisy/contrib/plugins/image_artifact/setup.cfg new file mode 100755 index 00000000..5c42ba60 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/setup.cfg @@ -0,0 +1,25 @@ +[metadata] +name = image_artifact_plugin +version = 2.0 +description = An artifact plugin for Imaging functionality +author = Alexander Tivelkov +author-email = ativelkov@mirantis.com +classifier = + Development Status :: 3 - Alpha + License :: OSI Approved :: Apache Software License + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.2 + Programming Language :: Python :: 3.3 + Intended Audience :: Developers + Environment :: Console + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +daisy.artifacts.types = + Image = daisy.contrib.plugins.image_artifact.version_selector:versions diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/setup.py b/code/daisy/daisy/contrib/plugins/image_artifact/setup.py new file mode 100755 index 00000000..2a3ea51e --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/setup.py @@ -0,0 +1,20 @@ +# Copyright 2011-2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import setuptools + +# all other params will be taken from setup.cfg +setuptools.setup(packages=setuptools.find_packages(), + setup_requires=['pbr'], pbr=True) diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v1/__init__.py b/code/daisy/daisy/contrib/plugins/image_artifact/v1/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v1/image.py b/code/daisy/daisy/contrib/plugins/image_artifact/v1/image.py new file mode 100755 index 00000000..065d0d66 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/v1/image.py @@ -0,0 +1,36 @@ +# Copyright (c) 2014 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.common.artifacts import definitions + + +class ImageAsAnArtifact(definitions.ArtifactType): + __type_name__ = 'Image' + __endpoint__ = 'images' + + file = definitions.BinaryObject(required=True) + disk_format = definitions.String(allowed_values=['ami', 'ari', 'aki', + 'vhd', 'vmdk', 'raw', + 'qcow2', 'vdi', 'iso'], + required=True, + mutable=False) + container_format = definitions.String(allowed_values=['ami', 'ari', + 'aki', 'bare', + 'ovf', 'ova'], + required=True, + mutable=False) + min_disk = definitions.Integer(min_value=0, default=0) + min_ram = definitions.Integer(min_value=0, default=0) + + virtual_size = definitions.Integer(min_value=0) diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v1_1/__init__.py b/code/daisy/daisy/contrib/plugins/image_artifact/v1_1/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v1_1/image.py b/code/daisy/daisy/contrib/plugins/image_artifact/v1_1/image.py new file mode 100755 index 00000000..25d976b7 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/v1_1/image.py @@ -0,0 +1,27 @@ +# Copyright (c) 2014 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
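+# --- Illustrative sketch (added for clarity; not part of the original
+# change-set). The sample plugins above advertise their artifact types
+# through the 'daisy.artifacts.types' entry-point group in their setup.cfg
+# files; a loader along these lines can discover them with stevedore. The
+# _example_load_artifact_types() name is hypothetical and this is not the
+# project's actual plugin loader.
+def _example_load_artifact_types():
+    from stevedore import extension
+
+    mgr = extension.ExtensionManager(namespace='daisy.artifacts.types',
+                                     invoke_on_load=False)
+    # Each extension's .plugin is the object named in setup.cfg, e.g. the
+    # MY_ARTIFACT list or the version_selector.versions list.
+    return dict((ext.name, ext.plugin) for ext in mgr)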
+ + +from daisy.common.artifacts import definitions +import daisy.contrib.plugins.image_artifact.v1.image as v1 + + +class ImageAsAnArtifact(v1.ImageAsAnArtifact): + __type_version__ = '1.1' + + icons = definitions.BinaryObjectList() + + similar_images = (definitions. + ArtifactReferenceList(references=definitions. + ArtifactReference('Image'))) diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v2/__init__.py b/code/daisy/daisy/contrib/plugins/image_artifact/v2/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py b/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py new file mode 100755 index 00000000..056790c8 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/v2/image.py @@ -0,0 +1,75 @@ +# Copyright (c) 2014 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.common.artifacts import definitions +from daisy.common import exception +import daisy.contrib.plugins.image_artifact.v1_1.image as v1_1 + +import daisyclient + + +from daisy import i18n + + +_ = i18n._ + + +class ImageAsAnArtifact(v1_1.ImageAsAnArtifact): + __type_version__ = '2.0' + + file = definitions.BinaryObject(required=False) + legacy_image_id = definitions.String(required=False, mutable=False, + pattern=R'[0-9a-f]{8}-[0-9a-f]{4}' + R'-4[0-9a-f]{3}-[89ab]' + R'[0-9a-f]{3}-[0-9a-f]{12}') + + def __pre_publish__(self, context, *args, **kwargs): + super(ImageAsAnArtifact, self).__pre_publish__(*args, **kwargs) + if self.file is None and self.legacy_image_id is None: + raise exception.InvalidArtifactPropertyValue( + message=_("Either a file or a legacy_image_id has to be " + "specified") + ) + if self.file is not None and self.legacy_image_id is not None: + raise exception.InvalidArtifactPropertyValue( + message=_("Both file and legacy_image_id may not be " + "specified at the same time")) + + if self.legacy_image_id: + glance_endpoint = next(service['endpoints'][0]['publicURL'] + for service in context.service_catalog + if service['name'] == 'glance') + try: + client = daisyclient.Client(version=2, + endpoint=glance_endpoint, + token=context.auth_token) + legacy_image = client.images.get(self.legacy_image_id) + except Exception: + raise exception.InvalidArtifactPropertyValue( + message=_('Unable to get legacy image') + ) + if legacy_image is not None: + self.file = definitions.Blob(size=legacy_image.size, + locations=[ + { + "status": "active", + "value": + legacy_image.direct_url + }], + checksum=legacy_image.checksum, + item_key=legacy_image.id) + else: + raise exception.InvalidArtifactPropertyValue( + message=_("Legacy image was not found") + ) diff --git a/code/daisy/daisy/contrib/plugins/image_artifact/version_selector.py b/code/daisy/daisy/contrib/plugins/image_artifact/version_selector.py new file mode 100755 index 00000000..afbe3034 --- /dev/null +++ b/code/daisy/daisy/contrib/plugins/image_artifact/version_selector.py @@ -0,0 +1,19 @@ +# Copyright (c) 2014 Mirantis, Inc. 
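Editor's note: the __pre_publish__ hook in v2/image.py above enforces an either/or rule: a publish must carry exactly one of file or legacy_image_id, and the legacy id has to match the version-4 UUID pattern on the property. A self-contained illustration of that rule, using a made-up UUID and plain ValueError instead of daisy's exception types.

import re

# Same version-4 UUID shape as the pattern on legacy_image_id above.
LEGACY_ID_RE = re.compile(r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}'
                          r'-[89ab][0-9a-f]{3}-[0-9a-f]{12}$')

def check_publish_args(file_blob, legacy_image_id):
    if file_blob is None and legacy_image_id is None:
        raise ValueError('Either a file or a legacy_image_id has to be '
                         'specified')
    if file_blob is not None and legacy_image_id is not None:
        raise ValueError('Both file and legacy_image_id may not be '
                         'specified at the same time')
    if legacy_image_id and not LEGACY_ID_RE.match(legacy_image_id):
        raise ValueError('legacy_image_id is not a valid UUID')

check_publish_args(None, '6f2c4f4a-1d2b-4c3d-9a1b-0123456789ab')  # passes
# check_publish_args(None, None)  # would raise: neither argument given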
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from v1 import image as v1 +from v1_1 import image as v1_1 +from v2 import image as v2 + +versions = [v1.ImageAsAnArtifact, v1_1.ImageAsAnArtifact, v2.ImageAsAnArtifact] diff --git a/code/daisy/daisy/db/__init__.py b/code/daisy/daisy/db/__init__.py new file mode 100755 index 00000000..d98dbc8b --- /dev/null +++ b/code/daisy/daisy/db/__init__.py @@ -0,0 +1,862 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg +from oslo_utils import importutils +from wsme.rest import json + +from daisy.api.v2.model.metadef_property_type import PropertyType +from daisy.common import crypt +from daisy.common import exception +from daisy.common import location_strategy +import daisy.domain +import daisy.domain.proxy +from daisy import i18n + +_ = i18n._ + +CONF = cfg.CONF +CONF.import_opt('image_size_cap', 'daisy.common.config') +CONF.import_opt('metadata_encryption_key', 'daisy.common.config') + + +def get_api(): + api = importutils.import_module(CONF.data_api) + if hasattr(api, 'configure'): + api.configure() + return api + + +def unwrap(db_api): + return db_api + + +# attributes common to all models +BASE_MODEL_ATTRS = set(['id', 'created_at', 'updated_at', 'deleted_at', + 'deleted']) + + +IMAGE_ATTRS = BASE_MODEL_ATTRS | set(['name', 'status', 'size', 'virtual_size', + 'disk_format', 'container_format', + 'min_disk', 'min_ram', 'is_public', + 'locations', 'checksum', 'owner', + 'protected']) + + +class ImageRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + + def get(self, image_id): + try: + db_api_image = dict(self.db_api.image_get(self.context, image_id)) + assert not db_api_image['deleted'] + except (exception.NotFound, exception.Forbidden, AssertionError): + msg = _("No image found with ID %s") % image_id + raise exception.NotFound(msg) + tags = self.db_api.image_tag_get_all(self.context, image_id) + image = self._format_image_from_db(db_api_image, tags) + return ImageProxy(image, self.context, self.db_api) + + def list(self, marker=None, limit=None, sort_key=None, + sort_dir=None, filters=None, member_status='accepted'): + sort_key = ['created_at'] if not sort_key else sort_key + sort_dir = ['desc'] if not sort_dir else sort_dir + db_api_images = 
self.db_api.image_get_all( + self.context, filters=filters, marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir, + member_status=member_status, return_tag=True) + images = [] + for db_api_image in db_api_images: + db_image = dict(db_api_image) + image = self._format_image_from_db(db_image, db_image['tags']) + images.append(image) + return images + + def _format_image_from_db(self, db_image, db_tags): + visibility = 'public' if db_image['is_public'] else 'private' + properties = {} + for prop in db_image.pop('properties'): + # NOTE(markwash) db api requires us to filter deleted + if not prop['deleted']: + properties[prop['name']] = prop['value'] + locations = [loc for loc in db_image['locations'] + if loc['status'] == 'active'] + if CONF.metadata_encryption_key: + key = CONF.metadata_encryption_key + for l in locations: + l['url'] = crypt.urlsafe_decrypt(key, l['url']) + return daisy.domain.Image( + image_id=db_image['id'], + name=db_image['name'], + status=db_image['status'], + created_at=db_image['created_at'], + updated_at=db_image['updated_at'], + visibility=visibility, + min_disk=db_image['min_disk'], + min_ram=db_image['min_ram'], + protected=db_image['protected'], + locations=location_strategy.get_ordered_locations(locations), + checksum=db_image['checksum'], + owner=db_image['owner'], + disk_format=db_image['disk_format'], + container_format=db_image['container_format'], + size=db_image['size'], + virtual_size=db_image['virtual_size'], + extra_properties=properties, + tags=db_tags + ) + + def _format_image_to_db(self, image): + locations = image.locations + if CONF.metadata_encryption_key: + key = CONF.metadata_encryption_key + ld = [] + for loc in locations: + url = crypt.urlsafe_encrypt(key, loc['url']) + ld.append({'url': url, 'metadata': loc['metadata'], + 'status': loc['status'], + # NOTE(zhiyan): New location has no ID field. 
+ 'id': loc.get('id')}) + locations = ld + return { + 'id': image.image_id, + 'name': image.name, + 'status': image.status, + 'created_at': image.created_at, + 'min_disk': image.min_disk, + 'min_ram': image.min_ram, + 'protected': image.protected, + 'locations': locations, + 'checksum': image.checksum, + 'owner': image.owner, + 'disk_format': image.disk_format, + 'container_format': image.container_format, + 'size': image.size, + 'virtual_size': image.virtual_size, + 'is_public': image.visibility == 'public', + 'properties': dict(image.extra_properties), + } + + def add(self, image): + image_values = self._format_image_to_db(image) + if image_values['size'] > CONF.image_size_cap: + raise exception.ImageSizeLimitExceeded + # the updated_at value is not set in the _format_image_to_db + # function since it is specific to image create + image_values['updated_at'] = image.updated_at + new_values = self.db_api.image_create(self.context, image_values) + self.db_api.image_tag_set_all(self.context, + image.image_id, image.tags) + image.created_at = new_values['created_at'] + image.updated_at = new_values['updated_at'] + + def save(self, image, from_state=None): + image_values = self._format_image_to_db(image) + if image_values['size'] > CONF.image_size_cap: + raise exception.ImageSizeLimitExceeded + try: + new_values = self.db_api.image_update(self.context, + image.image_id, + image_values, + purge_props=True, + from_state=from_state) + except (exception.NotFound, exception.Forbidden): + msg = _("No image found with ID %s") % image.image_id + raise exception.NotFound(msg) + self.db_api.image_tag_set_all(self.context, image.image_id, + image.tags) + image.updated_at = new_values['updated_at'] + + def remove(self, image): + image_values = self._format_image_to_db(image) + try: + self.db_api.image_update(self.context, image.image_id, + image_values, purge_props=True) + except (exception.NotFound, exception.Forbidden): + msg = _("No image found with ID %s") % image.image_id + raise exception.NotFound(msg) + # NOTE(markwash): don't update tags? 
+ new_values = self.db_api.image_destroy(self.context, image.image_id) + image.updated_at = new_values['updated_at'] + + +class ImageProxy(daisy.domain.proxy.Image): + + def __init__(self, image, context, db_api): + self.context = context + self.db_api = db_api + self.image = image + super(ImageProxy, self).__init__(image) + + def get_member_repo(self): + member_repo = ImageMemberRepo(self.context, self.db_api, + self.image) + return member_repo + + +class ImageMemberRepo(object): + + def __init__(self, context, db_api, image): + self.context = context + self.db_api = db_api + self.image = image + + def _format_image_member_from_db(self, db_image_member): + return daisy.domain.ImageMembership( + id=db_image_member['id'], + image_id=db_image_member['image_id'], + member_id=db_image_member['member'], + status=db_image_member['status'], + created_at=db_image_member['created_at'], + updated_at=db_image_member['updated_at'] + ) + + def _format_image_member_to_db(self, image_member): + image_member = {'image_id': self.image.image_id, + 'member': image_member.member_id, + 'status': image_member.status, + 'created_at': image_member.created_at} + return image_member + + def list(self): + db_members = self.db_api.image_member_find( + self.context, image_id=self.image.image_id) + image_members = [] + for db_member in db_members: + image_members.append(self._format_image_member_from_db(db_member)) + return image_members + + def add(self, image_member): + try: + self.get(image_member.member_id) + except exception.NotFound: + pass + else: + msg = _('The target member %(member_id)s is already ' + 'associated with image %(image_id)s.') % { + 'member_id': image_member.member_id, + 'image_id': self.image.image_id} + raise exception.Duplicate(msg) + + image_member_values = self._format_image_member_to_db(image_member) + new_values = self.db_api.image_member_create(self.context, + image_member_values) + image_member.created_at = new_values['created_at'] + image_member.updated_at = new_values['updated_at'] + image_member.id = new_values['id'] + + def remove(self, image_member): + try: + self.db_api.image_member_delete(self.context, image_member.id) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified member %s could not be found") + raise exception.NotFound(msg % image_member.id) + + def save(self, image_member, from_state=None): + image_member_values = self._format_image_member_to_db(image_member) + try: + new_values = self.db_api.image_member_update(self.context, + image_member.id, + image_member_values) + except (exception.NotFound, exception.Forbidden): + raise exception.NotFound() + image_member.updated_at = new_values['updated_at'] + + def get(self, member_id): + try: + db_api_image_member = self.db_api.image_member_find( + self.context, + self.image.image_id, + member_id) + if not db_api_image_member: + raise exception.NotFound() + except (exception.NotFound, exception.Forbidden): + raise exception.NotFound() + + image_member = self._format_image_member_from_db( + db_api_image_member[0]) + return image_member + + +class TaskRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + + def _format_task_from_db(self, db_task): + return daisy.domain.Task( + task_id=db_task['id'], + task_type=db_task['type'], + status=db_task['status'], + owner=db_task['owner'], + expires_at=db_task['expires_at'], + created_at=db_task['created_at'], + updated_at=db_task['updated_at'], + task_input=db_task['input'], + result=db_task['result'], + 
message=db_task['message'], + ) + + def _format_task_stub_from_db(self, db_task): + return daisy.domain.TaskStub( + task_id=db_task['id'], + task_type=db_task['type'], + status=db_task['status'], + owner=db_task['owner'], + expires_at=db_task['expires_at'], + created_at=db_task['created_at'], + updated_at=db_task['updated_at'], + ) + + def _format_task_to_db(self, task): + task = {'id': task.task_id, + 'type': task.type, + 'status': task.status, + 'input': task.task_input, + 'result': task.result, + 'owner': task.owner, + 'message': task.message, + 'expires_at': task.expires_at, + 'created_at': task.created_at, + 'updated_at': task.updated_at, + } + return task + + def get(self, task_id): + try: + db_api_task = self.db_api.task_get(self.context, task_id) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find task %s') % task_id + raise exception.NotFound(msg) + return self._format_task_from_db(db_api_task) + + def list(self, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + db_api_tasks = self.db_api.task_get_all(self.context, + filters=filters, + marker=marker, + limit=limit, + sort_key=sort_key, + sort_dir=sort_dir) + return [self._format_task_stub_from_db(task) for task in db_api_tasks] + + def save(self, task): + task_values = self._format_task_to_db(task) + try: + updated_values = self.db_api.task_update(self.context, + task.task_id, + task_values) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find task %s') % task.task_id + raise exception.NotFound(msg) + task.updated_at = updated_values['updated_at'] + + def add(self, task): + task_values = self._format_task_to_db(task) + updated_values = self.db_api.task_create(self.context, task_values) + task.created_at = updated_values['created_at'] + task.updated_at = updated_values['updated_at'] + + def remove(self, task): + task_values = self._format_task_to_db(task) + try: + self.db_api.task_update(self.context, task.task_id, task_values) + updated_values = self.db_api.task_delete(self.context, + task.task_id) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find task %s') % task.task_id + raise exception.NotFound(msg) + task.updated_at = updated_values['updated_at'] + task.deleted_at = updated_values['deleted_at'] + + +class MetadefNamespaceRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + + def _format_namespace_from_db(self, namespace_obj): + return daisy.domain.MetadefNamespace( + namespace_id=namespace_obj['id'], + namespace=namespace_obj['namespace'], + display_name=namespace_obj['display_name'], + description=namespace_obj['description'], + owner=namespace_obj['owner'], + visibility=namespace_obj['visibility'], + protected=namespace_obj['protected'], + created_at=namespace_obj['created_at'], + updated_at=namespace_obj['updated_at'] + ) + + def _format_namespace_to_db(self, namespace_obj): + namespace = { + 'namespace': namespace_obj.namespace, + 'display_name': namespace_obj.display_name, + 'description': namespace_obj.description, + 'visibility': namespace_obj.visibility, + 'protected': namespace_obj.protected, + 'owner': namespace_obj.owner + } + return namespace + + def add(self, namespace): + self.db_api.metadef_namespace_create( + self.context, + self._format_namespace_to_db(namespace) + ) + + def get(self, namespace): + try: + db_api_namespace = self.db_api.metadef_namespace_get( + self.context, namespace) + except (exception.NotFound, exception.Forbidden): + msg = 
_('Could not find namespace %s') % namespace + raise exception.NotFound(msg) + return self._format_namespace_from_db(db_api_namespace) + + def list(self, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + db_namespaces = self.db_api.metadef_namespace_get_all( + self.context, + marker=marker, + limit=limit, + sort_key=sort_key, + sort_dir=sort_dir, + filters=filters + ) + return [self._format_namespace_from_db(namespace_obj) + for namespace_obj in db_namespaces] + + def remove(self, namespace): + try: + self.db_api.metadef_namespace_delete(self.context, + namespace.namespace) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified namespace %s could not be found") + raise exception.NotFound(msg % namespace.namespace) + + def remove_objects(self, namespace): + try: + self.db_api.metadef_object_delete_namespace_content( + self.context, + namespace.namespace + ) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified namespace %s could not be found") + raise exception.NotFound(msg % namespace.namespace) + + def remove_properties(self, namespace): + try: + self.db_api.metadef_property_delete_namespace_content( + self.context, + namespace.namespace + ) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified namespace %s could not be found") + raise exception.NotFound(msg % namespace.namespace) + + def remove_tags(self, namespace): + try: + self.db_api.metadef_tag_delete_namespace_content( + self.context, + namespace.namespace + ) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified namespace %s could not be found") + raise exception.NotFound(msg % namespace.namespace) + + def object_count(self, namespace_name): + return self.db_api.metadef_object_count( + self.context, + namespace_name + ) + + def property_count(self, namespace_name): + return self.db_api.metadef_property_count( + self.context, + namespace_name + ) + + def save(self, namespace): + try: + self.db_api.metadef_namespace_update( + self.context, namespace.namespace_id, + self._format_namespace_to_db(namespace) + ) + except exception.NotFound as e: + raise exception.NotFound(explanation=e.msg) + return namespace + + +class MetadefObjectRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) + + def _format_metadef_object_from_db(self, metadata_object, + namespace_entity): + required_str = metadata_object['required'] + required_list = required_str.split(",") if required_str else [] + + # Convert the persisted json schema to a dict of PropertyTypes + property_types = {} + json_props = metadata_object['json_schema'] + for id in json_props: + property_types[id] = json.fromjson(PropertyType, json_props[id]) + + return daisy.domain.MetadefObject( + namespace=namespace_entity, + object_id=metadata_object['id'], + name=metadata_object['name'], + required=required_list, + description=metadata_object['description'], + properties=property_types, + created_at=metadata_object['created_at'], + updated_at=metadata_object['updated_at'] + ) + + def _format_metadef_object_to_db(self, metadata_object): + + required_str = (",".join(metadata_object.required) if + metadata_object.required else None) + + # Convert the model PropertyTypes dict to a JSON string + properties = metadata_object.properties + db_schema = {} + if properties: + for k, v in properties.items(): + json_data = json.tojson(PropertyType, v) + db_schema[k] = 
json_data + + db_metadata_object = { + 'name': metadata_object.name, + 'required': required_str, + 'description': metadata_object.description, + 'json_schema': db_schema + } + return db_metadata_object + + def add(self, metadata_object): + self.db_api.metadef_object_create( + self.context, + metadata_object.namespace, + self._format_metadef_object_to_db(metadata_object) + ) + + def get(self, namespace, object_name): + try: + namespace_entity = self.meta_namespace_repo.get(namespace) + db_metadata_object = self.db_api.metadef_object_get( + self.context, + namespace, + object_name) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find metadata object %s') % object_name + raise exception.NotFound(msg) + return self._format_metadef_object_from_db(db_metadata_object, + namespace_entity) + + def list(self, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + namespace = filters['namespace'] + namespace_entity = self.meta_namespace_repo.get(namespace) + db_metadata_objects = self.db_api.metadef_object_get_all( + self.context, namespace) + return [self._format_metadef_object_from_db(metadata_object, + namespace_entity) + for metadata_object in db_metadata_objects] + + def remove(self, metadata_object): + try: + self.db_api.metadef_object_delete( + self.context, + metadata_object.namespace.namespace, + metadata_object.name + ) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified metadata object %s could not be found") + raise exception.NotFound(msg % metadata_object.name) + + def save(self, metadata_object): + try: + self.db_api.metadef_object_update( + self.context, metadata_object.namespace.namespace, + metadata_object.object_id, + self._format_metadef_object_to_db(metadata_object)) + except exception.NotFound as e: + raise exception.NotFound(explanation=e.msg) + return metadata_object + + +class MetadefResourceTypeRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) + + def _format_resource_type_from_db(self, resource_type, namespace): + return daisy.domain.MetadefResourceType( + namespace=namespace, + name=resource_type['name'], + prefix=resource_type['prefix'], + properties_target=resource_type['properties_target'], + created_at=resource_type['created_at'], + updated_at=resource_type['updated_at'] + ) + + def _format_resource_type_to_db(self, resource_type): + db_resource_type = { + 'name': resource_type.name, + 'prefix': resource_type.prefix, + 'properties_target': resource_type.properties_target + } + return db_resource_type + + def add(self, resource_type): + self.db_api.metadef_resource_type_association_create( + self.context, resource_type.namespace, + self._format_resource_type_to_db(resource_type) + ) + + def get(self, resource_type, namespace): + namespace_entity = self.meta_namespace_repo.get(namespace) + db_resource_type = ( + self.db_api. + metadef_resource_type_association_get( + self.context, + namespace, + resource_type + ) + ) + return self._format_resource_type_from_db(db_resource_type, + namespace_entity) + + def list(self, filters=None): + namespace = filters['namespace'] + if namespace: + namespace_entity = self.meta_namespace_repo.get(namespace) + db_resource_types = ( + self.db_api. 
+ metadef_resource_type_association_get_all_by_namespace( + self.context, + namespace + ) + ) + return [self._format_resource_type_from_db(resource_type, + namespace_entity) + for resource_type in db_resource_types] + else: + db_resource_types = ( + self.db_api. + metadef_resource_type_get_all(self.context) + ) + return [daisy.domain.MetadefResourceType( + namespace=None, + name=resource_type['name'], + prefix=None, + properties_target=None, + created_at=resource_type['created_at'], + updated_at=resource_type['updated_at'] + ) for resource_type in db_resource_types] + + def remove(self, resource_type): + try: + self.db_api.metadef_resource_type_association_delete( + self.context, resource_type.namespace.namespace, + resource_type.name) + + except (exception.NotFound, exception.Forbidden): + msg = _("The specified resource type %s could not be found ") + raise exception.NotFound(msg % resource_type.name) + + +class MetadefPropertyRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) + + def _format_metadef_property_from_db( + self, + property, + namespace_entity): + + return daisy.domain.MetadefProperty( + namespace=namespace_entity, + property_id=property['id'], + name=property['name'], + schema=property['json_schema'] + ) + + def _format_metadef_property_to_db(self, property): + + db_metadata_object = { + 'name': property.name, + 'json_schema': property.schema + } + return db_metadata_object + + def add(self, property): + self.db_api.metadef_property_create( + self.context, + property.namespace, + self._format_metadef_property_to_db(property) + ) + + def get(self, namespace, property_name): + try: + namespace_entity = self.meta_namespace_repo.get(namespace) + db_property_type = self.db_api.metadef_property_get( + self.context, + namespace, + property_name + ) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find property %s') % property_name + raise exception.NotFound(msg) + return self._format_metadef_property_from_db( + db_property_type, namespace_entity) + + def list(self, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + namespace = filters['namespace'] + namespace_entity = self.meta_namespace_repo.get(namespace) + + db_properties = self.db_api.metadef_property_get_all( + self.context, namespace) + return ( + [self._format_metadef_property_from_db( + property, namespace_entity) for property in db_properties] + ) + + def remove(self, property): + try: + self.db_api.metadef_property_delete( + self.context, property.namespace.namespace, property.name) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified property %s could not be found") + raise exception.NotFound(msg % property.name) + + def save(self, property): + try: + self.db_api.metadef_property_update( + self.context, property.namespace.namespace, + property.property_id, + self._format_metadef_property_to_db(property) + ) + except exception.NotFound as e: + raise exception.NotFound(explanation=e.msg) + return property + + +class MetadefTagRepo(object): + + def __init__(self, context, db_api): + self.context = context + self.db_api = db_api + self.meta_namespace_repo = MetadefNamespaceRepo(context, db_api) + + def _format_metadef_tag_from_db(self, metadata_tag, + namespace_entity): + return daisy.domain.MetadefTag( + namespace=namespace_entity, + tag_id=metadata_tag['id'], + name=metadata_tag['name'], + 
created_at=metadata_tag['created_at'], + updated_at=metadata_tag['updated_at'] + ) + + def _format_metadef_tag_to_db(self, metadata_tag): + db_metadata_tag = { + 'name': metadata_tag.name + } + return db_metadata_tag + + def add(self, metadata_tag): + self.db_api.metadef_tag_create( + self.context, + metadata_tag.namespace, + self._format_metadef_tag_to_db(metadata_tag) + ) + + def add_tags(self, metadata_tags): + tag_list = [] + namespace = None + for metadata_tag in metadata_tags: + tag_list.append(self._format_metadef_tag_to_db(metadata_tag)) + if namespace is None: + namespace = metadata_tag.namespace + + self.db_api.metadef_tag_create_tags( + self.context, namespace, tag_list) + + def get(self, namespace, name): + try: + namespace_entity = self.meta_namespace_repo.get(namespace) + db_metadata_tag = self.db_api.metadef_tag_get( + self.context, + namespace, + name) + except (exception.NotFound, exception.Forbidden): + msg = _('Could not find metadata tag %s') % name + raise exception.NotFound(msg) + return self._format_metadef_tag_from_db(db_metadata_tag, + namespace_entity) + + def list(self, marker=None, limit=None, sort_key='created_at', + sort_dir='desc', filters=None): + namespace = filters['namespace'] + namespace_entity = self.meta_namespace_repo.get(namespace) + + db_metadata_tag = self.db_api.metadef_tag_get_all( + self.context, namespace, filters, marker, limit, sort_key, + sort_dir) + + return [self._format_metadef_tag_from_db(metadata_tag, + namespace_entity) + for metadata_tag in db_metadata_tag] + + def remove(self, metadata_tag): + try: + self.db_api.metadef_tag_delete( + self.context, + metadata_tag.namespace.namespace, + metadata_tag.name + ) + except (exception.NotFound, exception.Forbidden): + msg = _("The specified metadata tag %s could not be found") + raise exception.NotFound(msg % metadata_tag.name) + + def save(self, metadata_tag): + try: + self.db_api.metadef_tag_update( + self.context, metadata_tag.namespace.namespace, + metadata_tag.tag_id, + self._format_metadef_tag_to_db(metadata_tag)) + except exception.NotFound as e: + raise exception.NotFound(explanation=e.msg) + return metadata_tag diff --git a/code/daisy/daisy/db/metadata.py b/code/daisy/daisy/db/metadata.py new file mode 100755 index 00000000..b0f4669a --- /dev/null +++ b/code/daisy/daisy/db/metadata.py @@ -0,0 +1,65 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2013 OpenStack Foundation +# Copyright 2013 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
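Editor's note: daisy/db/__init__.py above loads the configured data-access driver with importutils and wraps it in domain repositories such as ImageRepo, which translate raw DB rows into daisy.domain objects (including decrypting location URLs when metadata_encryption_key is set). A rough sketch of that wiring, assuming daisy is installed and CONF.data_api names a usable driver; the FakeContext class is a stand-in for the request context normally supplied by the API layer.

import daisy.db as db

class FakeContext(object):
    # Minimal stand-in carrying the attributes the drivers consult;
    # a real deployment passes the request context from the API layer.
    owner = 'demo-project'
    is_admin = True
    can_see_deleted = False

ctxt = FakeContext()
db_api = db.get_api()                 # driver module named by CONF.data_api
repo = db.ImageRepo(ctxt, db_api)

images = repo.list(limit=5, sort_key=['created_at'], sort_dir=['desc'])
for image in images:
    print(image.image_id, image.status)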
+ +"""Metadata setup commands.""" + +import threading + +from oslo_config import cfg +from oslo_db import options as db_options +from stevedore import driver + +from daisy.db.sqlalchemy import api as db_api + + +_IMPL = None +_LOCK = threading.Lock() + +db_options.set_defaults(cfg.CONF) + + +def get_backend(): + global _IMPL + if _IMPL is None: + with _LOCK: + if _IMPL is None: + _IMPL = driver.DriverManager( + "daisy.database.metadata_backend", + cfg.CONF.database.backend).driver + return _IMPL + + +def load_metadefs(): + """Read metadefinition files and insert data into the database""" + return get_backend().db_load_metadefs(engine=db_api.get_engine(), + metadata_path=None, + merge=False, + prefer_new=False, + overwrite=False) + + +def unload_metadefs(): + """Unload metadefinitions from database""" + return get_backend().db_unload_metadefs(engine=db_api.get_engine()) + + +def export_metadefs(): + """Export metadefinitions from database to files""" + return get_backend().db_export_metadefs(engine=db_api.get_engine(), + metadata_path=None) diff --git a/code/daisy/daisy/db/migration.py b/code/daisy/daisy/db/migration.py new file mode 100755 index 00000000..af9046a3 --- /dev/null +++ b/code/daisy/daisy/db/migration.py @@ -0,0 +1,65 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Database setup and migration commands.""" + +import os +import threading + +from oslo_config import cfg +from oslo_db import options as db_options +from stevedore import driver + +from daisy.db.sqlalchemy import api as db_api + + +_IMPL = None +_LOCK = threading.Lock() + +db_options.set_defaults(cfg.CONF) + + +def get_backend(): + global _IMPL + if _IMPL is None: + with _LOCK: + if _IMPL is None: + _IMPL = driver.DriverManager( + "daisy.database.migration_backend", + cfg.CONF.database.backend).driver + return _IMPL + +INIT_VERSION = 0 + +MIGRATE_REPO_PATH = os.path.join( + os.path.abspath(os.path.dirname(__file__)), + 'sqlalchemy', + 'migrate_repo', +) + + +def db_sync(version=None, init_version=0, engine=None): + """Migrate the database to `version` or the most recent version.""" + + if engine is None: + engine = db_api.get_engine() + return get_backend().db_sync(engine=engine, + abs_path=MIGRATE_REPO_PATH, + version=version, + init_version=init_version) diff --git a/code/daisy/daisy/db/registry/__init__.py b/code/daisy/daisy/db/registry/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/registry/api.py b/code/daisy/daisy/db/registry/api.py new file mode 100755 index 00000000..80dcbaac --- /dev/null +++ b/code/daisy/daisy/db/registry/api.py @@ -0,0 +1,610 @@ +# Copyright 2013 Red Hat, Inc. +# Copyright 2015 Mirantis, Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This is the Registry's Driver API. + +This API relies on the registry RPC client (version >= 2). The functions bellow +work as a proxy for the database back-end configured in the registry service, +which means that everything returned by that back-end will be also returned by +this API. + + +This API exists for supporting deployments not willing to put database +credentials in daisy-api. Those deployments can rely on this registry driver +that will talk to a remote registry service, which will then access the +database back-end. +""" + +import functools + +from oslo_log import log as logging + +from daisy import artifacts +from daisy.registry.client.v2 import api + + +LOG = logging.getLogger(__name__) + + +def configure(): + api.configure_registry_client() + + +def _get_client(func): + """Injects a client instance to the each function + + This decorator creates an instance of the Registry + client and passes it as an argument to each function + in this API. + """ + @functools.wraps(func) + def wrapper(context, *args, **kwargs): + client = api.get_registry_client(context) + return func(client, *args, **kwargs) + return wrapper + + +@_get_client +def image_create(client, values): + """Create an image from the values dictionary.""" + return client.image_create(values=values) + + +@_get_client +def image_update(client, image_id, values, purge_props=False, from_state=None): + """ + Set the given properties on an image and update it. + + :raises NotFound if image does not exist. + """ + return client.image_update(values=values, + image_id=image_id, + purge_props=purge_props, from_state=from_state) + + +@_get_client +def image_destroy(client, image_id): + """Destroy the image or raise if it does not exist.""" + return client.image_destroy(image_id=image_id) + + +@_get_client +def image_get(client, image_id, force_show_deleted=False): + return client.image_get(image_id=image_id, + force_show_deleted=force_show_deleted) + + +def is_image_visible(context, image, status=None): + """Return True if the image is visible in this context.""" + # Is admin == image visible + if context.is_admin: + return True + + # No owner == image visible + if image['owner'] is None: + return True + + # Image is_public == image visible + if image['is_public']: + return True + + # Perform tests based on whether we have an owner + if context.owner is not None: + if context.owner == image['owner']: + return True + + # Figure out if this image is shared with that tenant + members = image_member_find(context, + image_id=image['id'], + member=context.owner, + status=status) + if members: + return True + + # Private image + return False + + +@_get_client +def image_get_all(client, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None, + member_status='accepted', is_public=None, + admin_as_user=False, return_tag=False): + """ + Get all images that match zero or more filters. + + :param filters: dict of filter keys and values. 
If a 'properties' + key is present, it is treated as a dict of key/value + filters on the image properties attribute + :param marker: image id after which to start page + :param limit: maximum number of images to return + :param sort_key: image attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param member_status: only return shared images that have this membership + status + :param is_public: If true, return only public images. If false, return + only private and shared images. + :param admin_as_user: For backwards compatibility. If true, then return to + an admin the equivalent set of images which it would see + if it were a regular user + :param return_tag: To indicates whether image entry in result includes it + relevant tag entries. This could improve upper-layer + query performance, to prevent using separated calls + """ + sort_key = ['created_at'] if not sort_key else sort_key + sort_dir = ['desc'] if not sort_dir else sort_dir + return client.image_get_all(filters=filters, marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir, + member_status=member_status, + is_public=is_public, + admin_as_user=admin_as_user, + return_tag=return_tag) + + +@_get_client +def image_property_create(client, values, session=None): + """Create an ImageProperty object""" + return client.image_property_create(values=values) + + +@_get_client +def image_property_delete(client, prop_ref, image_ref, session=None): + """ + Used internally by _image_property_create and image_property_update + """ + return client.image_property_delete(prop_ref=prop_ref, image_ref=image_ref) + + +@_get_client +def image_member_create(client, values, session=None): + """Create an ImageMember object""" + return client.image_member_create(values=values) + + +@_get_client +def image_member_update(client, memb_id, values): + """Update an ImageMember object""" + return client.image_member_update(memb_id=memb_id, values=values) + + +@_get_client +def image_member_delete(client, memb_id, session=None): + """Delete an ImageMember object""" + client.image_member_delete(memb_id=memb_id) + + +@_get_client +def image_member_find(client, image_id=None, member=None, status=None): + """Find all members that meet the given criteria + + :param image_id: identifier of image entity + :param member: tenant to which membership has been granted + """ + return client.image_member_find(image_id=image_id, + member=member, + status=status) + + +@_get_client +def image_member_count(client, image_id): + """Return the number of image members for this image + + :param image_id: identifier of image entity + """ + return client.image_member_count(image_id=image_id) + + +@_get_client +def image_tag_set_all(client, image_id, tags): + client.image_tag_set_all(image_id=image_id, tags=tags) + + +@_get_client +def image_tag_create(client, image_id, value, session=None): + """Create an image tag.""" + return client.image_tag_create(image_id=image_id, value=value) + + +@_get_client +def image_tag_delete(client, image_id, value, session=None): + """Delete an image tag.""" + client.image_tag_delete(image_id=image_id, value=value) + + +@_get_client +def image_tag_get_all(client, image_id, session=None): + """Get a list of tags for a specific image.""" + return client.image_tag_get_all(image_id=image_id) + + +@_get_client +def image_location_delete(client, image_id, location_id, status, session=None): + """Delete an image location.""" + client.image_location_delete(image_id=image_id, 
location_id=location_id, + status=status) + + +@_get_client +def image_location_update(client, image_id, location, session=None): + """Update image location.""" + client.image_location_update(image_id=image_id, location=location) + + +@_get_client +def user_get_storage_usage(client, owner_id, image_id=None, session=None): + return client.user_get_storage_usage(owner_id=owner_id, image_id=image_id) + + +@_get_client +def task_get(client, task_id, session=None, force_show_deleted=False): + """Get a single task object + :return: task dictionary + """ + return client.task_get(task_id=task_id, session=session, + force_show_deleted=force_show_deleted) + + +@_get_client +def task_get_all(client, filters=None, marker=None, limit=None, + sort_key='created_at', sort_dir='desc', admin_as_user=False): + """Get all tasks that match zero or more filters. + + :param filters: dict of filter keys and values. + :param marker: task id after which to start page + :param limit: maximum number of tasks to return + :param sort_key: task attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param admin_as_user: For backwards compatibility. If true, then return to + an admin the equivalent set of tasks which it would see + if it were a regular user + :return: tasks set + """ + return client.task_get_all(filters=filters, marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir, + admin_as_user=admin_as_user) + + +@_get_client +def task_create(client, values, session=None): + """Create a task object""" + return client.task_create(values=values, session=session) + + +@_get_client +def task_delete(client, task_id, session=None): + """Delete a task object""" + return client.task_delete(task_id=task_id, session=session) + + +@_get_client +def task_update(client, task_id, values, session=None): + return client.task_update(task_id=task_id, values=values, session=session) + + +# Metadef +@_get_client +def metadef_namespace_get_all( + client, marker=None, limit=None, sort_key='created_at', + sort_dir=None, filters=None, session=None): + return client.metadef_namespace_get_all( + marker=marker, limit=limit, + sort_key=sort_key, sort_dir=sort_dir, filters=filters) + + +@_get_client +def metadef_namespace_get(client, namespace_name, session=None): + return client.metadef_namespace_get(namespace_name=namespace_name) + + +@_get_client +def metadef_namespace_create(client, values, session=None): + return client.metadef_namespace_create(values=values) + + +@_get_client +def metadef_namespace_update( + client, namespace_id, namespace_dict, + session=None): + return client.metadef_namespace_update( + namespace_id=namespace_id, namespace_dict=namespace_dict) + + +@_get_client +def metadef_namespace_delete(client, namespace_name, session=None): + return client.metadef_namespace_delete( + namespace_name=namespace_name) + + +@_get_client +def metadef_object_get_all(client, namespace_name, session=None): + return client.metadef_object_get_all( + namespace_name=namespace_name) + + +@_get_client +def metadef_object_get( + client, + namespace_name, object_name, session=None): + return client.metadef_object_get( + namespace_name=namespace_name, object_name=object_name) + + +@_get_client +def metadef_object_create( + client, + namespace_name, object_dict, session=None): + return client.metadef_object_create( + namespace_name=namespace_name, object_dict=object_dict) + + +@_get_client +def metadef_object_update( + client, + namespace_name, object_id, + object_dict, 
session=None): + return client.metadef_object_update( + namespace_name=namespace_name, object_id=object_id, + object_dict=object_dict) + + +@_get_client +def metadef_object_delete( + client, + namespace_name, object_name, + session=None): + return client.metadef_object_delete( + namespace_name=namespace_name, object_name=object_name) + + +@_get_client +def metadef_object_delete_namespace_content( + client, + namespace_name, session=None): + return client.metadef_object_delete_namespace_content( + namespace_name=namespace_name) + + +@_get_client +def metadef_object_count( + client, + namespace_name, session=None): + return client.metadef_object_count( + namespace_name=namespace_name) + + +@_get_client +def metadef_property_get_all( + client, + namespace_name, session=None): + return client.metadef_property_get_all( + namespace_name=namespace_name) + + +@_get_client +def metadef_property_get( + client, + namespace_name, property_name, + session=None): + return client.metadef_property_get( + namespace_name=namespace_name, property_name=property_name) + + +@_get_client +def metadef_property_create( + client, + namespace_name, property_dict, + session=None): + return client.metadef_property_create( + namespace_name=namespace_name, property_dict=property_dict) + + +@_get_client +def metadef_property_update( + client, + namespace_name, property_id, + property_dict, session=None): + return client.metadef_property_update( + namespace_name=namespace_name, property_id=property_id, + property_dict=property_dict) + + +@_get_client +def metadef_property_delete( + client, + namespace_name, property_name, + session=None): + return client.metadef_property_delete( + namespace_name=namespace_name, property_name=property_name) + + +@_get_client +def metadef_property_delete_namespace_content( + client, + namespace_name, session=None): + return client.metadef_property_delete_namespace_content( + namespace_name=namespace_name) + + +@_get_client +def metadef_property_count( + client, + namespace_name, session=None): + return client.metadef_property_count( + namespace_name=namespace_name) + + +@_get_client +def metadef_resource_type_create(client, values, session=None): + return client.metadef_resource_type_create(values=values) + + +@_get_client +def metadef_resource_type_get( + client, + resource_type_name, session=None): + return client.metadef_resource_type_get( + resource_type_name=resource_type_name) + + +@_get_client +def metadef_resource_type_get_all(client, session=None): + return client.metadef_resource_type_get_all() + + +@_get_client +def metadef_resource_type_delete( + client, + resource_type_name, session=None): + return client.metadef_resource_type_delete( + resource_type_name=resource_type_name) + + +@_get_client +def metadef_resource_type_association_get( + client, + namespace_name, resource_type_name, + session=None): + return client.metadef_resource_type_association_get( + namespace_name=namespace_name, resource_type_name=resource_type_name) + + +@_get_client +def metadef_resource_type_association_create( + client, + namespace_name, values, session=None): + return client.metadef_resource_type_association_create( + namespace_name=namespace_name, values=values) + + +@_get_client +def metadef_resource_type_association_delete( + client, + namespace_name, resource_type_name, session=None): + return client.metadef_resource_type_association_delete( + namespace_name=namespace_name, resource_type_name=resource_type_name) + + +@_get_client +def metadef_resource_type_association_get_all_by_namespace( + 
client, + namespace_name, session=None): + return client.metadef_resource_type_association_get_all_by_namespace( + namespace_name=namespace_name) + + +@_get_client +def metadef_tag_get_all(client, namespace_name, filters=None, marker=None, + limit=None, sort_key='created_at', sort_dir=None, + session=None): + return client.metadef_tag_get_all( + namespace_name=namespace_name, filters=filters, marker=marker, + limit=limit, sort_key=sort_key, sort_dir=sort_dir, session=session) + + +@_get_client +def metadef_tag_get(client, namespace_name, name, session=None): + return client.metadef_tag_get( + namespace_name=namespace_name, name=name) + + +@_get_client +def metadef_tag_create( + client, namespace_name, tag_dict, session=None): + return client.metadef_tag_create( + namespace_name=namespace_name, tag_dict=tag_dict) + + +@_get_client +def metadef_tag_create_tags( + client, namespace_name, tag_list, session=None): + return client.metadef_tag_create_tags( + namespace_name=namespace_name, tag_list=tag_list) + + +@_get_client +def metadef_tag_update( + client, namespace_name, id, tag_dict, session=None): + return client.metadef_tag_update( + namespace_name=namespace_name, id=id, tag_dict=tag_dict) + + +@_get_client +def metadef_tag_delete( + client, namespace_name, name, session=None): + return client.metadef_tag_delete( + namespace_name=namespace_name, name=name) + + +@_get_client +def metadef_tag_delete_namespace_content( + client, namespace_name, session=None): + return client.metadef_tag_delete_namespace_content( + namespace_name=namespace_name) + + +@_get_client +def metadef_tag_count(client, namespace_name, session=None): + return client.metadef_tag_count(namespace_name=namespace_name) + + +@_get_client +def artifact_create(client, values, + type_name, type_version=None, session=None): + return client.artifact_create(values=values, + type_name=type_name, + type_version=type_version) + + +@_get_client +def artifact_update(client, values, artifact_id, + type_name, type_version=None, session=None): + return client.artifact_update(values=values, artifact_id=artifact_id, + type_name=type_name, + type_version=type_version) + + +@_get_client +def artifact_delete(client, artifact_id, + type_name, type_version=None, session=None): + return client.artifact_delete(artifact_id=artifact_id, + type_name=type_name, + type_version=type_version) + + +@_get_client +def artifact_get(client, artifact_id, + type_name, type_version=None, session=None): + return client.artifact_get(artifact_id=artifact_id, + type_name=type_name, + type_version=type_version) + + +@_get_client +def artifact_get_all(client, marker=None, limit=None, sort_key=None, + sort_dir=None, filters={}, + show_level=artifacts.Showlevel.NONE, session=None): + return client.artifact_create(marker, limit, sort_key, + sort_dir, filters, show_level) + + +@_get_client +def artifact_publish(client, artifact_id, + type_name, type_version=None, session=None): + return client.artifact_publish(artifact_id=artifact_id, + type_name=type_name, + type_version=type_version) + diff --git a/code/daisy/daisy/db/simple/__init__.py b/code/daisy/daisy/db/simple/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/simple/api.py b/code/daisy/daisy/db/simple/api.py new file mode 100755 index 00000000..717b0af7 --- /dev/null +++ b/code/daisy/daisy/db/simple/api.py @@ -0,0 +1,2136 @@ +# Copyright 2012 OpenStack, Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. 
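Editor's note: every public function in daisy/db/registry/api.py above is wrapped by _get_client, which builds a registry client from the request context and injects it as the first argument, keeping the module signature-compatible with the other db_api drivers. A standalone illustration of that injection pattern with a dummy client; this is the shape of the mechanism, not daisy code.

import functools

class DummyRegistryClient(object):
    """Stand-in for the RPC client returned by api.get_registry_client."""
    def image_get(self, image_id):
        return {'id': image_id, 'status': 'active'}

def get_registry_client(context):
    # The real helper authenticates against the registry service here.
    return DummyRegistryClient()

def _get_client(func):
    @functools.wraps(func)
    def wrapper(context, *args, **kwargs):
        client = get_registry_client(context)
        return func(client, *args, **kwargs)
    return wrapper

@_get_client
def image_get(client, image_id, force_show_deleted=False):
    return client.image_get(image_id=image_id)

# Callers keep the (context, image_id) signature of the other drivers.
print(image_get({'user': 'demo'}, 'abc123'))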
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import functools +from operator import itemgetter +import uuid + +from oslo_log import log as logging +from oslo_utils import timeutils +import six + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI +_LW = i18n._LW + +DATA = { + 'images': {}, + 'members': {}, + 'metadef_namespace_resource_types': [], + 'metadef_namespaces': [], + 'metadef_objects': [], + 'metadef_properties': [], + 'metadef_resource_types': [], + 'metadef_tags': [], + 'tags': {}, + 'locations': [], + 'tasks': {}, + 'task_info': {}, + 'artifacts': {}, + 'artifact_properties': {}, + 'artifact_tags': {}, + 'artifact_dependencies': {}, + 'artifact_blobs': {}, + 'artifact_blob_locations': {} +} + +INDEX = 0 + + +def log_call(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + LOG.info(_LI('Calling %(funcname)s: args=%(args)s, ' + 'kwargs=%(kwargs)s') % + {"funcname": func.__name__, + "args": args, + "kwargs": kwargs}) + output = func(*args, **kwargs) + LOG.info(_LI('Returning %(funcname)s: %(output)s') % + {"funcname": func.__name__, + "output": output}) + return output + return wrapped + + +def reset(): + global DATA + DATA = { + 'images': {}, + 'members': [], + 'metadef_namespace_resource_types': [], + 'metadef_namespaces': [], + 'metadef_objects': [], + 'metadef_properties': [], + 'metadef_resource_types': [], + 'metadef_tags': [], + 'tags': {}, + 'locations': [], + 'tasks': {}, + 'task_info': {}, + 'artifacts': {} + } + + +def clear_db_env(*args, **kwargs): + """ + Setup global environment configuration variables. + + We have no connection-oriented environment variables, so this is a NOOP. 
+ """ + pass + + +def _get_session(): + return DATA + + +@utils.no_4byte_params +def _image_location_format(image_id, value, meta_data, status, deleted=False): + dt = timeutils.utcnow() + return { + 'id': str(uuid.uuid4()), + 'image_id': image_id, + 'created_at': dt, + 'updated_at': dt, + 'deleted_at': dt if deleted else None, + 'deleted': deleted, + 'url': value, + 'metadata': meta_data, + 'status': status, + } + + +def _image_property_format(image_id, name, value): + return { + 'image_id': image_id, + 'name': name, + 'value': value, + 'deleted': False, + 'deleted_at': None, + } + + +def _image_member_format(image_id, tenant_id, can_share, status='pending'): + dt = timeutils.utcnow() + return { + 'id': str(uuid.uuid4()), + 'image_id': image_id, + 'member': tenant_id, + 'can_share': can_share, + 'status': status, + 'created_at': dt, + 'updated_at': dt, + } + + +def _pop_task_info_values(values): + task_info_values = {} + for k, v in values.items(): + if k in ['input', 'result', 'message']: + values.pop(k) + task_info_values[k] = v + + return task_info_values + + +def _format_task_from_db(task_ref, task_info_ref): + task = copy.deepcopy(task_ref) + if task_info_ref: + task_info = copy.deepcopy(task_info_ref) + task_info_values = _pop_task_info_values(task_info) + task.update(task_info_values) + return task + + +def _task_format(task_id, **values): + dt = timeutils.utcnow() + task = { + 'id': task_id, + 'type': 'import', + 'status': 'pending', + 'owner': None, + 'expires_at': None, + 'created_at': dt, + 'updated_at': dt, + 'deleted_at': None, + 'deleted': False, + } + task.update(values) + return task + + +def _task_info_format(task_id, **values): + task_info = { + 'task_id': task_id, + 'input': None, + 'result': None, + 'message': None, + } + task_info.update(values) + return task_info + + +@utils.no_4byte_params +def _image_update(image, values, properties): + # NOTE(bcwaldon): store properties as a list to match sqlalchemy driver + properties = [{'name': k, + 'value': v, + 'image_id': image['id'], + 'deleted': False} for k, v in properties.items()] + if 'properties' not in image.keys(): + image['properties'] = [] + image['properties'].extend(properties) + image.update(values) + return image + + +def _image_format(image_id, **values): + dt = timeutils.utcnow() + image = { + 'id': image_id, + 'name': None, + 'owner': None, + 'locations': [], + 'status': 'queued', + 'protected': False, + 'is_public': False, + 'container_format': None, + 'disk_format': None, + 'min_ram': 0, + 'min_disk': 0, + 'size': None, + 'virtual_size': None, + 'checksum': None, + 'tags': [], + 'created_at': dt, + 'updated_at': dt, + 'deleted_at': None, + 'deleted': False, + } + + locations = values.pop('locations', None) + if locations is not None: + image['locations'] = [] + for location in locations: + location_ref = _image_location_format(image_id, + location['url'], + location['metadata'], + location['status']) + image['locations'].append(location_ref) + DATA['locations'].append(location_ref) + + return _image_update(image, values, values.pop('properties', {})) + + +def _filter_images(images, filters, context, + status='accepted', is_public=None, + admin_as_user=False): + filtered_images = [] + if 'properties' in filters: + prop_filter = filters.pop('properties') + filters.update(prop_filter) + + if status == 'all': + status = None + + visibility = filters.pop('visibility', None) + + for image in images: + member = image_member_find(context, image_id=image['id'], + member=context.owner, status=status) + is_member = 
len(member) > 0 + has_ownership = context.owner and image['owner'] == context.owner + can_see = (image['is_public'] or has_ownership or is_member or + (context.is_admin and not admin_as_user)) + if not can_see: + continue + + if visibility: + if visibility == 'public': + if not image['is_public']: + continue + elif visibility == 'private': + if image['is_public']: + continue + if not (has_ownership or (context.is_admin + and not admin_as_user)): + continue + elif visibility == 'shared': + if not is_member: + continue + + if is_public is not None: + if not image['is_public'] == is_public: + continue + + to_add = True + for k, value in six.iteritems(filters): + key = k + if k.endswith('_min') or k.endswith('_max'): + key = key[0:-4] + try: + value = int(value) + except ValueError: + msg = _("Unable to filter on a range " + "with a non-numeric value.") + raise exception.InvalidFilterRangeValue(msg) + if k.endswith('_min'): + to_add = image.get(key) >= value + elif k.endswith('_max'): + to_add = image.get(key) <= value + elif k != 'is_public' and image.get(k) is not None: + to_add = image.get(key) == value + elif k == 'tags': + filter_tags = value + image_tags = image_tag_get_all(context, image['id']) + for tag in filter_tags: + if tag not in image_tags: + to_add = False + break + else: + to_add = False + for p in image['properties']: + properties = {p['name']: p['value'], + 'deleted': p['deleted']} + to_add |= (properties.get(key) == value and + properties.get('deleted') is False) + + if not to_add: + break + + if to_add: + filtered_images.append(image) + + return filtered_images + + +def _do_pagination(context, images, marker, limit, show_deleted, + status='accepted'): + start = 0 + end = -1 + if marker is None: + start = 0 + else: + # Check that the image is accessible + _image_get(context, marker, force_show_deleted=show_deleted, + status=status) + + for i, image in enumerate(images): + if image['id'] == marker: + start = i + 1 + break + else: + raise exception.NotFound() + + end = start + limit if limit is not None else None + return images[start:end] + + +def _sort_images(images, sort_key, sort_dir): + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + for key in sort_key: + if images and not (key in images[0]): + raise exception.InvalidSortKey() + + if any(dir for dir in sort_dir if dir not in ['asc', 'desc']): + raise exception.InvalidSortDir() + + if len(sort_key) != len(sort_dir): + raise exception.Invalid(message='Number of sort dirs does not match ' + 'the number of sort keys') + + for key, dir in reversed(zip(sort_key, sort_dir)): + reverse = dir == 'desc' + images.sort(key=itemgetter(key), reverse=reverse) + + return images + + +def _image_get(context, image_id, force_show_deleted=False, status=None): + try: + image = DATA['images'][image_id] + except KeyError: + LOG.warn(_LW('Could not find image %s') % image_id) + raise exception.NotFound() + + if image['deleted'] and not (force_show_deleted + or context.can_see_deleted): + LOG.warn(_LW('Unable to get deleted image')) + raise exception.NotFound() + + if not is_image_visible(context, image): + LOG.warn(_LW('Unable to get unowned image')) + raise exception.Forbidden("Image not visible to you") + + return image + + 
+@log_call +def image_get(context, image_id, session=None, force_show_deleted=False): + image = _image_get(context, image_id, force_show_deleted) + return _normalize_locations(context, copy.deepcopy(image), + force_show_deleted=force_show_deleted) + + +@log_call +def image_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None, + member_status='accepted', is_public=None, + admin_as_user=False, return_tag=False): + filters = filters or {} + images = DATA['images'].values() + images = _filter_images(images, filters, context, member_status, + is_public, admin_as_user) + images = _sort_images(images, sort_key, sort_dir) + images = _do_pagination(context, images, marker, limit, + filters.get('deleted')) + + force_show_deleted = True if filters.get('deleted') else False + res = [] + for image in images: + img = _normalize_locations(context, copy.deepcopy(image), + force_show_deleted=force_show_deleted) + if return_tag: + img['tags'] = image_tag_get_all(context, img['id']) + res.append(img) + return res + + +@log_call +def image_property_create(context, values): + image = _image_get(context, values['image_id']) + prop = _image_property_format(values['image_id'], + values['name'], + values['value']) + image['properties'].append(prop) + return prop + + +@log_call +def image_property_delete(context, prop_ref, image_ref): + prop = None + for p in DATA['images'][image_ref]['properties']: + if p['name'] == prop_ref: + prop = p + if not prop: + raise exception.NotFound() + prop['deleted_at'] = timeutils.utcnow() + prop['deleted'] = True + return prop + + +@log_call +def image_member_find(context, image_id=None, member=None, status=None): + filters = [] + images = DATA['images'] + members = DATA['members'] + + def is_visible(member): + return (member['member'] == context.owner or + images[member['image_id']]['owner'] == context.owner) + + if not context.is_admin: + filters.append(is_visible) + + if image_id is not None: + filters.append(lambda m: m['image_id'] == image_id) + if member is not None: + filters.append(lambda m: m['member'] == member) + if status is not None: + filters.append(lambda m: m['status'] == status) + + for f in filters: + members = filter(f, members) + return [copy.deepcopy(m) for m in members] + + +@log_call +def image_member_count(context, image_id): + """Return the number of image members for this image + + :param image_id: identifier of image entity + """ + if not image_id: + msg = _("Image id is required.") + raise exception.Invalid(msg) + + members = DATA['members'] + return len(filter(lambda x: x['image_id'] == image_id, members)) + + +@log_call +def image_member_create(context, values): + member = _image_member_format(values['image_id'], + values['member'], + values.get('can_share', False), + values.get('status', 'pending')) + global DATA + DATA['members'].append(member) + return copy.deepcopy(member) + + +@log_call +def image_member_update(context, member_id, values): + global DATA + for member in DATA['members']: + if member['id'] == member_id: + member.update(values) + member['updated_at'] = timeutils.utcnow() + return copy.deepcopy(member) + else: + raise exception.NotFound() + + +@log_call +def image_member_delete(context, member_id): + global DATA + for i, member in enumerate(DATA['members']): + if member['id'] == member_id: + del DATA['members'][i] + break + else: + raise exception.NotFound() + + +@log_call +@utils.no_4byte_params +def image_location_add(context, image_id, location): + deleted = location['status'] in ('deleted', 
'pending_delete') + location_ref = _image_location_format(image_id, + value=location['url'], + meta_data=location['metadata'], + status=location['status'], + deleted=deleted) + DATA['locations'].append(location_ref) + image = DATA['images'][image_id] + image.setdefault('locations', []).append(location_ref) + + +@log_call +@utils.no_4byte_params +def image_location_update(context, image_id, location): + loc_id = location.get('id') + if loc_id is None: + msg = _("The location data has an invalid ID: %d") % loc_id + raise exception.Invalid(msg) + + deleted = location['status'] in ('deleted', 'pending_delete') + updated_time = timeutils.utcnow() + delete_time = updated_time if deleted else None + + updated = False + for loc in DATA['locations']: + if loc['id'] == loc_id and loc['image_id'] == image_id: + loc.update({"value": location['url'], + "meta_data": location['metadata'], + "status": location['status'], + "deleted": deleted, + "updated_at": updated_time, + "deleted_at": delete_time}) + updated = True + break + + if not updated: + msg = (_("No location found with ID %(loc)s from image %(img)s") % + dict(loc=loc_id, img=image_id)) + LOG.warn(msg) + raise exception.NotFound(msg) + + +@log_call +def image_location_delete(context, image_id, location_id, status, + delete_time=None): + if status not in ('deleted', 'pending_delete'): + msg = _("The status of deleted image location can only be set to " + "'pending_delete' or 'deleted'.") + raise exception.Invalid(msg) + + deleted = False + for loc in DATA['locations']: + if loc['id'] == location_id and loc['image_id'] == image_id: + deleted = True + delete_time = delete_time or timeutils.utcnow() + loc.update({"deleted": deleted, + "status": status, + "updated_at": delete_time, + "deleted_at": delete_time}) + break + + if not deleted: + msg = (_("No location found with ID %(loc)s from image %(img)s") % + dict(loc=location_id, img=image_id)) + LOG.warn(msg) + raise exception.NotFound(msg) + + +def _image_locations_set(context, image_id, locations): + # NOTE(zhiyan): 1. Remove records from DB for deleted locations + used_loc_ids = [loc['id'] for loc in locations if loc.get('id')] + image = DATA['images'][image_id] + for loc in image['locations']: + if loc['id'] not in used_loc_ids and not loc['deleted']: + image_location_delete(context, image_id, loc['id'], 'deleted') + for i, loc in enumerate(DATA['locations']): + if (loc['image_id'] == image_id and loc['id'] not in used_loc_ids and + not loc['deleted']): + del DATA['locations'][i] + + # NOTE(zhiyan): 2. Adding or update locations + for loc in locations: + if loc.get('id') is None: + image_location_add(context, image_id, loc) + else: + image_location_update(context, image_id, loc) + + +def _image_locations_delete_all(context, image_id, delete_time=None): + image = DATA['images'][image_id] + for loc in image['locations']: + if not loc['deleted']: + image_location_delete(context, image_id, loc['id'], 'deleted', + delete_time=delete_time) + + for i, loc in enumerate(DATA['locations']): + if image_id == loc['image_id'] and loc['deleted'] == False: + del DATA['locations'][i] + + +def _normalize_locations(context, image, force_show_deleted=False): + """ + Generate suitable dictionary list for locations field of image. + + We don't need to set other data fields of location record which return + from image query. 
+ """ + + if image['status'] == 'deactivated' and not context.is_admin: + # Locations are not returned for a deactivated image for non-admin user + image['locations'] = [] + return image + + if force_show_deleted: + locations = image['locations'] + else: + locations = filter(lambda x: not x['deleted'], image['locations']) + image['locations'] = [{'id': loc['id'], + 'url': loc['url'], + 'metadata': loc['metadata'], + 'status': loc['status']} + for loc in locations] + return image + + +@log_call +def image_create(context, image_values): + global DATA + image_id = image_values.get('id', str(uuid.uuid4())) + + if image_id in DATA['images']: + raise exception.Duplicate() + + if 'status' not in image_values: + raise exception.Invalid('status is a required attribute') + + allowed_keys = set(['id', 'name', 'status', 'min_ram', 'min_disk', 'size', + 'virtual_size', 'checksum', 'locations', 'owner', + 'protected', 'is_public', 'container_format', + 'disk_format', 'created_at', 'updated_at', 'deleted', + 'deleted_at', 'properties', 'tags']) + + incorrect_keys = set(image_values.keys()) - allowed_keys + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + image = _image_format(image_id, **image_values) + DATA['images'][image_id] = image + DATA['tags'][image_id] = image.pop('tags', []) + + return _normalize_locations(context, copy.deepcopy(image)) + + +@log_call +def image_update(context, image_id, image_values, purge_props=False, + from_state=None): + global DATA + try: + image = DATA['images'][image_id] + except KeyError: + raise exception.NotFound() + + location_data = image_values.pop('locations', None) + if location_data is not None: + _image_locations_set(context, image_id, location_data) + + # replace values for properties that already exist + new_properties = image_values.pop('properties', {}) + for prop in image['properties']: + if prop['name'] in new_properties: + prop['value'] = new_properties.pop(prop['name']) + elif purge_props: + # this matches weirdness in the sqlalchemy api + prop['deleted'] = True + + image['updated_at'] = timeutils.utcnow() + _image_update(image, image_values, new_properties) + DATA['images'][image_id] = image + return _normalize_locations(context, copy.deepcopy(image)) + + +@log_call +def image_destroy(context, image_id): + global DATA + try: + delete_time = timeutils.utcnow() + DATA['images'][image_id]['deleted'] = True + DATA['images'][image_id]['deleted_at'] = delete_time + + # NOTE(flaper87): Move the image to one of the deleted statuses + # if it hasn't been done yet. 
+ if (DATA['images'][image_id]['status'] not in + ['deleted', 'pending_delete']): + DATA['images'][image_id]['status'] = 'deleted' + + _image_locations_delete_all(context, image_id, + delete_time=delete_time) + + for prop in DATA['images'][image_id]['properties']: + image_property_delete(context, prop['name'], image_id) + + members = image_member_find(context, image_id=image_id) + for member in members: + image_member_delete(context, member['id']) + + tags = image_tag_get_all(context, image_id) + for tag in tags: + image_tag_delete(context, image_id, tag) + + return _normalize_locations(context, + copy.deepcopy(DATA['images'][image_id])) + except KeyError: + raise exception.NotFound() + + +@log_call +def image_tag_get_all(context, image_id): + return DATA['tags'].get(image_id, []) + + +@log_call +def image_tag_get(context, image_id, value): + tags = image_tag_get_all(context, image_id) + if value in tags: + return value + else: + raise exception.NotFound() + + +@log_call +def image_tag_set_all(context, image_id, values): + global DATA + DATA['tags'][image_id] = values + + +@log_call +@utils.no_4byte_params +def image_tag_create(context, image_id, value): + global DATA + DATA['tags'][image_id].append(value) + return value + + +@log_call +def image_tag_delete(context, image_id, value): + global DATA + try: + DATA['tags'][image_id].remove(value) + except ValueError: + raise exception.NotFound() + + +def is_image_mutable(context, image): + """Return True if the image is mutable in this context.""" + # Is admin == image mutable + if context.is_admin: + return True + + # No owner == image not mutable + if image['owner'] is None or context.owner is None: + return False + + # Image only mutable by its owner + return image['owner'] == context.owner + + +def is_image_visible(context, image, status=None): + """Return True if the image is visible in this context.""" + # Is admin == image visible + if context.is_admin: + return True + + # No owner == image visible + if image['owner'] is None: + return True + + # Image is_public == image visible + if image['is_public']: + return True + + # Perform tests based on whether we have an owner + if context.owner is not None: + if context.owner == image['owner']: + return True + + # Figure out if this image is shared with that tenant + if status == 'all': + status = None + members = image_member_find(context, + image_id=image['id'], + member=context.owner, + status=status) + if members: + return True + + # Private image + return False + + +def user_get_storage_usage(context, owner_id, image_id=None, session=None): + images = image_get_all(context, filters={'owner': owner_id}) + total = 0 + for image in images: + if image['status'] in ['killed', 'deleted']: + continue + + if image['id'] != image_id: + locations = [loc for loc in image['locations'] + if loc.get('status') != 'deleted'] + total += (image['size'] * len(locations)) + return total + + +@log_call +def task_create(context, values): + """Create a task object""" + global DATA + + task_values = copy.deepcopy(values) + task_id = task_values.get('id', str(uuid.uuid4())) + required_attributes = ['type', 'status', 'input'] + allowed_attributes = ['id', 'type', 'status', 'input', 'result', 'owner', + 'message', 'expires_at', 'created_at', + 'updated_at', 'deleted_at', 'deleted'] + + if task_id in DATA['tasks']: + raise exception.Duplicate() + + for key in required_attributes: + if key not in task_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(task_values.keys()) - 
set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + task_info_values = _pop_task_info_values(task_values) + task = _task_format(task_id, **task_values) + DATA['tasks'][task_id] = task + task_info = _task_info_create(task['id'], task_info_values) + + return _format_task_from_db(task, task_info) + + +@log_call +def task_update(context, task_id, values): + """Update a task object""" + global DATA + task_values = copy.deepcopy(values) + task_info_values = _pop_task_info_values(task_values) + try: + task = DATA['tasks'][task_id] + except KeyError: + msg = "No task found with ID %s" % task_id + LOG.debug(msg) + raise exception.TaskNotFound(task_id=task_id) + + task.update(task_values) + task['updated_at'] = timeutils.utcnow() + DATA['tasks'][task_id] = task + task_info = _task_info_update(task['id'], task_info_values) + + return _format_task_from_db(task, task_info) + + +@log_call +def task_get(context, task_id, force_show_deleted=False): + task, task_info = _task_get(context, task_id, force_show_deleted) + return _format_task_from_db(task, task_info) + + +def _task_get(context, task_id, force_show_deleted=False): + try: + task = DATA['tasks'][task_id] + except KeyError: + msg = _LW('Could not find task %s') % task_id + LOG.warn(msg) + raise exception.TaskNotFound(task_id=task_id) + + if task['deleted'] and not (force_show_deleted or context.can_see_deleted): + msg = _LW('Unable to get deleted task %s') % task_id + LOG.warn(msg) + raise exception.TaskNotFound(task_id=task_id) + + if not _is_task_visible(context, task): + msg = "Forbidding request, task %s is not visible" % task_id + LOG.debug(msg) + msg = _("Forbidding request, task %s is not visible") % task_id + raise exception.Forbidden(msg) + + task_info = _task_info_get(task_id) + + return task, task_info + + +@log_call +def task_delete(context, task_id): + global DATA + try: + DATA['tasks'][task_id]['deleted'] = True + DATA['tasks'][task_id]['deleted_at'] = timeutils.utcnow() + DATA['tasks'][task_id]['updated_at'] = timeutils.utcnow() + return copy.deepcopy(DATA['tasks'][task_id]) + except KeyError: + msg = "No task found with ID %s" % task_id + LOG.debug(msg) + raise exception.TaskNotFound(task_id=task_id) + + +@log_call +def task_get_all(context, filters=None, marker=None, limit=None, + sort_key='created_at', sort_dir='desc'): + """ + Get all tasks that match zero or more filters. + + :param filters: dict of filter keys and values. 
+ :param marker: task id after which to start page + :param limit: maximum number of tasks to return + :param sort_key: task attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :return: tasks set + """ + filters = filters or {} + tasks = DATA['tasks'].values() + tasks = _filter_tasks(tasks, filters, context) + tasks = _sort_tasks(tasks, sort_key, sort_dir) + tasks = _paginate_tasks(context, tasks, marker, limit, + filters.get('deleted')) + + filtered_tasks = [] + for task in tasks: + filtered_tasks.append(_format_task_from_db(task, task_info_ref=None)) + + return filtered_tasks + + +def _is_task_visible(context, task): + """Return True if the task is visible in this context.""" + # Is admin == task visible + if context.is_admin: + return True + + # No owner == task visible + if task['owner'] is None: + return True + + # Perform tests based on whether we have an owner + if context.owner is not None: + if context.owner == task['owner']: + return True + + return False + + +def _filter_tasks(tasks, filters, context, admin_as_user=False): + filtered_tasks = [] + + for task in tasks: + has_ownership = context.owner and task['owner'] == context.owner + can_see = (has_ownership or (context.is_admin and not admin_as_user)) + if not can_see: + continue + + add = True + for k, value in six.iteritems(filters): + add = task[k] == value and task['deleted'] is False + if not add: + break + + if add: + filtered_tasks.append(task) + + return filtered_tasks + + +def _sort_tasks(tasks, sort_key, sort_dir): + reverse = False + if tasks and not (sort_key in tasks[0]): + raise exception.InvalidSortKey() + keyfn = lambda x: (x[sort_key] if x[sort_key] is not None else '', + x['created_at'], x['id']) + reverse = sort_dir == 'desc' + tasks.sort(key=keyfn, reverse=reverse) + return tasks + + +def _paginate_tasks(context, tasks, marker, limit, show_deleted): + start = 0 + end = -1 + if marker is None: + start = 0 + else: + # Check that the task is accessible + _task_get(context, marker, force_show_deleted=show_deleted) + + for i, task in enumerate(tasks): + if task['id'] == marker: + start = i + 1 + break + else: + if task: + raise exception.TaskNotFound(task_id=task['id']) + else: + msg = _("Task does not exist") + raise exception.NotFound(message=msg) + + end = start + limit if limit is not None else None + return tasks[start:end] + + +def _task_info_create(task_id, values): + """Create a Task Info for Task with given task ID""" + global DATA + task_info = _task_info_format(task_id, **values) + DATA['task_info'][task_id] = task_info + + return task_info + + +def _task_info_update(task_id, values): + """Update Task Info for Task with given task ID and updated values""" + global DATA + try: + task_info = DATA['task_info'][task_id] + except KeyError: + msg = "No task info found with task id %s" % task_id + LOG.debug(msg) + raise exception.TaskNotFound(task_id=task_id) + + task_info.update(values) + DATA['task_info'][task_id] = task_info + + return task_info + + +def _task_info_get(task_id): + """Get Task Info for Task with given task ID""" + global DATA + try: + task_info = DATA['task_info'][task_id] + except KeyError: + msg = _LW('Could not find task info %s') % task_id + LOG.warn(msg) + raise exception.TaskNotFound(task_id=task_id) + + return task_info + + +def _metadef_delete_namespace_content(get_func, key, context, namespace_name): + global DATA + metadefs = get_func(context, namespace_name) + data = DATA[key] + for metadef in metadefs: + 
data.remove(metadef) + return metadefs + + +@log_call +def metadef_namespace_create(context, values): + """Create a namespace object""" + global DATA + + namespace_values = copy.deepcopy(values) + namespace_name = namespace_values.get('namespace') + required_attributes = ['namespace', 'owner'] + allowed_attributes = ['namespace', 'owner', 'display_name', 'description', + 'visibility', 'protected'] + + for namespace in DATA['metadef_namespaces']: + if namespace['namespace'] == namespace_name: + msg = ("Can not create the metadata definition namespace. " + "Namespace=%s already exists.") % namespace_name + LOG.debug(msg) + raise exception.MetadefDuplicateNamespace( + namespace_name=namespace_name) + + for key in required_attributes: + if key not in namespace_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(namespace_values.keys()) - set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + namespace = _format_namespace(namespace_values) + DATA['metadef_namespaces'].append(namespace) + + return namespace + + +@log_call +def metadef_namespace_update(context, namespace_id, values): + """Update a namespace object""" + global DATA + namespace_values = copy.deepcopy(values) + + namespace = metadef_namespace_get_by_id(context, namespace_id) + if namespace['namespace'] != values['namespace']: + for db_namespace in DATA['metadef_namespaces']: + if db_namespace['namespace'] == values['namespace']: + msg = ("Invalid update. It would result in a duplicate" + " metadata definition namespace with the same" + " name of %s" + % values['namespace']) + LOG.debug(msg) + emsg = (_("Invalid update. It would result in a duplicate" + " metadata definition namespace with the same" + " name of %s") + % values['namespace']) + raise exception.MetadefDuplicateNamespace(emsg) + DATA['metadef_namespaces'].remove(namespace) + + namespace.update(namespace_values) + namespace['updated_at'] = timeutils.utcnow() + DATA['metadef_namespaces'].append(namespace) + + return namespace + + +@log_call +def metadef_namespace_get_by_id(context, namespace_id): + """Get a namespace object""" + try: + namespace = next(namespace for namespace in DATA['metadef_namespaces'] + if namespace['id'] == namespace_id) + except StopIteration: + msg = (_("Metadata definition namespace not found for id=%s") + % namespace_id) + LOG.warn(msg) + raise exception.MetadefNamespaceNotFound(msg) + + if not _is_namespace_visible(context, namespace): + msg = ("Forbidding request, metadata definition namespace=%s" + " is not visible.") % namespace.namespace + LOG.debug(msg) + emsg = _("Forbidding request, metadata definition namespace=%s" + " is not visible.") % namespace.namespace + raise exception.MetadefForbidden(emsg) + + return namespace + + +@log_call +def metadef_namespace_get(context, namespace_name): + """Get a namespace object""" + try: + namespace = next(namespace for namespace in DATA['metadef_namespaces'] + if namespace['namespace'] == namespace_name) + except StopIteration: + msg = "No namespace found with name %s" % namespace_name + LOG.debug(msg) + raise exception.MetadefNamespaceNotFound( + namespace_name=namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + return namespace + + +@log_call +def metadef_namespace_get_all(context, + marker=None, + limit=None, + sort_key='created_at', + sort_dir='desc', + filters=None): + """Get a namespaces list""" + resource_types = filters.get('resource_types', 
[]) if filters else []
+    visibility = filters.get('visibility', None) if filters else None
+
+    namespaces = []
+    for namespace in DATA['metadef_namespaces']:
+        if not _is_namespace_visible(context, namespace):
+            continue
+
+        if visibility and namespace['visibility'] != visibility:
+            continue
+
+        if resource_types:
+            for association in DATA['metadef_namespace_resource_types']:
+                if association['namespace_id'] == namespace['id']:
+                    if association['name'] in resource_types:
+                        break
+            else:
+                continue
+
+        namespaces.append(namespace)
+
+    return namespaces
+
+
+@log_call
+def metadef_namespace_delete(context, namespace_name):
+    """Delete a namespace object"""
+    global DATA
+
+    namespace = metadef_namespace_get(context, namespace_name)
+    DATA['metadef_namespaces'].remove(namespace)
+
+    return namespace
+
+
+@log_call
+def metadef_namespace_delete_content(context, namespace_name):
+    """Delete a namespace content"""
+    global DATA
+    namespace = metadef_namespace_get(context, namespace_name)
+    namespace_id = namespace['id']
+
+    objects = []
+
+    for object in DATA['metadef_objects']:
+        if object['namespace_id'] != namespace_id:
+            objects.append(object)
+
+    DATA['metadef_objects'] = objects
+
+    properties = []
+
+    for property in DATA['metadef_properties']:
+        if property['namespace_id'] != namespace_id:
+            properties.append(property)
+
+    DATA['metadef_properties'] = properties
+
+    return namespace
+
+
+@log_call
+def metadef_object_get(context, namespace_name, object_name):
+    """Get a metadef object"""
+    namespace = metadef_namespace_get(context, namespace_name)
+
+    _check_namespace_visibility(context, namespace, namespace_name)
+
+    for object in DATA['metadef_objects']:
+        if (object['namespace_id'] == namespace['id'] and
+                object['name'] == object_name):
+            return object
+    else:
+        msg = ("The metadata definition object with name=%(name)s"
+               " was not found in namespace=%(namespace_name)s."
+ % {'name': object_name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefObjectNotFound(namespace_name=namespace_name, + object_name=object_name) + + +@log_call +def metadef_object_get_by_id(context, namespace_name, object_id): + """Get a metadef object""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + for object in DATA['metadef_objects']: + if (object['namespace_id'] == namespace['id'] and + object['id'] == object_id): + return object + else: + msg = (_("Metadata definition object not found for id=%s") + % object_id) + LOG.warn(msg) + raise exception.MetadefObjectNotFound(msg) + + +@log_call +def metadef_object_get_all(context, namespace_name): + """Get a metadef objects list""" + namespace = metadef_namespace_get(context, namespace_name) + + objects = [] + + _check_namespace_visibility(context, namespace, namespace_name) + + for object in DATA['metadef_objects']: + if object['namespace_id'] == namespace['id']: + objects.append(object) + + return objects + + +@log_call +def metadef_object_create(context, namespace_name, values): + """Create a metadef object""" + global DATA + + object_values = copy.deepcopy(values) + object_name = object_values['name'] + required_attributes = ['name'] + allowed_attributes = ['name', 'description', 'json_schema', 'required'] + + namespace = metadef_namespace_get(context, namespace_name) + + for object in DATA['metadef_objects']: + if (object['name'] == object_name and + object['namespace_id'] == namespace['id']): + msg = ("A metadata definition object with name=%(name)s" + " in namespace=%(namespace_name)s already exists." + % {'name': object_name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefDuplicateObject( + object_name=object_name, namespace_name=namespace_name) + + for key in required_attributes: + if key not in object_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(object_values.keys()) - set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + object_values['namespace_id'] = namespace['id'] + + _check_namespace_visibility(context, namespace, namespace_name) + + object = _format_object(object_values) + DATA['metadef_objects'].append(object) + + return object + + +@log_call +def metadef_object_update(context, namespace_name, object_id, values): + """Update a metadef object""" + global DATA + + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + object = metadef_object_get_by_id(context, namespace_name, object_id) + if object['name'] != values['name']: + for db_object in DATA['metadef_objects']: + if (db_object['name'] == values['name'] and + db_object['namespace_id'] == namespace['id']): + msg = ("Invalid update. It would result in a duplicate" + " metadata definition object with same name=%(name)s " + " in namespace=%(namespace_name)s." + % {'name': object['name'], + 'namespace_name': namespace_name}) + LOG.debug(msg) + emsg = (_("Invalid update. 
It would result in a duplicate" + " metadata definition object with the same" + " name=%(name)s " + " in namespace=%(namespace_name)s.") + % {'name': object['name'], + 'namespace_name': namespace_name}) + raise exception.MetadefDuplicateObject(emsg) + DATA['metadef_objects'].remove(object) + + object.update(values) + object['updated_at'] = timeutils.utcnow() + DATA['metadef_objects'].append(object) + + return object + + +@log_call +def metadef_object_delete(context, namespace_name, object_name): + """Delete a metadef object""" + global DATA + + object = metadef_object_get(context, namespace_name, object_name) + DATA['metadef_objects'].remove(object) + + return object + + +def metadef_object_delete_namespace_content(context, namespace_name, + session=None): + """Delete an object or raise if namespace or object doesn't exist.""" + return _metadef_delete_namespace_content( + metadef_object_get_all, 'metadef_objects', context, namespace_name) + + +@log_call +def metadef_object_count(context, namespace_name): + """Get metadef object count in a namespace""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + count = 0 + for object in DATA['metadef_objects']: + if object['namespace_id'] == namespace['id']: + count = count + 1 + + return count + + +@log_call +def metadef_property_count(context, namespace_name): + """Get properties count in a namespace""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + count = 0 + for property in DATA['metadef_properties']: + if property['namespace_id'] == namespace['id']: + count = count + 1 + + return count + + +@log_call +def metadef_property_create(context, namespace_name, values): + """Create a metadef property""" + global DATA + + property_values = copy.deepcopy(values) + property_name = property_values['name'] + required_attributes = ['name'] + allowed_attributes = ['name', 'description', 'json_schema', 'required'] + + namespace = metadef_namespace_get(context, namespace_name) + + for property in DATA['metadef_properties']: + if (property['name'] == property_name and + property['namespace_id'] == namespace['id']): + msg = ("Can not create metadata definition property. A property" + " with name=%(name)s already exists in" + " namespace=%(namespace_name)s." 
+ % {'name': property_name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefDuplicateProperty( + property_name=property_name, + namespace_name=namespace_name) + + for key in required_attributes: + if key not in property_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(property_values.keys()) - set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + property_values['namespace_id'] = namespace['id'] + + _check_namespace_visibility(context, namespace, namespace_name) + + property = _format_property(property_values) + DATA['metadef_properties'].append(property) + + return property + + +@log_call +def metadef_property_update(context, namespace_name, property_id, values): + """Update a metadef property""" + global DATA + + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + property = metadef_property_get_by_id(context, namespace_name, property_id) + if property['name'] != values['name']: + for db_property in DATA['metadef_properties']: + if (db_property['name'] == values['name'] and + db_property['namespace_id'] == namespace['id']): + msg = ("Invalid update. It would result in a duplicate" + " metadata definition property with the same" + " name=%(name)s" + " in namespace=%(namespace_name)s." + % {'name': property['name'], + 'namespace_name': namespace_name}) + LOG.debug(msg) + emsg = (_("Invalid update. It would result in a duplicate" + " metadata definition property with the same" + " name=%(name)s" + " in namespace=%(namespace_name)s.") + % {'name': property['name'], + 'namespace_name': namespace_name}) + raise exception.MetadefDuplicateProperty(emsg) + DATA['metadef_properties'].remove(property) + + property.update(values) + property['updated_at'] = timeutils.utcnow() + DATA['metadef_properties'].append(property) + + return property + + +@log_call +def metadef_property_get_all(context, namespace_name): + """Get a metadef properties list""" + namespace = metadef_namespace_get(context, namespace_name) + + properties = [] + + _check_namespace_visibility(context, namespace, namespace_name) + + for property in DATA['metadef_properties']: + if property['namespace_id'] == namespace['id']: + properties.append(property) + + return properties + + +@log_call +def metadef_property_get_by_id(context, namespace_name, property_id): + """Get a metadef property""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + for property in DATA['metadef_properties']: + if (property['namespace_id'] == namespace['id'] and + property['id'] == property_id): + return property + else: + msg = (_("Metadata definition property not found for id=%s") + % property_id) + LOG.warn(msg) + raise exception.MetadefPropertyNotFound(msg) + + +@log_call +def metadef_property_get(context, namespace_name, property_name): + """Get a metadef property""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + for property in DATA['metadef_properties']: + if (property['namespace_id'] == namespace['id'] and + property['name'] == property_name): + return property + else: + msg = ("No property found with name=%(name)s in" + " namespace=%(namespace_name)s " + % {'name': property_name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise 
exception.MetadefPropertyNotFound(namespace_name=namespace_name, + property_name=property_name) + + +@log_call +def metadef_property_delete(context, namespace_name, property_name): + """Delete a metadef property""" + global DATA + + property = metadef_property_get(context, namespace_name, property_name) + DATA['metadef_properties'].remove(property) + + return property + + +def metadef_property_delete_namespace_content(context, namespace_name, + session=None): + """Delete a property or raise if it or namespace doesn't exist.""" + return _metadef_delete_namespace_content( + metadef_property_get_all, 'metadef_properties', context, + namespace_name) + + +@log_call +def metadef_resource_type_create(context, values): + """Create a metadef resource type""" + global DATA + + resource_type_values = copy.deepcopy(values) + resource_type_name = resource_type_values['name'] + + allowed_attrubites = ['name', 'protected'] + + for resource_type in DATA['metadef_resource_types']: + if resource_type['name'] == resource_type_name: + raise exception.Duplicate() + + incorrect_keys = set(resource_type_values.keys()) - set(allowed_attrubites) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + resource_type = _format_resource_type(resource_type_values) + DATA['metadef_resource_types'].append(resource_type) + + return resource_type + + +@log_call +def metadef_resource_type_get_all(context): + """List all resource types""" + return DATA['metadef_resource_types'] + + +@log_call +def metadef_resource_type_get(context, resource_type_name): + """Get a resource type""" + try: + resource_type = next(resource_type for resource_type in + DATA['metadef_resource_types'] + if resource_type['name'] == + resource_type_name) + except StopIteration: + msg = "No resource type found with name %s" % resource_type_name + LOG.debug(msg) + raise exception.MetadefResourceTypeNotFound( + resource_type_name=resource_type_name) + + return resource_type + + +@log_call +def metadef_resource_type_association_create(context, namespace_name, + values): + global DATA + + association_values = copy.deepcopy(values) + + namespace = metadef_namespace_get(context, namespace_name) + resource_type_name = association_values['name'] + resource_type = metadef_resource_type_get(context, + resource_type_name) + + required_attributes = ['name', 'properties_target', 'prefix'] + allowed_attributes = copy.deepcopy(required_attributes) + + for association in DATA['metadef_namespace_resource_types']: + if (association['namespace_id'] == namespace['id'] and + association['resource_type'] == resource_type['id']): + msg = ("The metadata definition resource-type association of" + " resource_type=%(resource_type_name)s to" + " namespace=%(namespace_name)s, already exists." 
+                   % {'resource_type_name': resource_type_name,
+                      'namespace_name': namespace_name})
+            LOG.debug(msg)
+            raise exception.MetadefDuplicateResourceTypeAssociation(
+                resource_type_name=resource_type_name,
+                namespace_name=namespace_name)
+
+    for key in required_attributes:
+        if key not in association_values:
+            raise exception.Invalid('%s is a required attribute' % key)
+
+    incorrect_keys = set(association_values.keys()) - set(allowed_attributes)
+    if incorrect_keys:
+        raise exception.Invalid(
+            'The keys %s are not valid' % str(incorrect_keys))
+
+    association = _format_association(namespace, resource_type,
+                                      association_values)
+    DATA['metadef_namespace_resource_types'].append(association)
+
+    return association
+
+
+@log_call
+def metadef_resource_type_association_get(context, namespace_name,
+                                          resource_type_name):
+    namespace = metadef_namespace_get(context, namespace_name)
+    resource_type = metadef_resource_type_get(context, resource_type_name)
+
+    for association in DATA['metadef_namespace_resource_types']:
+        if (association['namespace_id'] == namespace['id'] and
+                association['resource_type'] == resource_type['id']):
+            return association
+    else:
+        msg = ("No resource type association found associated with namespace "
+               "%s and resource type %s" % (namespace_name,
+                                            resource_type_name))
+        LOG.debug(msg)
+        raise exception.MetadefResourceTypeAssociationNotFound(
+            resource_type_name=resource_type_name,
+            namespace_name=namespace_name)
+
+
+@log_call
+def metadef_resource_type_association_get_all_by_namespace(context,
+                                                           namespace_name):
+    namespace = metadef_namespace_get(context, namespace_name)
+
+    namespace_resource_types = []
+    for resource_type in DATA['metadef_namespace_resource_types']:
+        if resource_type['namespace_id'] == namespace['id']:
+            namespace_resource_types.append(resource_type)
+
+    return namespace_resource_types
+
+
+@log_call
+def metadef_resource_type_association_delete(context, namespace_name,
+                                             resource_type_name):
+    global DATA
+
+    resource_type = metadef_resource_type_association_get(context,
+                                                          namespace_name,
+                                                          resource_type_name)
+    DATA['metadef_namespace_resource_types'].remove(resource_type)
+
+    return resource_type
+
+
+@log_call
+def metadef_tag_get(context, namespace_name, name):
+    """Get a metadef tag"""
+    namespace = metadef_namespace_get(context, namespace_name)
+    _check_namespace_visibility(context, namespace, namespace_name)
+
+    for tag in DATA['metadef_tags']:
+        if tag['namespace_id'] == namespace['id'] and tag['name'] == name:
+            return tag
+    else:
+        msg = ("The metadata definition tag with name=%(name)s"
+               " was not found in namespace=%(namespace_name)s."
+ % {'name': name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefTagNotFound(name=name, + namespace_name=namespace_name) + + +@log_call +def metadef_tag_get_by_id(context, namespace_name, id): + """Get a metadef tag""" + namespace = metadef_namespace_get(context, namespace_name) + _check_namespace_visibility(context, namespace, namespace_name) + + for tag in DATA['metadef_tags']: + if tag['namespace_id'] == namespace['id'] and tag['id'] == id: + return tag + else: + msg = (_("Metadata definition tag not found for id=%s") % id) + LOG.warn(msg) + raise exception.MetadefTagNotFound(msg) + + +@log_call +def metadef_tag_get_all(context, namespace_name, filters=None, marker=None, + limit=None, sort_key='created_at', sort_dir=None, + session=None): + """Get a metadef tags list""" + + namespace = metadef_namespace_get(context, namespace_name) + _check_namespace_visibility(context, namespace, namespace_name) + + tags = [] + for tag in DATA['metadef_tags']: + if tag['namespace_id'] == namespace['id']: + tags.append(tag) + + return tags + + +@log_call +def metadef_tag_create(context, namespace_name, values): + """Create a metadef tag""" + global DATA + + tag_values = copy.deepcopy(values) + tag_name = tag_values['name'] + required_attributes = ['name'] + allowed_attributes = ['name'] + + namespace = metadef_namespace_get(context, namespace_name) + + for tag in DATA['metadef_tags']: + if tag['name'] == tag_name and tag['namespace_id'] == namespace['id']: + msg = ("A metadata definition tag with name=%(name)s" + " in namespace=%(namespace_name)s already exists." + % {'name': tag_name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefDuplicateTag( + name=tag_name, namespace_name=namespace_name) + + for key in required_attributes: + if key not in tag_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + tag_values['namespace_id'] = namespace['id'] + + _check_namespace_visibility(context, namespace, namespace_name) + + tag = _format_tag(tag_values) + DATA['metadef_tags'].append(tag) + return tag + + +@log_call +def metadef_tag_create_tags(context, namespace_name, tag_list): + """Create a metadef tag""" + global DATA + + namespace = metadef_namespace_get(context, namespace_name) + _check_namespace_visibility(context, namespace, namespace_name) + + required_attributes = ['name'] + allowed_attributes = ['name'] + data_tag_list = [] + tag_name_list = [] + for tag_value in tag_list: + tag_values = copy.deepcopy(tag_value) + tag_name = tag_values['name'] + + for key in required_attributes: + if key not in tag_values: + raise exception.Invalid('%s is a required attribute' % key) + + incorrect_keys = set(tag_values.keys()) - set(allowed_attributes) + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + if tag_name in tag_name_list: + msg = ("A metadata definition tag with name=%(name)s" + " in namespace=%(namespace_name)s already exists." 
+ % {'name': tag_name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefDuplicateTag( + name=tag_name, namespace_name=namespace_name) + else: + tag_name_list.append(tag_name) + + tag_values['namespace_id'] = namespace['id'] + data_tag_list.append(_format_tag(tag_values)) + + DATA['metadef_tags'] = [] + for tag in data_tag_list: + DATA['metadef_tags'].append(tag) + + return data_tag_list + + +@log_call +def metadef_tag_update(context, namespace_name, id, values): + """Update a metadef tag""" + global DATA + + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + tag = metadef_tag_get_by_id(context, namespace_name, id) + if tag['name'] != values['name']: + for db_tag in DATA['metadef_tags']: + if (db_tag['name'] == values['name'] and + db_tag['namespace_id'] == namespace['id']): + msg = ("Invalid update. It would result in a duplicate" + " metadata definition tag with same name=%(name)s " + " in namespace=%(namespace_name)s." + % {'name': tag['name'], + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exception.MetadefDuplicateTag( + name=tag['name'], namespace_name=namespace_name) + + DATA['metadef_tags'].remove(tag) + + tag.update(values) + tag['updated_at'] = timeutils.utcnow() + DATA['metadef_tags'].append(tag) + return tag + + +@log_call +def metadef_tag_delete(context, namespace_name, name): + """Delete a metadef tag""" + global DATA + + tags = metadef_tag_get(context, namespace_name, name) + DATA['metadef_tags'].remove(tags) + + return tags + + +def metadef_tag_delete_namespace_content(context, namespace_name, + session=None): + """Delete an tag or raise if namespace or tag doesn't exist.""" + return _metadef_delete_namespace_content( + metadef_tag_get_all, 'metadef_tags', context, namespace_name) + + +@log_call +def metadef_tag_count(context, namespace_name): + """Get metadef tag count in a namespace""" + namespace = metadef_namespace_get(context, namespace_name) + + _check_namespace_visibility(context, namespace, namespace_name) + + count = 0 + for tag in DATA['metadef_tags']: + if tag['namespace_id'] == namespace['id']: + count = count + 1 + + return count + + +def _artifact_format(artifact_id, **values): + dt = timeutils.utcnow() + artifact = { + 'id': artifact_id, + 'type_name': None, + 'type_version_prefix': None, + 'type_version_suffix': None, + 'type_version_meta': None, + 'version_prefix': None, + 'version_suffix': None, + 'version_meta': None, + 'description': None, + 'visibility': None, + 'state': None, + 'owner': None, + 'scope': None, + 'tags': [], + 'properties': {}, + 'blobs': [], + 'created_at': dt, + 'updated_at': dt, + 'deleted_at': None, + 'deleted': False, + } + + artifact.update(values) + return artifact + + +@log_call +def artifact_create(context, values, type_name, type_version): + global DATA + artifact_id = values.get('id', str(uuid.uuid4())) + + if artifact_id in DATA['artifacts']: + raise exception.Duplicate() + + if 'state' not in values: + raise exception.Invalid('state is a required attribute') + + allowed_keys = set(['id', + 'type_name', + 'type_version', + 'name', + 'version', + 'description', + 'visibility', + 'state', + 'owner', + 'scope']) + + incorrect_keys = set(values.keys()) - allowed_keys + if incorrect_keys: + raise exception.Invalid( + 'The keys %s are not valid' % str(incorrect_keys)) + + artifact = _artifact_format(artifact_id, **values) + DATA['artifacts'][artifact_id] = artifact + + return copy.deepcopy(artifact) + + +def 
_artifact_get(context, artifact_id, type_name, + type_version=None): + try: + artifact = DATA['artifacts'][artifact_id] + if artifact['type_name'] != type_name or\ + (type_version is not None and + artifact['type_version'] != type_version): + raise KeyError + except KeyError: + LOG.info(_LI('Could not find artifact %s') % artifact_id) + raise exception.NotFound() + + if artifact['deleted_at']: + LOG.info(_LI('Unable to get deleted image')) + raise exception.NotFound() + + return artifact + + +@log_call +def artifact_get(context, artifact_id, + type_name, + type_version=None, session=None): + artifact = _artifact_get(context, artifact_id, type_name, + type_version) + return copy.deepcopy(artifact) + + +def _format_association(namespace, resource_type, association_values): + association = { + 'namespace_id': namespace['id'], + 'resource_type': resource_type['id'], + 'properties_target': None, + 'prefix': None, + 'created_at': timeutils.utcnow(), + 'updated_at': timeutils.utcnow() + + } + association.update(association_values) + return association + + +def _format_resource_type(values): + dt = timeutils.utcnow() + resource_type = { + 'id': _get_metadef_id(), + 'name': values['name'], + 'protected': True, + 'created_at': dt, + 'updated_at': dt + } + resource_type.update(values) + return resource_type + + +def _format_property(values): + property = { + 'id': _get_metadef_id(), + 'namespace_id': None, + 'name': None, + 'json_schema': None + } + property.update(values) + return property + + +def _format_namespace(values): + dt = timeutils.utcnow() + namespace = { + 'id': _get_metadef_id(), + 'namespace': None, + 'display_name': None, + 'description': None, + 'visibility': 'private', + 'protected': False, + 'owner': None, + 'created_at': dt, + 'updated_at': dt + } + namespace.update(values) + return namespace + + +def _format_object(values): + dt = timeutils.utcnow() + object = { + 'id': _get_metadef_id(), + 'namespace_id': None, + 'name': None, + 'description': None, + 'json_schema': None, + 'required': None, + 'created_at': dt, + 'updated_at': dt + } + object.update(values) + return object + + +def _format_tag(values): + dt = timeutils.utcnow() + tag = { + 'id': _get_metadef_id(), + 'namespace_id': None, + 'name': None, + 'created_at': dt, + 'updated_at': dt + } + tag.update(values) + return tag + + +def _is_namespace_visible(context, namespace): + """Return true if namespace is visible in this context""" + if context.is_admin: + return True + + if namespace.get('visibility', '') == 'public': + return True + + if namespace['owner'] is None: + return True + + if context.owner is not None: + if context.owner == namespace['owner']: + return True + + return False + + +def _check_namespace_visibility(context, namespace, namespace_name): + if not _is_namespace_visible(context, namespace): + msg = ("Forbidding request, metadata definition namespace=%s" + " is not visible." 
% namespace_name) + LOG.debug(msg) + emsg = _("Forbidding request, metadata definition namespace=%s" + " is not visible.") % namespace_name + raise exception.MetadefForbidden(emsg) + + +def _get_metadef_id(): + global INDEX + INDEX += 1 + return INDEX diff --git a/code/daisy/daisy/db/sqlalchemy/__init__.py b/code/daisy/daisy/db/sqlalchemy/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/sqlalchemy/api.py b/code/daisy/daisy/db/sqlalchemy/api.py new file mode 100755 index 00000000..8bbf01c9 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/api.py @@ -0,0 +1,6161 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010-2011 OpenStack Foundation +# Copyright 2012 Justin Santa Barbara +# Copyright 2013 IBM Corp. +# Copyright 2015 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Defines interface for DB access.""" + +import threading +import uuid +import re +import copy +from oslo_config import cfg +from oslo_db import exception as db_exception +from oslo_db.sqlalchemy import session +from oslo_log import log as logging +from oslo_utils import timeutils +import osprofiler.sqlalchemy +from retrying import retry +import six +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range +import sqlalchemy +import sqlalchemy.orm as sa_orm +import sqlalchemy.sql as sa_sql +import types + +from daisy import artifacts as ga +from daisy.common import exception +from daisy.common import utils +from daisy.db.sqlalchemy import artifacts +from daisy.db.sqlalchemy.metadef_api import namespace as metadef_namespace_api +from daisy.db.sqlalchemy.metadef_api import object as metadef_object_api +from daisy.db.sqlalchemy.metadef_api import property as metadef_property_api +from daisy.db.sqlalchemy.metadef_api\ + import resource_type as metadef_resource_type_api +from daisy.db.sqlalchemy.metadef_api\ + import resource_type_association as metadef_association_api +from daisy.db.sqlalchemy.metadef_api import tag as metadef_tag_api +from daisy.db.sqlalchemy import models +from daisy import i18n + +BASE = models.BASE +sa_logger = None +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI +_LW = i18n._LW + + +STATUSES = ['active', 'saving', 'queued', 'killed', 'pending_delete', + 'deleted', 'deactivated'] + +CONF = cfg.CONF +CONF.import_group("profiler", "daisy.common.wsgi") + +_FACADE = None +_LOCK = threading.Lock() + + +def _retry_on_deadlock(exc): + """Decorator to retry a DB API call if Deadlock was received.""" + + if isinstance(exc, db_exception.DBDeadlock): + LOG.warn(_LW("Deadlock detected. 
Retrying...")) + return True + return False + + +def _create_facade_lazily(): + global _LOCK, _FACADE + if _FACADE is None: + with _LOCK: + if _FACADE is None: + _FACADE = session.EngineFacade.from_config(CONF) + + if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: + osprofiler.sqlalchemy.add_tracing(sqlalchemy, + _FACADE.get_engine(), + "db") + return _FACADE + + +def get_engine(): + facade = _create_facade_lazily() + return facade.get_engine() + + +def get_session(autocommit=True, expire_on_commit=False): + facade = _create_facade_lazily() + return facade.get_session(autocommit=autocommit, + expire_on_commit=expire_on_commit) + + +def clear_db_env(): + """ + Unset global configuration variables for database. + """ + global _FACADE + _FACADE = None + + +def _check_mutate_authorization(context, image_ref): + if not is_image_mutable(context, image_ref): + LOG.warn(_LW("Attempted to modify image user did not own.")) + msg = _("You do not own this image") + if image_ref.is_public: + exc_class = exception.ForbiddenPublicImage + else: + exc_class = exception.Forbidden + + raise exc_class(msg) + + +def image_create(context, values): + """Create an image from the values dictionary.""" + return _image_update(context, values, None, purge_props=False) + + +def image_update(context, image_id, values, purge_props=False, + from_state=None): + """ + Set the given properties on an image and update it. + + :raises NotFound if image does not exist. + """ + return _image_update(context, values, image_id, purge_props, + from_state=from_state) + + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def image_destroy(context, image_id): + """Destroy the image or raise if it does not exist.""" + session = get_session() + with session.begin(): + image_ref = _image_get(context, image_id, session=session) + + # Perform authorization check + _check_mutate_authorization(context, image_ref) + + image_ref.delete(session=session) + delete_time = image_ref.deleted_at + + _image_locations_delete_all(context, image_id, delete_time, session) + + _image_property_delete_all(context, image_id, delete_time, session) + + _image_member_delete_all(context, image_id, delete_time, session) + + _image_tag_delete_all(context, image_id, delete_time, session) + + return _normalize_locations(context, image_ref) + +def _normalize_locations(context, image, force_show_deleted=False): + """ + Generate suitable dictionary list for locations field of image. + + We don't need to set other data fields of location record which return + from image query. 
+ """ + + if image['status'] == 'deactivated' and not context.is_admin: + # Locations are not returned for a deactivated image for non-admin user + image['locations'] = [] + return image + + if force_show_deleted: + locations = image['locations'] + else: + locations = filter(lambda x: not x.deleted, image['locations']) + image['locations'] = [{'id': loc['id'], + 'url': loc['value'], + 'metadata': loc['meta_data'], + 'status': loc['status']} + for loc in locations] + return image + + +def _normalize_tags(image): + undeleted_tags = filter(lambda x: not x.deleted, image['tags']) + image['tags'] = [tag['value'] for tag in undeleted_tags] + return image + + +def image_get(context, image_id, session=None, force_show_deleted=False): + image = _image_get(context, image_id, session=session, + force_show_deleted=force_show_deleted) + image = _normalize_locations(context, image.to_dict(), + force_show_deleted=force_show_deleted) + return image + + +def _check_image_id(image_id): + """ + check if the given image id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the image id + length is longer than the defined length in database model. + :param image_id: The id of the image we want to check + :return: Raise NoFound exception if given image id is invalid + """ + if (image_id and + len(image_id) > models.Image.id.property.columns[0].type.length): + raise exception.NotFound() + + +def _image_get(context, image_id, session=None, force_show_deleted=False): + """Get an image or raise if it does not exist.""" + _check_image_id(image_id) + session = session or get_session() + + try: + query = session.query(models.Image).options( + sa_orm.joinedload(models.Image.properties)).options( + sa_orm.joinedload( + models.Image.locations)).filter_by(id=image_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + image = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No image found with ID %s" % image_id + LOG.debug(msg) + raise exception.NotFound(msg) + + # Make sure they can look at it + if not is_image_visible(context, image): + msg = "Forbidding request, image %s not visible" % image_id + LOG.debug(msg) + raise exception.Forbidden(msg) + + return image + +def _check_host_id(host_id): + """ + check if the given host id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the host id + length is longer than the defined length in database model. + :param image_id: The id of the host we want to check + :return: Raise NoFound exception if given host id is invalid + """ + if (host_id and + len(host_id) > models.Host.id.property.columns[0].type.length): + raise exception.NotFound() + +def _ip_checker_re(ip_str): + pattern = r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b" + if re.match(pattern, ip_str): + return True + else: + return False + +def ip_into_int(ip): + """ + Switch ip string to decimalism integer.. 
+ :param ip: ip string + :return: decimalism integer + """ + return reduce(lambda x,y:(x<<8)+y, map(int, ip.split('.'))) + +def inter_into_ip(num): + inter_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + return inter_ip(num) + +def is_in_cidr_range(ip, network): + """ + Check ip is in range + :param ip: Ip will be checked, like:192.168.1.2. + :param network: Ip range,like:192.168.0.0/24. + :return: If ip in range,return True,else return False. + """ + network = network.split('/') + mask = ~(2**(32 - int(network[1])) - 1) + return (ip_into_int(ip) & mask) == (ip_into_int(network[0]) & mask) + +def cidr_convert_ip_ranges(cidr): + str_ip_mask = cidr.split('/')[1] + ip_addr = cidr.split('/')[0] + ip_inst=ip_into_int(ip_addr) + mask = ~(2**(32 - int(str_ip_mask)) - 1) + ip_addr_min = inter_into_ip(ip_inst & (mask & 0xffffffff)) + ip_addr_max = inter_into_ip(ip_inst | (~mask & 0xffffffff)) + if ip_addr_min.split('.')[3]=='0': + ip_addr_min=ip_addr_min.split('.')[0]+'.'+ip_addr_min.split('.')[1]+'.'+ip_addr_min.split('.')[2]+'.1' + return [ip_addr_min,ip_addr_max] + +def get_ip_with_equal_cidr(cluster_id,network_plane_name,session): + equal_cidr_network_plane_id_list=[] + available_ip_list=[] + + sql_network_plane_cidr="select networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_network_plane_cidr = session.execute(sql_network_plane_cidr).fetchone() + network_cidr=query_network_plane_cidr.values().pop() + if not network_cidr: + msg = "Error:The CIDR is blank of %s!"%network_plane_name + LOG.error(msg) + raise exception.Forbidden(msg) + str_network_cidr=','.join(cidr_convert_ip_ranges(network_cidr)) + + sql_all_network_plane_info="select networks.id,networks.cidr from networks where networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_all_network_plane_info = session.execute(sql_all_network_plane_info).fetchall() + for network_plane_tmp in query_all_network_plane_info: + query_network_plane_tmp_info=network_plane_tmp.values() + cidr = query_network_plane_tmp_info[1] + if not cidr: + continue + ip_ranges_cidr=cidr_convert_ip_ranges(cidr) + str_query_network_plane_cidr=','.join(ip_ranges_cidr) + if str_network_cidr==str_query_network_plane_cidr: + equal_cidr_network_plane_id_list.append(query_network_plane_tmp_info[0]) + + for network_id in equal_cidr_network_plane_id_list: + sql_ip="select assigned_networks.ip from assigned_networks where assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"' order by assigned_networks.ip" + query_ip_list = session.execute(sql_ip).fetchall() + for tmp_ip in query_ip_list: + ip_pop=tmp_ip.values().pop() + available_ip_list.append(ip_pop) + return available_ip_list + +def merge_networks_for_unifiers(cluster_id, assigned_networks): + merged_by_cidr_vlan = {} + session = get_session() + for network_plane in assigned_networks: + network_plane_name = network_plane['name'] + network_plane_ip = network_plane.get('ip') + sql_network_plane_info="select networks.vlan_id,networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_network_plane_info = session.execute(sql_network_plane_info).fetchone() + vlan_id = query_network_plane_info.values()[0] + if not vlan_id: + vlan_id = '' + cidr = query_network_plane_info.values()[1] + index = (vlan_id, cidr) + if merged_by_cidr_vlan.has_key(index): + 
merged_by_cidr_vlan[index].append(network_plane) + else: + merged_by_cidr_vlan[index] = [network_plane] + + merged_networks = [] + for networks in merged_by_cidr_vlan.values(): + networks_name = [] + networks_ip = '' + for network in networks: + networks_name.append(network['name']) + if not networks_ip: + networks_ip = network.get('ip') + merged_networks.append({'name':','.join(networks_name),'ip':networks_ip}) + + return merged_networks + +def check_ip_exist_and_in_cidr_range(cluster_id,network_plane_name,network_plane_ip,occupied_network_ips,session): + equal_cidr_network_plane_id=[] + + check_ip_if_valid=_ip_checker_re(network_plane_ip) + if not check_ip_if_valid: + msg = "Error:The %s is not the right ip!"%network_plane_ip + LOG.error(msg) + raise exception.Forbidden(msg) + + sql_network_plane_cidr="select networks.cidr from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_network_plane_cidr = session.execute(sql_network_plane_cidr).fetchone() + network_cidr=query_network_plane_cidr.values().pop() + + check_ip_if_in_cidr=is_in_cidr_range(network_plane_ip, network_cidr) + if not check_ip_if_in_cidr: + msg = "Error:The ip %s is not in cidr %s range!"%(network_plane_ip,network_cidr) + raise exception.Forbidden(msg) + + available_ip_list=get_ip_with_equal_cidr(cluster_id,network_plane_name,session) + # allow different networks with same ip in the same interface + if (network_plane_ip in available_ip_list and + network_plane_ip not in occupied_network_ips): + msg = "Error:The IP %s is already exist."%network_plane_ip + LOG.error(msg) + raise exception.Forbidden(msg) + +def check_ip_ranges(ip_ranges_one,available_ip_list): + ip_range = copy.deepcopy(ip_ranges_one.values()) + ip_ranges_end=ip_range.pop() + ip_ranges_start=ip_range.pop() + inter_num=ip_into_int(ip_ranges_start) + ip_ranges_end_inter=ip_into_int(ip_ranges_end) + while True: + inter_tmp=inter_num + ip_tmp=inter_into_ip(inter_tmp) + if ip_tmp not in available_ip_list: + if inter_tmp > ip_ranges_end_inter: + msg = "warning:The IP address assigned by IP ranges is already insufficient." 
+ LOG.warn(msg) + break + else: + return [True,ip_tmp] + else: + inter_num=inter_tmp+1 + + return [False,None] + +def change_host_name(values, mangement_ip): + if mangement_ip: + values['name'] = "host-" + mangement_ip.replace('.','-') + +def according_to_cidr_distribution_ip(cluster_id,network_plane_name,session): + ip_ranges_cidr=[] + distribution_ip="" + + + sql_network_plane_info="select networks.id,networks.cidr,networks.network_type from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_network_plane_info = session.execute(sql_network_plane_info).fetchone() + network_id=query_network_plane_info.values()[0] + network_cidr=query_network_plane_info.values()[1] + network_type=query_network_plane_info.values()[2] + + if network_type not in ['PRIVATE','EXTERNAL','VXLAN']: + available_ip_list=get_ip_with_equal_cidr(cluster_id,network_plane_name,session) + sql_ip_ranges="select ip_ranges.start,end from ip_ranges where network_id='"+network_id +"'" + query_ip_ranges = session.execute(sql_ip_ranges).fetchall() + if query_ip_ranges: + for ip_ranges_one in query_ip_ranges: + check_ip_exist_list=check_ip_ranges(ip_ranges_one,available_ip_list) + if check_ip_exist_list[0]: + distribution_ip=check_ip_exist_list[1] + break + else: + continue + else: + ip_ranges_cidr=cidr_convert_ip_ranges(network_cidr) + ip_min_inter=ip_into_int(ip_ranges_cidr[0]) + ip_max_inter=ip_into_int(ip_ranges_cidr[1]) + while True: + distribution_ip=inter_into_ip(ip_min_inter+1) + if distribution_ip not in available_ip_list: + distribution_ip_inter=ip_into_int(distribution_ip) + if distribution_ip_inter < ip_max_inter: + break + else: + msg = "Error:The IP address assigned by CIDR is already insufficient." + LOG.error(msg) + break + else: + ip_min_inter=ip_min_inter+1 + return distribution_ip + +def add_assigned_networks_data(context,network,cluster_id,host_interface_ref,network_plane_names,network_plane_ip,session): + for network_plane_name in network_plane_names: + sql_network_plane_id="select networks.id,networks.network_type from networks where networks.name='"+network_plane_name+"' and networks.cluster_id='"+cluster_id+"' and networks.deleted=0" + query_network_plane_id = session.execute(sql_network_plane_id).fetchone() + network_id=query_network_plane_id.values()[0] + network_type=query_network_plane_id.values()[1] + + assigned_network = dict() + assigned_network['ip']=network_plane_ip + assigned_networks_ref = models.AssignedNetworks() + assigned_network['network_id'] = network_id + if host_interface_ref.type == 'bond': + assigned_network['mac'] = '' + else: + assigned_network['mac'] = network['mac'] + assigned_network['interface_id'] = host_interface_ref.id + if network_type == 'VXLAN' or network_type == 'PRIVATE': + assigned_network['vswitch_type'] = network.get('vswitch_type', 'ovs') + assigned_networks_ref.update(assigned_network) + _update_values(assigned_networks_ref, assigned_network) + assigned_networks_ref.save(session=session) + +def _according_interface_to_add_network_alias(context,interface_assigned_networks,values): + network_cidrs = [] + session = get_session() + network_query = session.query(models.Network).filter_by(deleted=False).filter_by(cluster_id=values['cluster']).all() + for network_info in network_query: + for network_name in interface_assigned_networks: + if network_name==network_info['name']: + network_cidrs.append(network_info['cidr']) + if len(set(network_cidrs)) == 1 and len(network_cidrs) > 1: + for 
sub_network_query in network_query: + if sub_network_query.name in interface_assigned_networks: + alias_name = '_'.join(interface_assigned_networks) + query_network = session.query(models.Network).filter_by(deleted=False).filter_by(id=sub_network_query.id) + query_network.update({"alias": alias_name}) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _host_update(context, values, host_id): + """ + Used internally by host_add and host_update + + :param context: Request context + :param values: A dict of attributes to set + :param host_id: If None, create the host, otherwise, find and update it + """ + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + role_values = dict() + host_interfaces_values = dict() + host_cluster_values = dict() + assigned_networks_ip=[] + management_ip="" + + session = get_session() + with session.begin(): + if host_id: + host_ref = _host_get(context, host_id, session=session) + else: + host_ref = models.Host() + cluster_host_ref = models.ClusterHost() + if host_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Host, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + host_cluster_values['updated_at'] = timeutils.utcnow() + + if host_id: + if values.has_key("os_version") and utils.is_uuid_like(values['os_version']): + host_ref.os_version_id = values['os_version'] + elif(values.has_key("os_version") and not utils.is_uuid_like(values['os_version'])): + host_ref.os_version_file = values['os_version'] + if values.has_key('cluster'): + delete_host_cluster(context, host_id, session) + host_cluster_values['host_id'] = host_id + host_cluster_values['cluster_id'] = values['cluster'] + if host_ref.status == 'init': + values['status'] = "in-cluster" + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.save(session=session) + if values.has_key('role'): + if values['role']: + delete_host_role(context, host_id, session) + for role_info in values['role']: + host_role_ref = models.HostRole() + role_values['host_id'] = host_ref.id + role_values['role_id'] = role_info + host_role_ref.update(role_values) + _update_values(host_role_ref, role_values) + host_role_ref.save(session=session) + values['status'] = "with-role" + else: + delete_host_role(context, host_id, session) + if (values.has_key('cluster') or + host_ref.status == 'with-role' or + host_ref.status == 'in-cluster'): + values['status'] = "in-cluster" + else: + values['status'] = "init" + if values.has_key('interfaces'): + host_interfaces=get_host_interface(context, host_id, None, session) + if host_interfaces: + for host_interface_info in host_interfaces: + delete_assigned_networks(context, host_interface_info.id, session) + delete_host_interface(context, host_id, session) + orig_keys = list(eval(values['interfaces'])) + for host_interface_info in orig_keys: + if host_interface_info.has_key('assigned_networks'): + _according_interface_to_add_network_alias(context, host_interface_info['assigned_networks'],values) + for network in orig_keys: + host_interfaces_values = network.copy() + if network.has_key('slaves'): + if len(network['slaves']) == 1: + host_interfaces_values['slave1'] = network['slaves'][0] + elif len(network['slaves']) == 2: + host_interfaces_values['slave1'] = network['slaves'][0] + 
host_interfaces_values['slave2'] = network['slaves'][1] + del host_interfaces_values['slaves'] + + if host_interfaces_values.has_key('assigned_networks'): + del host_interfaces_values['assigned_networks'] + if host_interfaces_values.has_key('is_deployment'): + if host_interfaces_values['is_deployment']=="True" or host_interfaces_values['is_deployment'] == True or host_interfaces_values['is_deployment'] == "true": + host_interfaces_values['is_deployment']=1 + else: + host_interfaces_values['is_deployment']=0 + if host_interfaces_values.has_key('id'): del host_interfaces_values['id'] + host_interface_ref = models.HostInterface() + host_interface_ref.update(host_interfaces_values) + host_interface_ref.host_id = host_id + _update_values(host_interface_ref, host_interfaces_values) + host_interface_ref.save(session=session) + + if values.has_key('cluster'): + if network.has_key('assigned_networks'): + occupied_network_ips = [] + merged_assigned_networks = merge_networks_for_unifiers(values['cluster'], + network['assigned_networks']) + for networks_plane in merged_assigned_networks: + network_plane_names = networks_plane['name'].split(',') + network_plane_ip = networks_plane.get('ip') + if network_plane_ip: + occupied_network_ips.append(network_plane_ip) + check_ip_exist_and_in_cidr_range(values['cluster'], + network_plane_names[0], + network_plane_ip, + occupied_network_ips, + session) + else: + network_plane_ip = according_to_cidr_distribution_ip(values['cluster'], network_plane_names[0], + session) + + if 'MANAGEMENT' in network_plane_names: + change_host_name(values, network_plane_ip) + add_assigned_networks_data(context,network,values['cluster'],host_interface_ref,network_plane_names,network_plane_ip,session) + + query = session.query(models.Host).filter_by(id=host_id) + keys = values.keys() + for k in keys: + if k not in host_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + host_ref = _host_get(context, host_id, session=session) + else: + if values.has_key('cluster'): + values['status'] = "in-cluster" + if values.has_key('role'): + values['status'] = "with-role" + host_ref.update(values) + _update_values(host_ref, values) + try: + host_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" 
+ % values['id']) + + if values.has_key('cluster'): + host_cluster_values['host_id'] = host_ref.id + host_cluster_values['cluster_id'] = values['cluster'] + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.save(session=session) + + if values.has_key('role'): + for role_info in values['role']: + host_role_ref = models.HostRole() + role_values['host_id'] = host_ref.id + role_values['role_id'] = role_info + host_role_ref.update(role_values) + _update_values(host_role_ref, role_values) + host_role_ref.save(session=session) + + if values.has_key("os_version") and utils.is_uuid_like(values['os_version']): + host_ref.os_version_id = values['os_version'] + elif(values.has_key("os_version") and not utils.is_uuid_like(values['os_version'])): + host_ref.os_version_file = values['os_version'] + + if values.has_key('interfaces'): + orig_keys = list(eval(values['interfaces'])) + for network in orig_keys: + host_interface_ref = models.HostInterface() + host_interfaces_values = network.copy() + if network.has_key('slaves'): + if len(network['slaves']) == 1: + host_interfaces_values['slave1'] = network['slaves'][0] + elif len(network['slaves']) == 2: + host_interfaces_values['slave1'] = network['slaves'][0] + host_interfaces_values['slave2'] = network['slaves'][1] + + if host_interfaces_values.has_key('is_deployment'): + if host_interfaces_values['is_deployment']=="True" or host_interfaces_values['is_deployment'] == True or host_interfaces_values['is_deployment'] == "true": + host_interfaces_values['is_deployment']=1 + else: + host_interfaces_values['is_deployment']=0 + host_interfaces_values['host_id'] = host_ref.id + host_interface_ref.update(host_interfaces_values) + _update_values(host_interface_ref, host_interfaces_values) + host_interface_ref.save(session=session) + + if values.has_key('cluster'): + if network.has_key('assigned_networks'): + occupied_network_ips = [] + merged_assigned_networks = merge_networks_for_unifiers(values['cluster'], + network['assigned_networks']) + for networks_plane in merged_assigned_networks: + network_plane_names = networks_plane['name'].split(',') + network_plane_ip = networks_plane.get('ip') + if network_plane_ip: + occupied_network_ips.append(network_plane_ip) + check_ip_exist_and_in_cidr_range(values['cluster'], + network_plane_names[0], + network_plane_ip, + occupied_network_ips, + session) + else: + network_plane_ip = according_to_cidr_distribution_ip(values['cluster'], network_plane_names[0], + session) + if 'MANAGEMENT' in network_plane_names: + change_host_name(values, network_plane_ip) + add_assigned_networks_data(context,network,values['cluster'],host_interface_ref,network_plane_names,network_plane_ip,session) + + query = session.query(models.Host).filter_by(id=host_ref.id) + if values.has_key('cluster'): + del values['cluster'] + if values.has_key('interfaces'): + del values['interfaces'] + if values.has_key('role'): + del values['role'] + if values.has_key('os_version'): + del values['os_version'] + updated = query.update(values, synchronize_session='fetch') + return host_get(context, host_ref.id) + +def _host_get(context, host_id, session=None, force_show_deleted=False): + """Get an host or raise if it does not exist.""" + _check_host_id(host_id) + session = session or get_session() + + try: + query = session.query(models.Host).filter_by(id=host_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = 
query.filter_by(deleted=False) + + host = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % host_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return host + +def host_get(context, host_id, session=None, force_show_deleted=False): + host = _host_get(context, host_id, session=session, + force_show_deleted=force_show_deleted) + return host + +def get_host_interface(context, host_id, mac=None, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.HostInterface).filter_by(host_id=host_id) + if mac: + query = query.filter_by(mac=mac) + + # filter out deleted images if context disallows it + if not force_show_deleted: + query = query.filter_by(deleted=False) + + host_interface = query.all() + + for interface in host_interface: + assigned_networks_list = [] + openvswitch_type = '' + assignnetwork_query = session.query(models.AssignedNetworks).filter_by(interface_id=interface.id).filter_by(deleted=False) + assignnetwork_list = assignnetwork_query.all() + for assignnetwork in assignnetwork_list: + query_network = session.query(models.Network).filter_by(id=assignnetwork.network_id).filter_by(deleted=False).first() + if query_network: + assigned_networks_info = {'name':query_network.name, + 'ip':assignnetwork.ip} + + assigned_networks_list.append(assigned_networks_info) + if query_network.network_type in ['VXLAN','PRIVATE']: + openvswitch_type = assignnetwork.vswitch_type + interface.assigned_networks = assigned_networks_list + interface.vswitch_type = openvswitch_type + + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % host_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return host_interface + +def get_host_interface_mac(context, mac, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.HostInterface).filter_by(mac=mac).filter_by(deleted=False) + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + host_interface = query.all() + for interface in host_interface: + list = [] + assignnetwork_query = session.query(models.AssignedNetworks).filter_by(interface_id=interface.id).filter_by(deleted=False) + assignnetwork_list = assignnetwork_query.all() + for assignnetwork in assignnetwork_list: + query_network_name = session.query(models.Network).filter_by(id=assignnetwork.network_id).filter_by(deleted=False).one() + list.append(query_network_name.name) + interface.assigned_networks = list + + except sa_orm.exc.NoResultFound: + msg = "No mac found with %s" % mac + LOG.debug(msg) + raise exception.NotFound(msg) + + return host_interface + +def get_assigned_network(context, interface_id, network_id, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(network_id=network_id).filter_by(deleted=False) + host_assigned_network = query.one() + except sa_orm.exc.NoResultFound: + msg = "No assigned_network found with interface %s and network %s" % (interface_id, network_id) + LOG.debug(msg) + raise exception.NotFound(msg) + + return host_assigned_network + +def delete_host_role(context, host_id, session=None): + session = session or get_session() + try: + query = session.query(models.HostRole).filter_by(host_id=host_id).filter_by(deleted=False) + host_roles = query.all() + for host_role in host_roles: + host_role.delete(session=session) + except 
sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % host_id + LOG.debug(msg) + raise exception.NotFound(msg) + +def delete_host_cluster(context, host_id, session=None): + session = session or get_session() + try: + query = session.query(models.ClusterHost).filter_by(host_id=host_id).filter_by(deleted=False) + host_clusters = query.all() + for host_cluster in host_clusters: + host_cluster.delete(session=session) + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % host_id + LOG.debug(msg) + raise exception.NotFound(msg) + +def delete_host_interface(context, host_id, session=None): + session = session or get_session() + try: + query = session.query(models.HostInterface).filter_by(host_id=host_id).filter_by(deleted=False) + host_interface = query.all() + for interface in host_interface: + interface.delete(session=session) + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % host_id + LOG.debug(msg) + raise exception.NotFound(msg) + +def _get_assigned_networks_by_network_id(context, network_id, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.AssignedNetworks).filter_by(network_id=network_id).filter_by(deleted=False) + assigned_networks = query.all() + except sa_orm.exc.NoResultFound: + msg = "No assigned networks found with network ID %s" % network_id + LOG.debug(msg) + raise exception.NotFound(msg) + return assigned_networks + +def get_assigned_networks(context, interface_id, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(deleted=False) + assigned_networks = query.all() + except sa_orm.exc.NoResultFound: + msg = "No assigned networks found with interface ID %s" % interface_id + LOG.debug(msg) + raise exception.NotFound(msg) + return assigned_networks + +def delete_assigned_networks(context, interface_id, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.AssignedNetworks).filter_by(interface_id=interface_id).filter_by(deleted=False) + assigned_networks = query.all() + for assigned_network in assigned_networks: + assigned_network.delete(session=session) + + except sa_orm.exc.NoResultFound: + msg = "No assigned networks found with interface ID %s" % interface_id + LOG.debug(msg) + raise exception.NotFound(msg) + +def get_os_version(context, version_id, session=None, force_show_deleted=False): + session = session or get_session() + try: + query = session.query(models.version).filter_by(id=version_id) + + # filter out deleted versions if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + os_version = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No os version found with ID %s" % version_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return os_version + +def host_add(context, values): + """Add a host from the values dictionary.""" + return _host_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def host_destroy(context, host_id): + """Destroy the host or raise if it does not exist.""" + session = get_session() + with session.begin(): + host_ref = _host_get(context, host_id, session=session) + host_interfaces=get_host_interface(context, host_id, None, session) + if host_interfaces: + for host_interface_info in host_interfaces: + delete_assigned_networks(context, host_interface_info.id) + 
delete_host_interface(context, host_id) + host_ref.delete(session=session) + + return host_ref + +def host_update(context, host_id, values): + """ + Set the given properties on a host and update it. + + :raises NotFound if host does not exist. + """ + return _host_update(context, values, host_id) + +def discover_host_add(context, values): + """Add a discover host from the values dictionary.""" + return _discover_host_update(context, values, None) + +def discover_host_update(context, discover_host_id, values): + """ + Set the given properties on a discover host and update it. + + :raises NotFound if discover host does not exist. + """ + return _discover_host_update(context, values, discover_host_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _discover_host_update(context, values, discover_host_id): + """ + Used internally by discover_host_add and discover_host_update + + :param context: Request context + :param values: A dict of attributes to set + :param discover_host_id: If None, create the discover host, otherwise, find and update it + """ + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + session = get_session() + with session.begin(): + if discover_host_id: + discover_host_ref = _discover_host_get(context, discover_host_id, session=session) + + else: + discover_host_ref = models.DiscoverHost() + + if discover_host_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.DiscoverHost, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modified + values['updated_at'] = timeutils.utcnow() + + if discover_host_id: + if values.get('id', None): del values['id'] + discover_host_ref.update(values) + _update_values(discover_host_ref, values) + try: + discover_host_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + else: + discover_host_ref.update(values) + _update_values(discover_host_ref, values) + try: + discover_host_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" 
+ % values['id']) + + return discover_host_get(context, discover_host_ref.id) + +def _discover_host_get(context, discover_host_id, session=None, force_show_deleted=False): + """Get an host or raise if it does not exist.""" + + session = session or get_session() + try: + query = session.query(models.DiscoverHost).filter_by(id=discover_host_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + discover_host = query.one() + return discover_host + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % discover_host_id + LOG.debug(msg) + raise exception.NotFound(msg) + + +def discover_host_get(context, discover_host_id, session=None, force_show_deleted=False): + discover_host = _discover_host_get(context, discover_host_id, session=session, + force_show_deleted=force_show_deleted) + return discover_host + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def discover_host_destroy(context, host_id): + """Destroy the discover host or raise if it does not exist.""" + session = get_session() + with session.begin(): + host_ref = _discover_host_get(context, host_id, session=session) + host_ref.delete(session=session) + return host_ref + +def discover_host_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_discover_host = None + if marker is not None: + marker_discover_host = _discover_host_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.DiscoverHost).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.DiscoverHost, limit, + sort_key, + marker=marker_discover_host, + sort_dir=None, + sort_dirs=sort_dir) + + discover_hosts = [] + for discover_host in query.all(): + discover_host = discover_host.to_dict() + discover_hosts.append(discover_host) + return discover_hosts + +def get_discover_host_detail(context, discover_host_id, session=None, force_show_deleted=False): + ''' + ''' + session = session or get_session() + try: + query = session.query(models.DiscoverHost).filter_by(id=discover_host_id, deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + discover_host = query.one() + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % discover_host_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return discover_host + +def _check_cluster_id(cluster_id): + """ + check if the given project id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the project id + length is longer than the defined length in database model. 
+ :param image_id: The id of the project we want to check + :return: Raise NoFound exception if given project id is invalid + """ + if (cluster_id and + len(cluster_id) > models.Cluster.id.property.columns[0].type.length): + raise exception.NotFound() + +def delete_cluster_host(context, cluster_id, session=None): + session = session or get_session() + try: + query = session.query(models.ClusterHost).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + cluster_host = query.all() + for host in cluster_host: + host.delete(session=session) + except sa_orm.exc.NoResultFound: + msg = "No host found with ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _cluster_update(context, values, cluster_id): + """ + Used internally by cluster_add and project_update + + :param context: Request context + :param values: A dict of attributes to set + :param cluster_id: If None, create the project, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + if not values: + raise exception.Invalid() + + values = values.copy() + hosts_values = dict() + interfaces_values = dict() + + session = get_session() + with session.begin(): + if cluster_id: + project_ref = _cluster_get(context, cluster_id, session=session) + else: + project_ref = models.Cluster() + host_ref = models.Host() + + # parse the range params + if values.has_key('networking_parameters'): + network_params = eval(values['networking_parameters']) + if network_params: + if network_params.has_key('gre_id_range') and len(network_params['gre_id_range'])>1: + values['gre_id_start'] = network_params['gre_id_range'][0] + values['gre_id_end'] = network_params['gre_id_range'][1] + if network_params.has_key('vlan_range') and len(network_params['vlan_range'])>1: + values['vlan_start'] = network_params['vlan_range'][0] + values['vlan_end'] = network_params['vlan_range'][1] + if network_params.has_key('vni_range') and len(network_params['vni_range'])>1: + values['vni_start'] = network_params['vni_range'][0] + values['vni_end'] = network_params['vni_range'][1] + values['net_l23_provider'] = network_params.get('net_l23_provider', None) + values['base_mac'] = network_params.get('base_mac', None) + values['segmentation_type'] = network_params.get('segmentation_type', None) + values['public_vip'] = network_params.get('public_vip', None) + + # save host info + if values.has_key('nodes'): + for host_id in eval(values['nodes']): + host = host_get(context, host_id, session=None, force_show_deleted=False) + host.status = "in-cluster" + host.save(session=session) + + if cluster_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Cluster, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if cluster_id: + + for cluster in session.query(models.Cluster).filter_by(deleted=False).all(): + if cluster['id'] == cluster_id: + continue + if cluster['name'] == values['name']: + msg = "cluster name is repeated!" 
+ LOG.debug(msg) + raise exception.Invalid(msg) + if values.has_key('nodes'): + delete_cluster_host(context, cluster_id, session) + for host_id in eval(values['nodes']): + cluster_host_ref = models.ClusterHost() + hosts_values['cluster_id'] = project_ref.id + hosts_values['host_id'] = host_id + _update_values(cluster_host_ref, hosts_values) + cluster_host_ref.save(session=session) + + if values.has_key('networks'): + for interface_id in eval(values['networks']): + query = session.query(models.Network).filter_by(id=interface_id) + interfaces_values['cluster_id'] = project_ref.id + interfaces_values['updated_at'] = timeutils.utcnow() + updated = query.update(interfaces_values, synchronize_session='fetch') + + # update --------------------------------------------------------------------- + # deal with logic_network + if values.has_key('logic_networks'): + query = session.query(models.Cluster).filter_by(id = cluster_id) + if not query: + raise exception.NotFound("Cluster not found,id=%s" % cluster_id) + # insert data to logic_network tables + + logic_networks = eval(values['logic_networks']) + if logic_networks: + _cluster_add_logic_network( + logic_networks = logic_networks, + cluster_id = project_ref.id, + session = session, + status = "update") + #---start--delete all logic_networks if values['logic_networks'] == []--- + else: + logic_networks_query = session.query(models.LogicNetwork).\ + filter_by(cluster_id = cluster_id, deleted = 0) + if logic_networks_query: + logic_networks_query.update( + {"deleted" : True, "deleted_at" : timeutils.utcnow()} + ) + #------------------------end---------------------------------------------- + + # deal routers + if values.has_key('routers'): + routers = eval(values['routers']) + if routers: + _cluster_add_routers( + routers = routers, + cluster_id = project_ref.id, + session = session, + status = "update" + ) + #----delete all routers if values['routers'] == []--- + else: + router_ref = \ + session.query(models.Router).filter_by( + cluster_id = cluster_id, deleted=False) + if router_ref: + router_ref.update( + {"deleted" : True, "deleted_at" : timeutils.utcnow()} + ) + #------------------------end-------------------------------- + # update --------------------------------------------------------------------- + + query = session.query(models.Cluster).filter_by(id=cluster_id) + + # Validate fields for projects table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in project_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + project_ref = _cluster_get(context, cluster_id, session=session) + else: + for cluster in session.query(models.Cluster).filter_by(deleted=False).all(): + if cluster['name'] == values['name']: + msg = "cluster name is repeated!" + LOG.debug(msg) + raise exception.Forbidden(msg) + project_ref.update(values) + _update_values(project_ref, values) + try: + project_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("cluster ID %s already exists!" 
+ % values['id']) + if values.has_key('nodes'): + for host_id in eval(values['nodes']): + cluster_host_ref = models.ClusterHost() + hosts_values['cluster_id'] = project_ref.id + hosts_values['host_id'] = host_id + _update_values(cluster_host_ref, hosts_values) + cluster_host_ref.save(session=session) + + if values.has_key('networks'): + for interface_id in eval(values['networks']): + query = session.query(models.Network).filter_by(id=interface_id) + interfaces_values['cluster_id'] = project_ref.id + interfaces_values['updated_at'] = timeutils.utcnow() + updated = query.update(interfaces_values, synchronize_session='fetch') + + network_query = session.query(models.Network).filter_by(type="template").filter_by(deleted=False).all() + for sub_network_query in network_query: + network_ref = models.Network() + network_ref.cluster_id = project_ref.id + network_ref.name = sub_network_query.name + network_ref.description = sub_network_query.description + network_ref.cidr = sub_network_query.cidr + network_ref.network_type = sub_network_query.network_type + network_ref.ml2_type = sub_network_query.ml2_type + network_ref.capability = sub_network_query.capability + network_ref.save(session=session) + + # add --------------------------------------------------------------------- + # deal logic_network infos + if values.has_key('logic_networks'): + # insert data to logic_network tables + logic_networks = eval(values['logic_networks']) + if logic_networks: + _cluster_add_logic_network( + logic_networks = logic_networks, + cluster_id = project_ref.id, + session = session, + status = "add") + + # deal routers + if values.has_key('routers'): + routers = eval(values['routers']) + if routers: + _cluster_add_routers( + routers = routers, + cluster_id = project_ref.id, + session = session, + status = "add" + ) + # add --------------------------------------------------------------------- + + role_query = session.query(models.Role).filter_by(type="template",cluster_id=None).filter_by(deleted=False).all() + for sub_role_query in role_query: + role_ref = models.Role() + role_ref.cluster_id = project_ref.id + role_ref.name = sub_role_query.name + role_ref.description = sub_role_query.description + role_ref.status = sub_role_query.status + role_ref.type = "default" + role_ref.deployment_backend = sub_role_query.deployment_backend + configset_ref = models.ConfigSet() + configset_ref.name = project_ref.name + role_ref.name + configset_ref.description = project_ref.name + role_ref.name + configset_ref.save(session=session) + role_ref.config_set_id = configset_ref.id + role_ref.save(session=session) + service_role_query = session.query(models.ServiceRole).filter_by(role_id=sub_role_query.id).filter_by(deleted=False).all() + for sub_service_role_query in service_role_query: + service_role_ref = models.ServiceRole() + service_role_ref.role_id = role_ref.id + service_role_ref.service_id = sub_service_role_query.service_id + service_role_ref.save(session=session) + + return _cluster_get(context, project_ref.id) + +def _cluster_add_routers(**params): + session = params['session'] or get_session() + if 0 == cmp(params['status'], "update"): + router_ref = \ + session.query(models.Router).filter_by(cluster_id = params['cluster_id']) + if router_ref.all(): + router_ref.update( + {"deleted" : True, "deleted_at" : timeutils.utcnow()} + ) + + logic_networks_query_all = [] + logic_networks_query = session.query(models.LogicNetwork).\ + filter_by(cluster_id = params['cluster_id'], deleted = 0) + if logic_networks_query: + 
logic_networks_query_all = logic_networks_query.all() + + for router in params['routers']: + # inser data to router tables + router_values = {} + router_ref = models.Router() + router_values['name'] = router.get('name', None) + router_values['description'] = router.get('description', None) + router_values['cluster_id'] = params['cluster_id'] + external_name = router.get('external_logic_network', None) + if external_name: + logic_network_query = \ + session.query(models.LogicNetwork).filter_by(name = external_name).filter_by(deleted=False).first() + if logic_network_query: + router_values['external_logic_network'] = external_name + + _update_values(router_ref, router_values) + router_ref.save(session) # submit logic_network info to affair + + for internal_subnet_name in router.get('subnets', None): + for logic_netwrok in logic_networks_query_all: + subnet_query = \ + session.query(models.Subnet).filter_by(name = internal_subnet_name, + deleted=False, + logic_network_id = logic_netwrok.id) + if subnet_query.first(): + subnet_query.update( + {"router_id" : router_ref.id, "updated_at" : timeutils.utcnow()} + ) + +def _cluster_add_logic_network(**params): + session = params['session']or get_session() + logic_networks_query_all = [] + if "update" == params['status']: + logic_networks_query = session.query(models.LogicNetwork).\ + filter_by(cluster_id = params['cluster_id'], deleted = 0) + if logic_networks_query: + logic_networks_query_all = logic_networks_query.all() + logic_networks_query.update( + {"deleted" : True, "deleted_at" : timeutils.utcnow()} + ) + + for logic_network in params['logic_networks']: + # insert data to subnet table + logic_network_values = {} + logic_network_values['name'] = logic_network.get('name', None) + logic_network_values['type'] = logic_network.get('type', None) + logic_network_values['segmentation_type'] = logic_network.get('segmentation_type', None) + logic_network_values['segmentation_id'] = logic_network.get('segmentation_id', None) + logic_network_values['shared'] = logic_network.get('shared', None) + if logic_network.get('physnet_name', None): + query_list = session.query(models.Network).\ + filter_by(cluster_id = params['cluster_id']).filter_by(deleted=False).all() + if (query_list and [valid_physnet + for valid_physnet in query_list + if logic_network['physnet_name'] == valid_physnet.name]) or \ + logic_network.get('segmentation_type', None) == "flat": + logic_network_values['physnet_name'] = logic_network['physnet_name'] + logic_network_values['cluster_id'] = params['cluster_id'] + + logic_network_ref = models.LogicNetwork() + _update_values(logic_network_ref, logic_network_values) + logic_network_ref.save(session) # submit logic_network info to affair + + if logic_network.get('subnets', None) : + _cluster_add_subnet( + subnets = logic_network['subnets'] , + logic_networks_query_all = logic_networks_query_all, + logic_network_id = logic_network_ref.id, + session = session, + status = params['status']) + +def _cluster_add_subnet(**params): + session = params['session'] or get_session() + subnets_query_all = [] + if "update" == params['status']: + for logic_network_query in params['logic_networks_query_all']: + subnet_query = session.query(models.Subnet).\ + filter_by(logic_network_id = logic_network_query.id, deleted = 0) + if subnet_query: + subnets_query_all += subnet_query.all() + subnet_query.update({ + "deleted" : True, "deleted_at" : timeutils.utcnow()} + ) + + for subnet in params['subnets']: + subnet_values = {} + subnet_values['cidr'] = 
subnet.get('cidr', None) + subnet_values['gateway'] = subnet.get('gateway', None) + subnet_values['name'] = subnet.get('name', None) + subnet_values['logic_network_id'] = params['logic_network_id'] + + subnet_ref = models.Subnet() + _update_values(subnet_ref, subnet_values) + subnet_ref.save(session) + + if subnet.get('floating_ranges', None): + _cluster_add_floating_range( + values = subnet['floating_ranges'], + subnets_query_all = subnets_query_all, + subnet_id = subnet_ref.id, + session = session, + status = params['status']) + + if subnet.get('dns_nameservers', None): + _cluster_add_dns_nameservers( + values = subnet['dns_nameservers'], + subnets_query_all = subnets_query_all, + subnet_id = subnet_ref.id, + session = session, + status = params['status']) + +def _cluster_add_floating_range(**params): + session = params['session'] or get_session() + floating_range_values = dict() + if params['status'] == "update": + for subnet_query in params['subnets_query_all']: + query = session.query(models.FloatIpRange).\ + filter_by(subnet_id = subnet_query.id).filter_by(deleted=False) + if query.first() is not None: + floating_range_values['updated_at'] = timeutils.utcnow() + query.delete(synchronize_session='fetch') + + if params['values']: + for floating_range in params['values']: + float_ip_range_ref = models.FloatIpRange() + if len(floating_range) >1: + floating_range_values['start'] = floating_range[0] + floating_range_values['end'] = floating_range[1] + floating_range_values['subnet_id'] = params['subnet_id'] + float_ip_range_ref.update(floating_range_values) + _update_values(float_ip_range_ref, floating_range_values) + float_ip_range_ref.save(session = session) + +def _cluster_add_dns_nameservers(**params): + session = params['session'] or get_session() + dns_nameservers_values = dict() + if params['status'] == "update": + for subnet_query in params['subnets_query_all']: + query = session.query(models.DnsNameservers).\ + filter_by(subnet_id=subnet_query.id).filter_by(deleted=False) + if query.first() is not None: + dns_nameservers_values['updated_at'] = timeutils.utcnow() + query.delete(synchronize_session='fetch') + + if params['values']: + for dns_nameservers in params['values']: + dns_Nameservers_ref = models.DnsNameservers() + dns_nameservers_values['dns'] = dns_nameservers + dns_nameservers_values['subnet_id'] = params['subnet_id'] + session.query(models.DnsNameservers).filter_by(subnet_id = params['subnet_id']).filter_by(deleted=False) + dns_Nameservers_ref.update(dns_nameservers_values) + _update_values(dns_Nameservers_ref, dns_nameservers_values) + dns_Nameservers_ref.save(session=session) + +def _check_component_id(component_id): + """ + check if the given component id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the component id + length is longer than the defined length in database model. 
+ :param image_id: The id of the component we want to check + :return: Raise NoFound exception if given component id is invalid + """ + if (component_id and + len(component_id) > models.Component.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _component_update(context, values, component_id): + """ + Used internally by component_add and component_update + + :param context: Request context + :param values: A dict of attributes to set + :param component_id: If None, create the component, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + session = get_session() + with session.begin(): + if component_id: + component_ref = _component_get(context, component_id, session=session) + else: + component_ref = models.Component() + #if host_ref.id is None: + # host_ref.id = str(uuid.uuid4()) + if component_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Component, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if component_id: + query = session.query(models.Component).filter_by(id=component_id).filter_by(deleted=False) + + # Validate fields for components table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in component_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + component_ref = _component_get(context, component_id, session=session) + else: + #print "1 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + #values["id"] = host_ref.id + component_ref.update(values) + # Validate the attributes before we go any further. From my + # investigation, the @validates decorator does not validate + # on new records, only on existing records, which is, well, + # idiotic. + _update_values(component_ref, values) + #print "2 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + try: + component_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("component ID %s already exists!" + % values['id']) + + return component_get(context, component_ref.id) + +def component_update(context, component_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if component does not exist. 
+ """ + return _component_update(context, values, component_id) + +def _cluster_get(context, cluster_id, session=None, force_show_deleted=False): + """Get an project or raise if it does not exist.""" + _check_cluster_id(cluster_id) + session = session or get_session() + + try: + query = session.query(models.Cluster).filter_by(id=cluster_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted: + query = query.filter_by(deleted=False) + + project = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No cluster found with ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return project + +def get_logic_network(context, cluster_id, session=None, force_show_deleted=False): + """Get an logic network or raise if it does not exist.""" + session = session or get_session() + try: + query = session.query(models.LogicNetwork).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + logic_networks = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No logic network found with cluster ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + for logic_network in list(logic_networks): + #subnet_list = [] + subnet = _get_subnet(context, logic_network['id'], None,session) + #subnet_list.append(subnet) + logic_network['subnets'] = subnet + + return logic_networks + +def _get_subnet(context, logic_network_id=None, router_id=None, session=None, force_show_deleted=False): + """Get an subnet or raise if it does not exist.""" + session = session or get_session() + try: + if logic_network_id and router_id is None: + query = session.query(models.Subnet).filter_by(logic_network_id=logic_network_id).filter_by(deleted=False) + elif logic_network_id is None and router_id is not None: + query = session.query(models.Subnet).filter_by(router_id=router_id) + query = query.filter_by(deleted=False) + return query.all() + else: + query = session.query(models.Subnet) + + if not force_show_deleted: + query = query.filter_by(deleted=False) + + subnets = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No Float Ip Range found with cluster ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + + ip_into_int = lambda ip: reduce(lambda x, y: (x<<8)+y, map(int, ip.split('.'))) + int_to_ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)]) + for subnet in subnets: + dns_nameservers = _dns_nameservers_get(context, subnet['id'], session) + subnet['dns_nameservers'] = \ + [dns_server['dns'] for dns_server in dns_nameservers] if dns_nameservers else [] + subnet['dns_nameservers'].sort() + + float_ip_range = _float_ip_range_get(context, subnet['id'], session) + if float_ip_range and len(float_ip_range) > 1: + int_ip_range = [[ip_into_int(float_ip[0]), ip_into_int(float_ip[1])] for float_ip in float_ip_range] + int_ip_range = sorted(int_ip_range, key=lambda x : x[0]) + float_ip_range = [[int_to_ip(int_ip[0]), int_to_ip(int_ip[1])] for int_ip in int_ip_range] + subnet['floating_ranges'] = float_ip_range if float_ip_range else [] + + return subnets + +def _float_ip_range_get(context, subnet_id, session=None, force_show_deleted=False): + """Get an project or raise if it does not exist.""" + session = session or get_session() + + try: + query = session.query(models.FloatIpRange).filter_by(subnet_id=subnet_id).filter_by(deleted=False) + # 
filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + floatIpRange = query.all() + + float_ip_ranges_list = [] + for float_ip_range in list(floatIpRange): + float_ip_range_list = [] + float_ip_range_list.append(float_ip_range.start) + float_ip_range_list.append(float_ip_range.end) + float_ip_ranges_list.append(float_ip_range_list) + + except sa_orm.exc.NoResultFound: + msg = "float ip range no found with subnet ID %s" % subnet_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return float_ip_ranges_list + +def _dns_nameservers_get(context, subnet_id, session=None, force_show_deleted=False): + """Get an nameservers or raise if it does not exist.""" + session = session or get_session() + + try: + query = session.query(models.DnsNameservers).filter_by(subnet_id=subnet_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + dns_nameservers = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No dns nameservers found with subnet ID %s" % subnet_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return dns_nameservers + +def router_get(context, cluster_id, session=None, force_show_deleted=False): + """Get an routers or raise if it does not exist.""" + session = session or get_session() + try: + query = session.query(models.Router).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + if not force_show_deleted: + query = query.filter_by(deleted=False) + + routers = query.all() + routers_list = [] + for router in routers: + subnets = [] + router_id = router['id'] + subnets = _get_subnet(context, None,router_id, session) + router['subnets'] = [subnet.name for subnet in subnets] + routers_list.append(router) + + except sa_orm.exc.NoResultFound: + msg = "No routers found with cluster ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return routers_list + +def cluster_get(context, cluster_id, session=None, force_show_deleted=False): + Cluster = _cluster_get(context, cluster_id, session=session, + force_show_deleted=force_show_deleted) + return Cluster + +def cluster_add(context, values): + """Add an cluster from the values dictionary.""" + return _cluster_update(context, values, None) + +def cluster_update(context, cluster_id, values): + """ + Set the given properties on an cluster and update it. + + :raises NotFound if cluster does not exist. 
+ """ + return _cluster_update(context, values, cluster_id) + +def get_cluster_host(context, cluster_id, session=None, force_show_deleted=False): + _check_cluster_id(cluster_id) + session = session or get_session() + try: + query = session.query(models.ClusterHost).filter_by(cluster_id = cluster_id, deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + cluster_hosts = query.all() + cluster_hosts_id = [item.host_id for item in cluster_hosts] + + except sa_orm.exc.NoResultFound: + msg = "No cluster found with ID %s" % cluster_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return cluster_hosts_id + + +def _component_get(context, component_id, session=None, force_show_deleted=False): + """Get an component or raise if it does not exist.""" + _check_component_id(component_id) + session = session or get_session() + + try: + query = session.query(models.Component).filter_by(id=component_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + component = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No component found with ID %s" % component_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return component + +def component_get(context, component_id, session=None, force_show_deleted=False): + component = _component_get(context, component_id, session=session, + force_show_deleted=force_show_deleted) + return component + +def component_add(context, values): + """Add an component from the values dictionary.""" + return _component_update(context, values, None) + +def _component_services_get(context, component_id, session=None): + """Get an service or raise if it does not exist.""" + _check_component_id(component_id) + session = session or get_session() + try: + query = session.query(models.Service).filter_by(component_id=component_id).filter_by(deleted=False) + services = query.all() + except sa_orm.exc.NoResultFound: + msg = "No service found with ID %s" % service_id + LOG.debug(msg) + raise exception.NotFound(msg) + return services + +def _services_used_in_cluster(context, services_id, session=None): + session = session or get_session() + services_used = set() + for service_id in services_id: + _check_service_id(service_id) + try: + query = session.query(models.ServiceRole).filter_by(service_id=service_id).filter_by(deleted=False) + service_roles = query.all() + for service_role in service_roles: + role_ref = _role_get(context, service_role.role_id, session=session) + if role_ref.type != 'template': + services_used.add(service_id) + except sa_orm.exc.NoResultFound: + msg = "No service role found with ID %s" % service_id + LOG.debug(msg) + raise exception.NotFound(msg) + return services_used + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def component_destroy(context, component_id): + """Destroy the component or raise if it does not exist.""" + session = get_session() + + services = _component_services_get(context, component_id, session) + services_id = [service.id for service in services] + + services_used = _services_used_in_cluster(context, services_id, session) + if services_used: + msg = "Services '%s' of component '%s' is using in cluster" % (','.join(services_used),component_id) + raise exception.DeleteConstrainted(msg) + + for service_id in services_id: + 
_service_destroy(context, service_id) + + with session.begin(): + component_ref = _component_get(context, component_id, session=session) + component_ref.delete(session=session) + + return component_ref + +def _check_service_id(service_id): + """ + check if the given service id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the service id + length is longer than the defined length in database model. + :param image_id: The id of the service we want to check + :return: Raise NoFound exception if given service id is invalid + """ + if (service_id and + len(service_id) > models.Service.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _service_update(context, values, service_id): + """ + Used internally by service_add and service_update + + :param context: Request context + :param values: A dict of attributes to set + :param service_id: If None, create the service, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + session = get_session() + with session.begin(): + if service_id: + service_ref = _service_get(context, service_id, session=session) + else: + service_ref = models.Service() + #if host_ref.id is None: + # host_ref.id = str(uuid.uuid4()) + if service_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Service, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if service_id: + query = session.query(models.Service).filter_by(id=service_id).filter_by(deleted=False) + + # Validate fields for services table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in service_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + service_ref = _service_get(context, service_id, session=session) + else: + #print "1 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + #values["id"] = host_ref.id + service_ref.update(values) + # Validate the attributes before we go any further. From my + # investigation, the @validates decorator does not validate + # on new records, only on existing records, which is, well, + # idiotic. + _update_values(service_ref, values) + #print "2 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + try: + service_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("service ID %s already exists!" + % values['id']) + + return service_get(context, service_ref.id) + +def service_update(context, service_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if service does not exist. 
+ """ + return _service_update(context, values, service_id) + +def _service_get(context, service_id, session=None, force_show_deleted=False): + """Get an service or raise if it does not exist.""" + _check_service_id(service_id) + session = session or get_session() + + try: + query = session.query(models.Service).filter_by(id=service_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + service = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No service found with ID %s" % service_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return service + +def service_get(context, service_id, session=None, force_show_deleted=False): + service = _service_get(context, service_id, session=session, + force_show_deleted=force_show_deleted) + return service + +def service_add(context, values): + """Add an service from the values dictionary.""" + return _service_update(context, values, None) + +def _delete_service_role(context, service_id, session=None): + _check_service_id(service_id) + session = session or get_session() + try: + query = session.query(models.ServiceRole).filter_by(service_id=service_id).filter_by(deleted=False) + service_roles = query.all() + for service_role in service_roles: + service_role.delete(session=session) + except sa_orm.exc.NoResultFound: + msg = "No service role found with ID %s" % service_id + LOG.debug(msg) + raise exception.NotFound(msg) + +def _service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + session = get_session() + _delete_service_role(context, service_id, session) + with session.begin(): + service_ref = _service_get(context, service_id, session=session) + service_ref.delete(session=session) + return service_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def service_destroy(context, service_id): + """Destroy the service or raise if it does not exist.""" + session = get_session() + + services_used = _services_used_in_cluster(context, [service_id], session) + if services_used: + msg = "The service %s usd in cluster" % ','.join(services_used) + raise exception.DeleteConstrainted(msg) + + return _service_destroy(context, service_id) + +def service_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. 
If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_service = None + if marker is not None: + marker_service = _service_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.Service).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Service, limit, + sort_key, + marker=marker_service, + sort_dir=None, + sort_dirs=sort_dir) + + services = [] + for service in query.all(): + service_dict = service.to_dict() + services.append(service_dict) + return services + + +def _role_host_member_get(context, session, member_id=None, host_id=None): + """Fetch an HostRole entity by id.""" + query = session.query(models.HostRole) + + if host_id is not None and member_id is not None: + query = query.filter(models.HostRole.role_id == member_id).filter(models.HostRole.host_id == host_id).filter(models.HostRole.deleted == 0) + elif member_id is not None and host_id is None: + query = query.filter(models.HostRole.role_id == member_id).filter(models.HostRole.deleted == 0) + elif host_id is not None and member_id is None: + query = query.filter(models.HostRole.host_id == host_id).filter(models.HostRole.deleted == 0) + return query.all() + +def role_host_member_get(context, member_id=None, host_id=None): + session = get_session() + nodes_ref = _role_host_member_get(context, session, member_id, host_id) + return nodes_ref + +def _set_host_status(context, host_id, status): + session = get_session() + host_ref = _host_get(context, host_id, session=session) + host_ref.status = status + host_ref.save(session=session) + +def role_host_member_delete(context, member_id=None, host_id=None): + """Delete an HostRole object.""" + session = get_session() + nodes_ref = _role_host_member_get(context, session, member_id, host_id) + hosts_id = set() + for node_ref in nodes_ref: + hosts_id.add(node_ref.host_id) + node_ref.delete(session=session) + for host_id in hosts_id: + nodes_ref = _role_host_member_get(context, session, host_id=host_id) + if not nodes_ref: + _set_host_status(context, host_id, "in-cluster") + +def _role_service_member_get(context, session, member_id): + """Fetch an ServiceRole entity by id.""" + query = session.query(models.ServiceRole) + query = query.filter(models.ServiceRole.role_id == member_id).filter(models.ServiceRole.deleted == 0) + + return query.all() + +def role_service_member_delete(context, member_id): + """Delete an ServiceRole object.""" + session = get_session() + services_ref = _role_service_member_get(context, session, member_id) + for service_ref in services_ref: + if service_ref.role_id==member_id: + service_ref.delete(session=session) + +def _check_role_id(role_id): + """ + check if the given role id is 
valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the role id + length is longer than the defined length in database model. + :param image_id: The id of the role we want to check + :return: Raise NoFound exception if given role id is invalid + """ + if (role_id and + len(role_id) > models.Role.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _role_update(context, values, role_id): + """ + Used internally by role_add and role_update + + :param context: Request context + :param values: A dict of attributes to set + :param role_id: If None, create the role, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + hosts_values = dict() + services_values = dict() + host_cluster_values = dict() + session = get_session() + with session.begin(): + if role_id: + role_ref = _role_get(context, role_id, session=session) + else: + role_ref = models.Role() + + #if host_ref.id is None: + # host_ref.id = str(uuid.uuid4()) + if role_id: + # Don't drop created_at if we're passing it in... + if values.has_key('nodes'): + orig_hosts = list(eval(values['nodes'])) + nodes_ref = _role_host_member_get(context, session, role_id) + if nodes_ref: + for host_id in orig_hosts: + _set_host_status(context, host_id, "with-role") + host_flag=0 + for node_ref in nodes_ref: + if node_ref.host_id==host_id: + host_flag=1 + break + + if host_flag==0: # host without this role, add role to this host, should add host to cluster at the same time + role_host_ref = models.HostRole() + hosts_values['role_id'] = role_id + hosts_values['host_id'] = host_id + _update_values(role_host_ref, hosts_values) + role_host_ref.save(session=session) + cluster_id = None + if values.has_key('cluster_id'): + cluster_id = values['cluster_id'] + else: + role_def_tmp = session.query(models.Role).filter_by(id=role_id, deleted=False).one() + if role_def_tmp: + cluster_id = role_def_tmp.cluster_id + if cluster_id: + cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) + #check this host existed in the cluster or not + if host_id not in cluster_hosts_id: + cluster_host_ref = models.ClusterHost() + host_cluster_values['updated_at'] = timeutils.utcnow() + host_cluster_values['host_id'] = host_id + host_cluster_values['cluster_id'] = cluster_id + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.save(session=session) + else: #new host + for host_id in orig_hosts: + _set_host_status(context, host_id, "with-role") + role_host_ref = models.HostRole() + hosts_values['role_id'] = role_id + hosts_values['host_id'] = host_id + _update_values(role_host_ref, hosts_values) + role_host_ref.save(session=session) + cluster_id = None + if values.has_key('cluster_id'): + cluster_id = values['cluster_id'] + else: + role_def_tmp = session.query(models.Role).filter_by(id=role_id, deleted=False).one() + if role_def_tmp: + cluster_id = role_def_tmp.cluster_id + if cluster_id: + cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) + if host_id not in cluster_hosts_id: + cluster_host_ref = models.ClusterHost() + host_cluster_values['updated_at'] = timeutils.utcnow() + 
host_cluster_values['host_id'] = host_id + host_cluster_values['cluster_id'] = cluster_id + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.save(session=session) + + if values.has_key('services'): + orig_services = list(eval(values['services'])) + services_ref = _role_service_member_get(context, session, role_id) + + if services_ref: + for service_id in orig_services: + service_flag=0 + for service_ref in services_ref: + if service_ref.service_id==service_id: + service_flag=1 + break + + if service_flag==0: + role_service_ref = models.ServiceRole() + services_values['role_id'] = role_id + services_values['service_id'] = service_id + _update_values(role_service_ref, services_values) + role_service_ref.save(session=session) + else: + for service_id in orig_services: + role_service_ref = models.ServiceRole() + services_values['role_id'] = role_id + services_values['service_id'] = service_id + _update_values(role_service_ref, services_values) + role_service_ref.save(session=session) + + _drop_protected_attrs(models.Role, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if role_id: + query = session.query(models.Role).filter_by(id=role_id).filter_by(deleted=False) + + # Validate fields for roles table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in role_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + role_ref = _role_get(context, role_id, session=session) + else: + #print "1 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + #values["id"] = host_ref.id + role_ref.update(values) + # Validate the attributes before we go any further. From my + # investigation, the @validates decorator does not validate + # on new records, only on existing records, which is, well, + # idiotic. + _update_values(role_ref, values) + #print "2 host_ref.id:%s" % host_ref.id + #print host_ref.created_at + #print values + try: + role_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("role ID %s already exists!" 
+ % values['id']) + + if values.has_key('nodes'): + orig_hosts = list(eval(values['nodes'])) + cluster_id = None + if values.has_key('cluster_id') and values['cluster_id']: + cluster_id = values['cluster_id'] + + for host_id in orig_hosts: + _set_host_status(context, host_id, "with-role") + role_host_ref = models.HostRole() + hosts_values['role_id'] = role_ref.id + hosts_values['host_id'] = host_id + _update_values(role_host_ref, hosts_values) + role_host_ref.save(session=session) + + cluster_hosts_id = get_cluster_host(context, cluster_id, session=None, force_show_deleted=False) + if host_id not in cluster_hosts_id: # add new record in cluster_host + cluster_host_ref = models.ClusterHost() + host_cluster_values['updated_at'] = timeutils.utcnow() + host_cluster_values['host_id'] = host_id + host_cluster_values['cluster_id'] = cluster_id + cluster_host_ref.update(host_cluster_values) + _update_values(cluster_host_ref, host_cluster_values) + cluster_host_ref.save(session=session) + + if values.has_key('services'): + orig_services = list(eval(values['services'])) + for service_id in orig_services: + role_service_ref = models.ServiceRole() + services_values['role_id'] = role_ref.id + services_values['service_id'] = service_id + _update_values(role_service_ref, services_values) + role_service_ref.save(session=session) + + + return role_get(context, role_ref.id) + +def role_update(context, role_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if role does not exist. + """ + return _role_update(context, values, role_id) + +def _role_get(context, role_id, session=None, force_show_deleted=False): + """Get an role or raise if it does not exist.""" + _check_role_id(role_id) + session = session or get_session() + + try: + query = session.query(models.Role).filter_by(id=role_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + role = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No role found with ID %s" % role_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return role + +def role_get(context, role_id, session=None, force_show_deleted=False): + role = _role_get(context, role_id, session=session, + force_show_deleted=force_show_deleted) + return role + +def role_add(context, values): + """Add an role from the values dictionary.""" + return _role_update(context, values, None) +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def role_destroy(context, role_id): + """Destroy the role or raise if it does not exist.""" + session = get_session() + with session.begin(): + delete_service_disks_by_role(role_id, session) + delete_cinder_volumes_by_role(role_id, session) + + role_ref = _role_get(context, role_id, session=session) + role_ref.delete(session=session) + role_host_member_delete(context,role_id) + role_service_member_delete(context,role_id) + + return role_ref + +def role_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. 
If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_role = None + if marker is not None: + marker_role = _role_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + if 'cluster_id' in filters: + query=session.query(models.Role).filter_by(cluster_id=filters.pop('cluster_id')).filter_by(deleted=False) + else: + query = session.query(models.Role).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Role, limit, + sort_key, + marker=marker_role, + sort_dir=None, + sort_dirs=sort_dir) + + roles = [] + for role in query.all(): + role_dict = role.to_dict() + # If uninstalling tecs, we reset the role_progress value to (100 - role_progress) + # for showing data on client. Because role_progress will reduce from 100 to 0 and + # role_status will be set to 'init', when uninstalling is finished. + # So that installing could be started at next time. + status = role_dict.get('status', None) + progress = role_dict.get('progress', None) + if status in ["uninstalling", "uninstall-failed"] and type(progress) is types.LongType: + role_dict['progress'] = 100 - progress + role_dict["status"] = status + roles.append(role_dict) + return roles + +def role_services_get(context, role_id, session=None, force_show_deleted=False): + """Get an role or raise if it does not exist.""" + _check_role_id(role_id) + session = session or get_session() + + try: + query = session.query(models.ServiceRole).filter_by(role_id=role_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + except sa_orm.exc.NoResultFound: + msg = "No role found with ID %s" % role_id + LOG.debug(msg) + raise exception.NotFound(msg) + + roles = [] + for role in query.all(): + role_dict = role.to_dict() + roles.append(role_dict) + return roles + +def get_host_roles(context, role_id, session=None, force_show_deleted=False): + """Get an role or raise if it does not exist.""" + _check_role_id(role_id) + session = session or get_session() + + try: + query = session.query(models.HostRole).filter_by(role_id=role_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + except sa_orm.exc.NoResultFound: + msg = "No role found with ID %s" % role_id + LOG.debug(msg) + raise exception.NotFound(msg) + + roles = [] + for role in query.all(): + role_dict = role.to_dict() + roles.append(role_dict) + return roles + +def role_host_destroy(context, role_id): + """Destroy the role or raise if it does not exist.""" + session = get_session() + with 
session.begin(): + role_ref = _role_get(context, role_id, session=session) + role_host_member_delete(context,role_id) + return role_ref + +def delete_service_disks_by_role(role_id, session=None): + if session is None: + session = get_session() + service_disks_query = session.query(models.ServiceDisk).filter_by( + role_id=role_id).filter_by(deleted=False) + delete_time = timeutils.utcnow() + service_disks_query.update({"deleted": True, "deleted_at": delete_time}) + +def delete_cinder_volumes_by_role(role_id, session=None): + if session is None: + session = get_session() + cinder_volumes_query = session.query(models.CinderVolume).filter_by( + role_id=role_id).filter_by(deleted=False) + delete_time = timeutils.utcnow() + cinder_volumes_query.update({"deleted": True, "deleted_at": delete_time}) + +def role_host_update(context, role_host_id, values): + """Update the host_roles or raise if it does not exist.""" + _check_role_host_id(role_host_id) + session = get_session() + with session.begin(): + query = session.query(models.HostRole).filter_by(id=role_host_id) + updated = query.update(values, synchronize_session='fetch') + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + return + +def _check_role_host_id(role_host_id): + """ + check if the given role id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the role id + length is longer than the defined length in database model. + :param image_id: The id of the role we want to check + :return: Raise NoFound exception if given role id is invalid + """ + if (role_host_id and + len(role_host_id) > models.HostRole.id.property.columns[0].type.length): + raise exception.NotFound() + +def cluster_update(context, cluster_id, values): + """ + Set the given properties on an cluster and update it. + + :raises NotFound if cluster does not exist. 
+ """ + return _cluster_update(context, values, cluster_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cluster_destroy(context, cluster_id): + """Destroy the project or raise if it does not exist.""" + session = get_session() + with session.begin(): + project_ref = _cluster_get(context, cluster_id, session=session) + project_ref.delete(session=session) + + logicnetwork_query = session.query(models.LogicNetwork).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) + delete_time = timeutils.utcnow() + for logicnetwork_info in logicnetwork_query.all(): + query_subnet=session.query(models.Subnet).filter_by(logic_network_id=logicnetwork_info.id).filter_by(deleted=False) + for subnet_info in query_subnet.all(): + query_float_ip_range=session.query(models.FloatIpRange).filter_by( + subnet_id=subnet_info.id).filter_by(deleted=False) + query_float_ip_range.update({"deleted": True, "deleted_at": delete_time}) + + query_dns_nameservers=session.query(models.DnsNameservers).filter_by( + subnet_id=subnet_info.id).filter_by(deleted=False) + query_dns_nameservers.update({"deleted": True, "deleted_at": delete_time}) + query_subnet.update({"deleted": True, "deleted_at": delete_time}) + logicnetwork_query.update({"deleted": True, "deleted_at": delete_time}) + + router_query = session.query(models.Router).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) + router_query.update({"deleted": True, "deleted_at": delete_time}) + + role_query = session.query(models.Role).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) + for role_info in role_query.all(): + query_config_set_id=session.query(models.ConfigSet).filter_by(id=role_info.config_set_id).filter_by(deleted=False) + for config_set_id_info in query_config_set_id.all(): + config_set_item=session.query(models.ConfigSetItem).filter_by( + config_set_id=config_set_id_info.id).filter_by(deleted=False) + config_set_item.update({"deleted": True, "deleted_at": delete_time}) + query_config_set_id.update({"deleted": True, "deleted_at": delete_time}) + + query_host_role=session.query(models.HostRole).filter_by(role_id=role_info.id).filter_by(deleted=False) + query_host_role.update({"deleted": True, "deleted_at": delete_time}) + + query_service_role=session.query(models.ServiceRole).filter_by(role_id=role_info.id).filter_by(deleted=False) + query_service_role.update({"deleted": True, "deleted_at": delete_time}) + delete_service_disks_by_role(role_info.id, session) + delete_cinder_volumes_by_role(role_info.id, session) + role_query.update({"deleted": True, "deleted_at": delete_time}) + + network_query = session.query(models.Network).filter_by( + cluster_id=cluster_id).filter_by(deleted=False) + for network_info in network_query.all(): + query_assigned_network=session.query(models.AssignedNetworks).filter_by( + network_id=network_info.id).filter_by(deleted=False) + query_assigned_network.update({"deleted": True, "deleted_at": delete_time}) + + query_ip_range=session.query(models.IpRange).filter_by( + network_id=network_info.id).filter_by(deleted=False) + query_ip_range.update({"deleted": True, "deleted_at": delete_time}) + network_query.update({"deleted": True, "deleted_at": delete_time}) + + cluster_host_query = session.query(models.ClusterHost).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + cluster_hosts = cluster_host_query.all() + for cluster_host in cluster_hosts: #delete host role which all the roles belong to this cluster + delete_host_role(context, 
cluster_host.host_id, session=session) + host_ref = _host_get(context, cluster_host.host_id, session=session) + host_ref.update({'status': 'init'}) + delete_cluster_host(context, cluster_id, session=session) + + return project_ref + +def is_image_mutable(context, image): + """Return True if the image is mutable in this context.""" + # Is admin == image mutable + if context.is_admin: + return True + + # No owner == image not mutable + if image['owner'] is None or context.owner is None: + return False + + # Image only mutable by its owner + return image['owner'] == context.owner + + +def is_image_visible(context, image, status=None): + """Return True if the image is visible in this context.""" + # Is admin == image visible + if context.is_admin: + return True + + # No owner == image visible + if image['owner'] is None: + return True + + # Image is_public == image visible + if image['is_public']: + return True + + # Perform tests based on whether we have an owner + if context.owner is not None: + if context.owner == image['owner']: + return True + + # Figure out if this image is shared with that tenant + members = image_member_find(context, + image_id=image['id'], + member=context.owner, + status=status) + if members: + return True + + # Private image + return False + + +def _paginate_query(query, model, limit, sort_keys, marker=None, + sort_dir=None, sort_dirs=None): + """Returns a query with sorting / pagination criteria added. + + Pagination works by requiring a unique sort_key, specified by sort_keys. + (If sort_keys is not unique, then we risk looping through values.) + We use the last row in the previous page as the 'marker' for pagination. + So we must return values that follow the passed marker in the order. + With a single-valued sort_key, this would be easy: sort_key > X. + With a compound-values sort_key, (k1, k2, k3) we must do this to repeat + the lexicographical ordering: + (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3) + + We also have to cope with different sort_directions. + + Typically, the id of the last row is used as the client-facing pagination + marker, then the actual marker object must be fetched from the db and + passed in to us as marker. + + :param query: the query object to which we should add paging/sorting + :param model: the ORM model class + :param limit: maximum number of items to return + :param sort_keys: array of attributes by which results should be sorted + :param marker: the last item of the previous page; we returns the next + results after this value. + :param sort_dir: direction in which results should be sorted (asc, desc) + :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys + + :rtype: sqlalchemy.orm.query.Query + :return: The query with sorting/pagination added. 
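+
+ A minimal illustration of the marker criteria: with
+ sort_keys=['created_at', 'id'], sort_dirs=['asc', 'asc'] and a marker
+ row M, the added filter is (created_at > M.created_at) OR
+ (created_at == M.created_at AND id > M.id).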
+ """ + + if 'id' not in sort_keys: + # TODO(justinsb): If this ever gives a false-positive, check + # the actual primary key, rather than assuming its id + LOG.warn(_LW('Id not in sort_keys; is sort_keys unique?')) + + assert(not (sort_dir and sort_dirs)) + + # Default the sort direction to ascending + if sort_dirs is None and sort_dir is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + + try: + sort_key_attr = getattr(model, current_sort_key) + except AttributeError: + raise exception.InvalidSortKey() + query = query.order_by(sort_dir_func(sort_key_attr)) + + default = '' # Default to an empty string if NULL + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key) + if v is None: + v = default + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(sort_keys)): + crit_attrs = [] + for j in range(i): + model_attr = getattr(model, sort_keys[j]) + default = None if isinstance( + model_attr.property.columns[0].type, + sqlalchemy.DateTime) else '' + attr = sa_sql.expression.case([(model_attr != None, + model_attr), ], + else_=default) + crit_attrs.append((attr == marker_values[j])) + + model_attr = getattr(model, sort_keys[i]) + default = None if isinstance(model_attr.property.columns[0].type, + sqlalchemy.DateTime) else '' + attr = sa_sql.expression.case([(model_attr != None, + model_attr), ], + else_=default) + if sort_dirs[i] == 'desc': + crit_attrs.append((attr < marker_values[i])) + elif sort_dirs[i] == 'asc': + crit_attrs.append((attr > marker_values[i])) + else: + raise ValueError(_("Unknown sort direction, " + "must be 'desc' or 'asc'")) + + criteria = sa_sql.and_(*crit_attrs) + criteria_list.append(criteria) + + f = sa_sql.or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query + + +def _make_conditions_from_filters(filters, is_public=None): + # NOTE(venkatesh) make copy of the filters are to be altered in this + # method. 
+ filters = filters.copy() + + image_conditions = [] + prop_conditions = [] + tag_conditions = [] + + if is_public is not None: + image_conditions.append(models.Image.is_public == is_public) + + if 'checksum' in filters: + checksum = filters.pop('checksum') + image_conditions.append(models.Image.checksum == checksum) + + if 'is_public' in filters: + key = 'is_public' + value = filters.pop('is_public') + prop_filters = _make_image_property_condition(key=key, value=value) + prop_conditions.append(prop_filters) + + for (k, v) in filters.pop('properties', {}).items(): + prop_filters = _make_image_property_condition(key=k, value=v) + prop_conditions.append(prop_filters) + + if 'changes-since' in filters: + # normalize timestamp to UTC, as sqlalchemy doesn't appear to + # respect timezone offsets + changes_since = timeutils.normalize_time(filters.pop('changes-since')) + image_conditions.append(models.Image.updated_at > changes_since) + + if 'deleted' in filters: + deleted_filter = filters.pop('deleted') + image_conditions.append(models.Image.deleted == deleted_filter) + # TODO(bcwaldon): handle this logic in registry server + if not deleted_filter: + image_statuses = [s for s in STATUSES if s != 'killed'] + image_conditions.append(models.Image.status.in_(image_statuses)) + + if 'tags' in filters: + tags = filters.pop('tags') + for tag in tags: + tag_filters = [models.ImageTag.deleted == False] + tag_filters.extend([models.ImageTag.value == tag]) + tag_conditions.append(tag_filters) + + filters = dict([(k, v) for k, v in filters.items() if v is not None]) + + for (k, v) in filters.items(): + key = k + if k.endswith('_min') or k.endswith('_max'): + key = key[0:-4] + try: + v = int(filters.pop(k)) + except ValueError: + msg = _("Unable to filter on a range " + "with a non-numeric value.") + raise exception.InvalidFilterRangeValue(msg) + + if k.endswith('_min'): + image_conditions.append(getattr(models.Image, key) >= v) + if k.endswith('_max'): + image_conditions.append(getattr(models.Image, key) <= v) + + for (k, v) in filters.items(): + value = filters.pop(k) + if hasattr(models.Image, k): + image_conditions.append(getattr(models.Image, k) == value) + else: + prop_filters = _make_image_property_condition(key=k, value=value) + prop_conditions.append(prop_filters) + + return image_conditions, prop_conditions, tag_conditions + + +def _make_image_property_condition(key, value): + prop_filters = [models.ImageProperty.deleted == False] + prop_filters.extend([models.ImageProperty.name == key]) + prop_filters.extend([models.ImageProperty.value == value]) + return prop_filters + + +def _select_images_query(context, image_conditions, admin_as_user, + member_status, visibility): + session = get_session() + + img_conditional_clause = sa_sql.and_(*image_conditions) + + regular_user = (not context.is_admin) or admin_as_user + + query_member = session.query(models.Image).join( + models.Image.members).filter(img_conditional_clause) + if regular_user: + member_filters = [models.ImageMember.deleted == False] + if context.owner is not None: + member_filters.extend([models.ImageMember.member == context.owner]) + if member_status != 'all': + member_filters.extend([ + models.ImageMember.status == member_status]) + query_member = query_member.filter(sa_sql.and_(*member_filters)) + + # NOTE(venkatesh) if the 'visibility' is set to 'shared', we just + # query the image members table. No union is required. 
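+ # For any other visibility, a regular user (or an admin queried with
+ # admin_as_user) gets the union of public images, images owned by the
+ # user and images shared with the user; a real admin just gets the
+ # unrestricted image query.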
+ if visibility is not None and visibility == 'shared': + return query_member + + query_image = session.query(models.Image).filter(img_conditional_clause) + if regular_user: + query_image = query_image.filter(models.Image.is_public == True) + query_image_owner = None + if context.owner is not None: + query_image_owner = session.query(models.Image).filter( + models.Image.owner == context.owner).filter( + img_conditional_clause) + if query_image_owner is not None: + query = query_image.union(query_image_owner, query_member) + else: + query = query_image.union(query_member) + return query + else: + # Admin user + return query_image + + +def image_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None, + member_status='accepted', is_public=None, + admin_as_user=False, return_tag=False): + """ + Get all images that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the image properties attribute + :param marker: image id after which to start page + :param limit: maximum number of images to return + :param sort_key: list of image attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + :param member_status: only return shared images that have this membership + status + :param is_public: If true, return only public images. If false, return + only private and shared images. + :param admin_as_user: For backwards compatibility. If true, then return to + an admin the equivalent set of images which it would see + if it was a regular user + :param return_tag: To indicates whether image entry in result includes it + relevant tag entries. This could improve upper-layer + query performance, to prevent using separated calls + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + visibility = filters.pop('visibility', None) + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + img_cond, prop_cond, tag_cond = _make_conditions_from_filters( + filters, is_public) + + query = _select_images_query(context, + img_cond, + admin_as_user, + member_status, + visibility) + + if visibility is not None: + if visibility == 'public': + query = query.filter(models.Image.is_public == True) + elif visibility == 'private': + query = query.filter(models.Image.is_public == False) + + if prop_cond: + for prop_condition in prop_cond: + query = query.join(models.ImageProperty, aliased=True).filter( + sa_sql.and_(*prop_condition)) + + if tag_cond: + for tag_condition in tag_cond: + query = query.join(models.ImageTag, aliased=True).filter( + sa_sql.and_(*tag_condition)) + + marker_image = None + if marker is not None: + marker_image = _image_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + query = _paginate_query(query, models.Image, limit, + sort_key, + marker=marker_image, + sort_dir=None, + sort_dirs=sort_dir) + + query = query.options(sa_orm.joinedload( + models.Image.properties)).options( + sa_orm.joinedload(models.Image.locations)) + if return_tag: + query = query.options(sa_orm.joinedload(models.Image.tags)) + + images = [] + 
for image in query.all(): + image_dict = image.to_dict() + image_dict = _normalize_locations(context, image_dict, + force_show_deleted=showing_deleted) + if return_tag: + image_dict = _normalize_tags(image_dict) + images.append(image_dict) + return images +def host_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + marker_host = None + if marker is not None: + marker_host = _host_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + session = get_session() + if 'status' in filters and 'cluster_id' not in filters: + status = filters.pop('status') + query = session.query(models.Host).filter_by(deleted=showing_deleted).filter_by(status=status) + elif 'cluster_id' in filters and 'status' not in filters: + cluster_id = filters.pop('cluster_id') + sql = "select hosts.* from hosts ,cluster_hosts where hosts.deleted=0 and hosts.status='in-cluster' and cluster_hosts.cluster_id ='"+cluster_id +"' and cluster_hosts.host_id=hosts.id and cluster_hosts.deleted=0" + query = session.execute(sql).fetchall() + hosts = [] + for host in query: + host_dict = dict(host.items()) + hosts.append(host_dict) + sql = "select hosts.*,cluster_hosts.cluster_id as cluster_id," \ + "host_roles.progress as role_progress,host_roles.status as role_status," \ + "host_roles.messages as role_messages from cluster_hosts ,hosts,roles,host_roles \ + where hosts.deleted=0 and cluster_hosts.cluster_id ='"+cluster_id +"' and cluster_hosts.deleted=0 \ + and roles.deleted=0 and roles.cluster_id='" + cluster_id+ "'\ + and cluster_hosts.host_id=hosts.id \ + and host_roles.role_id = roles.id \ + and host_roles.host_id = hosts.id and host_roles.deleted=0 group by hosts.id" + query = session.execute(sql).fetchall() + for host in query: + host_dict = dict(host.items()) + hosts.append(host_dict) + return hosts + elif 'cluster_id' in filters and 'status' in filters: + status = filters.pop('status') + cluster_id = filters.pop('cluster_id') + if status == 'in-cluster': + sql = "select hosts.* from hosts ,cluster_hosts where hosts.deleted=0 and hosts.status='in-cluster' and cluster_hosts.cluster_id ='"+cluster_id +"' and cluster_hosts.host_id=hosts.id and cluster_hosts.deleted=0" + query = session.execute(sql).fetchall() + hosts = [] + for host in query: + host_dict = dict(host.items()) + hosts.append(host_dict) + return hosts + if status == 'with-role': + sql = "select hosts.*,cluster_hosts.cluster_id as cluster_id," \ + "host_roles.progress as role_progress,host_roles.status as role_status," \ + "host_roles.messages as role_messages from cluster_hosts 
,hosts,roles,host_roles \ + where hosts.deleted=0 and cluster_hosts.cluster_id ='"+cluster_id +"' and cluster_hosts.deleted=0 \ + and roles.deleted=0 and roles.cluster_id='" + cluster_id+ "'\ + and cluster_hosts.host_id=hosts.id \ + and host_roles.role_id = roles.id \ + and host_roles.host_id = hosts.id and host_roles.deleted=0 group by hosts.id" + query = session.execute(sql).fetchall() + hosts = [] + for host in query: + host_dict = dict(host.items()) + hosts.append(host_dict) + return hosts + elif 'name' in filters: + name = filters.pop('name') + query = session.query(models.Host).filter_by(deleted=showing_deleted).filter_by(name=name) + else: + query = session.query(models.Host).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Host, limit, + sort_key, + marker=marker_host, + sort_dir=None, + sort_dirs=sort_dir) + + hosts = [] + for host in query.all(): + host_dict = host.to_dict() + hosts.append(host_dict) + return hosts + +def _drop_protected_attrs(model_class, values): + """ + Removed protected attributes from values dictionary using the models + __protected_attributes__ field. + """ + for attr in model_class.__protected_attributes__: + if attr in values: + del values[attr] + + +def _image_get_disk_usage_by_owner(owner, session, image_id=None): + query = session.query(models.Image) + query = query.filter(models.Image.owner == owner) + if image_id is not None: + query = query.filter(models.Image.id != image_id) + query = query.filter(models.Image.size > 0) + query = query.filter(~models.Image.status.in_(['killed', 'deleted'])) + images = query.all() + + total = 0 + for i in images: + locations = [l for l in i.locations if l['status'] != 'deleted'] + total += (i.size * len(locations)) + return total + + +def _validate_image(values): + """ + Validates the incoming data and raises a Invalid exception + if anything is out of order. + + :param values: Mapping of image metadata to check + """ + + status = values.get('status') + if not status: + msg = "Image status is required." + raise exception.Invalid(msg) + + if status not in STATUSES: + msg = "Invalid image status '%s' for image." % status + raise exception.Invalid(msg) + + return values + + +def _update_values(image_ref, values): + for k in values: + if getattr(image_ref, k) != values[k]: + setattr(image_ref, k, values[k]) + + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _image_update(context, values, image_id, purge_props=False, + from_state=None): + """ + Used internally by image_create and image_update + + :param context: Request context + :param values: A dict of attributes to set + :param image_id: If None, create the image, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + session = get_session() + with session.begin(): + + # Remove the properties passed in the values mapping. We + # handle properties separately from base image attributes, + # and leaving properties in the values mapping will cause + # a SQLAlchemy model error because SQLAlchemy expects the + # properties attribute of an Image model to be a list and + # not a dict. 
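+ # 'locations' is popped for the same reason; both mappings are
+ # re-applied after the base Image row is saved, via
+ # _set_properties_for_image and _image_locations_set respectively.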
+ properties = values.pop('properties', {}) + + location_data = values.pop('locations', None) + + new_status = values.get('status', None) + if image_id: + image_ref = _image_get(context, image_id, session=session) + current = image_ref.status + # Perform authorization check + _check_mutate_authorization(context, image_ref) + else: + if values.get('size') is not None: + values['size'] = int(values['size']) + + if 'min_ram' in values: + values['min_ram'] = int(values['min_ram'] or 0) + + if 'min_disk' in values: + values['min_disk'] = int(values['min_disk'] or 0) + + values['is_public'] = bool(values.get('is_public', False)) + values['protected'] = bool(values.get('protected', False)) + image_ref = models.Image() + + # Need to canonicalize ownership + if 'owner' in values and not values['owner']: + values['owner'] = None + + if image_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Image, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if image_id: + query = session.query(models.Image).filter_by(id=image_id) + if from_state: + query = query.filter_by(status=from_state) + + if new_status: + _validate_image(values) + + # Validate fields for Images table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in image_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + image_ref = _image_get(context, image_id, session=session) + else: + image_ref.update(values) + # Validate the attributes before we go any further. From my + # investigation, the @validates decorator does not validate + # on new records, only on existing records, which is, well, + # idiotic. + values = _validate_image(image_ref.to_dict()) + _update_values(image_ref, values) + + try: + image_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Image ID %s already exists!" 
+ % values['id']) + + _set_properties_for_image(context, image_ref, properties, purge_props, + session) + + if location_data is not None: + _image_locations_set(context, image_ref.id, location_data, + session=session) + + return image_get(context, image_ref.id) + +@utils.no_4byte_params +def image_location_add(context, image_id, location, session=None): + deleted = location['status'] in ('deleted', 'pending_delete') + delete_time = timeutils.utcnow() if deleted else None + location_ref = models.ImageLocation(image_id=image_id, + value=location['url'], + meta_data=location['metadata'], + status=location['status'], + deleted=deleted, + deleted_at=delete_time) + session = session or get_session() + location_ref.save(session=session) + + +@utils.no_4byte_params +def image_location_update(context, image_id, location, session=None): + loc_id = location.get('id') + if loc_id is None: + msg = _("The location data has an invalid ID: %d") % loc_id + raise exception.Invalid(msg) + + try: + session = session or get_session() + location_ref = session.query(models.ImageLocation).filter_by( + id=loc_id).filter_by(image_id=image_id).one() + + deleted = location['status'] in ('deleted', 'pending_delete') + updated_time = timeutils.utcnow() + delete_time = updated_time if deleted else None + + location_ref.update({"value": location['url'], + "meta_data": location['metadata'], + "status": location['status'], + "deleted": deleted, + "updated_at": updated_time, + "deleted_at": delete_time}) + location_ref.save(session=session) + except sa_orm.exc.NoResultFound: + msg = (_("No location found with ID %(loc)s from image %(img)s") % + dict(loc=loc_id, img=image_id)) + LOG.warn(msg) + raise exception.NotFound(msg) + + +def image_location_delete(context, image_id, location_id, status, + delete_time=None, session=None): + if status not in ('deleted', 'pending_delete'): + msg = _("The status of deleted image location can only be set to " + "'pending_delete' or 'deleted'") + raise exception.Invalid(msg) + + try: + session = session or get_session() + location_ref = session.query(models.ImageLocation).filter_by( + id=location_id).filter_by(image_id=image_id).one() + + delete_time = delete_time or timeutils.utcnow() + + location_ref.update({"deleted": True, + "status": status, + "updated_at": delete_time, + "deleted_at": delete_time}) + location_ref.save(session=session) + except sa_orm.exc.NoResultFound: + msg = (_("No location found with ID %(loc)s from image %(img)s") % + dict(loc=location_id, img=image_id)) + LOG.warn(msg) + raise exception.NotFound(msg) + + +def _image_locations_set(context, image_id, locations, session=None): + # NOTE(zhiyan): 1. Remove records from DB for deleted locations + session = session or get_session() + query = session.query(models.ImageLocation).filter_by( + image_id=image_id).filter_by( + deleted=False).filter(~models.ImageLocation.id.in_( + [loc['id'] + for loc in locations + if loc.get('id')])) + for loc_id in [loc_ref.id for loc_ref in query.all()]: + image_location_delete(context, image_id, loc_id, 'deleted', + session=session) + + # NOTE(zhiyan): 2. 
Adding or update locations + for loc in locations: + if loc.get('id') is None: + image_location_add(context, image_id, loc, session=session) + else: + image_location_update(context, image_id, loc, session=session) + + +def _image_locations_delete_all(context, image_id, + delete_time=None, session=None): + """Delete all image locations for given image""" + session = session or get_session() + location_refs = session.query(models.ImageLocation).filter_by( + image_id=image_id).filter_by(deleted=False).all() + + for loc_id in [loc_ref.id for loc_ref in location_refs]: + image_location_delete(context, image_id, loc_id, 'deleted', + delete_time=delete_time, session=session) + + +@utils.no_4byte_params +def _set_properties_for_image(context, image_ref, properties, + purge_props=False, session=None): + """ + Create or update a set of image_properties for a given image + + :param context: Request context + :param image_ref: An Image object + :param properties: A dict of properties to set + :param session: A SQLAlchemy session to use (if present) + """ + orig_properties = {} + for prop_ref in image_ref.properties: + orig_properties[prop_ref.name] = prop_ref + + for name, value in six.iteritems(properties): + prop_values = {'image_id': image_ref.id, + 'name': name, + 'value': value} + if name in orig_properties: + prop_ref = orig_properties[name] + _image_property_update(context, prop_ref, prop_values, + session=session) + else: + image_property_create(context, prop_values, session=session) + + if purge_props: + for key in orig_properties.keys(): + if key not in properties: + prop_ref = orig_properties[key] + image_property_delete(context, prop_ref.name, + image_ref.id, session=session) + + +def _image_child_entry_delete_all(child_model_cls, image_id, delete_time=None, + session=None): + """Deletes all the child entries for the given image id. + + Deletes all the child entries of the given child entry ORM model class + using the parent image's id. + + The child entry ORM model class can be one of the following: + model.ImageLocation, model.ImageProperty, model.ImageMember and + model.ImageTag. + + :param child_model_cls: the ORM model class. + :param image_id: id of the image whose child entries are to be deleted. + :param delete_time: datetime of deletion to be set. + If None, uses current datetime. + :param session: A SQLAlchemy session to use (if present) + + :rtype: int + :return: The number of child entries got soft-deleted. + """ + session = session or get_session() + + query = session.query(child_model_cls).filter_by( + image_id=image_id).filter_by(deleted=False) + + delete_time = delete_time or timeutils.utcnow() + + count = query.update({"deleted": True, "deleted_at": delete_time}) + return count + + +def image_property_create(context, values, session=None): + """Create an ImageProperty object.""" + prop_ref = models.ImageProperty() + prop = _image_property_update(context, prop_ref, values, session=session) + return prop.to_dict() + + +def _image_property_update(context, prop_ref, values, session=None): + """ + Used internally by image_property_create and image_property_update. + """ + _drop_protected_attrs(models.ImageProperty, values) + values["deleted"] = False + prop_ref.update(values) + prop_ref.save(session=session) + return prop_ref + + +def image_property_delete(context, prop_ref, image_ref, session=None): + """ + Used internally by image_property_create and image_property_update. 
+ """ + session = session or get_session() + prop = session.query(models.ImageProperty).filter_by(image_id=image_ref, + name=prop_ref).one() + prop.delete(session=session) + return prop + + +def _image_property_delete_all(context, image_id, delete_time=None, + session=None): + """Delete all image properties for given image""" + props_updated_count = _image_child_entry_delete_all(models.ImageProperty, + image_id, + delete_time, + session) + return props_updated_count + + +def image_member_create(context, values, session=None): + """Create an ImageMember object.""" + memb_ref = models.ImageMember() + _image_member_update(context, memb_ref, values, session=session) + return _image_member_format(memb_ref) + + +def _image_member_format(member_ref): + """Format a member ref for consumption outside of this module.""" + return { + 'id': member_ref['id'], + 'image_id': member_ref['image_id'], + 'member': member_ref['member'], + 'can_share': member_ref['can_share'], + 'status': member_ref['status'], + 'created_at': member_ref['created_at'], + 'updated_at': member_ref['updated_at'] + } + + +def image_member_update(context, memb_id, values): + """Update an ImageMember object.""" + session = get_session() + memb_ref = _image_member_get(context, memb_id, session) + _image_member_update(context, memb_ref, values, session) + return _image_member_format(memb_ref) + + +def _image_member_update(context, memb_ref, values, session=None): + """Apply supplied dictionary of values to a Member object.""" + _drop_protected_attrs(models.ImageMember, values) + values["deleted"] = False + values.setdefault('can_share', False) + memb_ref.update(values) + memb_ref.save(session=session) + return memb_ref + + +def image_member_delete(context, memb_id, session=None): + """Delete an ImageMember object.""" + session = session or get_session() + member_ref = _image_member_get(context, memb_id, session) + _image_member_delete(context, member_ref, session) + + +def _image_member_delete(context, memb_ref, session): + memb_ref.delete(session=session) + + +def _image_member_delete_all(context, image_id, delete_time=None, + session=None): + """Delete all image members for given image""" + members_updated_count = _image_child_entry_delete_all(models.ImageMember, + image_id, + delete_time, + session) + return members_updated_count + + +def _image_member_get(context, memb_id, session): + """Fetch an ImageMember entity by id.""" + query = session.query(models.ImageMember) + query = query.filter_by(id=memb_id) + return query.one() + + +def image_member_find(context, image_id=None, member=None, status=None): + """Find all members that meet the given criteria + + :param image_id: identifier of image entity + :param member: tenant to which membership has been granted + """ + session = get_session() + members = _image_member_find(context, session, image_id, member, status) + return [_image_member_format(m) for m in members] + + +def _image_member_find(context, session, image_id=None, + member=None, status=None): + query = session.query(models.ImageMember) + query = query.filter_by(deleted=False) + + if not context.is_admin: + query = query.join(models.Image) + filters = [ + models.Image.owner == context.owner, + models.ImageMember.member == context.owner, + ] + query = query.filter(sa_sql.or_(*filters)) + + if image_id is not None: + query = query.filter(models.ImageMember.image_id == image_id) + if member is not None: + query = query.filter(models.ImageMember.member == member) + if status is not None: + query = 
query.filter(models.ImageMember.status == status) + + return query.all() + + +def image_member_count(context, image_id): + """Return the number of image members for this image + + :param image_id: identifier of image entity + """ + session = get_session() + + if not image_id: + msg = _("Image id is required.") + raise exception.Invalid(msg) + + query = session.query(models.ImageMember) + query = query.filter_by(deleted=False) + query = query.filter(models.ImageMember.image_id == str(image_id)) + + return query.count() + + +def image_tag_set_all(context, image_id, tags): + # NOTE(kragniz): tag ordering should match exactly what was provided, so a + # subsequent call to image_tag_get_all returns them in the correct order + + session = get_session() + existing_tags = image_tag_get_all(context, image_id, session) + + tags_created = [] + for tag in tags: + if tag not in tags_created and tag not in existing_tags: + tags_created.append(tag) + image_tag_create(context, image_id, tag, session) + + for tag in existing_tags: + if tag not in tags: + image_tag_delete(context, image_id, tag, session) + + +@utils.no_4byte_params +def image_tag_create(context, image_id, value, session=None): + """Create an image tag.""" + session = session or get_session() + tag_ref = models.ImageTag(image_id=image_id, value=value) + tag_ref.save(session=session) + return tag_ref['value'] + + +def image_tag_delete(context, image_id, value, session=None): + """Delete an image tag.""" + _check_image_id(image_id) + session = session or get_session() + query = session.query(models.ImageTag).filter_by( + image_id=image_id).filter_by( + value=value).filter_by(deleted=False) + try: + tag_ref = query.one() + except sa_orm.exc.NoResultFound: + raise exception.NotFound() + + tag_ref.delete(session=session) + + +def _image_tag_delete_all(context, image_id, delete_time=None, session=None): + """Delete all image tags for given image""" + tags_updated_count = _image_child_entry_delete_all(models.ImageTag, + image_id, + delete_time, + session) + return tags_updated_count + + +def image_tag_get_all(context, image_id, session=None): + """Get a list of tags for a specific image.""" + _check_image_id(image_id) + session = session or get_session() + tags = session.query(models.ImageTag.value).filter_by( + image_id=image_id).filter_by(deleted=False).all() + return [tag[0] for tag in tags] + + +def user_get_storage_usage(context, owner_id, image_id=None, session=None): + _check_image_id(image_id) + session = session or get_session() + total_size = _image_get_disk_usage_by_owner( + owner_id, session, image_id=image_id) + return total_size + + +def _task_info_format(task_info_ref): + """Format a task info ref for consumption outside of this module""" + if task_info_ref is None: + return {} + return { + 'task_id': task_info_ref['task_id'], + 'input': task_info_ref['input'], + 'result': task_info_ref['result'], + 'message': task_info_ref['message'], + } + + +def _task_info_create(context, task_id, values, session=None): + """Create an TaskInfo object""" + session = session or get_session() + task_info_ref = models.TaskInfo() + task_info_ref.task_id = task_id + task_info_ref.update(values) + task_info_ref.save(session=session) + return _task_info_format(task_info_ref) + + +def _task_info_update(context, task_id, values, session=None): + """Update an TaskInfo object""" + session = session or get_session() + task_info_ref = _task_info_get(context, task_id, session=session) + if task_info_ref: + task_info_ref.update(values) + 
task_info_ref.save(session=session) + return _task_info_format(task_info_ref) + + +def _task_info_get(context, task_id, session=None): + """Fetch an TaskInfo entity by task_id""" + session = session or get_session() + query = session.query(models.TaskInfo) + query = query.filter_by(task_id=task_id) + try: + task_info_ref = query.one() + except sa_orm.exc.NoResultFound: + msg = ("TaskInfo was not found for task with id %(task_id)s" % + {'task_id': task_id}) + LOG.debug(msg) + task_info_ref = None + + return task_info_ref + + +def task_create(context, values, session=None): + """Create a task object""" + + values = values.copy() + session = session or get_session() + with session.begin(): + task_info_values = _pop_task_info_values(values) + + task_ref = models.Task() + _task_update(context, task_ref, values, session=session) + + _task_info_create(context, + task_ref.id, + task_info_values, + session=session) + + return task_get(context, task_ref.id, session) + + +def _pop_task_info_values(values): + task_info_values = {} + for k, v in values.items(): + if k in ['input', 'result', 'message']: + values.pop(k) + task_info_values[k] = v + + return task_info_values + + +def task_update(context, task_id, values, session=None): + """Update a task object""" + + session = session or get_session() + + with session.begin(): + task_info_values = _pop_task_info_values(values) + + task_ref = _task_get(context, task_id, session) + _drop_protected_attrs(models.Task, values) + + values['updated_at'] = timeutils.utcnow() + + _task_update(context, task_ref, values, session) + + if task_info_values: + _task_info_update(context, + task_id, + task_info_values, + session) + + return task_get(context, task_id, session) + + +def task_get(context, task_id, session=None, force_show_deleted=False): + """Fetch a task entity by id""" + task_ref = _task_get(context, task_id, session=session, + force_show_deleted=force_show_deleted) + return _task_format(task_ref, task_ref.info) + + +def task_delete(context, task_id, session=None): + """Delete a task""" + session = session or get_session() + task_ref = _task_get(context, task_id, session=session) + task_ref.delete(session=session) + return _task_format(task_ref, task_ref.info) + + +def task_get_all(context, filters=None, marker=None, limit=None, + sort_key='created_at', sort_dir='desc', admin_as_user=False): + """ + Get all tasks that match zero or more filters. + + :param filters: dict of filter keys and values. + :param marker: task id after which to start page + :param limit: maximum number of tasks to return + :param sort_key: task attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + :param admin_as_user: For backwards compatibility. 
If true, then return to + an admin the equivalent set of tasks which it would see + if it were a regular user + :return: tasks set + """ + filters = filters or {} + + session = get_session() + query = session.query(models.Task) + + if not (context.is_admin or admin_as_user) and context.owner is not None: + query = query.filter(models.Task.owner == context.owner) + + showing_deleted = False + + if 'deleted' in filters: + deleted_filter = filters.pop('deleted') + query = query.filter_by(deleted=deleted_filter) + showing_deleted = deleted_filter + + for (k, v) in filters.items(): + if v is not None: + key = k + if hasattr(models.Task, key): + query = query.filter(getattr(models.Task, key) == v) + + marker_task = None + if marker is not None: + marker_task = _task_get(context, marker, + force_show_deleted=showing_deleted) + + sort_keys = ['created_at', 'id'] + if sort_key not in sort_keys: + sort_keys.insert(0, sort_key) + + query = _paginate_query(query, models.Task, limit, + sort_keys, + marker=marker_task, + sort_dir=sort_dir) + + task_refs = query.all() + + tasks = [] + for task_ref in task_refs: + tasks.append(_task_format(task_ref, task_info_ref=None)) + + return tasks + + +def _is_task_visible(context, task): + """Return True if the task is visible in this context.""" + # Is admin == task visible + if context.is_admin: + return True + + # No owner == task visible + if task['owner'] is None: + return True + + # Perform tests based on whether we have an owner + if context.owner is not None: + if context.owner == task['owner']: + return True + + return False + + +def _task_get(context, task_id, session=None, force_show_deleted=False): + """Fetch a task entity by id""" + session = session or get_session() + query = session.query(models.Task).options( + sa_orm.joinedload(models.Task.info) + ).filter_by(id=task_id) + + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + try: + task_ref = query.one() + except sa_orm.exc.NoResultFound: + msg = "No task found with ID %s" % task_id + LOG.debug(msg) + raise exception.TaskNotFound(task_id=task_id) + + # Make sure the task is visible + if not _is_task_visible(context, task_ref): + msg = "Forbidding request, task %s is not visible" % task_id + LOG.debug(msg) + raise exception.Forbidden(msg) + + return task_ref + + +def _task_update(context, task_ref, values, session=None): + """Apply supplied dictionary of values to a task object.""" + values["deleted"] = False + task_ref.update(values) + task_ref.save(session=session) + return task_ref + + +def _task_format(task_ref, task_info_ref=None): + """Format a task ref for consumption outside of this module""" + task_dict = { + 'id': task_ref['id'], + 'type': task_ref['type'], + 'status': task_ref['status'], + 'owner': task_ref['owner'], + 'expires_at': task_ref['expires_at'], + 'created_at': task_ref['created_at'], + 'updated_at': task_ref['updated_at'], + 'deleted_at': task_ref['deleted_at'], + 'deleted': task_ref['deleted'] + } + + if task_info_ref: + task_info_dict = { + 'input': task_info_ref['input'], + 'result': task_info_ref['result'], + 'message': task_info_ref['message'], + } + task_dict.update(task_info_dict) + + return task_dict + + +def metadef_namespace_get_all(context, marker=None, limit=None, sort_key=None, + sort_dir=None, filters=None, session=None): + """List all available namespaces.""" + session = session or get_session() + namespaces = metadef_namespace_api.get_all( + context, session, marker, limit, sort_key, sort_dir, filters) + return 
namespaces + + +def metadef_namespace_get(context, namespace_name, session=None): + """Get a namespace or raise if it does not exist or is not visible.""" + session = session or get_session() + return metadef_namespace_api.get( + context, namespace_name, session) + + +def metadef_namespace_create(context, values, session=None): + """Create a namespace or raise if it already exists.""" + session = session or get_session() + return metadef_namespace_api.create(context, values, session) + + +def metadef_namespace_update(context, namespace_id, namespace_dict, + session=None): + """Update a namespace or raise if it does not exist or not visible""" + session = session or get_session() + return metadef_namespace_api.\ + update(context, namespace_id, namespace_dict, session) + + +def metadef_namespace_delete(context, namespace_name, session=None): + """Delete the namespace and all foreign references""" + session = session or get_session() + return metadef_namespace_api.delete_cascade( + context, namespace_name, session) + + +def metadef_object_get_all(context, namespace_name, session=None): + """Get a metadata-schema object or raise if it does not exist.""" + session = session or get_session() + return metadef_object_api.get_all( + context, namespace_name, session) + + +def metadef_object_get(context, namespace_name, object_name, session=None): + """Get a metadata-schema object or raise if it does not exist.""" + session = session or get_session() + return metadef_object_api.get( + context, namespace_name, object_name, session) + + +def metadef_object_create(context, namespace_name, object_dict, + session=None): + """Create a metadata-schema object or raise if it already exists.""" + session = session or get_session() + return metadef_object_api.create( + context, namespace_name, object_dict, session) + + +def metadef_object_update(context, namespace_name, object_id, object_dict, + session=None): + """Update an object or raise if it does not exist or not visible.""" + session = session or get_session() + return metadef_object_api.update( + context, namespace_name, object_id, object_dict, session) + + +def metadef_object_delete(context, namespace_name, object_name, + session=None): + """Delete an object or raise if namespace or object doesn't exist.""" + session = session or get_session() + return metadef_object_api.delete( + context, namespace_name, object_name, session) + + +def metadef_object_delete_namespace_content( + context, namespace_name, session=None): + """Delete an object or raise if namespace or object doesn't exist.""" + session = session or get_session() + return metadef_object_api.delete_by_namespace_name( + context, namespace_name, session) + + +def metadef_object_count(context, namespace_name, session=None): + """Get count of properties for a namespace, raise if ns doesn't exist.""" + session = session or get_session() + return metadef_object_api.count(context, namespace_name, session) + + +def metadef_property_get_all(context, namespace_name, session=None): + """Get a metadef property or raise if it does not exist.""" + session = session or get_session() + return metadef_property_api.get_all(context, namespace_name, session) + + +def metadef_property_get(context, namespace_name, + property_name, session=None): + """Get a metadef property or raise if it does not exist.""" + session = session or get_session() + return metadef_property_api.get( + context, namespace_name, property_name, session) + + +def metadef_property_create(context, namespace_name, property_dict, + session=None): 
+ """Create a metadef property or raise if it already exists.""" + session = session or get_session() + return metadef_property_api.create( + context, namespace_name, property_dict, session) + + +def metadef_property_update(context, namespace_name, property_id, + property_dict, session=None): + """Update an object or raise if it does not exist or not visible.""" + session = session or get_session() + return metadef_property_api.update( + context, namespace_name, property_id, property_dict, session) + + +def metadef_property_delete(context, namespace_name, property_name, + session=None): + """Delete a property or raise if it or namespace doesn't exist.""" + session = session or get_session() + return metadef_property_api.delete( + context, namespace_name, property_name, session) + + +def metadef_property_delete_namespace_content( + context, namespace_name, session=None): + """Delete a property or raise if it or namespace doesn't exist.""" + session = session or get_session() + return metadef_property_api.delete_by_namespace_name( + context, namespace_name, session) + + +def metadef_property_count(context, namespace_name, session=None): + """Get count of properties for a namespace, raise if ns doesn't exist.""" + session = session or get_session() + return metadef_property_api.count(context, namespace_name, session) + + +def metadef_resource_type_create(context, values, session=None): + """Create a resource_type""" + session = session or get_session() + return metadef_resource_type_api.create( + context, values, session) + + +def metadef_resource_type_get(context, resource_type_name, session=None): + """Get a resource_type""" + session = session or get_session() + return metadef_resource_type_api.get( + context, resource_type_name, session) + + +def metadef_resource_type_get_all(context, session=None): + """list all resource_types""" + session = session or get_session() + return metadef_resource_type_api.get_all(context, session) + + +def metadef_resource_type_delete(context, resource_type_name, session=None): + """Get a resource_type""" + session = session or get_session() + return metadef_resource_type_api.delete( + context, resource_type_name, session) + + +def metadef_resource_type_association_get( + context, namespace_name, resource_type_name, session=None): + session = session or get_session() + return metadef_association_api.get( + context, namespace_name, resource_type_name, session) + + +def metadef_resource_type_association_create( + context, namespace_name, values, session=None): + session = session or get_session() + return metadef_association_api.create( + context, namespace_name, values, session) + + +def metadef_resource_type_association_delete( + context, namespace_name, resource_type_name, session=None): + session = session or get_session() + return metadef_association_api.delete( + context, namespace_name, resource_type_name, session) + + +def metadef_resource_type_association_get_all_by_namespace( + context, namespace_name, session=None): + session = session or get_session() + return metadef_association_api.\ + get_all_by_namespace(context, namespace_name, session) + + +def metadef_tag_get_all( + context, namespace_name, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None, session=None): + """Get metadata-schema tags or raise if none exist.""" + session = session or get_session() + return metadef_tag_api.get_all( + context, namespace_name, session, + filters, marker, limit, sort_key, sort_dir) + + +def metadef_tag_get(context, namespace_name, name, 
session=None): + """Get a metadata-schema tag or raise if it does not exist.""" + session = session or get_session() + return metadef_tag_api.get( + context, namespace_name, name, session) + + +def metadef_tag_create(context, namespace_name, tag_dict, + session=None): + """Create a metadata-schema tag or raise if it already exists.""" + session = session or get_session() + return metadef_tag_api.create( + context, namespace_name, tag_dict, session) + + +def metadef_tag_create_tags(context, namespace_name, tag_list, + session=None): + """Create a metadata-schema tag or raise if it already exists.""" + session = get_session() + return metadef_tag_api.create_tags( + context, namespace_name, tag_list, session) + + +def metadef_tag_update(context, namespace_name, id, tag_dict, + session=None): + """Update an tag or raise if it does not exist or not visible.""" + session = session or get_session() + return metadef_tag_api.update( + context, namespace_name, id, tag_dict, session) + + +def metadef_tag_delete(context, namespace_name, name, + session=None): + """Delete an tag or raise if namespace or tag doesn't exist.""" + session = session or get_session() + return metadef_tag_api.delete( + context, namespace_name, name, session) + + +def metadef_tag_delete_namespace_content( + context, namespace_name, session=None): + """Delete an tag or raise if namespace or tag doesn't exist.""" + session = session or get_session() + return metadef_tag_api.delete_by_namespace_name( + context, namespace_name, session) + + +def metadef_tag_count(context, namespace_name, session=None): + """Get count of tags for a namespace, raise if ns doesn't exist.""" + session = session or get_session() + return metadef_tag_api.count(context, namespace_name, session) + + +def artifact_create(context, values, type_name, + type_version=None, session=None): + session = session or get_session() + artifact = artifacts.create(context, values, session, type_name, + type_version) + return artifact + + +def artifact_delete(context, artifact_id, type_name, + type_version=None, session=None): + session = session or get_session() + artifact = artifacts.delete(context, artifact_id, session, type_name, + type_version) + return artifact + + +def artifact_update(context, values, artifact_id, type_name, + type_version=None, session=None): + session = session or get_session() + artifact = artifacts.update(context, values, artifact_id, session, + type_name, type_version) + return artifact + + +def artifact_get(context, artifact_id, + type_name=None, + type_version=None, + show_level=ga.Showlevel.BASIC, + session=None): + session = session or get_session() + return artifacts.get(context, artifact_id, session, type_name, + type_version, show_level) + + +def artifact_publish(context, + artifact_id, + type_name, + type_version=None, + session=None): + session = session or get_session() + return artifacts.publish(context, + artifact_id, + session, + type_name, + type_version) + + +def artifact_get_all(context, marker=None, limit=None, sort_keys=None, + sort_dirs=None, filters=None, + show_level=ga.Showlevel.NONE, session=None): + session = session or get_session() + return artifacts.get_all(context, session, marker, limit, sort_keys, + sort_dirs, filters, show_level) + + +def _project_host_member_format(member_ref): + """Format a member ref for consumption outside of this module.""" + return { + 'id': member_ref['id'], + 'cluster_id': member_ref['cluster_id'], + 'host_id': member_ref['host_id'], + 'created_at': member_ref['created_at'], + 
'updated_at': member_ref['updated_at']
+    }
+
+def _cluster_host_member_get(context, session, member_id):
+    """Fetch a ClusterHost entity by id."""
+    query = session.query(models.ClusterHost)
+    query = query.filter(models.ClusterHost.id == member_id).filter_by(deleted=False)
+    return query.one()
+
+def _cluster_host_member_update(context, memb_ref, values, session=None):
+    """Apply supplied dictionary of values to a Member object."""
+    _drop_protected_attrs(models.ClusterHost, values)
+    if 'node' in values:
+        host = host_get(context, values['node'], session=None, force_show_deleted=False)
+        host.status = "in-cluster"
+        host.save(session=session)
+    values["deleted"] = False
+    memb_ref.update(values)
+    memb_ref.save(session=session)
+    return memb_ref
+
+def cluster_host_member_update(context, values, member_id):
+    """Update a ClusterHost object."""
+    session = get_session()
+    memb_ref = _cluster_host_member_get(context, session, member_id)
+    _cluster_host_member_update(context, memb_ref, values, session)
+    return _project_host_member_format(memb_ref)
+
+def cluster_host_member_create(context, values, session=None):
+    """Create a ClusterHost object."""
+    memb_ref = models.ClusterHost()
+    _cluster_host_member_update(context, memb_ref, values, session=session)
+    return _project_host_member_format(memb_ref)
+
+def _cluster_host_member_find(context, session, cluster_id=None, host_id=None):
+    query = session.query(models.ClusterHost)
+    query = query.filter_by(deleted=False)
+
+    if cluster_id is not None:
+        query = query.filter(models.ClusterHost.cluster_id == cluster_id)
+    if host_id is not None:
+        query = query.filter(models.ClusterHost.host_id == host_id)
+
+    return query.all()
+
+
+def _cluster_host_member_delete(context, memb_ref, session):
+    memb_ref.delete(session=session)
+
+def cluster_host_member_delete(context, member_id):
+    """Delete a ClusterHost object."""
+    session = get_session()
+    member_ref = _cluster_host_member_get(context, session, member_id)
+    host_info = host_get(context, member_ref['host_id'], session)
+    host_info['status'] = "init"
+    host_update(context, member_ref['host_id'], dict(host_info))
+    _cluster_host_member_delete(context, member_ref, session)
+    delete_host_role(context, member_ref['host_id'], session)
+    host_interfaces = get_host_interface(context, member_ref['host_id'], None, session)
+    for host_interface_info in host_interfaces:
+        delete_assigned_networks(context, host_interface_info.id, session)
+
+
+def cluster_host_member_find(context, cluster_id=None, host_id=None):
+    """Find all members that meet the given criteria
+
+    :param cluster_id: identifier of cluster entity
+    :param host_id: host identifier
+    """
+    session = get_session()
+    members = _cluster_host_member_find(context, session, cluster_id, host_id)
+    return [_project_host_member_format(m) for m in members]
+
+def cluster_get_all(context, filters=None, marker=None, limit=None,
+                    sort_key=None, sort_dir=None):
+    """
+    Get all clusters that match zero or more filters.
+
+    :param filters: dict of filter keys and values.
If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_cluster = None + if marker is not None: + marker_cluster = _cluster_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + if 'name' in filters: + name = filters.pop('name') + query = session.query(models.Cluster).filter_by(deleted=False).filter_by(name=name) + elif 'auto_scale' in filters: + auto_scale = filters.pop('auto_scale') + query = session.query(models.Cluster).filter_by(deleted=False).filter_by(auto_scale=auto_scale) + else: + query = session.query(models.Cluster).filter_by(deleted=False) + + query = _paginate_query(query, models.Cluster, limit, + sort_key, + marker=marker_cluster, + sort_dir=None, + sort_dirs=sort_dir) + + clusters = [] + for cluster in query.all(): + cluster_dict = cluster.to_dict() + clusters.append(cluster_dict) + return clusters + +def component_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_component = None + if marker is not None: + marker_component = _component_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.Component).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Component, limit, + sort_key, + marker=marker_component, + sort_dir=None, + sort_dirs=sort_dir) + + components = [] + for component in query.all(): + component_dict = component.to_dict() + components.append(component_dict) + return components + +def _check_config_file_id(config_file_id): + """ + check if the given config_file id is valid before executing operations. For + now, we only check its length. 
The original purpose of this method is
+    wrapping the different behaviors between MySql and DB2 when the config_file id
+    length is longer than the defined length in database model.
+    :param config_file_id: The id of the config_file we want to check
+    :return: Raise NotFound exception if given config_file id is invalid
+    """
+    if (config_file_id and
+            len(config_file_id) > models.ConfigFile.id.property.columns[0].type.length):
+        raise exception.NotFound()
+
+@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500,
+       stop_max_attempt_number=50)
+@utils.no_4byte_params
+def _config_file_update(context, values, config_file_id):
+    """
+    Used internally by config_file_add and config_file_update
+
+    :param context: Request context
+    :param values: A dict of attributes to set
+    :param config_file_id: If None, create the config_file, otherwise, find and update it
+    """
+    # NOTE(jbresnah) values is altered in this so a copy is needed
+    values = values.copy()
+
+    session = get_session()
+    with session.begin():
+        if config_file_id:
+            config_file_ref = _config_file_get(context, config_file_id, session=session)
+        else:
+            config_file_ref = models.ConfigFile()
+            #if config_file_ref.id is None:
+            #    config_file_ref.id = str(uuid.uuid4())
+        if config_file_id:
+            # Don't drop created_at if we're passing it in...
+            _drop_protected_attrs(models.ConfigFile, values)
+            # NOTE(iccha-sethi): updated_at must be explicitly set in case
+            # only a related table was modified
+            values['updated_at'] = timeutils.utcnow()
+
+        if config_file_id:
+            query = session.query(models.ConfigFile).filter_by(id=config_file_id).filter_by(deleted=False)
+
+            # Validate fields for Config_files table. This is similar to what is done
+            # for the query result update except that we need to do it prior
+            # in this case.
+            # TODO(dosaboy): replace this with a dict comprehension once py26
+            # support is deprecated.
+            keys = values.keys()
+            for k in keys:
+                if k not in config_file_ref.to_dict():
+                    del values[k]
+            updated = query.update(values, synchronize_session='fetch')
+
+            if not updated:
+                msg = (_('cannot update config_file %(id)s: no undeleted '
+                         'row matched the update') %
+                       {'id': config_file_id})
+                raise exception.Conflict(msg)
+
+            config_file_ref = _config_file_get(context, config_file_id, session=session)
+        else:
+            config_file_ref.update(values)
+            _update_values(config_file_ref, values)
+            try:
+                config_file_ref.save(session=session)
+            except db_exception.DBDuplicateEntry:
+                raise exception.Duplicate("Config_file ID %s already exists!"
+ % values['id']) + + return config_file_get(context, config_file_ref.id) + +def _config_file_get(context, config_file_id, session=None, force_show_deleted=False): + """Get an config_file or raise if it does not exist.""" + _check_config_file_id(config_file_id) + session = session or get_session() + + try: + query = session.query(models.ConfigFile).filter_by(id=config_file_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + config_file = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No config_file found with ID %s" % config_file_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config_file + +def config_file_get(context, config_file_id, session=None, force_show_deleted=False): + config_file = _config_file_get(context, config_file_id, session=session, + force_show_deleted=force_show_deleted) + return config_file + +def config_file_add(context, values): + """Add an config_file from the values dictionary.""" + return _config_file_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def config_file_destroy(context, config_file_id): + """Destroy the config_file or raise if it does not exist.""" + session = get_session() + configs = _config_get_by_config_file_id(context, config_file_id, session=session) + for config in configs: + config_items = _config_item_set_get_by_config_id(context, config.id, session=session) + if config_items: + msg = "config file '%s' is using" % (config_file_id) + raise exception.DeleteConstrainted(msg) + for config in configs: + config_destroy(context, config.id) + + with session.begin(): + config_file_ref = _config_file_get(context, config_file_id, session=session) + config_file_ref.delete(session=session) + + return config_file_ref + +def config_file_update(context, config_file_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if config_file does not exist. + """ + return _config_file_update(context, values, config_file_id) + +def config_file_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all config_files that match zero or more filters. + + :param filters: dict of filter keys and values. 
If a 'properties' + key is present, it is treated as a dict of key/value + filters on the config_file properties attribute + :param marker: config_file id after which to start page + :param limit: maximum number of config_files to return + :param sort_key: list of config_file attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + marker_config_file = None + if marker is not None: + marker_config_file = _config_file_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.ConfigFile).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.ConfigFile, limit, + sort_key, + marker=marker_config_file, + sort_dir=None, + sort_dirs=sort_dir) + + config_files = [] + for config_file in query.all(): + config_file_dict = config_file.to_dict() + config_files.append(config_file_dict) + return config_files + +def _check_config_set_id(config_set_id): + """ + check if the given config_set id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the config_set id + length is longer than the defined length in database model. + :param image_id: The id of the config_set we want to check + :return: Raise NoFound exception if given config_set id is invalid + """ + if (config_set_id and + len(config_set_id) > models.ConfigSet.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _config_set_update(context, values, config_set_id): + """ + Used internally by config_set_add and config_set_update + + :param context: Request context + :param values: A dict of attributes to set + :param config_set_id: If None, create the config_set, otherwise, find and update it + """ + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + session = get_session() + with session.begin(): + if config_set_id: + config_set_ref = _config_set_get(context, config_set_id, session=session) + else: + config_set_ref = models.ConfigSet() + #if config_set_ref.id is None: + # config_set_ref.id = str(uuid.uuid4()) + if config_set_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.ConfigSet, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if config_set_id: + query = session.query(models.ConfigSet).filter_by(id=config_set_id).filter_by(deleted=False) + + # Validate fields for config_sets table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. 
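+            # Drop any keys in 'values' that are not columns on the ConfigSet model before issuing the bulk update.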
+ keys = values.keys() + for k in keys: + if k not in config_set_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + config_set_ref = _config_set_get(context, config_set_id, session=session) + else: + config_set_ref.update(values) + _update_values(config_set_ref, values) + try: + config_set_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Config_set ID %s already exists!" + % values['id']) + + return config_set_get(context, config_set_ref.id) + + +def _config_set_get(context, config_set_id, session=None, force_show_deleted=False): + """Get an config_set or raise if it does not exist.""" + _check_config_set_id(config_set_id) + session = session or get_session() + + try: + query = session.query(models.ConfigSet).filter_by(id=config_set_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + config_set = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No config_set found with ID %s" % config_set_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config_set + +def config_set_get(context, config_set_id, session=None, force_show_deleted=False): + config_set = _config_set_get(context, config_set_id, session=session, + force_show_deleted=force_show_deleted) + return config_set + +def config_set_add(context, values): + """Add an config_set from the values dictionary.""" + return _config_set_update(context, values, None) + +def _config_item_set_get_by_config_id(context, config_id, session=None, force_show_deleted=False): + """Get an config_set or raise if it does not exist.""" + _check_config_id(config_id) + session = session or get_session() + + try: + query = session.query(models.ConfigSetItem).filter_by(config_id=config_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + config_items = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No config_set found with ID %s" % config_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config_items + +def _config_item_get_by_config_set_id(context, config_set_id, session=None, force_show_deleted=False): + """Get an config or raise if it does not exist.""" + _check_config_set_id(config_set_id) + session = session or get_session() + + try: + query = session.query(models.ConfigSetItem).filter_by(config_set_id=config_set_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + config_items = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No config_item found with ID %s" % config_set_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config_items + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def config_set_destroy(context, config_set_id): + """Destroy the config_set or raise if it does not exist.""" + session = get_session() + with session.begin(): + config_set_ref = _config_set_get(context, config_set_id, session=session) + + 
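        # NOTE: delete the config_set record itself first, then remove its associated ConfigSetItem rows below.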
config_set_ref.delete(session=session) + + config_item_refs = _config_item_get_by_config_set_id(context, config_set_id, session=session) + + for config_item_ref in config_item_refs: + config_item_ref.delete(session=session) + + return config_set_ref + +def config_set_update(context, config_set_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if config_set does not exist. + """ + return _config_set_update(context, values, config_set_id) + +def config_set_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all config_sets that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the config_set properties attribute + :param marker: config_set id after which to start page + :param limit: maximum number of config_sets to return + :param sort_key: list of config_set attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + marker_config_set = None + if marker is not None: + marker_config_set = _config_set_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.ConfigSet).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.ConfigSet, limit, + sort_key, + marker=marker_config_set, + sort_dir=None, + sort_dirs=sort_dir) + + config_sets = [] + for config_set in query.all(): + config_set_dict = config_set.to_dict() + config_sets.append(config_set_dict) + return config_sets + +def _check_config_id(config_id): + """ + check if the given config id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the config id + length is longer than the defined length in database model. 
+ :param image_id: The id of the config we want to check + :return: Raise NoFound exception if given config id is invalid + """ + if (config_id and + len(config_id) > models.Config.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _config_update(context, values, config_id): + """ + Used internally by config_add and config_update + + :param context: Request context + :param values: A dict of attributes to set + :param config_id: If None, create the config, otherwise, find and update it + """ + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + + config_item_values=dict() + + session = get_session() + + with session.begin(): + if config_id: + config_ref = _config_get(context, config_id, session=session) + else: + config_ref = models.Config() + config_item_ref = models.ConfigSetItem() + #if config_ref.id is None: + # config_ref.id = str(uuid.uuid4()) + if config_id: + if values.has_key('config_set_id'): + config_item_values['config_set_id'] = str(values['config_set_id']) + + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Config, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if config_id: + query = session.query(models.Config).filter_by(id=config_id).filter_by(deleted=False) + + # Validate fields for configs table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + keys = values.keys() + for k in keys: + if k not in config_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + + + if config_item_values.has_key('config_set_id'): + session = get_session() + _drop_protected_attrs(models.ConfigSetItem, config_item_values) + query = session.query(models.ConfigSetItem).filter_by(config_id=config_id).filter_by(deleted=False) + query.update(config_item_values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + + config_ref = _config_get(context, config_id, session=session) + + else: + config_ref.update(values) + + _update_values(config_ref, values) + try: + + config_ref.save(session=session) + + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Config ID %s already exists!" 
+ % values['id']) + + if values.has_key('config_set_id'): + config_item_values['config_id'] = config_ref.id + config_item_values['config_set_id'] = str(values['config_set_id']) + _update_values(config_item_ref, config_item_values) + config_item_ref.save(session=session) + + return config_get(context, config_ref.id) + +def _config_get(context, config_id, session=None, force_show_deleted=False): + """Get an config or raise if it does not exist.""" + _check_config_id(config_id) + session = session or get_session() + + try: + query = session.query(models.Config).filter_by(id=config_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + config = query.one() + + except sa_orm.exc.NoResultFound: + msg = "No config found with ID %s" % config_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config + +def _config_get_by_config_file_id(context, config_file_id, session=None, force_show_deleted=False): + """Get an config or raise if it does not exist.""" + _check_config_file_id(config_file_id) + session = session or get_session() + + try: + query = session.query(models.Config).filter_by(config_file_id=config_file_id).filter_by(deleted=False) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + configs = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No config found with ID %s" % config_file_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return configs + +def _config_item_get_by_config_id(config_id, session=None): + """Get an config or raise if it does not exist.""" + _check_config_id(config_id) + try: + query = session.query(models.ConfigSetItem).filter_by(config_id=config_id).filter_by(deleted=False) + config_items = query.all() + + except sa_orm.exc.NoResultFound: + msg = "No config found with ID %s" % config_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return config_items + +def config_get(context, config_id, session=None, force_show_deleted=False): + config = _config_get(context, config_id, session=session, + force_show_deleted=force_show_deleted) + return config + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def update_config_by_role_hosts(context, values): + if not values: + LOG.error("<<>>") + + session = get_session() + with session.begin(): + for value in values: + if not value.get('config', None): + continue + configs = value['config'] + + for config in configs: + if not config.get('id', None): + continue + + id = config['id'] + config['updated_at'] = timeutils.utcnow() + config_ref =_config_get(context, id, session) + if not config_ref: + continue + + config_ref.update(config) + _update_values(config_ref, config) + + return {'configs':values} + +def config_add(context, values): + """Add an config from the values dictionary.""" + return _config_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def config_destroy(context, config_id): + """Destroy the config or raise if it does not exist.""" + session = get_session() + with session.begin(): + config_ref = _config_get(context, config_id, session=session) + config_file_id=config_ref.config_file_id + config_item_refs = _config_item_get_by_config_id(config_id, session=session) + for config_item_ref in 
config_item_refs: + config_item_ref.delete(session=session) + config_ref.delete(session=session) + + return config_ref + +def config_update(context, config_id, values): + """ + Set the given properties on an image and update it. + + :raises NotFound if config does not exist. + """ + return _config_update(context, values, config_id) + +def config_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all configs that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the config properties attribute + :param marker: config id after which to start page + :param limit: maximum number of configs to return + :param sort_key: list of config attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + marker_config = None + if marker is not None: + marker_config = _config_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.Config).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Config, limit, + sort_key, + marker=marker_config, + sort_dir=None, + sort_dirs=sort_dir) + + configs = [] + for config in query.all(): + config_dict = config.to_dict() + configs.append(config_dict) + return configs + +def network_get(context, network_id, session=None, force_show_deleted=False): + Network = _network_get(context, network_id, session=session, + force_show_deleted=force_show_deleted) + return Network + +def network_add(context, values): + """Add an cluster from the values dictionary.""" + return _network_update(context, values, None) + +def network_update(context, network_id, values): + """ + Set the given properties on an cluster and update it. + + :raises NotFound if cluster does not exist. 
+ """ + return _network_update(context, values, network_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def network_destroy(context, network_id): + """Destroy the project or raise if it does not exist.""" + session = get_session() + with session.begin(): + network_ref = _network_get(context, network_id, session=session) + assign_networks = _get_assigned_networks_by_network_id(context, network_id, session=session) + if assign_networks: + msg = "network %s is in used, it couldn't be deleted" % network_id + raise exception.DeleteConstrainted(msg) + else: + network_ref.delete(session=session) + + return network_ref + +def delete_network_ip_range(context, network_id): + session = get_session() + with session.begin(): + querry= session.query(models.IpRange).filter_by(network_id=network_id).filter_by(deleted=0) + ip_ranges=querry.all() + for ip_range in ip_ranges: + ip_range.delete(session=session) + +def get_network_ip_range(context, network_id): + session = get_session() + with session.begin(): + querry= session.query(models.IpRange).filter_by(network_id=network_id).filter_by(deleted=0) + ip_ranges=querry.all() + + return ip_ranges +def network_get_all(context, cluster_id=None, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + + marker_network = None + if marker is not None: + marker_network = _network_get(context, marker, cluster_id, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + if 0 == cmp(cluster_id, "template"): + query = session.query(models.Network).filter_by(type="template").filter_by(deleted=False).all() + return [phynet_name.name for phynet_name in query] + elif cluster_id is not None: + query = session.query(models.Network).\ + filter_by(cluster_id=cluster_id).\ + filter_by(deleted=showing_deleted) + else: + query = session.query(models.Network). 
\ + filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Network, limit, + sort_key, + marker=marker_network, + sort_dir=None, + sort_dirs=sort_dir) + query = query.all() + networks = [] + for network in query: + if network.type == 'template': + continue + ip_range_list=[] + ip_ranges=get_network_ip_range(context, network['id']) + if ip_ranges: + for ip_range in ip_ranges: + ip_range_dict={} + ip_range_dict['start']=str(ip_range['start']) + ip_range_dict['end']=str(ip_range['end']) + ip_range_list.append(ip_range_dict) + network['ip_ranges']=ip_range_list + network_dict = network.to_dict() + networks.append(network_dict) + return networks + +def _check_network_id(network_id): + """ + check if the given project id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the project id + length is longer than the defined length in database model. + :param image_id: The id of the project we want to check + :return: Raise NoFound exception if given project id is invalid + """ + if (network_id and + len(network_id) > models.Network.id.property.columns[0].type.length): + raise exception.NotFound() + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def update_phyname_of_network(context, network_phyname_set): + """ + Update phynet_name segment in network table. + :param context: data for context + :param network_phyname_set: Like {'network_id':pyhnet_name} + :return: + """ + if not context or not network_phyname_set: + raise exception.Invalid("Fun:update_phyname_of_network, input params invalid.") + + session = get_session() + with session.begin(): + for k,v in network_phyname_set.items(): + query = session.query(models.Network). \ + filter_by(id = k). \ + filter_by(name = v[0]).filter_by(deleted=False) + + if query and query.first(): + query.update( + {'updated_at' : timeutils.utcnow(), 'physnet_name' :"physnet_"+v[1]} + ) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _network_update(context, values, network_id): + """ + Used internally by network_add and project_update + + :param context: Request context + :param values: A dict of attributes to set + :param network_id: If None, create the network, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + ip_ranges_values = dict() + + session = get_session() + with session.begin(): + if network_id: + network_ref = _network_get(context, network_id, session=session) + else: + network_ref = models.Network() + + if network_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.Network, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if network_id: + query = session.query(models.Network).filter_by(id=network_id).filter_by(deleted=False) + + # Validate fields for projects table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. 
+ if values.has_key("cidr") and values['cidr'] != [u''] and values['cidr'] !='': + sql_cidr="select networks.cidr from networks where networks.id='"+network_id +"'" + query_cidr = session.execute(sql_cidr).fetchone() + network_tmp=query_cidr.values() + network_cidr=network_tmp.pop() + if network_cidr and network_cidr != values['cidr']: + if values['cidr'] != network_cidr: + #sql_ip="select host_interfaces.ip from host_interfaces, assigned_networks where host_interfaces.deleted=0 and host_interfaces.id=assigned_networks.interface_id and assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"'" + sql_ip="select assigned_networks.ip from assigned_networks where assigned_networks.deleted=0 and assigned_networks.network_id='"+network_id+"' order by assigned_networks.ip" + query_ip_list = session.execute(sql_ip).fetchall() + for tmp_ip in query_ip_list: + ip_pop=tmp_ip.values().pop() + if ip_pop: + if is_in_cidr_range(ip_pop, network_cidr): + msg = "Error:Distribution ip by CIDR is being used, and the CIDR is not allowed to change." + LOG.error(msg) + raise exception.Forbidden(msg) + + network_ref = _network_get(context, network_id, session=session) + if values.has_key("ip_ranges"): + delete_network_ip_range(context, network_id) + for ip_range in list(eval(values['ip_ranges'])): + ip_range_ref = models.IpRange() + ip_range_ref['start'] = ip_range["start"] + ip_range_ref['end'] = ip_range["end"] + ip_range_ref.network_id = network_ref.id + ip_range_ref.save(session=session) + del values['ip_ranges'] + keys = values.keys() + for k in keys: + if k not in network_ref.to_dict(): + del values[k] + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + else: + + network_ref.update(values) + _update_values(network_ref, values) + try: + network_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("network ID %s already exists!" + % values['id']) + if values.has_key("ip_ranges"): + for ip_range in list(eval(values['ip_ranges'])): + try: + ip_ranges_values['start'] = ip_range["start"] + ip_ranges_values['end'] = ip_range["end"] + ip_ranges_values['network_id'] = network_ref.id + ip_range_ref = models.IpRange() + ip_range_ref.update(ip_ranges_values) + _update_values(ip_range_ref, ip_ranges_values) + ip_range_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("ip rangge %s already exists!" 
+ % value['ip_ranges']) + + return _network_get(context, network_ref.id) + +def _network_get(context, network_id=None, cluster_id=None, session=None, force_show_deleted=False): + """Get an network or raise if it does not exist.""" + if network_id is not None: + _check_network_id(network_id) + session = session or get_session() + + try: + if network_id is not None: + query = session.query(models.Network).filter_by(id=network_id).filter_by(deleted=False) + #if cluster_id is not None: + #query = session.query(models.Network).filter_by(cluster_id=cluster_id).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + networks = query.one() + + ip_range_list=[] + ip_ranges=get_network_ip_range(context, networks['id']) + if ip_ranges: + for ip_range in ip_ranges: + ip_range_dict={} + ip_range_dict['start']=str(ip_range['start']) + ip_range_dict['end']=str(ip_range['end']) + ip_range_list.append(ip_range_dict) + networks['ip_ranges']=ip_range_list + + + except sa_orm.exc.NoResultFound: + msg = "No network found with ID %s" % network_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return networks + +def update_config(session,config_flag,config_set_id,query_set_item_list,config_interface_info): + for config_set_item in query_set_item_list.all(): + query_config_info= session.query(models.Config).filter_by(id=config_set_item.config_id).filter_by(deleted=False) + query_config_file_info= session.query(models.ConfigFile).filter_by(id=query_config_info.one().config_file_id).filter_by(deleted=False) + if query_config_file_info.one().name == config_interface_info['file-name']\ + and config_interface_info['section'] == query_config_info.one().section and config_interface_info['key'] == query_config_info.one().key: + del config_interface_info['file-name'] + config_interface_info['config_version']=query_config_info.one().config_version+1 + config_updated = query_config_info.one().update(config_interface_info) + config_flag=1 + return config_flag + else: + continue + return config_flag + +def add_config(session,config_interface_info,config_set_id,config_file_id): + config_set_value=dict() + add_config = models.Config() + del config_interface_info['file-name'] + config_interface_info['config_file_id']=config_file_id + config_interface_info['config_version']=1 + config_interface_info['running_version']=0 + add_config.update(config_interface_info) + _update_values(add_config,config_interface_info) + add_config.save(session=session) + + add_config_setitem=models.ConfigSetItem() + config_set_value['config_set_id']=config_set_id + config_set_value['config_id']=add_config.id + config_set_value.update(config_set_value) + _update_values(add_config_setitem,config_set_value) + add_config_setitem.save(session=session) + +def add_config_and_file(session,config_interface_info,config_set_id): + query_config_file=session.query(models.ConfigFile).filter_by(name=config_interface_info['file-name']).filter_by(deleted=False) + if query_config_file.all(): + config_file_id=query_config_file.one().id + else: + config_file_value = dict() + add_config_file = models.ConfigFile() + config_file_value['name']=config_interface_info['file-name'] + config_file_value.update(config_file_value) + _update_values(add_config_file,config_file_value) + add_config_file.save(session=session) + config_file_id=add_config_file.id + + add_config(session,config_interface_info,config_set_id,config_file_id) + +def 
config_interface(context, config_interface):
+    config_flag = 0
+    config_info_list = []
+    config_interface = config_interface.copy()
+    session = get_session()
+    with session.begin():
+        if config_interface.get('role', None) and config_interface.get('cluster', None):
+            query_role_info = session.query(models.Role).filter_by(name=config_interface['role']).filter_by(cluster_id=config_interface['cluster']).filter_by(deleted=False)
+            if query_role_info.one().config_set_id:
+                config_set_id = query_role_info.one().config_set_id
+            else:
+                msg = "No config_set_id found with role name %s" % config_interface['role']
+                LOG.error(msg)
+                raise exception.NotFound(msg)
+        else:
+            if config_interface.get('config_set', None):
+                config_set_id = config_interface.get('config_set', None)
+            else:
+                msg = "neither role name with cluster id nor config_set id was given"
+                LOG.error(msg)
+                raise exception.NotFound(msg)
+
+        try:
+            for config_interface_info in eval(config_interface['config']):
+                query_set_item_list = session.query(models.ConfigSetItem).filter_by(config_set_id=config_set_id).filter_by(deleted=False)
+                if query_set_item_list.all():
+                    config_exist = update_config(session, config_flag, config_set_id, query_set_item_list, config_interface_info)
+                    if not config_exist:
+                        query_config_file = session.query(models.ConfigFile).filter_by(name=config_interface_info['file-name']).filter_by(deleted=False)
+                        if query_config_file.all():
+                            add_config(session, config_interface_info, config_set_id, query_config_file.one().id)
+                        else:
+                            add_config_and_file(session, config_interface_info, config_set_id)
+                else:
+                    add_config_and_file(session, config_interface_info, config_set_id)
+
+        except sa_orm.exc.NoResultFound:
+            msg = "No config_set found with ID %s" % config_set_id
+            LOG.error(msg)
+            raise exception.NotFound(msg)
+
+        for config_interface_info in eval(config_interface['config']):
+            query_config_set_item_list = session.query(models.ConfigSetItem).filter_by(config_set_id=config_set_id).filter_by(deleted=False)
+            if query_config_set_item_list.all():
+                for config_set_item in query_config_set_item_list.all():
+                    query_config_info = session.query(models.Config).filter_by(id=config_set_item.config_id).filter_by(deleted=False)
+                    query_config_file = session.query(models.ConfigFile).filter_by(id=query_config_info.one().config_file_id).filter_by(deleted=False)
+                    if query_config_file.one().name == config_interface_info['file-name'] and config_interface_info['section'] == query_config_info.one().section \
+                            and config_interface_info['key'] == query_config_info.one().key:
+                        config_info = {}
+                        config_info['id'] = query_config_info.one().id
+                        config_info['file-name'] = config_interface_info['file-name']
+                        config_info['section'] = query_config_info.one().section
+                        config_info['key'] = query_config_info.one().key
+                        config_info['value'] = query_config_info.one().value
+                        config_info['description'] = query_config_info.one().description
+                        config_info['config_version'] = query_config_info.one().config_version
+                        config_info['running_version'] = query_config_info.one().running_version
+                        config_info_list.append(config_info)
+
+    return_config_info = {'cluster': config_interface.get('cluster', None),
+                          'role': config_interface.get('role', None),
+                          'config': config_info_list}
+    return return_config_info
+
+def _check_service_disk_id(service_disk_id):
+    """
+    check if the given project id is valid before executing operations. For
+    now, we only check its length.
The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the project id + length is longer than the defined length in database model. + :param image_id: The id of the project we want to check + :return: Raise NoFound exception if given project id is invalid + """ + if (service_disk_id and + len(service_disk_id) > models.ServiceDisk.id.property.columns[0].type.length): + raise exception.NotFound() + +def _service_disk_get(context, service_disk_id=None, role_id=None, marker=None, session=None, force_show_deleted=False): + """Get an service_disk or raise if it does not exist.""" + if service_disk_id is not None: + _check_service_disk_id(service_disk_id) + session = session or get_session() + + try: + if service_disk_id is not None: + query = session.query(models.ServiceDisk).filter_by(id=service_disk_id).filter_by(deleted=False) + elif role_id is not None: + query = session.query(models.ServiceDisk).filter_by(role_id=role_id).filter_by(deleted=False) + else: + query = session.query(models.ServiceDisk).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + if service_disk_id is not None: + service_disk = query.one() + else: + service_disk = query.all() + except sa_orm.exc.NoResultFound: + msg = "No service_disk found with ID %s" % service_disk_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return service_disk + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +@utils.no_4byte_params +def _service_disk_update(context, values, service_disk_id): + """ + Used internally by service_disk_add and project_update + + :param context: Request context + :param values: A dict of attributes to set + :param service_disk_id: If None, create the service_disk, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + session = get_session() + with session.begin(): + if service_disk_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.ServiceDisk, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + # Validate fields for projects table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + query = session.query(models.ServiceDisk).filter_by(id=service_disk_id).filter_by(deleted=False) + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + else: + service_disk_ref = models.ServiceDisk() + service_disk_ref.update(values) + _update_values(service_disk_ref, values) + try: + service_disk_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("service_disk ID %s already exists!" 
+ % values['id']) + + service_disk_id = service_disk_ref.id + return _service_disk_get(context, service_disk_id) + +def service_disk_add(context, values): + """Add an cluster from the values dictionary.""" + return _service_disk_update(context, values, None) + + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def service_disk_destroy(context, service_disk_id): + """Destroy the service_disk or raise if it does not exist.""" + session = get_session() + with session.begin(): + service_disk_ref = _service_disk_get(context, service_disk_id, session=session) + service_disk_ref.delete(session=session) + return service_disk_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def service_disk_update(context, service_disk_id, values): + """ + Set the given properties on an cluster and update it. + + :raises NotFound if cluster does not exist. + """ + return _service_disk_update(context, values, service_disk_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) + +def service_disk_detail (context, service_disk_id): + service_disk_ref = _service_disk_get(context, service_disk_id) + return service_disk_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def service_disk_list(context, filters=None, **param): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + # sort_key = ['created_at'] if not sort_key else sort_key + + # default_sort_dir = 'desc' + + # if not sort_dir: + # sort_dir = [default_sort_dir] * len(sort_key) + # elif len(sort_dir) == 1: + # default_sort_dir = sort_dir[0] + # sort_dir *= len(sort_key) + + filters = filters or {} + # showing_deleted = 'changes-since' in filters or filters.get('deleted', + # False) + + role_id = None + if 'role_id' in filters: + role_id=filters.pop('role_id') + + service_disk_ref = _service_disk_get(context, role_id=role_id) + return service_disk_ref + +def _check_cinder_volume_id(cinder_volume_id): + """ + check if the given project id is valid before executing operations. For + now, we only check its length. The original purpose of this method is + wrapping the different behaviors between MySql and DB2 when the project id + length is longer than the defined length in database model. 
+ :param image_id: The id of the project we want to check + :return: Raise NoFound exception if given project id is invalid + """ + if (cinder_volume_id and + len(cinder_volume_id) > models.CinderVolume.id.property.columns[0].type.length): + raise exception.NotFound() + +def _cinder_volume_get(context, cinder_volume_id=None, role_id=None, marker=None, session=None, force_show_deleted=False): + """Get an cinder_volume or raise if it does not exist.""" + if cinder_volume_id is not None: + _check_cinder_volume_id(cinder_volume_id) + session = session or get_session() + + try: + if cinder_volume_id is not None: + query = session.query(models.CinderVolume).filter_by(id=cinder_volume_id).filter_by(deleted=False) + elif role_id is not None: + query = session.query(models.CinderVolume).filter_by(role_id=role_id).filter_by(deleted=False) + else: + query = session.query(models.CinderVolume).filter_by(deleted=False) + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + + if cinder_volume_id is not None: + cinder_volume = query.one() + else: + cinder_volume = query.all() + except sa_orm.exc.NoResultFound: + msg = "No cinder_volume found with ID %s" % cinder_volume_id + LOG.debug(msg) + raise exception.NotFound(msg) + + return cinder_volume + + +def _cinder_volume_update(context, values, cinder_volume_id): + """ + Used internally by cinder_volume_add and project_update + + :param context: Request context + :param values: A dict of attributes to set + :param cinder_volume_id: If None, create the cinder_volume, otherwise, find and update it + """ + + # NOTE(jbresnah) values is altered in this so a copy is needed + values = values.copy() + session = get_session() + with session.begin(): + if cinder_volume_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.CinderVolume, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + # Validate fields for projects table. This is similar to what is done + # for the query result update except that we need to do it prior + # in this case. + # TODO(dosaboy): replace this with a dict comprehension once py26 + # support is deprecated. + query = session.query(models.CinderVolume).filter_by(id=cinder_volume_id).filter_by(deleted=False) + updated = query.update(values, synchronize_session='fetch') + + if not updated: + msg = (_('cannot transition from %(current)s to ' + '%(next)s in update (wanted ' + 'from_state=%(from)s)') % + {'current': current, 'next': new_status, + 'from': from_state}) + raise exception.Conflict(msg) + else: + cinder_volume_ref = models.CinderVolume() + cinder_volume_ref.update(values) + _update_values(cinder_volume_ref, values) + try: + cinder_volume_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("cinder_volume ID %s already exists!" 
+ % values['id']) + + cinder_volume_id = cinder_volume_ref.id + return _cinder_volume_get(context, cinder_volume_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cinder_volume_add(context, values): + """Add an cluster from the values dictionary.""" + return _cinder_volume_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cinder_volume_destroy(context, cinder_volume_id): + """Destroy the service_disk or raise if it does not exist.""" + session = get_session() + with session.begin(): + cinder_volume_ref = _cinder_volume_get(context, cinder_volume_id, session=session) + cinder_volume_ref.delete(session=session) + return cinder_volume_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cinder_volume_update(context, cinder_volume_id, values): + """ + Set the given properties on an cluster and update it. + + :raises NotFound if cluster does not exist. + """ + return _cinder_volume_update(context, values, cinder_volume_id) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cinder_volume_detail(context, cinder_volume_id): + cinder_volume_ref = _cinder_volume_get(context, cinder_volume_id) + return cinder_volume_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def cinder_volume_list(context, filters=None, **param): + """ + Get all hosts that match zero or more filters. + + :param filters: dict of filter keys and values. If a 'properties' + key is present, it is treated as a dict of key/value + filters on the host properties attribute + :param marker: host id after which to start page + :param limit: maximum number of hosts to return + :param sort_key: list of host attributes by which results should be sorted + :param sort_dir: directions in which results should be sorted (asc, desc) + """ + filters = filters or {} + # showing_deleted = 'changes-since' in filters or filters.get('deleted', + # False) + role_id = None + if 'role_id' in filters: + role_id=filters.pop('role_id') + + cinder_volume_ref = _cinder_volume_get(context, role_id=role_id) + return cinder_volume_ref + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def template_add(context, values): + """add cluster template to daisy.""" + return _template_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def template_update(context, template_id, values): + """update cluster template to daisy.""" + return _template_update(context, values, template_id) + +def _template_update(context, values, template_id): + """update or add cluster template to daisy.""" + values = values.copy() + session = get_session() + with session.begin(): + if template_id: + template_ref = _template_get(context, template_id, session=session) + else: + template_ref = models.Template() + + if template_id: + # Don't drop created_at if we're passing it in... 
+ _drop_protected_attrs(models.Template, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if template_id: + if values.get('id', None): del values['id'] + template_ref.update(values) + _update_values(template_ref, values) + try: + template_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + else: + template_ref.update(values) + _update_values(template_ref, values) + try: + template_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + + return template_get(context, template_ref.id) + +def _template_get(context, template_id, session=None, force_show_deleted=False): + """Get an host or raise if it does not exist.""" + + session = session or get_session() + try: + query = session.query(models.Template).filter_by(id=template_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + template = query.one() + return template + except sa_orm.exc.NoResultFound: + msg = "No template found with ID %s" % template_id + LOG.debug(msg) + raise exception.NotFound(msg) + + +def template_get(context, template_id, session=None, force_show_deleted=False): + template = _template_get(context, template_id, session=session, + force_show_deleted=force_show_deleted) + return template + +def template_destroy(context, template_id, session=None, force_show_deleted=False): + session = session or get_session() + with session.begin(): + template_ref = _template_get(context, template_id, session=session) + template_ref.delete(session=session) + return template_ref + +def template_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_template = None + if marker is not None: + marker_template = _template_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.Template).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.Template, limit, + sort_key, + marker=marker_template, + sort_dir=None, + sort_dirs=sort_dir) + if 'name' in filters: + name = filters.pop('name') + query = session.query(models.Template).filter_by(deleted=False).filter_by(name=name) + if 'type' in filters: + type = filters.pop('type') + query = session.query(models.Template).filter_by(deleted=False).filter_by(type=type) + templates = [] + for template in query.all(): + template = template.to_dict() + templates.append(template) + return templates + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) +def host_template_add(context, values): + """add host template to daisy.""" + return _host_template_update(context, values, None) + +@retry(retry_on_exception=_retry_on_deadlock, wait_fixed=500, + stop_max_attempt_number=50) 
+def host_template_update(context, template_id, values): + """update host template to daisy.""" + return _host_template_update(context, values, template_id) + +def _host_template_update(context, values, template_id): + """update or add cluster template to daisy.""" + values = values.copy() + session = get_session() + with session.begin(): + if template_id: + template_ref = _host_template_get(context, template_id, session=session) + else: + template_ref = models.HostTemplate() + + if template_id: + # Don't drop created_at if we're passing it in... + _drop_protected_attrs(models.HostTemplate, values) + # NOTE(iccha-sethi): updated_at must be explicitly set in case + # only ImageProperty table was modifited + values['updated_at'] = timeutils.utcnow() + + if template_id: + if values.get('id', None): del values['id'] + template_ref.update(values) + _update_values(template_ref, values) + try: + template_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + else: + template_ref.update(values) + _update_values(template_ref, values) + try: + template_ref.save(session=session) + except db_exception.DBDuplicateEntry: + raise exception.Duplicate("Node ID %s already exists!" + % values['id']) + + return host_template_get(context, template_ref.id) + +def _host_template_get(context, template_id, session=None, force_show_deleted=False): + """Get an host or raise if it does not exist.""" + + session = session or get_session() + try: + query = session.query(models.HostTemplate).filter_by(id=template_id) + + # filter out deleted images if context disallows it + if not force_show_deleted and not context.can_see_deleted: + query = query.filter_by(deleted=False) + template = query.one() + return template + except sa_orm.exc.NoResultFound: + msg = "No host_template found with ID %s" % template_id + LOG.debug(msg) + raise exception.NotFound(msg) + + +def host_template_get(context, template_id, session=None, force_show_deleted=False): + template = _host_template_get(context, template_id, session=session, + force_show_deleted=force_show_deleted) + return template + +def host_template_destroy(context, template_id, session=None, force_show_deleted=False): + session = session or get_session() + with session.begin(): + template_ref = _host_template_get(context, template_id, session=session) + template_ref.delete(session=session) + return template_ref + +def host_template_get_all(context, filters=None, marker=None, limit=None, + sort_key=None, sort_dir=None): + sort_key = ['created_at'] if not sort_key else sort_key + + default_sort_dir = 'desc' + + if not sort_dir: + sort_dir = [default_sort_dir] * len(sort_key) + elif len(sort_dir) == 1: + default_sort_dir = sort_dir[0] + sort_dir *= len(sort_key) + + filters = filters or {} + + showing_deleted = 'changes-since' in filters or filters.get('deleted', + False) + marker_template = None + if marker is not None: + marker_template = _host_template_get(context, + marker, + force_show_deleted=showing_deleted) + + for key in ['created_at', 'id']: + if key not in sort_key: + sort_key.append(key) + sort_dir.append(default_sort_dir) + + session = get_session() + + query = session.query(models.HostTemplate).filter_by(deleted=showing_deleted) + + query = _paginate_query(query, models.HostTemplate, limit, + sort_key, + marker=marker_template, + sort_dir=None, + sort_dirs=sort_dir) + if 'cluster_name' in filters: + cluster_name = filters.pop('cluster_name') + query = 
session.query(models.HostTemplate).filter_by(deleted=False).filter_by(cluster_name=cluster_name) + if 'name' in filters: + name = filters.pop('name') + query = session.query(models.HostTemplate).filter_by(deleted=False).filter_by(name=name) + templates = [] + for template in query.all(): + template = template.to_dict() + templates.append(template) + return templates + +def host_interfaces_get_all(context, filters=None): + filters = filters or {} + session = get_session() + query = session.query(models.HostInterface).filter_by(deleted=0) + + if 'host_id' in filters: + host_id = filters.pop('host_id') + query = query.filter_by(id=host_id) + if 'ip' in filters: + ip = filters.pop('ip') + query = query.filter_by(ip=ip) + if 'mac' in filters: + mac = filters.pop('mac') + query = query.filter_by(mac=mac) + if 'pci' in filters: + pci = filters.pop('pci') + query = query.filter_by(pci=pci) + host_interfaces = [] + for host_interface in query.all(): + host_interface = host_interface.to_dict() + host_interfaces.append(host_interface) + return host_interfaces diff --git a/code/daisy/daisy/db/sqlalchemy/artifacts.py b/code/daisy/daisy/db/sqlalchemy/artifacts.py new file mode 100755 index 00000000..d8fef0e0 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/artifacts.py @@ -0,0 +1,756 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
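+
+# A rough usage sketch (illustrative only) of the state machine that the
+# helpers below enforce through the State enum and the TRANSITIONS table:
+#
+#     creating    -> active | deleted
+#     active      -> deactivated | deleted
+#     deactivated -> active | deleted
+#     deleted     -> (terminal)
+#
+# For example, _validate_transition('active', 'deleted') passes, while
+# _validate_transition('deleted', 'active') raises
+# exception.InvalidArtifactStateTransition.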
+ +import copy +import operator +import uuid + +from enum import Enum +from oslo.config import cfg +from oslo.db import exception as db_exc +from oslo_utils import timeutils +import sqlalchemy +from sqlalchemy import and_ +from sqlalchemy import or_ +import sqlalchemy.orm as orm +from sqlalchemy.orm import joinedload + +import daisy.artifacts as ga +from daisy.common import exception +from daisy.common import semver_db +from daisy.db.sqlalchemy import models_artifacts as models +from daisy import i18n +from oslo_log import log as os_logging + +LOG = os_logging.getLogger(__name__) +_LW = i18n._LW +_LE = i18n._LE + +CONF = cfg.CONF + + +class Visibility(Enum): + PRIVATE = 'private' + PUBLIC = 'public' + SHARED = 'shared' + + +class State(Enum): + CREATING = 'creating' + ACTIVE = 'active' + DEACTIVATED = 'deactivated' + DELETED = 'deleted' + + +TRANSITIONS = { + State.CREATING: [State.ACTIVE, State.DELETED], + State.ACTIVE: [State.DEACTIVATED, State.DELETED], + State.DEACTIVATED: [State.ACTIVE, State.DELETED], + State.DELETED: [] +} + + +def create(context, values, session, type_name, type_version=None): + return _out(_create_or_update(context, values, None, session, + type_name, type_version)) + + +def update(context, values, artifact_id, session, + type_name, type_version=None): + return _out(_create_or_update(context, values, artifact_id, session, + type_name, type_version)) + + +def delete(context, artifact_id, session, type_name, type_version=None): + values = {'state': 'deleted'} + return _out(_create_or_update(context, values, artifact_id, session, + type_name, type_version)) + + +def _create_or_update(context, values, artifact_id, session, type_name, + type_version=None): + values = copy.deepcopy(values) + with session.begin(): + _set_version_fields(values) + _validate_values(values) + _drop_protected_attrs(models.Artifact, values) + if artifact_id: + # update existing artifact + state = values.get('state') + show_level = ga.Showlevel.BASIC + if state is not None: + if state == 'active': + show_level = ga.Showlevel.DIRECT + values['published_at'] = timeutils.utcnow() + if state == 'deleted': + values['deleted_at'] = timeutils.utcnow() + + artifact = _get(context, artifact_id, session, type_name, + type_version, show_level=show_level) + _validate_transition(artifact.state, + values.get('state') or artifact.state) + else: + # create new artifact + artifact = models.Artifact() + if 'id' not in values: + artifact.id = str(uuid.uuid4()) + else: + artifact.id = values['id'] + + if 'tags' in values: + tags = values.pop('tags') + artifact.tags = _do_tags(artifact, tags) + + if 'properties' in values: + properties = values.pop('properties', {}) + artifact.properties = _do_properties(artifact, properties) + + if 'blobs' in values: + blobs = values.pop('blobs') + artifact.blobs = _do_blobs(artifact, blobs) + + if 'dependencies' in values: + dependencies = values.pop('dependencies') + _do_dependencies(artifact, dependencies, session) + + if values.get('state', None) == 'publish': + artifact.dependencies.extend( + _do_transitive_dependencies(artifact, session)) + + artifact.update(values) + try: + artifact.save(session=session) + except db_exc.DBDuplicateEntry: + LOG.warn(_LW("Artifact with the specified type, name and version " + "already exists")) + raise exception.ArtifactDuplicateNameTypeVersion() + + return artifact + + +def get(context, artifact_id, session, type_name=None, type_version=None, + show_level=ga.Showlevel.BASIC): + artifact = _get(context, artifact_id, session, type_name, 
type_version, + show_level) + return _out(artifact, show_level) + + +def publish(context, artifact_id, session, type_name, + type_version=None): + """ + Because transitive dependencies are not initially created it has to be done + manually by calling this function. + It creates transitive dependencies for the given artifact_id and saves + them in DB. + :returns artifact dict with Transitive show level + """ + values = {'state': 'active'} + return _out(_create_or_update(context, values, artifact_id, session, + type_name, type_version)) + + +def _validate_transition(source_state, target_state): + if target_state == source_state: + return + try: + source_state = State(source_state) + target_state = State(target_state) + except ValueError: + raise exception.InvalidArtifactStateTransition(source=source_state, + target=target_state) + if (source_state not in TRANSITIONS or + target_state not in TRANSITIONS[source_state]): + raise exception.InvalidArtifactStateTransition(source=source_state, + target=target_state) + + +def _out(artifact, show_level=ga.Showlevel.BASIC, show_text_properties=True): + """ + Transforms sqlalchemy object into dict depending on the show level. + + :param artifact: sql + :param show_level: constant from Showlevel class + :param show_text_properties: for performance optimization it's possible + to disable loading of massive text properties + :return: generated dict + """ + res = artifact.to_dict(show_level=show_level, + show_text_properties=show_text_properties) + + if show_level >= ga.Showlevel.DIRECT: + dependencies = artifact.dependencies + dependencies.sort(key=lambda elem: (elem.artifact_origin, + elem.name, elem.position)) + res['dependencies'] = {} + if show_level == ga.Showlevel.DIRECT: + new_show_level = ga.Showlevel.BASIC + else: + new_show_level = ga.Showlevel.TRANSITIVE + for dep in dependencies: + if dep.artifact_origin == artifact.id: + # make array + for p in res['dependencies'].keys(): + if p == dep.name: + # add value to array + res['dependencies'][p].append( + _out(dep.dest, new_show_level)) + break + else: + # create new array + deparr = [] + deparr.append(_out(dep.dest, new_show_level)) + res['dependencies'][dep.name] = deparr + return res + + +def _get(context, artifact_id, session, type_name=None, type_version=None, + show_level=ga.Showlevel.BASIC): + values = dict(id=artifact_id) + if type_name is not None: + values['type_name'] = type_name + if type_version is not None: + values['type_version'] = type_version + _set_version_fields(values) + try: + if show_level == ga.Showlevel.NONE: + query = session.query(models.Artifact) \ + .options(joinedload(models.Artifact.tags)) \ + .filter_by(**values) + else: + query = session.query(models.Artifact) \ + .options(joinedload(models.Artifact.properties)) \ + .options(joinedload(models.Artifact.tags)) \ + .options(joinedload(models.Artifact.blobs). 
+ joinedload(models.ArtifactBlob.locations)) \ + .filter_by(**values) + + artifact = query.one() + except orm.exc.NoResultFound: + LOG.warn(_LW("Artifact with id=%s not found") % artifact_id) + raise exception.ArtifactNotFound(id=artifact_id) + if not _check_visibility(context, artifact): + LOG.warn(_LW("Artifact with id=%s is not accessible") % artifact_id) + raise exception.ArtifactForbidden(id=artifact_id) + return artifact + + +def get_all(context, session, marker=None, limit=None, + sort_keys=None, sort_dirs=None, filters=None, + show_level=ga.Showlevel.NONE): + """List all visible artifacts""" + + filters = filters or {} + + artifacts = _get_all( + context, session, filters, marker, + limit, sort_keys, sort_dirs, show_level) + + return map(lambda ns: _out(ns, show_level, show_text_properties=False), + artifacts) + + +def _get_all(context, session, filters=None, marker=None, + limit=None, sort_keys=None, sort_dirs=None, + show_level=ga.Showlevel.NONE): + """Get all namespaces that match zero or more filters. + + :param filters: dict of filter keys and values. + :param marker: namespace id after which to start page + :param limit: maximum number of namespaces to return + :param sort_keys: namespace attributes by which results should be sorted + :param sort_dirs: directions in which results should be sorted (asc, desc) + """ + + filters = filters or {} + + query = _do_artifacts_query(context, session, show_level) + basic_conds, tag_conds, prop_conds = _do_query_filters(filters) + + if basic_conds: + for basic_condition in basic_conds: + query = query.filter(and_(*basic_condition)) + + if tag_conds: + for tag_condition in tag_conds: + query = query.join(models.ArtifactTag, aliased=True).filter( + and_(*tag_condition)) + + if prop_conds: + for prop_condition in prop_conds: + query = query.join(models.ArtifactProperty, aliased=True).filter( + and_(*prop_condition)) + + marker_artifact = None + if marker is not None: + marker_artifact = _get(context, marker, session, None, None) + + if sort_keys is None: + sort_keys = [('created_at', None), ('id', None)] + sort_dirs = ['desc', 'desc'] + else: + for key in [('created_at', None), ('id', None)]: + if key not in sort_keys: + sort_keys.append(key) + sort_dirs.append('desc') + + # Note(mfedosin): Kostyl to deal with situation that sqlalchemy cannot + # work with composite keys correctly + if ('version', None) in sort_keys: + i = sort_keys.index(('version', None)) + version_sort_dir = sort_dirs[i] + sort_keys[i:i + 1] = [('version_prefix', None), + ('version_suffix', None), + ('version_meta', None)] + sort_dirs[i:i + 1] = [version_sort_dir] * 3 + + query = _do_paginate_query(query=query, + limit=limit, + sort_keys=sort_keys, + marker=marker_artifact, + sort_dirs=sort_dirs) + + return query.all() + + +def _do_paginate_query(query, sort_keys=None, sort_dirs=None, + marker=None, limit=None): + # Default the sort direction to ascending + if sort_dirs is None: + sort_dir = 'asc' + + # Ensure a per-column sort direction + if sort_dirs is None: + sort_dirs = [sort_dir for _sort_key in sort_keys] + + assert(len(sort_dirs) == len(sort_keys)) + + # Add sorting + for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs): + try: + sort_dir_func = { + 'asc': sqlalchemy.asc, + 'desc': sqlalchemy.desc, + }[current_sort_dir] + except KeyError: + raise ValueError(_LE("Unknown sort direction, " + "must be 'desc' or 'asc'")) + + if current_sort_key[1] is None: + # sort by generic property + query = query.order_by(sort_dir_func(getattr( + models.Artifact, + 
current_sort_key[0]))) + else: + # sort by custom property + prop_type = current_sort_key[1] + "_value" + query = query.join(models.ArtifactProperty).\ + filter( + models.ArtifactProperty.name == current_sort_key[0]).\ + order_by( + sort_dir_func(getattr(models.ArtifactProperty, + prop_type))) + + # Add pagination + if marker is not None: + marker_values = [] + for sort_key in sort_keys: + v = getattr(marker, sort_key[0]) + marker_values.append(v) + + # Build up an array of sort criteria as in the docstring + criteria_list = [] + for i in range(len(sort_keys)): + crit_attrs = [] + for j in range(i): + if sort_keys[j][1] is None: + model_attr = getattr(models.Artifact, sort_keys[j][0]) + else: + model_attr = getattr(models.ArtifactProperty, + sort_keys[j][1] + "_value") + crit_attrs.append((model_attr == marker_values[j])) + + if sort_keys[i][1] is None: + model_attr = getattr(models.Artifact, sort_keys[j][0]) + else: + model_attr = getattr(models.ArtifactProperty, + sort_keys[j][1] + "_value") + if sort_dirs[i] == 'desc': + crit_attrs.append((model_attr < marker_values[i])) + else: + crit_attrs.append((model_attr > marker_values[i])) + + criteria = and_(*crit_attrs) + criteria_list.append(criteria) + + f = or_(*criteria_list) + query = query.filter(f) + + if limit is not None: + query = query.limit(limit) + + return query + + +def _do_artifacts_query(context, session, show_level=ga.Showlevel.NONE): + """Build the query to get all artifacts based on the context""" + + LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s" % + {'is_admin': context.is_admin, 'owner': context.owner}) + + if show_level == ga.Showlevel.NONE: + query = session.query(models.Artifact) \ + .options(joinedload(models.Artifact.tags)) + elif show_level == ga.Showlevel.BASIC: + query = session.query(models.Artifact) \ + .options(joinedload(models.Artifact.properties) + .defer(models.ArtifactProperty.text_value)) \ + .options(joinedload(models.Artifact.tags)) \ + .options(joinedload(models.Artifact.blobs). + joinedload(models.ArtifactBlob.locations)) + else: + # other show_levels aren't supported + msg = _LW("Show level %s is not supported in this " + "operation") % ga.Showlevel.to_str(show_level) + LOG.warn(msg) + raise exception.ArtifactUnsupportedShowLevel(shl=show_level) + + # If admin, return everything. + if context.is_admin: + return query + else: + # If regular user, return only public artifacts. + # However, if context.owner has a value, return both + # public and private artifacts of the context.owner. + if context.owner is not None: + query = query.filter( + or_(models.Artifact.owner == context.owner, + models.Artifact.visibility == 'public')) + else: + query = query.filter( + models.Artifact.visibility == 'public') + return query + +op_mappings = { + 'EQ': operator.eq, + 'GT': operator.gt, + 'GE': operator.ge, + 'LT': operator.lt, + 'LE': operator.le, + 'NE': operator.ne, + 'IN': operator.eq # it must be eq +} + + +def _do_query_filters(filters): + basic_conds = [] + tag_conds = [] + prop_conds = [] + + # don't show deleted artifacts + basic_conds.append([models.Artifact.state != 'deleted']) + + visibility = filters.pop('visibility', None) + if visibility is not None: + # ignore operator. always consider it EQ + basic_conds.append([models.Artifact.visibility == visibility['value']]) + + type_name = filters.pop('type_name', None) + if type_name is not None: + # ignore operator. 
always consider it EQ + basic_conds.append([models.Artifact.type_name == type_name['value']]) + type_version = filters.pop('type_version', None) + if type_version is not None: + # ignore operator. always consider it EQ + # TODO(mfedosin) add support of LIKE operator + type_version = semver_db.parse(type_version['value']) + basic_conds.append([models.Artifact.type_version == type_version]) + + name = filters.pop('name', None) + if name is not None: + # ignore operator. always consider it EQ + basic_conds.append([models.Artifact.name == name['value']]) + version = filters.pop('version', None) + if version is not None: + # ignore operator. always consider it EQ + # TODO(mfedosin) add support of LIKE operator + version = semver_db.parse(version['value']) + basic_conds.append([models.Artifact.version == version]) + + state = filters.pop('state', None) + if state is not None: + # ignore operator. always consider it EQ + basic_conds.append([models.Artifact.state == state['value']]) + + owner = filters.pop('owner', None) + if owner is not None: + # ignore operator. always consider it EQ + basic_conds.append([models.Artifact.owner == owner['value']]) + + id_list = filters.pop('id_list', None) + if id_list is not None: + basic_conds.append([models.Artifact.id.in_(id_list['value'])]) + + name_list = filters.pop('name_list', None) + if name_list is not None: + basic_conds.append([models.Artifact.name.in_(name_list['value'])]) + + tags = filters.pop('tags', None) + if tags is not None: + for tag in tags['value']: + tag_conds.append([models.ArtifactTag.value == tag]) + + # process remaining filters + for filtername, filtervalue in filters.items(): + + db_prop_op = filtervalue['operator'] + db_prop_value = filtervalue['value'] + db_prop_type = filtervalue['type'] + "_value" + db_prop_position = filtervalue.get('position') + + conds = [models.ArtifactProperty.name == filtername] + + if db_prop_op in op_mappings: + fn = op_mappings[db_prop_op] + result = fn(getattr(models.ArtifactProperty, db_prop_type), + db_prop_value) + + cond = [result, + models.ArtifactProperty.position == db_prop_position] + if db_prop_op == 'IN': + if db_prop_position is not None: + msg = _LE("Cannot use this parameter with " + "the operator IN") + LOG.error(msg) + raise exception.ArtifactInvalidPropertyParameter(op='IN') + cond = [result, + models.ArtifactProperty.position >= 0] + else: + msg = _LE("Operator %s is not supported") % db_prop_op + LOG.error(msg) + raise exception.ArtifactUnsupportedPropertyOperator(op=db_prop_op) + + conds.extend(cond) + + prop_conds.append(conds) + return basic_conds, tag_conds, prop_conds + + +def _do_tags(artifact, new_tags): + tags_to_update = [] + # don't touch existing tags + for tag in artifact.tags: + if tag.value in new_tags: + tags_to_update.append(tag) + new_tags.remove(tag.value) + # add new tags + for tag in new_tags: + db_tag = models.ArtifactTag() + db_tag.value = tag + tags_to_update.append(db_tag) + return tags_to_update + + +def _do_property(propname, prop, position=None): + db_prop = models.ArtifactProperty() + db_prop.name = propname + setattr(db_prop, + (prop['type'] + "_value"), + prop['value']) + db_prop.position = position + return db_prop + + +def _do_properties(artifact, new_properties): + + props_to_update = [] + # don't touch existing properties + for prop in artifact.properties: + if prop.name not in new_properties: + props_to_update.append(prop) + + for propname, prop in new_properties.items(): + if prop['type'] == 'array': + for pos, arrprop in enumerate(prop['value']): 
+ props_to_update.append( + _do_property(propname, arrprop, pos) + ) + else: + props_to_update.append( + _do_property(propname, prop) + ) + return props_to_update + + +def _do_blobs(artifact, new_blobs): + blobs_to_update = [] + + # don't touch existing blobs + for blob in artifact.blobs: + if blob.name not in new_blobs: + blobs_to_update.append(blob) + + for blobname, blobs in new_blobs.items(): + for pos, blob in enumerate(blobs): + for db_blob in artifact.blobs: + if db_blob.name == blobname and db_blob.position == pos: + # update existing blobs + db_blob.size = blob['size'] + db_blob.checksum = blob['checksum'] + db_blob.item_key = blob['item_key'] + db_blob.locations = _do_locations(db_blob, + blob['locations']) + blobs_to_update.append(db_blob) + break + else: + # create new blob + db_blob = models.ArtifactBlob() + db_blob.name = blobname + db_blob.size = blob['size'] + db_blob.checksum = blob['checksum'] + db_blob.item_key = blob['item_key'] + db_blob.position = pos + db_blob.locations = _do_locations(db_blob, blob['locations']) + blobs_to_update.append(db_blob) + return blobs_to_update + + +def _do_locations(blob, new_locations): + locs_to_update = [] + for pos, loc in enumerate(new_locations): + for db_loc in blob.locations: + if db_loc.value == loc['value']: + # update existing location + db_loc.position = pos + db_loc.status = loc['status'] + locs_to_update.append(db_loc) + break + else: + # create new location + db_loc = models.ArtifactBlobLocation() + db_loc.value = loc['value'] + db_loc.status = loc['status'] + db_loc.position = pos + locs_to_update.append(db_loc) + return locs_to_update + + +def _do_dependencies(artifact, new_dependencies, session): + deps_to_update = [] + # small check that all dependencies are new + if artifact.dependencies is not None: + for db_dep in artifact.dependencies: + for dep in new_dependencies.keys(): + if db_dep.name == dep: + msg = _LW("Artifact with the specified type, name " + "and versions already has the direct " + "dependency=%s") % dep + LOG.warn(msg) + # change values of former dependency + for dep in artifact.dependencies: + session.delete(dep) + artifact.dependencies = [] + for depname, depvalues in new_dependencies.items(): + for pos, depvalue in enumerate(depvalues): + db_dep = models.ArtifactDependency() + db_dep.name = depname + db_dep.artifact_source = artifact.id + db_dep.artifact_dest = depvalue + db_dep.artifact_origin = artifact.id + db_dep.is_direct = True + db_dep.position = pos + deps_to_update.append(db_dep) + artifact.dependencies = deps_to_update + + +def _do_transitive_dependencies(artifact, session): + deps_to_update = [] + for dependency in artifact.dependencies: + depvalue = dependency.artifact_dest + transitdeps = session.query(models.ArtifactDependency). 
\ + filter_by(artifact_source=depvalue).all() + for transitdep in transitdeps: + if not transitdep.is_direct: + # transitive dependencies are already created + msg = _LW("Artifact with the specified type, " + "name and version already has the " + "direct dependency=%d") % transitdep.id + LOG.warn(msg) + raise exception.ArtifactDuplicateTransitiveDependency( + dep=transitdep.id) + + db_dep = models.ArtifactDependency() + db_dep.name = transitdep['name'] + db_dep.artifact_source = artifact.id + db_dep.artifact_dest = transitdep.artifact_dest + db_dep.artifact_origin = transitdep.artifact_source + db_dep.is_direct = False + db_dep.position = transitdep.position + deps_to_update.append(db_dep) + return deps_to_update + + +def _check_visibility(context, artifact): + if context.is_admin: + return True + + if not artifact.owner: + return True + + if artifact.visibility == Visibility.PUBLIC.value: + return True + + if artifact.visibility == Visibility.PRIVATE.value: + if context.owner and context.owner == artifact.owner: + return True + else: + return False + + if artifact.visibility == Visibility.SHARED.value: + return False + + return False + + +def _set_version_fields(values): + if 'type_version' in values: + values['type_version'] = semver_db.parse(values['type_version']) + if 'version' in values: + values['version'] = semver_db.parse(values['version']) + + +def _validate_values(values): + if 'state' in values: + try: + State(values['state']) + except ValueError: + msg = "Invalid artifact state '%s'" % values['state'] + raise exception.Invalid(msg) + if 'visibility' in values: + try: + Visibility(values['visibility']) + except ValueError: + msg = "Invalid artifact visibility '%s'" % values['visibility'] + raise exception.Invalid(msg) + # TODO(mfedosin): it's an idea to validate tags someday + # (check that all tags match the regexp) + + +def _drop_protected_attrs(model_class, values): + """ + Removed protected attributes from values dictionary using the models + __protected_attributes__ field. + """ + for attr in model_class.__protected_attributes__: + if attr in values: + del values[attr] diff --git a/code/daisy/daisy/db/sqlalchemy/metadata.py b/code/daisy/daisy/db/sqlalchemy/metadata.py new file mode 100755 index 00000000..86a59285 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadata.py @@ -0,0 +1,483 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2013 OpenStack Foundation +# Copyright 2013 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
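+
+# Minimal illustrative sketch of a metadata definition file as consumed by
+# _populate_metadata() below. The path and values are examples only; the
+# loader reads *.json files under CONF.metadata_source_path (default
+# /etc/glance/metadefs/), and real metadefs files typically carry further
+# sections beyond the namespace-level keys shown here:
+#
+#     /etc/glance/metadefs/example-namespace.json
+#     {
+#         "namespace": "OS::Example::Namespace",
+#         "display_name": "Example Namespace",
+#         "description": "Example metadata definitions",
+#         "visibility": "public",
+#         "protected": true,
+#         "owner": "admin"
+#     }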
+ +import json +import os +from os.path import isfile +from os.path import join +import re + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import timeutils +import six +import sqlalchemy +from sqlalchemy import and_ +from sqlalchemy.schema import MetaData +from sqlalchemy.sql import select + +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LW = i18n._LW +_LI = i18n._LI + +metadata_opts = [ + cfg.StrOpt('metadata_source_path', default='/etc/glance/metadefs/', + help=_('Path to the directory where json metadata ' + 'files are stored')) +] + +CONF = cfg.CONF +CONF.register_opts(metadata_opts) + + +def get_metadef_namespaces_table(meta): + return sqlalchemy.Table('metadef_namespaces', meta, autoload=True) + + +def get_metadef_resource_types_table(meta): + return sqlalchemy.Table('metadef_resource_types', meta, autoload=True) + + +def get_metadef_namespace_resource_types_table(meta): + return sqlalchemy.Table('metadef_namespace_resource_types', meta, + autoload=True) + + +def get_metadef_properties_table(meta): + return sqlalchemy.Table('metadef_properties', meta, autoload=True) + + +def get_metadef_objects_table(meta): + return sqlalchemy.Table('metadef_objects', meta, autoload=True) + + +def get_metadef_tags_table(meta): + return sqlalchemy.Table('metadef_tags', meta, autoload=True) + + +def _get_resource_type_id(meta, name): + rt_table = get_metadef_resource_types_table(meta) + resource_type = ( + select([rt_table.c.id]). + where(rt_table.c.name == name). + select_from(rt_table). + execute().fetchone()) + if resource_type: + return resource_type[0] + return None + + +def _get_resource_type(meta, resource_type_id): + rt_table = get_metadef_resource_types_table(meta) + return ( + rt_table.select(). + where(rt_table.c.id == resource_type_id). + execute().fetchone()) + + +def _get_namespace_resource_types(meta, namespace_id): + namespace_resource_types_table =\ + get_metadef_namespace_resource_types_table(meta) + return namespace_resource_types_table.select().\ + where(namespace_resource_types_table.c.namespace_id == namespace_id).\ + execute().fetchall() + + +def _get_namespace_resource_type_by_ids(meta, namespace_id, rt_id): + namespace_resource_types_table =\ + get_metadef_namespace_resource_types_table(meta) + return namespace_resource_types_table.select().\ + where(and_( + namespace_resource_types_table.c.namespace_id == namespace_id, + namespace_resource_types_table.c.resource_type_id == rt_id)).\ + execute().fetchone() + + +def _get_properties(meta, namespace_id): + properties_table = get_metadef_properties_table(meta) + return properties_table.select().\ + where(properties_table.c.namespace_id == namespace_id).\ + execute().fetchall() + + +def _get_objects(meta, namespace_id): + objects_table = get_metadef_objects_table(meta) + return objects_table.select().\ + where(objects_table.c.namespace_id == namespace_id).\ + execute().fetchall() + + +def _get_tags(meta, namespace_id): + tags_table = get_metadef_tags_table(meta) + return ( + tags_table.select(). + where(tags_table.c.namespace_id == namespace_id). + execute().fetchall()) + + +def _get_resource_id(table, namespace_id, resource_name): + resource = ( + select([table.c.id]). + where(and_(table.c.namespace_id == namespace_id, + table.c.name == resource_name)). + select_from(table). 
+ execute().fetchone()) + if resource: + return resource[0] + return None + + +def _clear_metadata(meta): + metadef_tables = [get_metadef_properties_table(meta), + get_metadef_objects_table(meta), + get_metadef_tags_table(meta), + get_metadef_namespace_resource_types_table(meta), + get_metadef_namespaces_table(meta), + get_metadef_resource_types_table(meta)] + + for table in metadef_tables: + table.delete().execute() + LOG.info(_LI("Table %s has been cleared"), table) + + +def _clear_namespace_metadata(meta, namespace_id): + metadef_tables = [get_metadef_properties_table(meta), + get_metadef_objects_table(meta), + get_metadef_tags_table(meta), + get_metadef_namespace_resource_types_table(meta)] + namespaces_table = get_metadef_namespaces_table(meta) + + for table in metadef_tables: + table.delete().where(table.c.namespace_id == namespace_id).execute() + namespaces_table.delete().where( + namespaces_table.c.id == namespace_id).execute() + + +def _populate_metadata(meta, metadata_path=None, merge=False, + prefer_new=False, overwrite=False): + if not metadata_path: + metadata_path = CONF.metadata_source_path + + try: + if isfile(metadata_path): + json_schema_files = [metadata_path] + else: + json_schema_files = [f for f in os.listdir(metadata_path) + if isfile(join(metadata_path, f)) + and f.endswith('.json')] + except OSError as e: + LOG.error(utils.exception_to_str(e)) + return + + if not json_schema_files: + LOG.error(_LE("Json schema files not found in %s. Aborting."), + metadata_path) + return + + namespaces_table = get_metadef_namespaces_table(meta) + namespace_rt_table = get_metadef_namespace_resource_types_table(meta) + objects_table = get_metadef_objects_table(meta) + tags_table = get_metadef_tags_table(meta) + properties_table = get_metadef_properties_table(meta) + resource_types_table = get_metadef_resource_types_table(meta) + + for json_schema_file in json_schema_files: + try: + file = join(metadata_path, json_schema_file) + with open(file) as json_file: + metadata = json.load(json_file) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + continue + + values = { + 'namespace': metadata.get('namespace', None), + 'display_name': metadata.get('display_name', None), + 'description': metadata.get('description', None), + 'visibility': metadata.get('visibility', None), + 'protected': metadata.get('protected', None), + 'owner': metadata.get('owner', 'admin') + } + + db_namespace = select( + [namespaces_table.c.id] + ).where( + namespaces_table.c.namespace == values['namespace'] + ).select_from( + namespaces_table + ).execute().fetchone() + + if db_namespace and overwrite: + LOG.info(_LI("Overwriting namespace %s"), values['namespace']) + _clear_namespace_metadata(meta, db_namespace[0]) + db_namespace = None + + if not db_namespace: + values.update({'created_at': timeutils.utcnow()}) + _insert_data_to_db(namespaces_table, values) + + db_namespace = select( + [namespaces_table.c.id] + ).where( + namespaces_table.c.namespace == values['namespace'] + ).select_from( + namespaces_table + ).execute().fetchone() + elif not merge: + LOG.info(_LI("Skipping namespace %s. 
It already exists in the " + "database."), values['namespace']) + continue + elif prefer_new: + values.update({'updated_at': timeutils.utcnow()}) + _update_data_in_db(namespaces_table, values, + namespaces_table.c.id, db_namespace[0]) + + namespace_id = db_namespace[0] + + for resource_type in metadata.get('resource_type_associations', []): + rt_id = _get_resource_type_id(meta, resource_type['name']) + if not rt_id: + val = { + 'name': resource_type['name'], + 'created_at': timeutils.utcnow(), + 'protected': True + } + _insert_data_to_db(resource_types_table, val) + rt_id = _get_resource_type_id(meta, resource_type['name']) + elif prefer_new: + val = {'updated_at': timeutils.utcnow()} + _update_data_in_db(resource_types_table, val, + resource_types_table.c.id, rt_id) + + values = { + 'namespace_id': namespace_id, + 'resource_type_id': rt_id, + 'properties_target': resource_type.get( + 'properties_target', None), + 'prefix': resource_type.get('prefix', None) + } + namespace_resource_type = _get_namespace_resource_type_by_ids( + meta, namespace_id, rt_id) + if not namespace_resource_type: + values.update({'created_at': timeutils.utcnow()}) + _insert_data_to_db(namespace_rt_table, values) + elif prefer_new: + values.update({'updated_at': timeutils.utcnow()}) + _update_rt_association(namespace_rt_table, values, + rt_id, namespace_id) + + for property, schema in six.iteritems(metadata.get('properties', + {})): + values = { + 'name': property, + 'namespace_id': namespace_id, + 'json_schema': json.dumps(schema) + } + property_id = _get_resource_id(properties_table, + namespace_id, property) + if not property_id: + values.update({'created_at': timeutils.utcnow()}) + _insert_data_to_db(properties_table, values) + elif prefer_new: + values.update({'updated_at': timeutils.utcnow()}) + _update_data_in_db(properties_table, values, + properties_table.c.id, property_id) + + for object in metadata.get('objects', []): + values = { + 'name': object['name'], + 'description': object.get('description', None), + 'namespace_id': namespace_id, + 'json_schema': json.dumps( + object.get('properties', None)) + } + object_id = _get_resource_id(objects_table, namespace_id, + object['name']) + if not object_id: + values.update({'created_at': timeutils.utcnow()}) + _insert_data_to_db(objects_table, values) + elif prefer_new: + values.update({'updated_at': timeutils.utcnow()}) + _update_data_in_db(objects_table, values, + objects_table.c.id, object_id) + + for tag in metadata.get('tags', []): + values = { + 'name': tag.get('name'), + 'namespace_id': namespace_id, + } + tag_id = _get_resource_id(tags_table, namespace_id, tag['name']) + if not tag_id: + values.update({'created_at': timeutils.utcnow()}) + _insert_data_to_db(tags_table, values) + elif prefer_new: + values.update({'updated_at': timeutils.utcnow()}) + _update_data_in_db(tags_table, values, + tags_table.c.id, tag_id) + + LOG.info(_LI("File %s loaded to database."), file) + + LOG.info(_LI("Metadata loading finished")) + + +def _insert_data_to_db(table, values, log_exception=True): + try: + table.insert(values=values).execute() + except sqlalchemy.exc.IntegrityError: + if log_exception: + LOG.warning(_LW("Duplicate entry for values: %s"), values) + + +def _update_data_in_db(table, values, column, value): + try: + (table.update(values=values). 
+ where(column == value).execute()) + except sqlalchemy.exc.IntegrityError: + LOG.warning(_LW("Duplicate entry for values: %s"), values) + + +def _update_rt_association(table, values, rt_id, namespace_id): + try: + (table.update(values=values). + where(and_(table.c.resource_type_id == rt_id, + table.c.namespace_id == namespace_id)).execute()) + except sqlalchemy.exc.IntegrityError: + LOG.warning(_LW("Duplicate entry for values: %s"), values) + + +def _export_data_to_file(meta, path): + if not path: + path = CONF.metadata_source_path + + namespace_table = get_metadef_namespaces_table(meta) + namespaces = namespace_table.select().execute().fetchall() + + pattern = re.compile('[\W_]+', re.UNICODE) + + for id, namespace in enumerate(namespaces, start=1): + namespace_id = namespace['id'] + namespace_file_name = pattern.sub('', namespace['display_name']) + + values = { + 'namespace': namespace['namespace'], + 'display_name': namespace['display_name'], + 'description': namespace['description'], + 'visibility': namespace['visibility'], + 'protected': namespace['protected'], + 'resource_type_associations': [], + 'properties': {}, + 'objects': [], + 'tags': [] + } + + namespace_resource_types = _get_namespace_resource_types(meta, + namespace_id) + db_objects = _get_objects(meta, namespace_id) + db_properties = _get_properties(meta, namespace_id) + db_tags = _get_tags(meta, namespace_id) + + resource_types = [] + for namespace_resource_type in namespace_resource_types: + resource_type =\ + _get_resource_type(meta, + namespace_resource_type['resource_type_id']) + resource_types.append({ + 'name': resource_type['name'], + 'prefix': namespace_resource_type['prefix'], + 'properties_target': namespace_resource_type[ + 'properties_target'] + }) + values.update({ + 'resource_type_associations': resource_types + }) + + objects = [] + for object in db_objects: + objects.append({ + "name": object['name'], + "description": object['description'], + "properties": json.loads(object['json_schema']) + }) + values.update({ + 'objects': objects + }) + + properties = {} + for property in db_properties: + properties.update({ + property['name']: json.loads(property['json_schema']) + }) + values.update({ + 'properties': properties + }) + + tags = [] + for tag in db_tags: + tags.append({ + "name": tag['name'] + }) + values.update({ + 'tags': tags + }) + + try: + file_name = ''.join([path, namespace_file_name, '.json']) + with open(file_name, 'w') as json_file: + json_file.write(json.dumps(values)) + except Exception as e: + LOG.exception(utils.exception_to_str(e)) + LOG.info(_LI("Namespace %(namespace)s saved in %(file)s") % { + 'namespace': namespace_file_name, 'file': file_name}) + + +def db_load_metadefs(engine, metadata_path=None, merge=False, + prefer_new=False, overwrite=False): + meta = MetaData() + meta.bind = engine + + if not merge and (prefer_new or overwrite): + LOG.error(_LE("To use --prefer_new or --overwrite you need to combine " + "of these options with --merge option.")) + return + + if prefer_new and overwrite and merge: + LOG.error(_LE("Please provide no more than one option from this list: " + "--prefer_new, --overwrite")) + return + + _populate_metadata(meta, metadata_path, merge, prefer_new, overwrite) + + +def db_unload_metadefs(engine): + meta = MetaData() + meta.bind = engine + + _clear_metadata(meta) + + +def db_export_metadefs(engine, metadata_path=None): + meta = MetaData() + meta.bind = engine + + _export_data_to_file(meta, metadata_path) diff --git 
a/code/daisy/daisy/db/sqlalchemy/metadef_api/__init__.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/namespace.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/namespace.py new file mode 100755 index 00000000..04eeb0a7 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/namespace.py @@ -0,0 +1,310 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_db import exception as db_exc +from oslo_db.sqlalchemy.utils import paginate_query +from oslo_log import log as logging +import sqlalchemy.exc as sa_exc +from sqlalchemy import or_ +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +import daisy.db.sqlalchemy.metadef_api as metadef_api +from daisy.db.sqlalchemy import models_metadef as models +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + + +def _is_namespace_visible(context, namespace, status=None): + """Return True if the namespace is visible in this context.""" + + # Is admin == visible + if context.is_admin: + return True + + # No owner == visible + if namespace['owner'] is None: + return True + + # Is public == visible + if 'visibility' in namespace: + if namespace['visibility'] == 'public': + return True + + # context.owner has a value and is the namespace owner == visible + if context.owner is not None: + if context.owner == namespace['owner']: + return True + + # Private + return False + + +def _select_namespaces_query(context, session): + """Build the query to get all namespaces based on the context""" + + LOG.debug("context.is_admin=%(is_admin)s; context.owner=%(owner)s" % + {'is_admin': context.is_admin, 'owner': context.owner}) + + # If admin, return everything. + query_ns = session.query(models.MetadefNamespace) + if context.is_admin: + return query_ns + else: + # If regular user, return only public namespaces. + # However, if context.owner has a value, return both + # public and private namespaces of the context.owner. + if context.owner is not None: + query = ( + query_ns.filter( + or_(models.MetadefNamespace.owner == context.owner, + models.MetadefNamespace.visibility == 'public'))) + else: + query = query_ns.filter( + models.MetadefNamespace.visibility == 'public') + return query + + +def _get(context, namespace_id, session): + """Get a namespace by id, raise if not found""" + + try: + query = session.query(models.MetadefNamespace)\ + .filter_by(id=namespace_id) + namespace_rec = query.one() + except sa_orm.exc.NoResultFound: + msg = (_("Metadata definition namespace not found for id=%s") + % namespace_id) + LOG.warn(msg) + raise exc.MetadefNamespaceNotFound(msg) + + # Make sure they are allowed to view it. 
+ if not _is_namespace_visible(context, namespace_rec.to_dict()): + msg = ("Forbidding request, metadata definition namespace=%s" + " is not visible.") % namespace_rec.namespace + LOG.debug(msg) + emsg = _("Forbidding request, metadata definition namespace=%s" + " is not visible.") % namespace_rec.namespace + raise exc.MetadefForbidden(emsg) + + return namespace_rec + + +def _get_by_name(context, name, session): + """Get a namespace by name, raise if not found""" + + try: + query = session.query(models.MetadefNamespace)\ + .filter_by(namespace=name) + namespace_rec = query.one() + except sa_orm.exc.NoResultFound: + msg = "Metadata definition namespace=%s was not found." % name + LOG.debug(msg) + raise exc.MetadefNamespaceNotFound(namespace_name=name) + + # Make sure they are allowed to view it. + if not _is_namespace_visible(context, namespace_rec.to_dict()): + msg = ("Forbidding request, metadata definition namespace=%s" + " is not visible." % name) + LOG.debug(msg) + emsg = _("Forbidding request, metadata definition namespace=%s" + " is not visible.") % name + raise exc.MetadefForbidden(emsg) + + return namespace_rec + + +def _get_all(context, session, filters=None, marker=None, + limit=None, sort_key='created_at', sort_dir='desc'): + """Get all namespaces that match zero or more filters. + + :param filters: dict of filter keys and values. + :param marker: namespace id after which to start page + :param limit: maximum number of namespaces to return + :param sort_key: namespace attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + """ + + filters = filters or {} + + query = _select_namespaces_query(context, session) + + # if visibility filter, apply it to the context based query + visibility = filters.pop('visibility', None) + if visibility is not None: + query = query.filter(models.MetadefNamespace.visibility == visibility) + + # if id_list filter, apply it to the context based query + id_list = filters.pop('id_list', None) + if id_list is not None: + query = query.filter(models.MetadefNamespace.id.in_(id_list)) + + marker_namespace = None + if marker is not None: + marker_namespace = _get(context, marker, session) + + sort_keys = ['created_at', 'id'] + sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys + + query = paginate_query(query=query, + model=models.MetadefNamespace, + limit=limit, + sort_keys=sort_keys, + marker=marker_namespace, sort_dir=sort_dir) + + return query.all() + + +def _get_all_by_resource_types(context, session, filters, marker=None, + limit=None, sort_key=None, sort_dir=None): + """get all visible namespaces for the specified resource_types""" + + resource_types = filters['resource_types'] + resource_type_list = resource_types.split(',') + db_recs = ( + session.query(models.MetadefResourceType) + .join(models.MetadefResourceType.associations) + .filter(models.MetadefResourceType.name.in_(resource_type_list)) + .values(models.MetadefResourceType.name, + models.MetadefNamespaceResourceType.namespace_id) + ) + + namespace_id_list = [] + for name, namespace_id in db_recs: + namespace_id_list.append(namespace_id) + + if len(namespace_id_list) is 0: + return [] + + filters2 = filters + filters2.update({'id_list': namespace_id_list}) + + return _get_all(context, session, filters2, + marker, limit, sort_key, sort_dir) + + +def get_all(context, session, marker=None, limit=None, + sort_key=None, sort_dir=None, filters=None): + """List all visible namespaces""" + + namespaces = [] + 
filters = filters or {} + + if 'resource_types' in filters: + namespaces = _get_all_by_resource_types( + context, session, filters, marker, limit, sort_key, sort_dir) + else: + namespaces = _get_all( + context, session, filters, marker, limit, sort_key, sort_dir) + + return map(lambda ns: ns.to_dict(), namespaces) + + +def get(context, name, session): + """Get a namespace by name, raise if not found""" + namespace_rec = _get_by_name(context, name, session) + return namespace_rec.to_dict() + + +def create(context, values, session): + """Create a namespace, raise if namespace already exists.""" + + namespace_name = values['namespace'] + namespace = models.MetadefNamespace() + metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) + namespace.update(values.copy()) + try: + namespace.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Can not create the metadata definition namespace." + " Namespace=%s already exists.") % namespace_name + LOG.debug(msg) + raise exc.MetadefDuplicateNamespace( + namespace_name=namespace_name) + + return namespace.to_dict() + + +def update(context, namespace_id, values, session): + """Update a namespace, raise if not found/visible or duplicate result""" + + namespace_rec = _get(context, namespace_id, session) + metadef_api.utils.drop_protected_attrs(models.MetadefNamespace, values) + + try: + namespace_rec.update(values.copy()) + namespace_rec.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Invalid update. It would result in a duplicate" + " metadata definition namespace with the same name of %s" + % values['namespace']) + LOG.debug(msg) + emsg = (_("Invalid update. It would result in a duplicate" + " metadata definition namespace with the same name of %s") + % values['namespace']) + raise exc.MetadefDuplicateNamespace(emsg) + + return namespace_rec.to_dict() + + +def delete(context, name, session): + """Raise if not found, has references or not visible""" + + namespace_rec = _get_by_name(context, name, session) + try: + session.delete(namespace_rec) + session.flush() + except db_exc.DBError as e: + if isinstance(e.inner_exception, sa_exc.IntegrityError): + msg = ("Metadata definition namespace=%s not deleted." + " Other records still refer to it." % name) + LOG.debug(msg) + raise exc.MetadefIntegrityError( + record_type='namespace', record_name=name) + else: + raise e + + return namespace_rec.to_dict() + + +def delete_cascade(context, name, session): + """Raise if not found, has references or not visible""" + + namespace_rec = _get_by_name(context, name, session) + with session.begin(): + try: + metadef_api.tag.delete_namespace_content( + context, namespace_rec.id, session) + metadef_api.object.delete_namespace_content( + context, namespace_rec.id, session) + metadef_api.property.delete_namespace_content( + context, namespace_rec.id, session) + metadef_api.resource_type_association.delete_namespace_content( + context, namespace_rec.id, session) + session.delete(namespace_rec) + session.flush() + except db_exc.DBError as e: + if isinstance(e.inner_exception, sa_exc.IntegrityError): + msg = ("Metadata definition namespace=%s not deleted." + " Other records still refer to it." 
% name) + LOG.debug(msg) + raise exc.MetadefIntegrityError( + record_type='namespace', record_name=name) + else: + raise e + + return namespace_rec.to_dict() diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/object.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/object.py new file mode 100755 index 00000000..d719b41b --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/object.py @@ -0,0 +1,158 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_db import exception as db_exc +from oslo_log import log as logging +from sqlalchemy import func +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +from daisy.db.sqlalchemy.metadef_api import namespace as namespace_api +import daisy.db.sqlalchemy.metadef_api.utils as metadef_utils +from daisy.db.sqlalchemy import models_metadef as models +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + + +def _get(context, object_id, session): + try: + query = session.query(models.MetadefObject)\ + .filter_by(id=object_id) + metadef_object = query.one() + except sa_orm.exc.NoResultFound: + msg = (_("Metadata definition object not found for id=%s") + % object_id) + LOG.warn(msg) + raise exc.MetadefObjectNotFound(msg) + + return metadef_object + + +def _get_by_name(context, namespace_name, name, session): + namespace = namespace_api.get(context, namespace_name, session) + try: + query = session.query(models.MetadefObject)\ + .filter_by(name=name, namespace_id=namespace['id']) + metadef_object = query.one() + except sa_orm.exc.NoResultFound: + msg = ("The metadata definition object with name=%(name)s" + " was not found in namespace=%(namespace_name)s." + % {'name': name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefObjectNotFound(object_name=name, + namespace_name=namespace_name) + + return metadef_object + + +def get_all(context, namespace_name, session): + namespace = namespace_api.get(context, namespace_name, session) + query = session.query(models.MetadefObject)\ + .filter_by(namespace_id=namespace['id']) + md_objects = query.all() + + md_objects_list = [] + for obj in md_objects: + md_objects_list.append(obj.to_dict()) + return md_objects_list + + +def create(context, namespace_name, values, session): + namespace = namespace_api.get(context, namespace_name, session) + values.update({'namespace_id': namespace['id']}) + + md_object = models.MetadefObject() + metadef_utils.drop_protected_attrs(models.MetadefObject, values) + md_object.update(values.copy()) + try: + md_object.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("A metadata definition object with name=%(name)s" + " in namespace=%(namespace_name)s already exists." 
+ % {'name': md_object.name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateObject( + object_name=md_object.name, namespace_name=namespace_name) + + return md_object.to_dict() + + +def get(context, namespace_name, name, session): + md_object = _get_by_name(context, namespace_name, name, session) + + return md_object.to_dict() + + +def update(context, namespace_name, object_id, values, session): + """Update an object, raise if ns not found/visible or duplicate result""" + namespace_api.get(context, namespace_name, session) + + md_object = _get(context, object_id, session) + metadef_utils.drop_protected_attrs(models.MetadefObject, values) + # values['updated_at'] = timeutils.utcnow() - done by TS mixin + try: + md_object.update(values.copy()) + md_object.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Invalid update. It would result in a duplicate" + " metadata definition object with same name=%(name)s" + " in namespace=%(namespace_name)s." + % {'name': md_object.name, 'namespace_name': namespace_name}) + LOG.debug(msg) + emsg = (_("Invalid update. It would result in a duplicate" + " metadata definition object with the same name=%(name)s" + " in namespace=%(namespace_name)s.") + % {'name': md_object.name, 'namespace_name': namespace_name}) + raise exc.MetadefDuplicateObject(emsg) + + return md_object.to_dict() + + +def delete(context, namespace_name, object_name, session): + namespace_api.get(context, namespace_name, session) + md_object = _get_by_name(context, namespace_name, object_name, session) + + session.delete(md_object) + session.flush() + + return md_object.to_dict() + + +def delete_namespace_content(context, namespace_id, session): + """Use this def only if the ns for the id has been verified as visible""" + + count = 0 + query = session.query(models.MetadefObject)\ + .filter_by(namespace_id=namespace_id) + count = query.delete(synchronize_session='fetch') + return count + + +def delete_by_namespace_name(context, namespace_name, session): + namespace = namespace_api.get(context, namespace_name, session) + return delete_namespace_content(context, namespace['id'], session) + + +def count(context, namespace_name, session): + """Get the count of objects for a namespace, raise if ns not found""" + namespace = namespace_api.get(context, namespace_name, session) + + query = session.query(func.count(models.MetadefObject.id))\ + .filter_by(namespace_id=namespace['id']) + return query.scalar() diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/property.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/property.py new file mode 100755 index 00000000..fb356a03 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/property.py @@ -0,0 +1,170 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
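property.py below follows the same pattern as object.py above: resolve the namespace (which also enforces visibility), strip protected attributes, save, and translate DBDuplicateEntry into a Daisy exception. A hedged sketch of how these per-entity helpers are typically driven follows; the SQLite URL, the FakeContext class, and the namespace/object names are assumptions made for illustration and are not part of the patch, and the metadef tables are assumed to exist already.

```python
# Illustrative caller of the metadef object helpers defined above.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from daisy.db.sqlalchemy.metadef_api import object as object_api


class FakeContext(object):
    # Stand-in for the request context the real db api layer supplies.
    is_admin = True
    owner = None


engine = create_engine("sqlite:///daisy_demo.db")   # placeholder URL
Session = sessionmaker(bind=engine)
session = Session()
context = FakeContext()

# Create an object inside an existing namespace, then list everything in it.
object_api.create(context, "OS::Example::Namespace",
                  {"name": "ExampleObject", "json_schema": "{}"}, session)
for obj in object_api.get_all(context, "OS::Example::Namespace", session):
    print(obj["name"])
```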
+ + +from oslo_db import exception as db_exc +from oslo_log import log as logging +from sqlalchemy import func +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +from daisy.db.sqlalchemy.metadef_api import namespace as namespace_api +from daisy.db.sqlalchemy.metadef_api import utils as metadef_utils +from daisy.db.sqlalchemy import models_metadef as models +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LW = i18n._LW + + +def _get(context, property_id, session): + + try: + query = session.query(models.MetadefProperty)\ + .filter_by(id=property_id) + property_rec = query.one() + + except sa_orm.exc.NoResultFound: + msg = (_("Metadata definition property not found for id=%s") + % property_id) + LOG.warn(msg) + raise exc.MetadefPropertyNotFound(msg) + + return property_rec + + +def _get_by_name(context, namespace_name, name, session): + """get a property; raise if ns not found/visible or property not found""" + + namespace = namespace_api.get(context, namespace_name, session) + try: + query = session.query(models.MetadefProperty)\ + .filter_by(name=name, namespace_id=namespace['id']) + property_rec = query.one() + + except sa_orm.exc.NoResultFound: + msg = ("The metadata definition property with name=%(name)s" + " was not found in namespace=%(namespace_name)s." + % {'name': name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefPropertyNotFound(property_name=name, + namespace_name=namespace_name) + + return property_rec + + +def get(context, namespace_name, name, session): + """get a property; raise if ns not found/visible or property not found""" + + property_rec = _get_by_name(context, namespace_name, name, session) + return property_rec.to_dict() + + +def get_all(context, namespace_name, session): + namespace = namespace_api.get(context, namespace_name, session) + query = session.query(models.MetadefProperty)\ + .filter_by(namespace_id=namespace['id']) + properties = query.all() + + properties_list = [] + for prop in properties: + properties_list.append(prop.to_dict()) + return properties_list + + +def create(context, namespace_name, values, session): + namespace = namespace_api.get(context, namespace_name, session) + values.update({'namespace_id': namespace['id']}) + + property_rec = models.MetadefProperty() + metadef_utils.drop_protected_attrs(models.MetadefProperty, values) + property_rec.update(values.copy()) + + try: + property_rec.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Can not create metadata definition property. A property" + " with name=%(name)s already exists in" + " namespace=%(namespace_name)s." + % {'name': property_rec.name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateProperty( + property_name=property_rec.name, + namespace_name=namespace_name) + + return property_rec.to_dict() + + +def update(context, namespace_name, property_id, values, session): + """Update a property, raise if ns not found/visible or duplicate result""" + + namespace_api.get(context, namespace_name, session) + property_rec = _get(context, property_id, session) + metadef_utils.drop_protected_attrs(models.MetadefProperty, values) + # values['updated_at'] = timeutils.utcnow() - done by TS mixin + try: + property_rec.update(values.copy()) + property_rec.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Invalid update. It would result in a duplicate" + " metadata definition property with the same name=%(name)s" + " in namespace=%(namespace_name)s." 
+ % {'name': property_rec.name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + emsg = (_("Invalid update. It would result in a duplicate" + " metadata definition property with the same name=%(name)s" + " in namespace=%(namespace_name)s.") + % {'name': property_rec.name, + 'namespace_name': namespace_name}) + raise exc.MetadefDuplicateProperty(emsg) + + return property_rec.to_dict() + + +def delete(context, namespace_name, property_name, session): + property_rec = _get_by_name( + context, namespace_name, property_name, session) + if property_rec: + session.delete(property_rec) + session.flush() + + return property_rec.to_dict() + + +def delete_namespace_content(context, namespace_id, session): + """Use this def only if the ns for the id has been verified as visible""" + + count = 0 + query = session.query(models.MetadefProperty)\ + .filter_by(namespace_id=namespace_id) + count = query.delete(synchronize_session='fetch') + return count + + +def delete_by_namespace_name(context, namespace_name, session): + namespace = namespace_api.get(context, namespace_name, session) + return delete_namespace_content(context, namespace['id'], session) + + +def count(context, namespace_name, session): + """Get the count of properties for a namespace, raise if ns not found""" + + namespace = namespace_api.get(context, namespace_name, session) + + query = session.query(func.count(models.MetadefProperty.id))\ + .filter_by(namespace_id=namespace['id']) + return query.scalar() diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type.py new file mode 100755 index 00000000..b90eed68 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type.py @@ -0,0 +1,111 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
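Both property.py above and resource_type.py below call drop_protected_attrs() before applying caller-supplied values, so timestamp and soft-delete columns can never be set from the outside. A standalone illustration of that helper (its body matches the one added in metadef_api/utils.py later in this patch); FakeModel is a stand-in, the real models declare __protected_attributes__ in models_metadef.py.

```python
# Minimal demonstration of drop_protected_attrs(); FakeModel is hypothetical.


class FakeModel(object):
    __protected_attributes__ = set(
        ['created_at', 'updated_at', 'deleted_at', 'deleted'])


def drop_protected_attrs(model_class, values):
    # Same logic as metadef_api/utils.py: strip protected keys in place.
    for attr in model_class.__protected_attributes__:
        if attr in values:
            del values[attr]


values = {'name': 'example', 'created_at': '2015-01-01', 'deleted': True}
drop_protected_attrs(FakeModel, values)
print(values)   # only {'name': 'example'} remains
```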
+ + +from oslo_db import exception as db_exc +from oslo_log import log as logging +import sqlalchemy.exc as sa_exc +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +import daisy.db.sqlalchemy.metadef_api.utils as metadef_utils +from daisy.db.sqlalchemy import models_metadef as models + +LOG = logging.getLogger(__name__) + + +def get(context, name, session): + """Get a resource type, raise if not found""" + + try: + query = session.query(models.MetadefResourceType)\ + .filter_by(name=name) + resource_type = query.one() + except sa_orm.exc.NoResultFound: + msg = "No metadata definition resource-type found with name %s" % name + LOG.debug(msg) + raise exc.MetadefResourceTypeNotFound(resource_type_name=name) + + return resource_type.to_dict() + + +def get_all(context, session): + """Get a list of all resource types""" + + query = session.query(models.MetadefResourceType) + resource_types = query.all() + + resource_types_list = [] + for rt in resource_types: + resource_types_list.append(rt.to_dict()) + + return resource_types_list + + +def create(context, values, session): + """Create a resource_type, raise if it already exists.""" + + resource_type = models.MetadefResourceType() + metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) + resource_type.update(values.copy()) + try: + resource_type.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Can not create the metadata definition resource-type." + " A resource-type with name=%s already exists." + % resource_type.name) + LOG.debug(msg) + raise exc.MetadefDuplicateResourceType( + resource_type_name=resource_type.name) + + return resource_type.to_dict() + + +def update(context, values, session): + """Update a resource type, raise if not found""" + + name = values['name'] + metadef_utils.drop_protected_attrs(models.MetadefResourceType, values) + db_rec = get(context, name, session) + db_rec.update(values.copy()) + db_rec.save(session=session) + + return db_rec.to_dict() + + +def delete(context, name, session): + """Delete a resource type or raise if not found or is protected""" + + db_rec = get(context, name, session) + if db_rec.protected is True: + msg = ("Delete forbidden. Metadata definition resource-type %s is a" + " seeded-system type and can not be deleted.") % name + LOG.debug(msg) + raise exc.ProtectedMetadefResourceTypeSystemDelete( + resource_type_name=name) + + try: + session.delete(db_rec) + session.flush() + except db_exc.DBError as e: + if isinstance(e.inner_exception, sa_exc.IntegrityError): + msg = ("Could not delete Metadata definition resource-type %s" + ". It still has content") % name + LOG.debug(msg) + raise exc.MetadefIntegrityError( + record_type='resource-type', record_name=name) + else: + raise e + + return db_rec.to_dict() diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type_association.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type_association.py new file mode 100755 index 00000000..bc29389a --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/resource_type_association.py @@ -0,0 +1,217 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_db import exception as db_exc +from oslo_log import log as logging +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +from daisy.db.sqlalchemy.metadef_api\ + import namespace as namespace_api +from daisy.db.sqlalchemy.metadef_api\ + import resource_type as resource_type_api +from daisy.db.sqlalchemy.metadef_api\ + import utils as metadef_utils +from daisy.db.sqlalchemy import models_metadef as models + +LOG = logging.getLogger(__name__) + + +def _to_db_dict(namespace_id, resource_type_id, model_dict): + """transform a model dict to a metadef_namespace_resource_type dict""" + db_dict = {'namespace_id': namespace_id, + 'resource_type_id': resource_type_id, + 'properties_target': model_dict['properties_target'], + 'prefix': model_dict['prefix']} + return db_dict + + +def _to_model_dict(resource_type_name, ns_res_type_dict): + """transform a metadef_namespace_resource_type dict to a model dict""" + model_dict = {'name': resource_type_name, + 'properties_target': ns_res_type_dict['properties_target'], + 'prefix': ns_res_type_dict['prefix'], + 'created_at': ns_res_type_dict['created_at'], + 'updated_at': ns_res_type_dict['updated_at']} + return model_dict + + +def _set_model_dict(resource_type_name, properties_target, prefix, + created_at, updated_at): + """return a model dict set with the passed in key values""" + model_dict = {'name': resource_type_name, + 'properties_target': properties_target, + 'prefix': prefix, + 'created_at': created_at, + 'updated_at': updated_at} + return model_dict + + +def _get(context, namespace_name, resource_type_name, + namespace_id, resource_type_id, session): + """Get a namespace resource_type association""" + + # visibility check assumed done in calling routine via namespace_get + try: + query = session.query(models.MetadefNamespaceResourceType).\ + filter_by(namespace_id=namespace_id, + resource_type_id=resource_type_id) + db_rec = query.one() + except sa_orm.exc.NoResultFound: + msg = ("The metadata definition resource-type association of" + " resource_type=%(resource_type_name)s to" + " namespace_name=%(namespace_name)s was not found." + % {'resource_type_name': resource_type_name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefResourceTypeAssociationNotFound( + resource_type_name=resource_type_name, + namespace_name=namespace_name) + + return db_rec + + +def _create_association( + context, namespace_name, resource_type_name, values, session): + """Create an association, raise if it already exists.""" + + namespace_resource_type_rec = models.MetadefNamespaceResourceType() + metadef_utils.drop_protected_attrs( + models.MetadefNamespaceResourceType, values) + # values['updated_at'] = timeutils.utcnow() # TS mixin should do this + namespace_resource_type_rec.update(values.copy()) + try: + namespace_resource_type_rec.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("The metadata definition resource-type association of" + " resource_type=%(resource_type_name)s to" + " namespace=%(namespace_name)s, already exists." 
+ % {'resource_type_name': resource_type_name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateResourceTypeAssociation( + resource_type_name=resource_type_name, + namespace_name=namespace_name) + + return namespace_resource_type_rec.to_dict() + + +def _delete(context, namespace_name, resource_type_name, + namespace_id, resource_type_id, session): + """Delete a resource type association or raise if not found.""" + + db_rec = _get(context, namespace_name, resource_type_name, + namespace_id, resource_type_id, session) + session.delete(db_rec) + session.flush() + + return db_rec.to_dict() + + +def get(context, namespace_name, resource_type_name, session): + """Get a resource_type associations; raise if not found""" + namespace = namespace_api.get( + context, namespace_name, session) + + resource_type = resource_type_api.get( + context, resource_type_name, session) + + found = _get(context, namespace_name, resource_type_name, + namespace['id'], resource_type['id'], session) + + return _to_model_dict(resource_type_name, found) + + +def get_all_by_namespace(context, namespace_name, session): + """List resource_type associations by namespace, raise if not found""" + + # namespace get raises an exception if not visible + namespace = namespace_api.get( + context, namespace_name, session) + + db_recs = ( + session.query(models.MetadefResourceType) + .join(models.MetadefResourceType.associations) + .filter_by(namespace_id=namespace['id']) + .values(models.MetadefResourceType.name, + models.MetadefNamespaceResourceType.properties_target, + models.MetadefNamespaceResourceType.prefix, + models.MetadefNamespaceResourceType.created_at, + models.MetadefNamespaceResourceType.updated_at)) + + model_dict_list = [] + for name, properties_target, prefix, created_at, updated_at in db_recs: + model_dict_list.append( + _set_model_dict + (name, properties_target, prefix, created_at, updated_at) + ) + + return model_dict_list + + +def create(context, namespace_name, values, session): + """Create an association, raise if already exists or ns not found.""" + + namespace = namespace_api.get( + context, namespace_name, session) + + # if the resource_type does not exist, create it + resource_type_name = values['name'] + metadef_utils.drop_protected_attrs( + models.MetadefNamespaceResourceType, values) + try: + resource_type = resource_type_api.get( + context, resource_type_name, session) + except exc.NotFound: + resource_type = None + LOG.debug("Creating resource-type %s" % resource_type_name) + + if resource_type is None: + resource_type_dict = {'name': resource_type_name, 'protected': 0} + resource_type = resource_type_api.create( + context, resource_type_dict, session) + + # Create the association record, set the field values + ns_resource_type_dict = _to_db_dict( + namespace['id'], resource_type['id'], values) + new_rec = _create_association(context, namespace_name, resource_type_name, + ns_resource_type_dict, session) + + return _to_model_dict(resource_type_name, new_rec) + + +def delete(context, namespace_name, resource_type_name, session): + """Delete an association or raise if not found""" + + namespace = namespace_api.get( + context, namespace_name, session) + + resource_type = resource_type_api.get( + context, resource_type_name, session) + + deleted = _delete(context, namespace_name, resource_type_name, + namespace['id'], resource_type['id'], session) + + return _to_model_dict(resource_type_name, deleted) + + +def delete_namespace_content(context, namespace_id, session): + 
"""Use this def only if the ns for the id has been verified as visible""" + + count = 0 + query = session.query(models.MetadefNamespaceResourceType)\ + .filter_by(namespace_id=namespace_id) + count = query.delete(synchronize_session='fetch') + return count diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/tag.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/tag.py new file mode 100755 index 00000000..8520f709 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/tag.py @@ -0,0 +1,204 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from oslo_db import exception as db_exc +from oslo_db.sqlalchemy.utils import paginate_query +from oslo_log import log as logging +from sqlalchemy import func +import sqlalchemy.orm as sa_orm + +from daisy.common import exception as exc +from daisy.db.sqlalchemy.metadef_api import namespace as namespace_api +import daisy.db.sqlalchemy.metadef_api.utils as metadef_utils +from daisy.db.sqlalchemy import models_metadef as models +from daisy import i18n + +LOG = logging.getLogger(__name__) +_LW = i18n._LW + + +def _get(context, id, session): + try: + query = (session.query(models.MetadefTag).filter_by(id=id)) + metadef_tag = query.one() + except sa_orm.exc.NoResultFound: + msg = (_LW("Metadata tag not found for id %s") % id) + LOG.warn(msg) + raise exc.MetadefTagNotFound(message=msg) + return metadef_tag + + +def _get_by_name(context, namespace_name, name, session): + namespace = namespace_api.get(context, namespace_name, session) + try: + query = (session.query(models.MetadefTag).filter_by( + name=name, namespace_id=namespace['id'])) + metadef_tag = query.one() + except sa_orm.exc.NoResultFound: + msg = ("The metadata tag with name=%(name)s" + " was not found in namespace=%(namespace_name)s." + % {'name': name, 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefTagNotFound(name=name, + namespace_name=namespace_name) + return metadef_tag + + +def get_all(context, namespace_name, session, filters=None, marker=None, + limit=None, sort_key='created_at', sort_dir='desc'): + """Get all tags that match zero or more filters. + + :param filters: dict of filter keys and values. 
+ :param marker: tag id after which to start page + :param limit: maximum number of namespaces to return + :param sort_key: namespace attribute by which results should be sorted + :param sort_dir: direction in which results should be sorted (asc, desc) + """ + + namespace = namespace_api.get(context, namespace_name, session) + query = (session.query(models.MetadefTag).filter_by( + namespace_id=namespace['id'])) + + marker_tag = None + if marker is not None: + marker_tag = _get(context, marker, session) + + sort_keys = ['created_at', 'id'] + sort_keys.insert(0, sort_key) if sort_key not in sort_keys else sort_keys + + query = paginate_query(query=query, + model=models.MetadefTag, + limit=limit, + sort_keys=sort_keys, + marker=marker_tag, sort_dir=sort_dir) + metadef_tag = query.all() + metadef_tag_list = [] + for tag in metadef_tag: + metadef_tag_list.append(tag.to_dict()) + + return metadef_tag_list + + +def create(context, namespace_name, values, session): + namespace = namespace_api.get(context, namespace_name, session) + values.update({'namespace_id': namespace['id']}) + + metadef_tag = models.MetadefTag() + metadef_utils.drop_protected_attrs(models.MetadefTag, values) + metadef_tag.update(values.copy()) + try: + metadef_tag.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("A metadata tag name=%(name)s" + " in namespace=%(namespace_name)s already exists." + % {'name': metadef_tag.name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateTag( + name=metadef_tag.name, namespace_name=namespace_name) + + return metadef_tag.to_dict() + + +def create_tags(context, namespace_name, tag_list, session): + + metadef_tags_list = [] + if tag_list: + namespace = namespace_api.get(context, namespace_name, session) + + try: + with session.begin(): + query = (session.query(models.MetadefTag).filter_by( + namespace_id=namespace['id'])) + query.delete(synchronize_session='fetch') + + for value in tag_list: + value.update({'namespace_id': namespace['id']}) + metadef_utils.drop_protected_attrs( + models.MetadefTag, value) + metadef_tag = models.MetadefTag() + metadef_tag.update(value.copy()) + metadef_tag.save(session=session) + metadef_tags_list.append(metadef_tag.to_dict()) + except db_exc.DBDuplicateEntry: + msg = ("A metadata tag name=%(name)s" + " in namespace=%(namespace_name)s already exists." + % {'name': metadef_tag.name, + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateTag( + name=metadef_tag.name, namespace_name=namespace_name) + + return metadef_tags_list + + +def get(context, namespace_name, name, session): + metadef_tag = _get_by_name(context, namespace_name, name, session) + return metadef_tag.to_dict() + + +def update(context, namespace_name, id, values, session): + """Update an tag, raise if ns not found/visible or duplicate result""" + namespace_api.get(context, namespace_name, session) + + metadata_tag = _get(context, id, session) + metadef_utils.drop_protected_attrs(models.MetadefTag, values) + # values['updated_at'] = timeutils.utcnow() - done by TS mixin + try: + metadata_tag.update(values.copy()) + metadata_tag.save(session=session) + except db_exc.DBDuplicateEntry: + msg = ("Invalid update. It would result in a duplicate" + " metadata tag with same name=%(name)s" + " in namespace=%(namespace_name)s." 
+ % {'name': values['name'], + 'namespace_name': namespace_name}) + LOG.debug(msg) + raise exc.MetadefDuplicateTag( + name=values['name'], namespace_name=namespace_name) + + return metadata_tag.to_dict() + + +def delete(context, namespace_name, name, session): + namespace_api.get(context, namespace_name, session) + md_tag = _get_by_name(context, namespace_name, name, session) + + session.delete(md_tag) + session.flush() + + return md_tag.to_dict() + + +def delete_namespace_content(context, namespace_id, session): + """Use this def only if the ns for the id has been verified as visible""" + count = 0 + query = (session.query(models.MetadefTag).filter_by( + namespace_id=namespace_id)) + count = query.delete(synchronize_session='fetch') + return count + + +def delete_by_namespace_name(context, namespace_name, session): + namespace = namespace_api.get(context, namespace_name, session) + return delete_namespace_content(context, namespace['id'], session) + + +def count(context, namespace_name, session): + """Get the count of objects for a namespace, raise if ns not found""" + namespace = namespace_api.get(context, namespace_name, session) + query = (session.query(func.count(models.MetadefTag.id)).filter_by( + namespace_id=namespace['id'])) + return query.scalar() diff --git a/code/daisy/daisy/db/sqlalchemy/metadef_api/utils.py b/code/daisy/daisy/db/sqlalchemy/metadef_api/utils.py new file mode 100755 index 00000000..ca20150e --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/metadef_api/utils.py @@ -0,0 +1,23 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def drop_protected_attrs(model_class, values): + """ + Removed protected attributes from values dictionary using the models + __protected_attributes__ field. + """ + for attr in model_class.__protected_attributes__: + if attr in values: + del values[attr] diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/README b/code/daisy/daisy/db/sqlalchemy/migrate_repo/README new file mode 100755 index 00000000..6218f8ca --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/__init__.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/manage.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/manage.py new file mode 100755 index 00000000..f55f4cf2 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,20 @@ +# Copyright (c) 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from migrate.versioning.shell import main + +# This should probably be a console script entry point. +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/migrate.cfg b/code/daisy/daisy/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100755 index 00000000..5a44f3be --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=Daisy Migrations + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/schema.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/schema.py new file mode 100755 index 00000000..69c43218 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/schema.py @@ -0,0 +1,108 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
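manage.py above is a thin wrapper around sqlalchemy-migrate's shell, with migrate.cfg pointing it at this repository. The same repository can also be driven programmatically; the sketch below assumes sqlalchemy-migrate's migrate.versioning.api and uses a placeholder database URL and repository path, since Daisy normally wires this up through its own db management commands.

```python
# Sketch: applying this migration repository with sqlalchemy-migrate's
# Python API rather than the manage.py shell wrapper. URL/path are
# placeholders for illustration.
from migrate.versioning import api as versioning_api

DB_URL = "sqlite:///daisy_demo.db"                # placeholder database URL
REPO = "daisy/db/sqlalchemy/migrate_repo"         # path to this repository

versioning_api.version_control(DB_URL, REPO)      # start tracking at version 0
versioning_api.upgrade(DB_URL, REPO)              # run 001_... and later scripts
print(versioning_api.db_version(DB_URL, REPO))    # current schema revision
```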
+ +""" +Various conveniences used for migration scripts +""" + +from oslo_log import log as logging +import sqlalchemy.types + +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_LI = i18n._LI + + +String = lambda length: sqlalchemy.types.String( + length=length, convert_unicode=False, + unicode_error=None, _warn_on_bytestring=False) + + +Text = lambda: sqlalchemy.types.Text( + length=None, convert_unicode=False, + unicode_error=None, _warn_on_bytestring=False) + + +Boolean = lambda: sqlalchemy.types.Boolean(create_constraint=True, name=None) + + +DateTime = lambda: sqlalchemy.types.DateTime(timezone=False) + + +Integer = lambda: sqlalchemy.types.Integer() + + +BigInteger = lambda: sqlalchemy.types.BigInteger() + + +PickleType = lambda: sqlalchemy.types.PickleType() + + +Numeric = lambda: sqlalchemy.types.Numeric() + + +def from_migration_import(module_name, fromlist): + """ + Import a migration file and return the module + + :param module_name: name of migration module to import from + (ex: 001_add_images_table) + :param fromlist: list of items to import (ex: define_images_table) + :retval: module object + + This bit of ugliness warrants an explanation: + + As you're writing migrations, you'll frequently want to refer to + tables defined in previous migrations. + + In the interest of not repeating yourself, you need a way of importing + that table into a 'future' migration. + + However, tables are bound to metadata, so what you need to import is + really a table factory, which you can late-bind to your current + metadata object. + + Moreover, migrations begin with a number (001...), which means they + aren't valid Python identifiers. This means we can't perform a + 'normal' import on them (the Python lexer will 'splode). Instead, we + need to use __import__ magic to bring the table-factory into our + namespace. + + Example Usage: + + (define_images_table,) = from_migration_import( + '001_add_images_table', ['define_images_table']) + + images = define_images_table(meta) + + # Refer to images table + """ + module_path = 'daisy.db.sqlalchemy.migrate_repo.versions.%s' % module_name + module = __import__(module_path, globals(), locals(), fromlist, -1) + return [getattr(module, item) for item in fromlist] + + +def create_tables(tables): + for table in tables: + LOG.info(_LI("creating table %(table)s") % {'table': table}) + table.create() + + +def drop_tables(tables): + for table in tables: + LOG.info(_LI("dropping table %(table)s") % {'table': table}) + table.drop() diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py new file mode 100755 index 00000000..2d92e19d --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/001_add_daisy_tables.py @@ -0,0 +1,796 @@ +# Copyright (c) 2015 ZTE, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
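The helpers defined above in schema.py (the shared column-type factories, from_migration_import, create_tables and drop_tables) are what the numbered scripts under versions/ build on; 001_add_daisy_tables.py below is the first real example. As a purely illustrative sketch of the pattern the from_migration_import docstring describes, a later hypothetical script could late-bind one of 001's table factories so it can declare a foreign key against it — the module name and the example_links table are invented for illustration and are not part of the patch.

# Hypothetical later migration (e.g. versions/0XX_add_example_links_table.py).
# Everything named "example" here is invented for illustration.
from sqlalchemy.schema import Column, ForeignKey, MetaData, Table

from daisy.db.sqlalchemy.migrate_repo.schema import (
    DateTime, String, create_tables, from_migration_import)


def define_example_links_table(meta):
    # Late-bind the hosts table factory from migration 001 to this
    # MetaData object so ForeignKey('hosts.id') below can be resolved.
    (define_hosts_table,) = from_migration_import(
        '001_add_daisy_tables', ['define_hosts_table'])
    define_hosts_table(meta)

    return Table('example_links', meta,
                 Column('id', String(36), primary_key=True, nullable=False),
                 Column('host_id', String(36), ForeignKey('hosts.id'),
                        nullable=False),
                 Column('created_at', DateTime(), nullable=False),
                 mysql_engine='InnoDB',
                 extend_existing=True)


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    create_tables([define_example_links_table(meta)])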
+ +from sqlalchemy.schema import (Column, ForeignKey, Index, MetaData, Table) + + +from daisy.db.sqlalchemy.migrate_repo.schema import ( + BigInteger, Boolean, DateTime, Integer, Numeric, String, Text, + create_tables) # noqa + + +def define_hosts_table(meta): + hosts = Table('hosts', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('dmi_uuid', String(36)), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('resource_type', String(36)), + Column('ipmi_user', String(36)), + Column('ipmi_passwd', String(36)), + Column('ipmi_addr', String(256)), + Column('status', String(36), default='init', nullable=False), + Column('root_disk', String(256)), + Column('root_lv_size', Integer()), + Column('swap_lv_size', Integer()), + Column('root_pwd', String(36)), + Column('isolcpus', String(256)), + Column('os_version_id', String(36)), + Column('os_version_file', String(255)), + Column('os_progress', Integer()), + Column('os_status', String(36)), + Column('messages', Text()), + Column('hugepagesize', String(36)), + Column('hugepages', Integer()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return hosts + +def define_discover_hosts_table(meta): + discover_hosts = Table('discover_hosts', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('ip', String(255), nullable=False), + Column('user', String(36)), + Column('passwd', String(36), nullable=False), + Column('status', String(255), default='init', nullable=True), + Column('created_at', DateTime(), nullable=True), + Column('updated_at', DateTime(), nullable=True), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return discover_hosts + +def define_clusters_table(meta): + clusters = Table('clusters', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), default='TECS', nullable=False), + Column('owner', String(255)), + Column('description', Text()), + Column('net_l23_provider', String(64)), + Column('base_mac', String(128)), + Column('gre_id_start', Integer()), + Column('gre_id_end', Integer()), + Column('vlan_start', Integer()), + Column('vlan_end', Integer()), + Column('vni_start', BigInteger()), + Column('vni_end', BigInteger()), + Column('public_vip', String(128)), + Column('segmentation_type', String(64)), + Column('auto_scale', Integer(), nullable=False, default=0), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return clusters + +def define_cluster_hosts_table(meta): + cluster_hosts = Table('cluster_hosts', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cluster_id', String(36), + ForeignKey('clusters.id'), + nullable=False), + Column('host_id', String(36), + nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + 
mysql_engine='InnoDB', + extend_existing=True) + + return cluster_hosts + +def define_networks_table(meta): + networks = Table('networks', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('cluster_id', String(36)), + Column('cidr', String(255)), + Column('vlan_id', String(36)), + Column('vlan_start', Integer(),nullable=False, default=1), + Column('vlan_end', Integer(),nullable=False, default=4094), + Column('ip', String(256)), + Column('gateway', String(128)), + Column('type', String(36), nullable=False, default='default'), + Column('ml2_type', String(36)), + Column('network_type', String(36), nullable=False), + Column('physnet_name', String(108)), + Column('capability', String(36)), + Column('mtu', Integer(), nullable=False, default=1500), + Column('alias', String(255)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return networks + +def define_ip_ranges_table(meta): + ip_ranges = Table('ip_ranges', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('start', String(128)), + Column('end', String(128)), + Column('network_id', String(36)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return ip_ranges + +def define_host_interfaces_table(meta): + host_interfaces = Table('host_interfaces', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('host_id', String(36), + ForeignKey('hosts.id'), + nullable=False), + Column('name', String(64)), + Column('ip', String(256)), + Column('netmask', String(256)), + Column('gateway', String(256)), + Column('mac', String(256)), + Column('pci', String(32)), + Column('type', String(32),nullable=False, default='ether'), + Column('slave1', String(32)), + Column('slave2', String(32)), + Column('mode', String(36)), + Column('is_deployment', Boolean(),default=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return host_interfaces + +def define_host_roles_table(meta): + host_roles = Table('host_roles', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('host_id', + String(36), + ForeignKey('hosts.id'), + nullable=False), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), + Column('status', String(32), nullable=False, default='init'), + Column('progress', Integer(), default=0), + Column('messages', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return host_roles + + +def define_roles_table(meta): + roles = Table('roles', + meta, + Column('id', + String(36), primary_key=True, + nullable=False, index=True), + Column('name', + 
String(255), + nullable=False), + Column('status', String(32), nullable=False, default='init'), + Column('progress', Integer(), default=0), + Column('config_set_id', + String(36), + ForeignKey('config_sets.id')), + Column('description', Text()), + Column('cluster_id', String(36)), + Column('type', String(36), nullable=False, default='custom'), + Column('vip', String(256)), + Column('messages', Text()), + Column('db_lv_size', Integer()), + Column('glance_lv_size', Integer()), + Column('nova_lv_size', Integer(), default=0), + Column('disk_location', String(255), nullable=False, default='local'), + Column('deployment_backend', String(36)), + Column('config_set_update_progress', Integer(), default=0), + Column('ntp_server', String(255)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return roles + +def define_service_roles_table(meta): + service_roles = Table('service_roles', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('role_id', String(36), ForeignKey('roles.id'), + nullable=False), + Column('service_id', String(36), ForeignKey('services.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return service_roles + +def define_services_table(meta): + services = Table('services', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('component_id', String(36), ForeignKey('components.id'), nullable=True), + Column('description', Text()), + Column('backup_type', String(32), nullable=False, default='none'), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return services + +def define_components_table(meta): + components = Table('components', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return components + +def define_config_sets_table(meta): + config_sets = Table('config_sets', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_sets + +def define_configs_table(meta): + configs = Table('configs', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('section', String(255)), + Column('key', String(255), 
nullable=False), + Column('value', String(255)), + Column('config_file_id', String(36), ForeignKey('config_files.id'), nullable=False), + Column('config_version', Integer(),default=0), + Column('running_version', Integer(),default=0), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return configs + +def define_config_files_table(meta): + config_files = Table('config_files', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_files + +def define_config_set_items_table(meta): + config_set_items = Table('config_set_items', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('config_set_id', String(36), ForeignKey('config_sets.id'), + nullable=False), + Column('config_id', String(36), ForeignKey('configs.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_set_items + +def define_config_historys_table(meta): + config_historys = Table('config_historys', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('config_id', String(36)), + Column('value', String(255)), + Column('version', Integer()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return config_historys + +def define_tasks_table(meta): + tasks = Table('tasks', + meta, + Column('id', String(36), primary_key=True, nullable=False), + Column('type', String(30), nullable=False), + Column('status', String(30), nullable=False), + Column('owner', String(255), nullable=False), + Column('expires_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return tasks + +def define_task_infos_table(meta): + task_infos = Table('task_infos', + meta, + Column('task_id', String(36)), + Column('input', Text()), + Column('result', Text()), + Column('message', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return task_infos + +def define_repositorys_table(meta): + repositorys = Table('repositorys', + meta, + Column('id', String(36), primary_key=True, nullable=False), + Column('url', String(255)), + 
Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return repositorys + + +def define_users_table(meta): + users = Table('users', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(256), nullable=False), + Column('password', String(256)), + Column('email', String(256)), + Column('phone', String(128)), + Column('address', String(256)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return users + +def define_versions_table(meta): + versions = Table('versions', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(256), nullable=False), + Column('size', BigInteger()), + Column('status', String(30)), + Column('checksum', String(128)), + Column('owner', String(256)), + Column('version', String(32)), + Column('type', String(30), default='0'), + Column('description', Text()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return versions + +def define_assigned_networks_table(meta): + assigned_networks = Table('assigned_networks', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('mac', String(128)), + Column('network_id', String(36)), + Column('interface_id', String(36)), + Column('ip', String(256)), + Column('vswitch_type', String(256)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return assigned_networks + +def define_logic_networks_table(meta): + logic_networks = Table('logic_networks', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255), nullable=False), + Column('type', String(36)), + Column('physnet_name', String(255)), + Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), + Column('segmentation_id', BigInteger()), + Column('segmentation_type', String(64), nullable=False), + Column('shared', Boolean(), default=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return logic_networks + +def define_subnets_table(meta): + subnets = Table('subnets', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cidr', String(128)), + Column('gateway', String(128)), + Column('logic_network_id', String(36), ForeignKey('logic_networks.id'), nullable=False), + Column('name',String(255), nullable=False), + Column('router_id', String(36), ForeignKey('routers.id')), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', 
DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + return subnets + +def define_float_ip_ranges_table(meta): + float_ip_ranges = Table('float_ip_ranges', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('start', String(128)), + Column('end', String(128)), + Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return float_ip_ranges + +def define_dns_nameservers_table(meta): + dns_nameservers = Table('dns_nameservers', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('dns', String(128)), + Column('subnet_id', String(36), ForeignKey('subnets.id'), nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return dns_nameservers + +def define_routers_table(meta): + routers = Table('routers', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(255)), + Column('description', Text()), + Column('cluster_id', String(36), ForeignKey('clusters.id'), nullable=False), + Column('external_logic_network', String(255)), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return routers + +def define_service_disks_table(meta): + disks = Table('service_disks', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('service', String(255)), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), + Column('disk_location', String(255), nullable=False, default='local'), + Column('lun', Integer()), + Column('data_ips', String(255)), + Column('size', Integer()), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return disks + +def define_cinder_volumes_table(meta): + disks = Table('cinder_volumes', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('user_name', String(255)), + Column('user_pwd', String(255)), + Column('management_ips', String(255)), + Column('pools', String(255)), + Column('volume_driver', String(255)), + Column('volume_type', String(255)), + Column('backend_index', String(255)), + Column('role_id', + String(36), + ForeignKey('roles.id'), + nullable=False), + Column('created_at', DateTime(), nullable=False), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return disks + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind 
= migrate_engine + tables = [define_hosts_table(meta), + define_discover_hosts_table(meta), + define_clusters_table(meta), + define_cluster_hosts_table(meta), + define_networks_table(meta), + define_ip_ranges_table(meta), + define_host_interfaces_table(meta), + define_config_sets_table(meta), + define_components_table(meta), + define_services_table(meta), + define_roles_table(meta), + define_host_roles_table(meta), + define_service_roles_table(meta), + define_config_files_table(meta), + define_configs_table(meta), + define_config_set_items_table(meta), + define_config_historys_table(meta), + define_tasks_table(meta), + define_task_infos_table(meta), + define_repositorys_table(meta), + define_users_table(meta), + define_versions_table(meta), + define_assigned_networks_table(meta), + define_logic_networks_table(meta), + define_routers_table(meta), + define_subnets_table(meta), + define_float_ip_ranges_table(meta), + define_dns_nameservers_table(meta), + define_service_disks_table(meta), + define_cinder_volumes_table(meta)] + create_tables(tables) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py new file mode 100755 index 00000000..0004b0ae --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/002_add_role_type_roles.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +role_type = Column('role_type', String(255)) + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + roles = Table('roles', meta, autoload=True) + roles.create_column(role_type) + diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py new file mode 100755 index 00000000..8c25d575 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/003_add_template_tables.py @@ -0,0 +1,69 @@ +# Copyright (c) 2015 ZTE, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
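The 002 script above shows the add-a-column pattern this repository uses (autoload the table, then create_column); the patch defines only upgrade() for it. If a reversible migration were wanted, sqlalchemy-migrate would also invoke a downgrade() hook — the sketch below is a hypothetical counterpart, not something the original patch provides.

# Hypothetical downgrade counterpart to 002_add_role_type_roles.py; not part
# of the original patch. Like 002's upgrade(), it relies on the changeset
# extension that sqlalchemy-migrate loads, which adds drop_column() to Table.
from sqlalchemy import MetaData, Table


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    roles = Table('roles', meta, autoload=True)
    roles.drop_column('role_type')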
+ +from sqlalchemy.schema import (Column, Index, MetaData, Table) +from daisy.db.sqlalchemy.migrate_repo.schema import ( + Boolean, DateTime, String, Text, + create_tables) + + +def define_template_table(meta): + templates = Table('template', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('name', String(36), nullable=False), + Column('description', Text()), + Column('type', String(36), nullable=True), + Column('hosts', Text(), nullable=True), + Column('content', Text(), nullable=True), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return templates + +def define_host_template_table(meta): + host_templates = Table('host_templates', + meta, + Column('id', String(36), primary_key=True, + nullable=False), + Column('cluster_name', String(36), nullable=False), + Column('hosts', Text(), nullable=True), + Column('updated_at', DateTime(), nullable=False), + Column('deleted_at', DateTime()), + Column('created_at', DateTime(), nullable=False), + Column('deleted', + Boolean(), + nullable=False, + default=False, + index=True), + mysql_engine='InnoDB', + extend_existing=True) + + return host_templates + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + tables = [define_template_table(meta), + define_host_template_table(meta)] + create_tables(tables) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py new file mode 100755 index 00000000..469e1ee2 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/004_add_message_host_id_discover_hosts.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String, Text + +meta = MetaData() +message = Column('message', Text(), nullable=True) +host_id = Column('host_id', String(36), nullable=True) + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + discover_hosts = Table('discover_hosts', meta, autoload=True) + discover_hosts.create_column(message) + discover_hosts.create_column(host_id) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/005_add_data_ips_cindervolumes.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/005_add_data_ips_cindervolumes.py new file mode 100755 index 00000000..803a27dc --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/005_add_data_ips_cindervolumes.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +data_ips = Column('data_ips', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + cinder_volumes = Table('cinder_volumes', meta, autoload=True) + cinder_volumes.create_column(data_ips) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/006_add_role_public_vip.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/006_add_role_public_vip.py new file mode 100755 index 00000000..370eefbe --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/006_add_role_public_vip.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +public_vip = Column('public_vip', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + roles = Table('roles', meta, autoload=True) + roles.create_column(public_vip) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py new file mode 100755 index 00000000..56ca2493 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/007_add_role_db_glance_mogondb_vip.py @@ -0,0 +1,37 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import MetaData, Table, Column, String + + +meta = MetaData() +db_vip = Column('db_vip', String(255)) +glance_vip = Column('glance_vip', String(255)) +mongodb_vip = Column('mongodb_vip', String(255)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + + roles = Table('roles', meta, autoload=True) + roles.create_column(db_vip) + roles.create_column(glance_vip) + roles.create_column(mongodb_vip) + + + + + diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py new file mode 100755 index 00000000..aacfc9ec --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/008_add_cluster_use_dns.py @@ -0,0 +1,27 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String, Integer + + +meta = MetaData() +use_dns = Column('use_dns', Integer(), default=0) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + roles = Table('clusters', meta, autoload=True) + roles.create_column(use_dns) diff --git a/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/__init__.py b/code/daisy/daisy/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/db/sqlalchemy/models.py b/code/daisy/daisy/db/sqlalchemy/models.py new file mode 100755 index 00000000..8c859fd5 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/models.py @@ -0,0 +1,530 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +SQLAlchemy models for daisy data +""" + +import uuid + +from oslo.serialization import jsonutils +from oslo_db.sqlalchemy import models +from oslo_utils import timeutils +from sqlalchemy import BigInteger +from sqlalchemy import Boolean +from sqlalchemy import Column +from sqlalchemy import DateTime +from sqlalchemy.ext.compiler import compiles +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy.orm import backref, relationship +from sqlalchemy import sql +from sqlalchemy import String +from sqlalchemy import Text +from sqlalchemy.types import TypeDecorator +from sqlalchemy import UniqueConstraint + + +BASE = declarative_base() + + +@compiles(BigInteger, 'sqlite') +def compile_big_int_sqlite(type_, compiler, **kw): + return 'INTEGER' + + +class JSONEncodedDict(TypeDecorator): + """Represents an immutable structure as a json-encoded string""" + + impl = Text + + def process_bind_param(self, value, dialect): + if value is not None: + value = jsonutils.dumps(value) + return value + + def process_result_value(self, value, dialect): + if value is not None: + value = jsonutils.loads(value) + return value + +class DaisyBase(models.ModelBase, models.TimestampMixin): + """Base class for Daisy Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + __protected_attributes__ = set([ + "created_at", "updated_at", "deleted_at", "deleted"]) + + def save(self, session=None): + from daisy.db.sqlalchemy import api as db_api + super(DaisyBase, self).save(session or db_api.get_session()) + + id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + + created_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=False) + # TODO(vsergeyev): Column `updated_at` have no default value in + # openstack common code. We should decide, is this value + # required and make changes in oslo (if required) or + # in daisy (if not). + updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=True, onupdate=lambda: timeutils.utcnow()) + # TODO(boris-42): Use SoftDeleteMixin instead of deleted Column after + # migration that provides UniqueConstraints and change + # type of this column. 
+ deleted_at = Column(DateTime) + deleted = Column(Boolean, nullable=False, default=False) + + def delete(self, session=None): + """Delete this object.""" + self.deleted = True + self.deleted_at = timeutils.utcnow() + self.save(session=session) + + def keys(self): + return self.__dict__.keys() + + def values(self): + return self.__dict__.values() + + def items(self): + return self.__dict__.items() + + def to_dict(self): + d = self.__dict__.copy() + # NOTE(flaper87): Remove + # private state instance + # It is not serializable + # and causes CircularReference + d.pop("_sa_instance_state") + return d + +class Host(BASE, DaisyBase): + """Represents an host in the datastore.""" + __tablename__ = 'hosts' + __table_args__ = (Index('ix_hosts_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + dmi_uuid = Column(String(36)) + description = Column(Text) + resource_type = Column(String(36)) + ipmi_user=Column(String(36)) + ipmi_passwd=Column(String(36)) + ipmi_addr=Column(String(255)) + status = Column(String(36), default='init', nullable=False) + root_disk = Column(String(36)) + root_lv_size = Column(Integer(),default=51200) + swap_lv_size = Column(Integer(),default=4096) + root_pwd = Column(String(36)) + isolcpus = Column(String(255)) + os_version_id = Column(String(36)) + os_version_file = Column(String(255)) + os_progress = Column(Integer(),default=0) + os_status = Column(String(36)) + messages = Column(Text) + hugepagesize = Column(String(36)) + hugepages = Column(Integer(),default=0) + +class DiscoverHost(BASE, DaisyBase): + """Represents an host in the datastore.""" + __tablename__ = 'discover_hosts' + __table_args__ = (Index('ix_discover_hosts_deleted', 'deleted'),) + + ip = Column(String(255)) + user=Column(String(36)) + passwd=Column(String(36)) + status = Column(String(64), default='init') + message = Column(Text) + host_id = Column(String(36)) + +class Cluster(BASE, DaisyBase): + """Represents an clusters in the datastore.""" + __tablename__ = 'clusters' + __table_args__ = (Index('ix_clusters_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + owner = Column(String(255)) + description = Column(Text) + net_l23_provider = Column(String(64)) + base_mac = Column(String(128)) + gre_id_start = Column(Integer()) + gre_id_end = Column(Integer()) + vlan_start = Column(Integer()) + vlan_end = Column(Integer()) + vni_start = Column(BigInteger()) + vni_end = Column(BigInteger()) + public_vip = Column(String(128)) + segmentation_type = Column(String(64)) + auto_scale = Column(Integer(), nullable=False, default=0) + use_dns = Column(Integer(), nullable=False, default=0) + +class ClusterHost(BASE, DaisyBase): + """Represents an cluster host in the datastore.""" + __tablename__ = 'cluster_hosts' + __table_args__ = (Index('ix_cluster_hosts_deleted', 'deleted'),) + + cluster_id = Column(String(36), + ForeignKey('clusters.id'), + nullable=False) + host_id = Column(String(36), + nullable=False) + +class Template(BASE, DaisyBase): + """Represents an cluster host in the datastore.""" + __tablename__ = 'template' + __table_args__ = (Index('ix_template_deleted', 'deleted'),) + + name = Column(String(36), nullable=False) + description = Column(Text) + type = Column(String(36), nullable=True) + hosts = Column(Text(), nullable=True) + content = Column(Text(), nullable=True) + +class HostTemplate(BASE, DaisyBase): + """Represents an host template in the datastore.""" + __tablename__ = 'host_templates' + __table_args__ = (Index('ix_host_template_deleted', 'deleted'),) + + # 
name = Column(String(36), nullable=False) + # description = Column(String(36)) + cluster_name = Column(String(36), nullable=True) + hosts = Column(Text(), nullable=True) + +class HostInterface(BASE, DaisyBase): + """Represents an host_interfaces in the datastore.""" + __tablename__ = 'host_interfaces' + __table_args__ = (Index('ix_host_interfaces_deleted', 'deleted'),) + + host_id = Column(String(36), + ForeignKey('hosts.id'), + nullable=False) + name = Column(String(64)) + ip = Column(String(256)) + netmask = Column(String(256)) + gateway = Column(String(256)) + mac = Column(String(256)) + pci = Column(String(32)) + type = Column(String(32),nullable=False, default='ether') + slave1 = Column(String(32)) + slave2 = Column(String(32)) + mode = Column(String(36)) + is_deployment=Column(Boolean(),default=False) + +class Network(BASE, DaisyBase): + """Represents an networks in the datastore.""" + __tablename__ = 'networks' + __table_args__ = (Index('ix_networks_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + description = Column(Text) + cluster_id = Column(String(36)) + cidr = Column(String(255)) + vlan_id = Column(String(36)) + vlan_start = Column(Integer(), nullable=False, default=1) + vlan_end = Column(Integer(), nullable=False, default=4094) + gateway = Column(String(128)) + ip = Column(String(256)) + type = Column(String(36), nullable=False, default='default') + ml2_type = Column(String(36)) + network_type = Column(String(36), nullable=False) + physnet_name = Column(String(108)) + capability = Column(String(36)) + mtu = Column(Integer(), nullable=False, default=1500) + alias = Column(String(255)) +class IpRange(BASE, DaisyBase): + """Represents an ip_ranges in the datastore.""" + __tablename__ = 'ip_ranges' + __table_args__ = (Index('ix_ip_ranges_deleted', 'deleted'),) + + start = Column(String(128)) + end = Column(String(128)) + network_id = Column(String(36)) + +class HostRole(BASE, DaisyBase): + """Represents an host_roles in the datastore.""" + __tablename__ = 'host_roles' + __table_args__ = (Index('ix_host_roles_deleted', 'deleted'),) + + host_id = Column(String(36), + ForeignKey('hosts.id'), + nullable=False) + role_id = Column(String(36), + ForeignKey('roles.id'), + nullable=False) + status = Column(String(32), nullable=False, default='init') + progress = Column(Integer(), default=0) + messages = Column(Text) + +class Role(BASE, DaisyBase): + """Represents an roles in the datastore.""" + __tablename__ = 'roles' + __table_args__ = (Index('ix_roles_deleted', 'deleted'),Index('ix_roles_id', 'id'),) + + name = Column(String(255), + nullable=False) + description = Column(Text) + status = Column(String(32), nullable=False, default='init') + progress = Column(Integer(), default=0) + config_set_id = Column(String(36), + ForeignKey('config_sets.id')) + cluster_id = Column(String(36)) + type = Column(String(36), nullable=False, default='custom') + vip = Column(String(256)) + deployment_backend = Column(String(36)) + messages = Column(Text) + config_set_update_progress = Column(Integer(), default=0) + db_lv_size = Column(Integer(),default=0) + glance_lv_size = Column(Integer(),default=0) + nova_lv_size = Column(Integer(), default=0) + disk_location = Column(String(255), nullable=False, default='local') + ntp_server = Column(String(255)) + role_type = Column(String(255)) + db_vip = Column(String(255)) + glance_vip = Column(String(255)) + public_vip = Column(String(255)) + mongodb_vip = Column(String(255)) + +class ServiceRole(BASE, DaisyBase): + """Represents an 
service_roles in the datastore.""" + __tablename__ = 'service_roles' + __table_args__ = (Index('ix_service_roles_deleted', 'deleted'),) + + role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) + service_id = Column(String(36), ForeignKey('services.id'), nullable=False) + +class Service(BASE, DaisyBase): + """Represents an services in the datastore.""" + __tablename__ = 'services' + __table_args__ = (Index('ix_services_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + description = Column(Text) + component_id = Column(String(36), ForeignKey('components.id'), nullable=True) + backup_type = Column(String(32), nullable=False, default='none') + +class Component(BASE, DaisyBase): + """Represents an components in the datastore.""" + __tablename__ = 'components' + __table_args__ = (Index('ix_components_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + description = Column(Text) + +class ConfigSet(BASE, DaisyBase): + """Represents an config_sets in the datastore.""" + __tablename__ = 'config_sets' + __table_args__ = (Index('ix_config_sets_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + description = Column(Text) + +class Config(BASE, DaisyBase): + """Represents an configs in the datastore.""" + __tablename__ = 'configs' + __table_args__ = (Index('ix_configs_deleted', 'deleted'),) + + section = Column(String(255)) + key = Column(String(255), nullable=False) + value = Column(String(255)) + config_file_id = Column(String(36), ForeignKey('config_files.id'), nullable=False) + config_version = Column(Integer(),default='0') + running_version = Column(Integer(),default='0') + description = Column(Text) + +class ConfigFile(BASE, DaisyBase): + """Represents an config_files in the datastore.""" + __tablename__ = 'config_files' + __table_args__ = (Index('ix_config_files_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + description = Column(Text) + +class ConfigSetItem(BASE, DaisyBase): + """Represents an config_set_items in the datastore.""" + __tablename__ = 'config_set_items' + __table_args__ = (Index('ix_config_set_items_deleted', 'deleted'),) + + config_set_id = Column(String(36), ForeignKey('config_sets.id'), + nullable=False) + config_id = Column(String(36), ForeignKey('configs.id'), nullable=False) + +class ConfigHistory(BASE, DaisyBase): + """Represents an config_historys in the datastore.""" + __tablename__ = 'config_historys' + __table_args__ = (Index('ix_config_historys_deleted', 'deleted'),) + + config_id = Column(String(36)) + value = Column(String(255)) + version = Column(Integer()) + +class Task(BASE, DaisyBase): + """Represents an tasks in the datastore.""" + __tablename__ = 'tasks' + __table_args__ = (Index('ix_tasks_deleted', 'deleted'),) + + type = Column(String(30), nullable=False) + status = Column(String(30), nullable=False) + owner = Column(String(255), nullable=False) + expires_at = Column(DateTime()) + +class TaskInfo(BASE, DaisyBase): + """Represents an task_infos in the datastore.""" + __tablename__ = 'task_infos' + __table_args__ = (Index('ix_task_infos_deleted', 'deleted'),) + + task_id = Column(String(36)) + input = Column(Text()) + result = Column(Text()) + message = Column(Text()) + +class Repository(BASE, DaisyBase): + """Represents an repositorys in the datastore.""" + __tablename__ = 'repositorys' + __table_args__ = (Index('ix_repositorys_deleted', 'deleted'),) + + url = Column(String(255)) + description = Column(Text()) + + +class User(BASE, DaisyBase): + """Represents 
an users in the datastore.""" + __tablename__ = 'users' + __table_args__ = (Index('ix_users_deleted', 'deleted'),) + + name = Column(String(256), nullable=False) + password = Column(String(256)) + email = Column(String(256)) + phone = Column(String(128)) + address = Column(String(256)) + +class Version(BASE, DaisyBase): + """Represents an versions in the datastore.""" + __tablename__ = 'versions' + __table_args__ = (Index('ix_versions_deleted', 'deleted'),) + + name = Column(String(256), nullable=False) + size = Column(BigInteger()) + status = Column(String(30)) + checksum = Column(String(128)) + owner = Column(String(256)) + version = Column(String(32)) + type = Column(String(30), default='0') + description = Column(Text()) + +class AssignedNetworks(BASE, DaisyBase): + """Represents an assigned_networks in the datastore.""" + __tablename__ = 'assigned_networks' + __table_args__ = (Index('ix_assigned_networks_deleted', 'deleted'),) + + mac = Column(String(128)) + network_id = Column(String(36)) + interface_id = Column(String(36)) + ip = Column(String(256)) + vswitch_type = Column(String(256)) + +class LogicNetwork(BASE, DaisyBase): + """Represents an logic_networks in the datastore.""" + __tablename__ = 'logic_networks' + __table_args__ = (Index('ix_logic_networks_deleted', 'deleted'),) + + name = Column(String(255), nullable=False) + type = Column(String(36)) + physnet_name = Column(String(255)) + cluster_id= Column(String(36), ForeignKey('clusters.id'), nullable=False) + segmentation_id = Column(BigInteger()) + segmentation_type = Column(String(64), nullable=False) + shared = Column(Boolean(), default=False) + +class Subnet(BASE, DaisyBase): + """Represents an subnets in the datastore.""" + __tablename__ = 'subnets' + __table_args__ = (Index('ix_subnets_deleted', 'deleted'),) + + cidr = Column(String(128)) + gateway = Column(String(128)) + logic_network_id = Column(String(36), ForeignKey('logic_networks.id'), nullable=False) + name = Column(String(255), nullable=False) + router_id = Column(String(36), ForeignKey('routers.id')) + +class FloatIpRange(BASE, DaisyBase): + """Represents an float_ip_ranges in the datastore.""" + __tablename__ = 'float_ip_ranges' + __table_args__ = (Index('ix_float_ip_ranges_deleted', 'deleted'),) + + start = Column(String(128)) + end = Column(String(36)) + subnet_id = Column(String(36), ForeignKey('subnets.id'), nullable=False) + +class DnsNameservers(BASE, DaisyBase): + """Represents an dns_nameservers in the datastore.""" + __tablename__ = 'dns_nameservers' + __table_args__ = (Index('ix_dns_nameservers_deleted', 'deleted'),) + + dns = Column(String(128)) + subnet_id = Column(String(36), ForeignKey('subnets.id'), nullable=False) + +class Router(BASE, DaisyBase): + """Represents an routers in the datastore.""" + __tablename__ = 'routers' + __table_args__ = (Index('ix_routers_deleted', 'deleted'),) + + name = Column(String(255)) + description = Column(Text()) + cluster_id = Column(String(36), ForeignKey('clusters.id'), nullable=False) + external_logic_network = Column(String(255)) + +class ServiceDisk(BASE, DaisyBase): + """Represents an service disks in the datastore.""" + __tablename__ = 'service_disks' + __table_args__ = (Index('ix_service_disks_deleted', 'deleted'),) + + service = Column(String(255)) + role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) + disk_location = Column(String(255), nullable=False, default='local') + lun = Column(Integer()) + data_ips = Column(String(255)) + size = Column(Integer()) + + +class CinderVolume(BASE, 
DaisyBase): + """Represents an cinder volumes in the datastore.""" + __tablename__ = 'cinder_volumes' + __table_args__ = (Index('ix_service_disks_deleted', 'deleted'),) + + user_name = Column(String(255)) + user_pwd = Column(String(255)) + management_ips = Column(String(255)) + data_ips = Column(String(255)) + pools = Column(String(255)) + volume_driver = Column(String(255)) + volume_type = Column(String(255)) + backend_index = Column(String(255)) + role_id = Column(String(36), ForeignKey('roles.id'), nullable=False) + +def register_models(engine): + """Create database tables for all models with the given engine.""" + models = (Host,Project) + for model in models: + model.metadata.create_all(engine) + +def unregister_models(engine): + """Drop database tables for all models with the given engine.""" + models = (Host,project) + for model in models: + model.metadata.drop_all(engine) diff --git a/code/daisy/daisy/db/sqlalchemy/models_artifacts.py b/code/daisy/daisy/db/sqlalchemy/models_artifacts.py new file mode 100755 index 00000000..f3a2c057 --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/models_artifacts.py @@ -0,0 +1,336 @@ +# Copyright (c) 2015 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from oslo_db.sqlalchemy import models +from oslo_utils import timeutils +from sqlalchemy import BigInteger +from sqlalchemy import Boolean +from sqlalchemy import Column +from sqlalchemy import DateTime +from sqlalchemy.ext import declarative +from sqlalchemy import ForeignKey +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy import Numeric +from sqlalchemy.orm import backref +from sqlalchemy.orm import composite +from sqlalchemy.orm import relationship +from sqlalchemy import String +from sqlalchemy import Text + +import daisy.artifacts as ga +from daisy.common import semver_db +from daisy import i18n +from oslo_log import log as os_logging + +BASE = declarative.declarative_base() +LOG = os_logging.getLogger(__name__) +_LW = i18n._LW + + +class ArtifactBase(models.ModelBase, models.TimestampMixin): + """Base class for Artifact Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + __protected_attributes__ = set([ + "created_at", "updated_at"]) + + created_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=False) + + updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=False, onupdate=lambda: timeutils.utcnow()) + + def save(self, session=None): + from daisy.db.sqlalchemy import api as db_api + + super(ArtifactBase, self).save(session or db_api.get_session()) + + def keys(self): + return self.__dict__.keys() + + def values(self): + return self.__dict__.values() + + def items(self): + return self.__dict__.items() + + def to_dict(self): + d = {} + for c in self.__table__.columns: + d[c.name] = self[c.name] + return d + + +def _parse_property_type_value(prop, show_text_properties=True): + columns = [ + 'int_value', + 'string_value', + 'bool_value', + 
'numeric_value'] + if show_text_properties: + columns.append('text_value') + + for prop_type in columns: + if getattr(prop, prop_type) is not None: + return prop_type.rpartition('_')[0], getattr(prop, prop_type) + + return None, None + + +class Artifact(BASE, ArtifactBase): + __tablename__ = 'artifacts' + __table_args__ = ( + Index('ix_artifact_name_and_version', 'name', 'version_prefix', + 'version_suffix'), + Index('ix_artifact_type', 'type_name', 'type_version_prefix', + 'type_version_suffix'), + Index('ix_artifact_state', 'state'), + Index('ix_artifact_owner', 'owner'), + Index('ix_artifact_visibility', 'visibility'), + {'mysql_engine': 'InnoDB'}) + + __protected_attributes__ = ArtifactBase.__protected_attributes__.union( + set(['published_at', 'deleted_at'])) + + id = Column(String(36), primary_key=True, + default=lambda: str(uuid.uuid4())) + name = Column(String(255), nullable=False) + type_name = Column(String(255), nullable=False) + type_version_prefix = Column(BigInteger, nullable=False) + type_version_suffix = Column(String(255)) + type_version_meta = Column(String(255)) + type_version = composite(semver_db.DBVersion, type_version_prefix, + type_version_suffix, type_version_meta) + version_prefix = Column(BigInteger, nullable=False) + version_suffix = Column(String(255)) + version_meta = Column(String(255)) + version = composite(semver_db.DBVersion, version_prefix, + version_suffix, version_meta) + description = Column(Text) + visibility = Column(String(32), nullable=False) + state = Column(String(32), nullable=False) + owner = Column(String(255), nullable=False) + published_at = Column(DateTime) + deleted_at = Column(DateTime) + + def to_dict(self, show_level=ga.Showlevel.BASIC, + show_text_properties=True): + d = super(Artifact, self).to_dict() + + d.pop('type_version_prefix') + d.pop('type_version_suffix') + d.pop('type_version_meta') + d.pop('version_prefix') + d.pop('version_suffix') + d.pop('version_meta') + d['type_version'] = str(self.type_version) + d['version'] = str(self.version) + + tags = [] + for tag in self.tags: + tags.append(tag.value) + d['tags'] = tags + + if show_level == ga.Showlevel.NONE: + return d + + properties = {} + + # sort properties + self.properties.sort(key=lambda elem: (elem.name, elem.position)) + + for prop in self.properties: + proptype, propvalue = _parse_property_type_value( + prop, show_text_properties) + if proptype is None: + continue + + if prop.position is not None: + # make array + for p in properties.keys(): + if p == prop.name: + # add value to array + properties[p]['value'].append(dict(type=proptype, + value=propvalue)) + break + else: + # create new array + p = dict(type='array', + value=[]) + p['value'].append(dict(type=proptype, + value=propvalue)) + properties[prop.name] = p + else: + # make scalar + properties[prop.name] = dict(type=proptype, + value=propvalue) + d['properties'] = properties + + blobs = {} + # sort blobs + self.blobs.sort(key=lambda elem: elem.position) + + for blob in self.blobs: + locations = [] + # sort locations + blob.locations.sort(key=lambda elem: elem.position) + for loc in blob.locations: + locations.append(dict(value=loc.value, + status=loc.status)) + if blob.name in blobs: + blobs[blob.name].append(dict(size=blob.size, + checksum=blob.checksum, + locations=locations, + item_key=blob.item_key)) + else: + blobs[blob.name] = [] + blobs[blob.name].append(dict(size=blob.size, + checksum=blob.checksum, + locations=locations, + item_key=blob.item_key)) + + d['blobs'] = blobs + + return d + + +class 
ArtifactDependency(BASE, ArtifactBase): + __tablename__ = 'artifact_dependencies' + __table_args__ = (Index('ix_artifact_dependencies_source_id', + 'artifact_source'), + Index('ix_artifact_dependencies_origin_id', + 'artifact_origin'), + Index('ix_artifact_dependencies_dest_id', + 'artifact_dest'), + Index('ix_artifact_dependencies_direct_dependencies', + 'artifact_source', 'is_direct'), + {'mysql_engine': 'InnoDB'}) + + id = Column(String(36), primary_key=True, nullable=False, + default=lambda: str(uuid.uuid4())) + artifact_source = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + artifact_dest = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + artifact_origin = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + is_direct = Column(Boolean, nullable=False) + position = Column(Integer) + name = Column(String(36)) + + source = relationship('Artifact', + backref=backref('dependencies', cascade="all, " + "delete"), + foreign_keys="ArtifactDependency.artifact_source") + dest = relationship('Artifact', + foreign_keys="ArtifactDependency.artifact_dest") + origin = relationship('Artifact', + foreign_keys="ArtifactDependency.artifact_origin") + + +class ArtifactTag(BASE, ArtifactBase): + __tablename__ = 'artifact_tags' + __table_args__ = (Index('ix_artifact_tags_artifact_id', 'artifact_id'), + Index('ix_artifact_tags_artifact_id_tag_value', + 'artifact_id', 'value'), + {'mysql_engine': 'InnoDB'},) + + id = Column(String(36), primary_key=True, nullable=False, + default=lambda: str(uuid.uuid4())) + artifact_id = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + artifact = relationship(Artifact, + backref=backref('tags', + cascade="all, delete-orphan")) + value = Column(String(255), nullable=False) + + +class ArtifactProperty(BASE, ArtifactBase): + __tablename__ = 'artifact_properties' + __table_args__ = ( + Index('ix_artifact_properties_artifact_id', 'artifact_id'), + Index('ix_artifact_properties_name', 'name'), + {'mysql_engine': 'InnoDB'},) + id = Column(String(36), primary_key=True, nullable=False, + default=lambda: str(uuid.uuid4())) + artifact_id = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + artifact = relationship(Artifact, + backref=backref('properties', + cascade="all, delete-orphan")) + name = Column(String(255), nullable=False) + string_value = Column(String(255)) + int_value = Column(Integer) + numeric_value = Column(Numeric) + bool_value = Column(Boolean) + text_value = Column(Text) + position = Column(Integer) + + +class ArtifactBlob(BASE, ArtifactBase): + __tablename__ = 'artifact_blobs' + __table_args__ = ( + Index('ix_artifact_blobs_artifact_id', 'artifact_id'), + Index('ix_artifact_blobs_name', 'name'), + {'mysql_engine': 'InnoDB'},) + id = Column(String(36), primary_key=True, nullable=False, + default=lambda: str(uuid.uuid4())) + artifact_id = Column(String(36), ForeignKey('artifacts.id'), + nullable=False) + name = Column(String(255), nullable=False) + item_key = Column(String(329)) + size = Column(BigInteger(), nullable=False) + checksum = Column(String(32)) + position = Column(Integer) + artifact = relationship(Artifact, + backref=backref('blobs', + cascade="all, delete-orphan")) + + +class ArtifactBlobLocation(BASE, ArtifactBase): + __tablename__ = 'artifact_blob_locations' + __table_args__ = (Index('ix_artifact_blob_locations_blob_id', + 'blob_id'), + {'mysql_engine': 'InnoDB'}) + + id = Column(String(36), primary_key=True, nullable=False, + default=lambda: str(uuid.uuid4())) + blob_id 
= Column(String(36), ForeignKey('artifact_blobs.id'), + nullable=False) + value = Column(Text, nullable=False) + position = Column(Integer) + status = Column(String(36), default='active', nullable=True) + blob = relationship(ArtifactBlob, + backref=backref('locations', + cascade="all, delete-orphan")) + + +def register_models(engine): + """Create database tables for all models with the given engine.""" + models = (Artifact, ArtifactTag, ArtifactProperty, + ArtifactBlob, ArtifactBlobLocation, ArtifactDependency) + for model in models: + model.metadata.create_all(engine) + + +def unregister_models(engine): + """Drop database tables for all models with the given engine.""" + models = (ArtifactDependency, ArtifactBlobLocation, ArtifactBlob, + ArtifactProperty, ArtifactTag, Artifact) + for model in models: + model.metadata.drop_all(engine) diff --git a/code/daisy/daisy/db/sqlalchemy/models_metadef.py b/code/daisy/daisy/db/sqlalchemy/models_metadef.py new file mode 100755 index 00000000..8a81a39a --- /dev/null +++ b/code/daisy/daisy/db/sqlalchemy/models_metadef.py @@ -0,0 +1,168 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +SQLAlchemy models for glance metadata schema +""" + +from oslo_db.sqlalchemy import models +from oslo_utils import timeutils +from sqlalchemy import Boolean +from sqlalchemy import Column +from sqlalchemy import DateTime +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy import ForeignKey +from sqlalchemy import Index +from sqlalchemy import Integer +from sqlalchemy.orm import relationship +from sqlalchemy import String +from sqlalchemy import Text + +from daisy.db.sqlalchemy.models import JSONEncodedDict + + +class DictionaryBase(models.ModelBase): + metadata = None + + def to_dict(self): + d = {} + for c in self.__table__.columns: + d[c.name] = self[c.name] + return d + + +BASE_DICT = declarative_base(cls=DictionaryBase) + + +class DaisyMetadefBase(models.TimestampMixin): + """Base class for Glance Metadef Models.""" + + __table_args__ = {'mysql_engine': 'InnoDB'} + __table_initialized__ = False + __protected_attributes__ = set(["created_at", "updated_at"]) + + created_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=False) + # TODO(wko): Column `updated_at` have no default value in + # openstack common code. We should decide, is this value + # required and make changes in oslo (if required) or + # in glance (if not). 
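# NOTE: both timestamps on this base default to timeutils.utcnow() on the
# Python side, so rows carry them even without server-side column defaults;
# updated_at is additionally refreshed via onupdate on every save.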
+ updated_at = Column(DateTime, default=lambda: timeutils.utcnow(), + nullable=True, onupdate=lambda: timeutils.utcnow()) + + +class MetadefNamespace(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema namespace in the datastore.""" + __tablename__ = 'metadef_namespaces' + __table_args__ = (Index('ix_metadef_namespaces_namespace', 'namespace'), + Index('ix_metadef_namespaces_owner', 'owner')) + + id = Column(Integer, primary_key=True, nullable=False) + namespace = Column(String(80), nullable=False) + display_name = Column(String(80)) + description = Column(Text()) + visibility = Column(String(32)) + protected = Column(Boolean) + owner = Column(String(255), nullable=False) + + +class MetadefObject(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema object in the datastore.""" + __tablename__ = 'metadef_objects' + __table_args__ = (Index('ix_metadef_objects_namespace_id', 'namespace_id'), + Index('ix_metadef_objects_name', 'name')) + + id = Column(Integer, primary_key=True, nullable=False) + namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), + nullable=False) + name = Column(String(80), nullable=False) + description = Column(Text()) + required = Column(Text()) + json_schema = Column(JSONEncodedDict(), default={}, nullable=False) + + +class MetadefProperty(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema namespace-property in the datastore.""" + __tablename__ = 'metadef_properties' + __table_args__ = (Index('ix_metadef_properties_namespace_id', + 'namespace_id'), + Index('ix_metadef_properties_name', 'name')) + + id = Column(Integer, primary_key=True, nullable=False) + namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), + nullable=False) + name = Column(String(80), nullable=False) + json_schema = Column(JSONEncodedDict(), default={}, nullable=False) + + +class MetadefNamespaceResourceType(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema namespace-property in the datastore.""" + __tablename__ = 'metadef_namespace_resource_types' + __table_args__ = (Index('ix_metadef_ns_res_types_res_type_id_ns_id', + 'resource_type_id', 'namespace_id'), + Index('ix_metadef_ns_res_types_namespace_id', + 'namespace_id')) + + resource_type_id = Column(Integer, + ForeignKey('metadef_resource_types.id'), + primary_key=True, nullable=False) + namespace_id = Column(Integer, ForeignKey('metadef_namespaces.id'), + primary_key=True, nullable=False) + properties_target = Column(String(80)) + prefix = Column(String(80)) + + +class MetadefResourceType(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema resource type in the datastore.""" + __tablename__ = 'metadef_resource_types' + __table_args__ = (Index('ix_metadef_resource_types_name', 'name'), ) + + id = Column(Integer, primary_key=True, nullable=False) + name = Column(String(80), nullable=False) + protected = Column(Boolean, nullable=False, default=False) + + associations = relationship( + "MetadefNamespaceResourceType", + primaryjoin=id == MetadefNamespaceResourceType.resource_type_id) + + +class MetadefTag(BASE_DICT, DaisyMetadefBase): + """Represents a metadata-schema tag in the data store.""" + __tablename__ = 'metadef_tags' + __table_args__ = (Index('ix_metadef_tags_namespace_id', + 'namespace_id', 'name'), + Index('ix_metadef_tags_name', 'name')) + + id = Column(Integer, primary_key=True, nullable=False) + namespace_id = Column(Integer(), ForeignKey('metadef_namespaces.id'), + nullable=False) + name = Column(String(80), nullable=False) + + +def 
register_models(engine): + """Create database tables for all models with the given engine.""" + models = (MetadefNamespace, MetadefObject, MetadefProperty, + MetadefTag, + MetadefResourceType, MetadefNamespaceResourceType) + for model in models: + model.metadata.create_all(engine) + + +def unregister_models(engine): + """Drop database tables for all models with the given engine.""" + models = (MetadefObject, MetadefProperty, MetadefNamespaceResourceType, + MetadefTag, + MetadefNamespace, MetadefResourceType) + for model in models: + model.metadata.drop_all(engine) diff --git a/code/daisy/daisy/domain/__init__.py b/code/daisy/daisy/domain/__init__.py new file mode 100755 index 00000000..7818c78d --- /dev/null +++ b/code/daisy/daisy/domain/__init__.py @@ -0,0 +1,684 @@ +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import datetime +import uuid + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import importutils +from oslo_utils import timeutils +import six + +from daisy.common import exception +from daisy import i18n + +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +LOG = logging.getLogger(__name__) +CONF = cfg.CONF +CONF.import_opt('task_executor', 'daisy.common.config', group='task') + + +_delayed_delete_imported = False + + +def _import_delayed_delete(): + # glance_store (indirectly) imports daisy.domain therefore we can't put + # the CONF.import_opt outside - we have to do it in a convoluted/indirect + # way! 
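# The module-level flag below makes this lazy import idempotent: the
# 'delayed_delete' option is registered on CONF only on the first call,
# and subsequent calls are a no-op.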
+ global _delayed_delete_imported + if not _delayed_delete_imported: + CONF.import_opt('delayed_delete', 'glance_store') + _delayed_delete_imported = True + + +class ImageFactory(object): + _readonly_properties = ['created_at', 'updated_at', 'status', 'checksum', + 'size', 'virtual_size'] + _reserved_properties = ['owner', 'is_public', 'locations', + 'deleted', 'deleted_at', 'direct_url', 'self', + 'file', 'schema'] + + def _check_readonly(self, kwargs): + for key in self._readonly_properties: + if key in kwargs: + raise exception.ReadonlyProperty(property=key) + + def _check_unexpected(self, kwargs): + if kwargs: + msg = _('new_image() got unexpected keywords %s') + raise TypeError(msg % kwargs.keys()) + + def _check_reserved(self, properties): + if properties is not None: + for key in self._reserved_properties: + if key in properties: + raise exception.ReservedProperty(property=key) + + def new_image(self, image_id=None, name=None, visibility='private', + min_disk=0, min_ram=0, protected=False, owner=None, + disk_format=None, container_format=None, + extra_properties=None, tags=None, **other_args): + extra_properties = extra_properties or {} + self._check_readonly(other_args) + self._check_unexpected(other_args) + self._check_reserved(extra_properties) + + if image_id is None: + image_id = str(uuid.uuid4()) + created_at = timeutils.utcnow() + updated_at = created_at + status = 'queued' + + return Image(image_id=image_id, name=name, status=status, + created_at=created_at, updated_at=updated_at, + visibility=visibility, min_disk=min_disk, + min_ram=min_ram, protected=protected, + owner=owner, disk_format=disk_format, + container_format=container_format, + extra_properties=extra_properties, tags=tags or []) + + +class Image(object): + + valid_state_targets = { + # Each key denotes a "current" state for the image. Corresponding + # values list the valid states to which we can jump from that "current" + # state. + # NOTE(flwang): In v2, we are deprecating the 'killed' status, so it's + # allowed to restore image from 'saving' to 'queued' so that upload + # can be retried. 
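# The mapping below is read by the status setter: a transition is allowed
# only if the new status appears in the tuple keyed by the current status.
# For example, 'saving' may fall back to 'queued' so an upload can be
# retried, while 'deleted' has no successors and is terminal.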
+ 'queued': ('saving', 'active', 'deleted'), + 'saving': ('active', 'killed', 'deleted', 'queued'), + 'active': ('queued', 'pending_delete', 'deleted', 'deactivated'), + 'killed': ('deleted',), + 'pending_delete': ('deleted',), + 'deleted': (), + 'deactivated': ('active', 'deleted'), + } + + def __init__(self, image_id, status, created_at, updated_at, **kwargs): + self.image_id = image_id + self.status = status + self.created_at = created_at + self.updated_at = updated_at + self.name = kwargs.pop('name', None) + self.visibility = kwargs.pop('visibility', 'private') + self.min_disk = kwargs.pop('min_disk', 0) + self.min_ram = kwargs.pop('min_ram', 0) + self.protected = kwargs.pop('protected', False) + self.locations = kwargs.pop('locations', []) + self.checksum = kwargs.pop('checksum', None) + self.owner = kwargs.pop('owner', None) + self._disk_format = kwargs.pop('disk_format', None) + self._container_format = kwargs.pop('container_format', None) + self.size = kwargs.pop('size', None) + self.virtual_size = kwargs.pop('virtual_size', None) + extra_properties = kwargs.pop('extra_properties', {}) + self.extra_properties = ExtraProperties(extra_properties) + self.tags = kwargs.pop('tags', []) + if kwargs: + message = _("__init__() got unexpected keyword argument '%s'") + raise TypeError(message % kwargs.keys()[0]) + + @property + def status(self): + return self._status + + @status.setter + def status(self, status): + has_status = hasattr(self, '_status') + if has_status: + if status not in self.valid_state_targets[self._status]: + kw = {'cur_status': self._status, 'new_status': status} + e = exception.InvalidImageStatusTransition(**kw) + LOG.debug(e) + raise e + + if self._status == 'queued' and status in ('saving', 'active'): + missing = [k for k in ['disk_format', 'container_format'] + if not getattr(self, k)] + if len(missing) > 0: + if len(missing) == 1: + msg = _('Property %s must be set prior to ' + 'saving data.') + else: + msg = _('Properties %s must be set prior to ' + 'saving data.') + raise ValueError(msg % ', '.join(missing)) + # NOTE(flwang): Image size should be cleared as long as the image + # status is updated to 'queued' + if status == 'queued': + self.size = None + self.virtual_size = None + self._status = status + + @property + def visibility(self): + return self._visibility + + @visibility.setter + def visibility(self, visibility): + if visibility not in ('public', 'private'): + raise ValueError(_('Visibility must be either "public" ' + 'or "private"')) + self._visibility = visibility + + @property + def tags(self): + return self._tags + + @tags.setter + def tags(self, value): + self._tags = set(value) + + @property + def container_format(self): + return self._container_format + + @container_format.setter + def container_format(self, value): + if hasattr(self, '_container_format') and self.status != 'queued': + msg = _("Attribute container_format can be only replaced " + "for a queued image.") + raise exception.Forbidden(message=msg) + self._container_format = value + + @property + def disk_format(self): + return self._disk_format + + @disk_format.setter + def disk_format(self, value): + if hasattr(self, '_disk_format') and self.status != 'queued': + msg = _("Attribute disk_format can be only replaced " + "for a queued image.") + raise exception.Forbidden(message=msg) + self._disk_format = value + + @property + def min_disk(self): + return self._min_disk + + @min_disk.setter + def min_disk(self, value): + if value and value < 0: + extra_msg = _('Cannot be a negative value') 
+ raise exception.InvalidParameterValue(value=value, + param='min_disk', + extra_msg=extra_msg) + self._min_disk = value + + @property + def min_ram(self): + return self._min_ram + + @min_ram.setter + def min_ram(self, value): + if value and value < 0: + extra_msg = _('Cannot be a negative value') + raise exception.InvalidParameterValue(value=value, + param='min_ram', + extra_msg=extra_msg) + self._min_ram = value + + def delete(self): + if self.protected: + raise exception.ProtectedImageDelete(image_id=self.image_id) + if CONF.delayed_delete and self.locations: + self.status = 'pending_delete' + else: + self.status = 'deleted' + + def deactivate(self): + if self.status == 'active': + self.status = 'deactivated' + elif self.status == 'deactivated': + # Noop if already deactive + pass + else: + msg = ("Not allowed to deactivate image in status '%s'" + % self.status) + LOG.debug(msg) + msg = (_("Not allowed to deactivate image in status '%s'") + % self.status) + raise exception.Forbidden(message=msg) + + def reactivate(self): + if self.status == 'deactivated': + self.status = 'active' + elif self.status == 'active': + # Noop if already active + pass + else: + msg = ("Not allowed to reactivate image in status '%s'" + % self.status) + LOG.debug(msg) + msg = (_("Not allowed to reactivate image in status '%s'") + % self.status) + raise exception.Forbidden(message=msg) + + def get_data(self, *args, **kwargs): + raise NotImplementedError() + + def set_data(self, data, size=None): + raise NotImplementedError() + + +class ExtraProperties(collections.MutableMapping, dict): + + def __getitem__(self, key): + return dict.__getitem__(self, key) + + def __setitem__(self, key, value): + return dict.__setitem__(self, key, value) + + def __delitem__(self, key): + return dict.__delitem__(self, key) + + def __eq__(self, other): + if isinstance(other, ExtraProperties): + return dict(self).__eq__(dict(other)) + elif isinstance(other, dict): + return dict(self).__eq__(other) + else: + return False + + def __len__(self): + return dict(self).__len__() + + def keys(self): + return dict(self).keys() + + +class ImageMembership(object): + + def __init__(self, image_id, member_id, created_at, updated_at, + id=None, status=None): + self.id = id + self.image_id = image_id + self.member_id = member_id + self.created_at = created_at + self.updated_at = updated_at + self.status = status + + @property + def status(self): + return self._status + + @status.setter + def status(self, status): + if status not in ('pending', 'accepted', 'rejected'): + msg = _('Status must be "pending", "accepted" or "rejected".') + raise ValueError(msg) + self._status = status + + +class ImageMemberFactory(object): + + def new_image_member(self, image, member_id): + created_at = timeutils.utcnow() + updated_at = created_at + + return ImageMembership(image_id=image.image_id, member_id=member_id, + created_at=created_at, updated_at=updated_at, + status='pending') + + +class Task(object): + _supported_task_type = ('import',) + + _supported_task_status = ('pending', 'processing', 'success', 'failure') + + def __init__(self, task_id, task_type, status, owner, + expires_at, created_at, updated_at, + task_input, result, message, task_time_to_live=48): + + if task_type not in self._supported_task_type: + raise exception.InvalidTaskType(task_type) + + if status not in self._supported_task_status: + raise exception.InvalidTaskStatus(status) + + self.task_id = task_id + self._status = status + self.type = task_type + self.owner = owner + self.expires_at = 
expires_at + # NOTE(nikhil): We use '_time_to_live' to determine how long a + # task should live from the time it succeeds or fails. + self._time_to_live = datetime.timedelta(hours=task_time_to_live) + self.created_at = created_at + self.updated_at = updated_at + self.task_input = task_input + self.result = result + self.message = message + + @property + def status(self): + return self._status + + @property + def message(self): + return self._message + + @message.setter + def message(self, message): + if message: + self._message = six.text_type(message) + else: + self._message = six.text_type('') + + def _validate_task_status_transition(self, cur_status, new_status): + valid_transitions = { + 'pending': ['processing', 'failure'], + 'processing': ['success', 'failure'], + 'success': [], + 'failure': [], + } + + if new_status in valid_transitions[cur_status]: + return True + else: + return False + + def _set_task_status(self, new_status): + if self._validate_task_status_transition(self.status, new_status): + self._status = new_status + log_msg = (_LI("Task [%(task_id)s] status changing from " + "%(cur_status)s to %(new_status)s") % + {'task_id': self.task_id, 'cur_status': self.status, + 'new_status': new_status}) + LOG.info(log_msg) + self._status = new_status + else: + log_msg = (_LE("Task [%(task_id)s] status failed to change from " + "%(cur_status)s to %(new_status)s") % + {'task_id': self.task_id, 'cur_status': self.status, + 'new_status': new_status}) + LOG.error(log_msg) + raise exception.InvalidTaskStatusTransition( + cur_status=self.status, + new_status=new_status + ) + + def begin_processing(self): + new_status = 'processing' + self._set_task_status(new_status) + + def succeed(self, result): + new_status = 'success' + self.result = result + self._set_task_status(new_status) + self.expires_at = timeutils.utcnow() + self._time_to_live + + def fail(self, message): + new_status = 'failure' + self.message = message + self._set_task_status(new_status) + self.expires_at = timeutils.utcnow() + self._time_to_live + + def run(self, executor): + executor.begin_processing(self.task_id) + + +class TaskStub(object): + + def __init__(self, task_id, task_type, status, owner, + expires_at, created_at, updated_at): + self.task_id = task_id + self._status = status + self.type = task_type + self.owner = owner + self.expires_at = expires_at + self.created_at = created_at + self.updated_at = updated_at + + @property + def status(self): + return self._status + + +class TaskFactory(object): + + def new_task(self, task_type, owner, task_time_to_live=48, + task_input=None, **kwargs): + task_id = str(uuid.uuid4()) + status = 'pending' + # Note(nikhil): expires_at would be set on the task, only when it + # succeeds or fails. + expires_at = None + created_at = timeutils.utcnow() + updated_at = created_at + return Task( + task_id, + task_type, + status, + owner, + expires_at, + created_at, + updated_at, + task_input, + kwargs.get('message'), + kwargs.get('result'), + task_time_to_live + ) + + +class TaskExecutorFactory(object): + eventlet_deprecation_warned = False + + def __init__(self, task_repo, image_repo, image_factory): + self.task_repo = task_repo + self.image_repo = image_repo + self.image_factory = image_factory + + def new_task_executor(self, context): + try: + # NOTE(flaper87): Backwards compatibility layer. + # It'll allow us to provide a deprecation path to + # users that are currently consuming the `eventlet` + # executor. 
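# The executor class is resolved dynamically from the configured name,
# e.g. task_executor = 'taskflow' loads
# daisy.async.taskflow_executor.TaskExecutor via importutils below.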
+ task_executor = CONF.task.task_executor + if task_executor == 'eventlet': + # NOTE(jokke): Making sure we do not log the deprecation + # warning 1000 times or anything crazy like that. + if not TaskExecutorFactory.eventlet_deprecation_warned: + msg = _LW("The `eventlet` executor has been deprecated. " + "Use `taskflow` instead.") + LOG.warn(msg) + TaskExecutorFactory.eventlet_deprecation_warned = True + task_executor = 'taskflow' + + executor_cls = ('daisy.async.%s_executor.' + 'TaskExecutor' % task_executor) + LOG.debug("Loading %s executor" % task_executor) + executor = importutils.import_class(executor_cls) + return executor(context, + self.task_repo, + self.image_repo, + self.image_factory) + except ImportError: + with excutils.save_and_reraise_exception(): + LOG.exception(_LE("Failed to load the %s executor provided " + "in the config.") % CONF.task.task_executor) + + +class MetadefNamespace(object): + + def __init__(self, namespace_id, namespace, display_name, description, + owner, visibility, protected, created_at, updated_at): + self.namespace_id = namespace_id + self.namespace = namespace + self.display_name = display_name + self.description = description + self.owner = owner + self.visibility = visibility or "private" + self.protected = protected or False + self.created_at = created_at + self.updated_at = updated_at + + def delete(self): + if self.protected: + raise exception.ProtectedMetadefNamespaceDelete( + namespace=self.namespace) + + +class MetadefNamespaceFactory(object): + + def new_namespace(self, namespace, owner, **kwargs): + namespace_id = str(uuid.uuid4()) + created_at = timeutils.utcnow() + updated_at = created_at + return MetadefNamespace( + namespace_id, + namespace, + kwargs.get('display_name'), + kwargs.get('description'), + owner, + kwargs.get('visibility'), + kwargs.get('protected'), + created_at, + updated_at + ) + + +class MetadefObject(object): + + def __init__(self, namespace, object_id, name, created_at, updated_at, + required, description, properties): + self.namespace = namespace + self.object_id = object_id + self.name = name + self.created_at = created_at + self.updated_at = updated_at + self.required = required + self.description = description + self.properties = properties + + def delete(self): + if self.namespace.protected: + raise exception.ProtectedMetadefObjectDelete(object_name=self.name) + + +class MetadefObjectFactory(object): + + def new_object(self, namespace, name, **kwargs): + object_id = str(uuid.uuid4()) + created_at = timeutils.utcnow() + updated_at = created_at + return MetadefObject( + namespace, + object_id, + name, + created_at, + updated_at, + kwargs.get('required'), + kwargs.get('description'), + kwargs.get('properties') + ) + + +class MetadefResourceType(object): + + def __init__(self, namespace, name, prefix, properties_target, + created_at, updated_at): + self.namespace = namespace + self.name = name + self.prefix = prefix + self.properties_target = properties_target + self.created_at = created_at + self.updated_at = updated_at + + def delete(self): + if self.namespace.protected: + raise exception.ProtectedMetadefResourceTypeAssociationDelete( + resource_type=self.name) + + +class MetadefResourceTypeFactory(object): + + def new_resource_type(self, namespace, name, **kwargs): + created_at = timeutils.utcnow() + updated_at = created_at + return MetadefResourceType( + namespace, + name, + kwargs.get('prefix'), + kwargs.get('properties_target'), + created_at, + updated_at + ) + + +class MetadefProperty(object): + + def 
__init__(self, namespace, property_id, name, schema): + self.namespace = namespace + self.property_id = property_id + self.name = name + self.schema = schema + + def delete(self): + if self.namespace.protected: + raise exception.ProtectedMetadefNamespacePropDelete( + property_name=self.name) + + +class MetadefPropertyFactory(object): + + def new_namespace_property(self, namespace, name, schema, **kwargs): + property_id = str(uuid.uuid4()) + return MetadefProperty( + namespace, + property_id, + name, + schema + ) + + +class MetadefTag(object): + + def __init__(self, namespace, tag_id, name, created_at, updated_at): + self.namespace = namespace + self.tag_id = tag_id + self.name = name + self.created_at = created_at + self.updated_at = updated_at + + def delete(self): + if self.namespace.protected: + raise exception.ProtectedMetadefTagDelete(tag_name=self.name) + + +class MetadefTagFactory(object): + + def new_tag(self, namespace, name, **kwargs): + tag_id = str(uuid.uuid4()) + created_at = timeutils.utcnow() + updated_at = created_at + return MetadefTag( + namespace, + tag_id, + name, + created_at, + updated_at + ) diff --git a/code/daisy/daisy/domain/proxy.py b/code/daisy/daisy/domain/proxy.py new file mode 100755 index 00000000..c9ce3e4b --- /dev/null +++ b/code/daisy/daisy/domain/proxy.py @@ -0,0 +1,542 @@ +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
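Nearly everything in the proxy module that follows is built from one delegation idiom: `_proxy(target, attr)` returns a property that forwards reads, writes and deletes to a wrapped object, and `Helper` wraps/unwraps objects as they cross repo boundaries. A minimal, self-contained sketch of that idiom (the `Record` classes here are illustrative, not part of the patch):

def _proxy(target, attr):
    def get_attr(self):
        return getattr(getattr(self, target), attr)

    def set_attr(self, value):
        return setattr(getattr(self, target), attr, value)

    def del_attr(self):
        return delattr(getattr(self, target), attr)

    return property(get_attr, set_attr, del_attr)


class Record(object):
    def __init__(self, name):
        self.name = name


class RecordProxy(object):
    # every read/write of .name is delegated to the wrapped Record
    name = _proxy('base', 'name')

    def __init__(self, base):
        self.base = base


rec = Record('alpha')
wrapped = RecordProxy(rec)
wrapped.name = 'beta'        # forwarded to rec.name
assert rec.name == 'beta'
assert wrapped.name == 'beta'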
+ + +def _proxy(target, attr): + def get_attr(self): + return getattr(getattr(self, target), attr) + + def set_attr(self, value): + return setattr(getattr(self, target), attr, value) + + def del_attr(self): + return delattr(getattr(self, target), attr) + + return property(get_attr, set_attr, del_attr) + + +class Helper(object): + def __init__(self, proxy_class=None, proxy_kwargs=None): + self.proxy_class = proxy_class + self.proxy_kwargs = proxy_kwargs or {} + + def proxy(self, obj): + if obj is None or self.proxy_class is None: + return obj + return self.proxy_class(obj, **self.proxy_kwargs) + + def unproxy(self, obj): + if obj is None or self.proxy_class is None: + return obj + return obj.base + + +class TaskRepo(object): + def __init__(self, base, + task_proxy_class=None, task_proxy_kwargs=None): + self.base = base + self.task_proxy_helper = Helper(task_proxy_class, task_proxy_kwargs) + + def get(self, task_id): + task = self.base.get(task_id) + return self.task_proxy_helper.proxy(task) + + def add(self, task): + self.base.add(self.task_proxy_helper.unproxy(task)) + + def save(self, task): + self.base.save(self.task_proxy_helper.unproxy(task)) + + def remove(self, task): + base_task = self.task_proxy_helper.unproxy(task) + self.base.remove(base_task) + + +class TaskStubRepo(object): + def __init__(self, base, task_stub_proxy_class=None, + task_stub_proxy_kwargs=None): + self.base = base + self.task_stub_proxy_helper = Helper(task_stub_proxy_class, + task_stub_proxy_kwargs) + + def list(self, *args, **kwargs): + tasks = self.base.list(*args, **kwargs) + return [self.task_stub_proxy_helper.proxy(task) for task in tasks] + + +class Repo(object): + def __init__(self, base, item_proxy_class=None, item_proxy_kwargs=None): + self.base = base + self.helper = Helper(item_proxy_class, item_proxy_kwargs) + + def get(self, item_id): + return self.helper.proxy(self.base.get(item_id)) + + def list(self, *args, **kwargs): + items = self.base.list(*args, **kwargs) + return [self.helper.proxy(item) for item in items] + + def add(self, item): + base_item = self.helper.unproxy(item) + result = self.base.add(base_item) + return self.helper.proxy(result) + + def save(self, item, from_state=None): + base_item = self.helper.unproxy(item) + result = self.base.save(base_item, from_state=from_state) + return self.helper.proxy(result) + + def remove(self, item): + base_item = self.helper.unproxy(item) + result = self.base.remove(base_item) + return self.helper.proxy(result) + + +class ImageFactory(object): + def __init__(self, base, proxy_class=None, proxy_kwargs=None): + self.helper = Helper(proxy_class, proxy_kwargs) + self.base = base + + def new_image(self, **kwargs): + return self.helper.proxy(self.base.new_image(**kwargs)) + + +class ImageMembershipFactory(object): + def __init__(self, base, image_proxy_class=None, image_proxy_kwargs=None, + member_proxy_class=None, member_proxy_kwargs=None): + self.base = base + self.image_helper = Helper(image_proxy_class, image_proxy_kwargs) + self.member_helper = Helper(member_proxy_class, member_proxy_kwargs) + + def new_image_member(self, image, member_id): + base_image = self.image_helper.unproxy(image) + member = self.base.new_image_member(base_image, member_id) + return self.member_helper.proxy(member) + + +class Image(object): + def __init__(self, base, member_repo_proxy_class=None, + member_repo_proxy_kwargs=None): + self.base = base + self.helper = Helper(member_repo_proxy_class, + member_repo_proxy_kwargs) + + name = _proxy('base', 'name') + image_id = 
_proxy('base', 'image_id') + status = _proxy('base', 'status') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + visibility = _proxy('base', 'visibility') + min_disk = _proxy('base', 'min_disk') + min_ram = _proxy('base', 'min_ram') + protected = _proxy('base', 'protected') + locations = _proxy('base', 'locations') + checksum = _proxy('base', 'checksum') + owner = _proxy('base', 'owner') + disk_format = _proxy('base', 'disk_format') + container_format = _proxy('base', 'container_format') + size = _proxy('base', 'size') + virtual_size = _proxy('base', 'virtual_size') + extra_properties = _proxy('base', 'extra_properties') + tags = _proxy('base', 'tags') + + def delete(self): + self.base.delete() + + def deactivate(self): + self.base.deactivate() + + def reactivate(self): + self.base.reactivate() + + def set_data(self, data, size=None): + self.base.set_data(data, size) + + def get_data(self, *args, **kwargs): + return self.base.get_data(*args, **kwargs) + + def get_member_repo(self): + return self.helper.proxy(self.base.get_member_repo()) + + +class Task(object): + def __init__(self, base): + self.base = base + + task_id = _proxy('base', 'task_id') + type = _proxy('base', 'type') + status = _proxy('base', 'status') + owner = _proxy('base', 'owner') + expires_at = _proxy('base', 'expires_at') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + task_input = _proxy('base', 'task_input') + result = _proxy('base', 'result') + message = _proxy('base', 'message') + + def begin_processing(self): + self.base.begin_processing() + + def succeed(self, result): + self.base.succeed(result) + + def fail(self, message): + self.base.fail(message) + + def run(self, executor): + self.base.run(executor) + + +class TaskStub(object): + def __init__(self, base): + self.base = base + + task_id = _proxy('base', 'task_id') + type = _proxy('base', 'type') + status = _proxy('base', 'status') + owner = _proxy('base', 'owner') + expires_at = _proxy('base', 'expires_at') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + + +class TaskFactory(object): + def __init__(self, + base, + task_proxy_class=None, + task_proxy_kwargs=None): + self.task_helper = Helper(task_proxy_class, task_proxy_kwargs) + self.base = base + + def new_task(self, **kwargs): + t = self.base.new_task(**kwargs) + return self.task_helper.proxy(t) + + +# Metadef Namespace classes +class MetadefNamespaceRepo(object): + def __init__(self, base, + namespace_proxy_class=None, namespace_proxy_kwargs=None): + self.base = base + self.namespace_proxy_helper = Helper(namespace_proxy_class, + namespace_proxy_kwargs) + + def get(self, namespace): + namespace_obj = self.base.get(namespace) + return self.namespace_proxy_helper.proxy(namespace_obj) + + def add(self, namespace): + self.base.add(self.namespace_proxy_helper.unproxy(namespace)) + + def list(self, *args, **kwargs): + namespaces = self.base.list(*args, **kwargs) + return [self.namespace_proxy_helper.proxy(namespace) for namespace + in namespaces] + + def remove(self, item): + base_item = self.namespace_proxy_helper.unproxy(item) + result = self.base.remove(base_item) + return self.namespace_proxy_helper.proxy(result) + + def remove_objects(self, item): + base_item = self.namespace_proxy_helper.unproxy(item) + result = self.base.remove_objects(base_item) + return self.namespace_proxy_helper.proxy(result) + + def remove_properties(self, item): + base_item = 
self.namespace_proxy_helper.unproxy(item) + result = self.base.remove_properties(base_item) + return self.namespace_proxy_helper.proxy(result) + + def remove_tags(self, item): + base_item = self.namespace_proxy_helper.unproxy(item) + result = self.base.remove_tags(base_item) + return self.namespace_proxy_helper.proxy(result) + + def save(self, item): + base_item = self.namespace_proxy_helper.unproxy(item) + result = self.base.save(base_item) + return self.namespace_proxy_helper.proxy(result) + + +class MetadefNamespace(object): + def __init__(self, base): + self.base = base + + namespace_id = _proxy('base', 'namespace_id') + namespace = _proxy('base', 'namespace') + display_name = _proxy('base', 'display_name') + description = _proxy('base', 'description') + owner = _proxy('base', 'owner') + visibility = _proxy('base', 'visibility') + protected = _proxy('base', 'protected') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + + def delete(self): + self.base.delete() + + +class MetadefNamespaceFactory(object): + def __init__(self, + base, + meta_namespace_proxy_class=None, + meta_namespace_proxy_kwargs=None): + self.meta_namespace_helper = Helper(meta_namespace_proxy_class, + meta_namespace_proxy_kwargs) + self.base = base + + def new_namespace(self, **kwargs): + t = self.base.new_namespace(**kwargs) + return self.meta_namespace_helper.proxy(t) + + +# Metadef object classes +class MetadefObjectRepo(object): + def __init__(self, base, + object_proxy_class=None, object_proxy_kwargs=None): + self.base = base + self.object_proxy_helper = Helper(object_proxy_class, + object_proxy_kwargs) + + def get(self, namespace, object_name): + meta_object = self.base.get(namespace, object_name) + return self.object_proxy_helper.proxy(meta_object) + + def add(self, meta_object): + self.base.add(self.object_proxy_helper.unproxy(meta_object)) + + def list(self, *args, **kwargs): + objects = self.base.list(*args, **kwargs) + return [self.object_proxy_helper.proxy(meta_object) for meta_object + in objects] + + def remove(self, item): + base_item = self.object_proxy_helper.unproxy(item) + result = self.base.remove(base_item) + return self.object_proxy_helper.proxy(result) + + def save(self, item): + base_item = self.object_proxy_helper.unproxy(item) + result = self.base.save(base_item) + return self.object_proxy_helper.proxy(result) + + +class MetadefObject(object): + def __init__(self, base): + self.base = base + namespace = _proxy('base', 'namespace') + object_id = _proxy('base', 'object_id') + name = _proxy('base', 'name') + required = _proxy('base', 'required') + description = _proxy('base', 'description') + properties = _proxy('base', 'properties') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + + def delete(self): + self.base.delete() + + +class MetadefObjectFactory(object): + def __init__(self, + base, + meta_object_proxy_class=None, + meta_object_proxy_kwargs=None): + self.meta_object_helper = Helper(meta_object_proxy_class, + meta_object_proxy_kwargs) + self.base = base + + def new_object(self, **kwargs): + t = self.base.new_object(**kwargs) + return self.meta_object_helper.proxy(t) + + +# Metadef ResourceType classes +class MetadefResourceTypeRepo(object): + def __init__(self, base, resource_type_proxy_class=None, + resource_type_proxy_kwargs=None): + self.base = base + self.resource_type_proxy_helper = Helper(resource_type_proxy_class, + resource_type_proxy_kwargs) + + def add(self, meta_resource_type): + 
self.base.add(self.resource_type_proxy_helper.unproxy( + meta_resource_type)) + + def get(self, *args, **kwargs): + resource_type = self.base.get(*args, **kwargs) + return self.resource_type_proxy_helper.proxy(resource_type) + + def list(self, *args, **kwargs): + resource_types = self.base.list(*args, **kwargs) + return [self.resource_type_proxy_helper.proxy(resource_type) + for resource_type in resource_types] + + def remove(self, item): + base_item = self.resource_type_proxy_helper.unproxy(item) + result = self.base.remove(base_item) + return self.resource_type_proxy_helper.proxy(result) + + +class MetadefResourceType(object): + def __init__(self, base): + self.base = base + namespace = _proxy('base', 'namespace') + name = _proxy('base', 'name') + prefix = _proxy('base', 'prefix') + properties_target = _proxy('base', 'properties_target') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + + def delete(self): + self.base.delete() + + +class MetadefResourceTypeFactory(object): + def __init__(self, + base, + resource_type_proxy_class=None, + resource_type_proxy_kwargs=None): + self.resource_type_helper = Helper(resource_type_proxy_class, + resource_type_proxy_kwargs) + self.base = base + + def new_resource_type(self, **kwargs): + t = self.base.new_resource_type(**kwargs) + return self.resource_type_helper.proxy(t) + + +# Metadef namespace property classes +class MetadefPropertyRepo(object): + def __init__(self, base, + property_proxy_class=None, property_proxy_kwargs=None): + self.base = base + self.property_proxy_helper = Helper(property_proxy_class, + property_proxy_kwargs) + + def get(self, namespace, property_name): + property = self.base.get(namespace, property_name) + return self.property_proxy_helper.proxy(property) + + def add(self, property): + self.base.add(self.property_proxy_helper.unproxy(property)) + + def list(self, *args, **kwargs): + properties = self.base.list(*args, **kwargs) + return [self.property_proxy_helper.proxy(property) for property + in properties] + + def remove(self, item): + base_item = self.property_proxy_helper.unproxy(item) + result = self.base.remove(base_item) + return self.property_proxy_helper.proxy(result) + + def save(self, item): + base_item = self.property_proxy_helper.unproxy(item) + result = self.base.save(base_item) + return self.property_proxy_helper.proxy(result) + + +class MetadefProperty(object): + def __init__(self, base): + self.base = base + namespace = _proxy('base', 'namespace') + property_id = _proxy('base', 'property_id') + name = _proxy('base', 'name') + schema = _proxy('base', 'schema') + + def delete(self): + self.base.delete() + + +class MetadefPropertyFactory(object): + def __init__(self, + base, + property_proxy_class=None, + property_proxy_kwargs=None): + self.meta_object_helper = Helper(property_proxy_class, + property_proxy_kwargs) + self.base = base + + def new_namespace_property(self, **kwargs): + t = self.base.new_namespace_property(**kwargs) + return self.meta_object_helper.proxy(t) + + +# Metadef tag classes +class MetadefTagRepo(object): + def __init__(self, base, + tag_proxy_class=None, tag_proxy_kwargs=None): + self.base = base + self.tag_proxy_helper = Helper(tag_proxy_class, + tag_proxy_kwargs) + + def get(self, namespace, name): + meta_tag = self.base.get(namespace, name) + return self.tag_proxy_helper.proxy(meta_tag) + + def add(self, meta_tag): + self.base.add(self.tag_proxy_helper.unproxy(meta_tag)) + + def add_tags(self, meta_tags): + tags_list = [] + for meta_tag in 
meta_tags: + tags_list.append(self.tag_proxy_helper.unproxy(meta_tag)) + self.base.add_tags(tags_list) + + def list(self, *args, **kwargs): + tags = self.base.list(*args, **kwargs) + return [self.tag_proxy_helper.proxy(meta_tag) for meta_tag + in tags] + + def remove(self, item): + base_item = self.tag_proxy_helper.unproxy(item) + result = self.base.remove(base_item) + return self.tag_proxy_helper.proxy(result) + + def save(self, item): + base_item = self.tag_proxy_helper.unproxy(item) + result = self.base.save(base_item) + return self.tag_proxy_helper.proxy(result) + + +class MetadefTag(object): + def __init__(self, base): + self.base = base + + namespace = _proxy('base', 'namespace') + tag_id = _proxy('base', 'tag_id') + name = _proxy('base', 'name') + created_at = _proxy('base', 'created_at') + updated_at = _proxy('base', 'updated_at') + + def delete(self): + self.base.delete() + + +class MetadefTagFactory(object): + def __init__(self, + base, + meta_tag_proxy_class=None, + meta_tag_proxy_kwargs=None): + self.meta_tag_helper = Helper(meta_tag_proxy_class, + meta_tag_proxy_kwargs) + self.base = base + + def new_tag(self, **kwargs): + t = self.base.new_tag(**kwargs) + return self.meta_tag_helper.proxy(t) diff --git a/code/daisy/daisy/gateway.py b/code/daisy/daisy/gateway.py new file mode 100755 index 00000000..49b04601 --- /dev/null +++ b/code/daisy/daisy/gateway.py @@ -0,0 +1,262 @@ +# Copyright 2012 OpenStack Foundation +# Copyright 2013 IBM Corp. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
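Each get_* method on the Gateway that follows assembles its return value by wrapping a base repository or factory in successive proxy layers (store, quota, policy, notifier, authorization), so cross-cutting checks run before a call ever reaches the database API. A rough, standalone sketch of that layering (the layer and repo names here are hypothetical, not the real daisy classes):

class ImageRepo(object):
    def get(self, image_id):
        return {'id': image_id, 'status': 'active'}


def wrap(label, inner):
    class Layer(object):
        def __init__(self, base):
            self.base = base

        def get(self, image_id):
            # a real layer would enforce quota/policy or emit a notification here
            print('%s layer saw get(%s)' % (label, image_id))
            return self.base.get(image_id)

    return Layer(inner)


repo = ImageRepo()
# wrap innermost-first, mirroring Gateway.get_repo(); the layer added last
# ('authorization') is outermost and therefore runs first on each call
for label in ('quota', 'policy', 'notifier', 'authorization'):
    repo = wrap(label, repo)

print(repo.get('42'))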
+import glance_store +from oslo_log import log as logging + +from daisy.api import authorization +from daisy.api import policy +from daisy.api import property_protections +from daisy.common import exception +from daisy.common import property_utils +from daisy.common import store_utils +import daisy.db +import daisy.domain +import daisy.location +import daisy.notifier +import daisy.quota +try: + import daisy.search + daisy_search = daisy.search +except ImportError: + daisy_search = None + + +LOG = logging.getLogger(__name__) + + +class Gateway(object): + def __init__(self, db_api=None, store_api=None, notifier=None, + policy_enforcer=None, es_api=None): + self.db_api = db_api or daisy.db.get_api() + self.store_api = store_api or glance_store + self.store_utils = store_utils + self.notifier = notifier or daisy.notifier.Notifier() + self.policy = policy_enforcer or policy.Enforcer() + if es_api: + self.es_api = es_api + else: + self.es_api = daisy_search.get_api() if daisy_search else None + + def get_image_factory(self, context): + image_factory = daisy.domain.ImageFactory() + store_image_factory = daisy.location.ImageFactoryProxy( + image_factory, context, self.store_api, self.store_utils) + quota_image_factory = daisy.quota.ImageFactoryProxy( + store_image_factory, context, self.db_api, self.store_utils) + policy_image_factory = policy.ImageFactoryProxy( + quota_image_factory, context, self.policy) + notifier_image_factory = daisy.notifier.ImageFactoryProxy( + policy_image_factory, context, self.notifier) + if property_utils.is_property_protection_enabled(): + property_rules = property_utils.PropertyRules(self.policy) + pif = property_protections.ProtectedImageFactoryProxy( + notifier_image_factory, context, property_rules) + authorized_image_factory = authorization.ImageFactoryProxy( + pif, context) + else: + authorized_image_factory = authorization.ImageFactoryProxy( + notifier_image_factory, context) + return authorized_image_factory + + def get_image_member_factory(self, context): + image_factory = daisy.domain.ImageMemberFactory() + quota_image_factory = daisy.quota.ImageMemberFactoryProxy( + image_factory, context, self.db_api, self.store_utils) + policy_member_factory = policy.ImageMemberFactoryProxy( + quota_image_factory, context, self.policy) + authorized_image_factory = authorization.ImageMemberFactoryProxy( + policy_member_factory, context) + return authorized_image_factory + + def get_repo(self, context): + image_repo = daisy.db.ImageRepo(context, self.db_api) + store_image_repo = daisy.location.ImageRepoProxy( + image_repo, context, self.store_api, self.store_utils) + quota_image_repo = daisy.quota.ImageRepoProxy( + store_image_repo, context, self.db_api, self.store_utils) + policy_image_repo = policy.ImageRepoProxy( + quota_image_repo, context, self.policy) + notifier_image_repo = daisy.notifier.ImageRepoProxy( + policy_image_repo, context, self.notifier) + if property_utils.is_property_protection_enabled(): + property_rules = property_utils.PropertyRules(self.policy) + pir = property_protections.ProtectedImageRepoProxy( + notifier_image_repo, context, property_rules) + authorized_image_repo = authorization.ImageRepoProxy( + pir, context) + else: + authorized_image_repo = authorization.ImageRepoProxy( + notifier_image_repo, context) + + return authorized_image_repo + + def get_task_factory(self, context): + task_factory = daisy.domain.TaskFactory() + policy_task_factory = policy.TaskFactoryProxy( + task_factory, context, self.policy) + notifier_task_factory = 
daisy.notifier.TaskFactoryProxy( + policy_task_factory, context, self.notifier) + authorized_task_factory = authorization.TaskFactoryProxy( + notifier_task_factory, context) + return authorized_task_factory + + def get_task_repo(self, context): + task_repo = daisy.db.TaskRepo(context, self.db_api) + policy_task_repo = policy.TaskRepoProxy( + task_repo, context, self.policy) + notifier_task_repo = daisy.notifier.TaskRepoProxy( + policy_task_repo, context, self.notifier) + authorized_task_repo = authorization.TaskRepoProxy( + notifier_task_repo, context) + return authorized_task_repo + + def get_task_stub_repo(self, context): + task_stub_repo = daisy.db.TaskRepo(context, self.db_api) + policy_task_stub_repo = policy.TaskStubRepoProxy( + task_stub_repo, context, self.policy) + notifier_task_stub_repo = daisy.notifier.TaskStubRepoProxy( + policy_task_stub_repo, context, self.notifier) + authorized_task_stub_repo = authorization.TaskStubRepoProxy( + notifier_task_stub_repo, context) + return authorized_task_stub_repo + + def get_task_executor_factory(self, context): + task_repo = self.get_task_repo(context) + image_repo = self.get_repo(context) + image_factory = self.get_image_factory(context) + return daisy.domain.TaskExecutorFactory(task_repo, + image_repo, + image_factory) + + def get_metadef_namespace_factory(self, context): + ns_factory = daisy.domain.MetadefNamespaceFactory() + policy_ns_factory = policy.MetadefNamespaceFactoryProxy( + ns_factory, context, self.policy) + notifier_ns_factory = daisy.notifier.MetadefNamespaceFactoryProxy( + policy_ns_factory, context, self.notifier) + authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy( + notifier_ns_factory, context) + return authorized_ns_factory + + def get_metadef_namespace_repo(self, context): + ns_repo = daisy.db.MetadefNamespaceRepo(context, self.db_api) + policy_ns_repo = policy.MetadefNamespaceRepoProxy( + ns_repo, context, self.policy) + notifier_ns_repo = daisy.notifier.MetadefNamespaceRepoProxy( + policy_ns_repo, context, self.notifier) + authorized_ns_repo = authorization.MetadefNamespaceRepoProxy( + notifier_ns_repo, context) + return authorized_ns_repo + + def get_metadef_object_factory(self, context): + object_factory = daisy.domain.MetadefObjectFactory() + policy_object_factory = policy.MetadefObjectFactoryProxy( + object_factory, context, self.policy) + notifier_object_factory = daisy.notifier.MetadefObjectFactoryProxy( + policy_object_factory, context, self.notifier) + authorized_object_factory = authorization.MetadefObjectFactoryProxy( + notifier_object_factory, context) + return authorized_object_factory + + def get_metadef_object_repo(self, context): + object_repo = daisy.db.MetadefObjectRepo(context, self.db_api) + policy_object_repo = policy.MetadefObjectRepoProxy( + object_repo, context, self.policy) + notifier_object_repo = daisy.notifier.MetadefObjectRepoProxy( + policy_object_repo, context, self.notifier) + authorized_object_repo = authorization.MetadefObjectRepoProxy( + notifier_object_repo, context) + return authorized_object_repo + + def get_metadef_resource_type_factory(self, context): + resource_type_factory = daisy.domain.MetadefResourceTypeFactory() + policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy( + resource_type_factory, context, self.policy) + notifier_resource_type_factory = ( + daisy.notifier.MetadefResourceTypeFactoryProxy( + policy_resource_type_factory, context, self.notifier) + ) + authorized_resource_type_factory = ( + 
authorization.MetadefResourceTypeFactoryProxy( + notifier_resource_type_factory, context) + ) + return authorized_resource_type_factory + + def get_metadef_resource_type_repo(self, context): + resource_type_repo = daisy.db.MetadefResourceTypeRepo( + context, self.db_api) + policy_object_repo = policy.MetadefResourceTypeRepoProxy( + resource_type_repo, context, self.policy) + notifier_object_repo = daisy.notifier.MetadefResourceTypeRepoProxy( + policy_object_repo, context, self.notifier) + authorized_object_repo = authorization.MetadefResourceTypeRepoProxy( + notifier_object_repo, context) + return authorized_object_repo + + def get_metadef_property_factory(self, context): + prop_factory = daisy.domain.MetadefPropertyFactory() + policy_prop_factory = policy.MetadefPropertyFactoryProxy( + prop_factory, context, self.policy) + notifier_prop_factory = daisy.notifier.MetadefPropertyFactoryProxy( + policy_prop_factory, context, self.notifier) + authorized_prop_factory = authorization.MetadefPropertyFactoryProxy( + notifier_prop_factory, context) + return authorized_prop_factory + + def get_metadef_property_repo(self, context): + prop_repo = daisy.db.MetadefPropertyRepo(context, self.db_api) + policy_prop_repo = policy.MetadefPropertyRepoProxy( + prop_repo, context, self.policy) + notifier_prop_repo = daisy.notifier.MetadefPropertyRepoProxy( + policy_prop_repo, context, self.notifier) + authorized_prop_repo = authorization.MetadefPropertyRepoProxy( + notifier_prop_repo, context) + return authorized_prop_repo + + def get_metadef_tag_factory(self, context): + tag_factory = daisy.domain.MetadefTagFactory() + policy_tag_factory = policy.MetadefTagFactoryProxy( + tag_factory, context, self.policy) + notifier_tag_factory = daisy.notifier.MetadefTagFactoryProxy( + policy_tag_factory, context, self.notifier) + authorized_tag_factory = authorization.MetadefTagFactoryProxy( + notifier_tag_factory, context) + return authorized_tag_factory + + def get_metadef_tag_repo(self, context): + tag_repo = daisy.db.MetadefTagRepo(context, self.db_api) + policy_tag_repo = policy.MetadefTagRepoProxy( + tag_repo, context, self.policy) + notifier_tag_repo = daisy.notifier.MetadefTagRepoProxy( + policy_tag_repo, context, self.notifier) + authorized_tag_repo = authorization.MetadefTagRepoProxy( + notifier_tag_repo, context) + return authorized_tag_repo + + def get_catalog_search_repo(self, context): + if self.es_api is None: + # TODO(mriedem): Make this a separate exception or change to + # warning/error logging in Liberty once we're past string freeze. + # See bug 1441764. + LOG.debug('The search and index services are not available. ' + 'Ensure you have the necessary prerequisite ' + 'dependencies installed like elasticsearch to use these ' + 'services.') + raise exception.ServiceUnavailable() + search_repo = daisy.search.CatalogSearchRepo(context, self.es_api) + policy_search_repo = policy.CatalogSearchRepoProxy( + search_repo, context, self.policy) + return policy_search_repo diff --git a/code/daisy/daisy/hacking/__init__.py b/code/daisy/daisy/hacking/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/hacking/checks.py b/code/daisy/daisy/hacking/checks.py new file mode 100755 index 00000000..bb41e7c2 --- /dev/null +++ b/code/daisy/daisy/hacking/checks.py @@ -0,0 +1,158 @@ +# Copyright (c) 2014 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import re + +import pep8 + +""" +Guidelines for writing new hacking checks + + - Use only for Glance-specific tests. OpenStack general tests + should be submitted to the common 'hacking' module. + - Pick numbers in the range G3xx. Find the current test with + the highest allocated number and then pick the next value. + If nova has an N3xx code for that test, use the same number. + - Keep the test method code in the source file ordered based + on the G3xx value. + - List the new rule in the top level HACKING.rst file + - Add test cases for each new rule to glance/tests/test_hacking.py + +""" + + +asse_trueinst_re = re.compile( + r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " + "(\w|\.|\'|\"|\[|\])+\)\)") +asse_equal_type_re = re.compile( + r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " + "(\w|\.|\'|\"|\[|\])+\)") +asse_equal_end_with_none_re = re.compile( + r"(.)*assertEqual\((\w|\.|\'|\"|\[|\])+, None\)") +asse_equal_start_with_none_re = re.compile( + r"(.)*assertEqual\(None, (\w|\.|\'|\"|\[|\])+\)") +unicode_func_re = re.compile(r"(\s|\W|^)unicode\(") +log_translation = re.compile( + r"(.)*LOG\.(audit)\(\s*('|\")") +log_translation_info = re.compile( + r"(.)*LOG\.(info)\(\s*(_\(|'|\")") +log_translation_exception = re.compile( + r"(.)*LOG\.(exception)\(\s*(_\(|'|\")") +log_translation_error = re.compile( + r"(.)*LOG\.(error)\(\s*(_\(|'|\")") +log_translation_critical = re.compile( + r"(.)*LOG\.(critical)\(\s*(_\(|'|\")") +log_translation_warning = re.compile( + r"(.)*LOG\.(warning)\(\s*(_\(|'|\")") + + +def assert_true_instance(logical_line): + """Check for assertTrue(isinstance(a, b)) sentences + + G316 + """ + if asse_trueinst_re.match(logical_line): + yield (0, "G316: assertTrue(isinstance(a, b)) sentences not allowed") + + +def assert_equal_type(logical_line): + """Check for assertEqual(type(A), B) sentences + + G317 + """ + if asse_equal_type_re.match(logical_line): + yield (0, "G317: assertEqual(type(A), B) sentences not allowed") + + +def assert_equal_none(logical_line): + """Check for assertEqual(A, None) or assertEqual(None, A) sentences + + G318 + """ + res = (asse_equal_start_with_none_re.match(logical_line) or + asse_equal_end_with_none_re.match(logical_line)) + if res: + yield (0, "G318: assertEqual(A, None) or assertEqual(None, A) " + "sentences not allowed") + + +def no_translate_debug_logs(logical_line, filename): + dirs = [ + "glance/api", + "glance/cmd", + "glance/common", + "glance/db", + "glance/domain", + "glance/image_cache", + "glance/quota", + "glance/registry", + "glance/store", + "glance/tests", + ] + + if max([name in filename for name in dirs]): + if logical_line.startswith("LOG.debug(_("): + yield(0, "G319: Don't translate debug level logs") + + +def no_direct_use_of_unicode_function(logical_line): + """Check for use of unicode() builtin + + G320 + """ + if unicode_func_re.match(logical_line): + yield(0, "G320: Use six.text_type() instead of unicode()") + + +def validate_log_translations(logical_line, physical_line, filename): + # Translations are not required in the test directory + if pep8.noqa(physical_line): + return + msg = "G322: 
LOG.info messages require translations `_LI()`!" + if log_translation_info.match(logical_line): + yield (0, msg) + msg = "G323: LOG.exception messages require translations `_LE()`!" + if log_translation_exception.match(logical_line): + yield (0, msg) + msg = "G324: LOG.error messages require translations `_LE()`!" + if log_translation_error.match(logical_line): + yield (0, msg) + msg = "G325: LOG.critical messages require translations `_LC()`!" + if log_translation_critical.match(logical_line): + yield (0, msg) + msg = "G326: LOG.warning messages require translations `_LW()`!" + if log_translation_warning.match(logical_line): + yield (0, msg) + msg = "G321: Log messages require translations!" + if log_translation.match(logical_line): + yield (0, msg) + + +def check_no_contextlib_nested(logical_line): + msg = ("G327: contextlib.nested is deprecated since Python 2.7. See " + "https://docs.python.org/2/library/contextlib.html#contextlib." + "nested for more information.") + if ("with contextlib.nested(" in logical_line or + "with nested(" in logical_line): + yield(0, msg) + + +def factory(register): + register(assert_true_instance) + register(assert_equal_type) + register(assert_equal_none) + register(no_translate_debug_logs) + register(no_direct_use_of_unicode_function) + register(validate_log_translations) + register(check_no_contextlib_nested) diff --git a/code/daisy/daisy/i18n.py b/code/daisy/daisy/i18n.py new file mode 100755 index 00000000..d17b5882 --- /dev/null +++ b/code/daisy/daisy/i18n.py @@ -0,0 +1,31 @@ +# Copyright 2014 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.i18n import * # noqa + +_translators = TranslatorFactory(domain='glance') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical diff --git a/code/daisy/daisy/image_cache/__init__.py b/code/daisy/daisy/image_cache/__init__.py new file mode 100755 index 00000000..d3052a9e --- /dev/null +++ b/code/daisy/daisy/image_cache/__init__.py @@ -0,0 +1,341 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
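+
+# A brief usage sketch of the ImageCache facade defined below (illustrative
+# only; it assumes CONF.image_cache_dir points at a writable directory, the
+# default 'sqlite' driver is importable, and ``image_id`` is a hypothetical
+# identifier):
+#
+#     from daisy.image_cache import ImageCache
+#
+#     cache = ImageCache()
+#     if not cache.is_cached(image_id):
+#         cache.queue_image(image_id)             # mark for prefetching
+#     files_pruned, bytes_pruned = cache.prune()  # enforce the size limit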
+ +""" +LRU Cache for Image Data +""" + +import hashlib + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +from oslo_utils import importutils +from oslo_utils import units + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +image_cache_opts = [ + cfg.StrOpt('image_cache_driver', default='sqlite', + help=_('The driver to use for image cache management.')), + cfg.IntOpt('image_cache_max_size', default=10 * units.Gi, # 10 GB + help=_('The upper limit (the maximum size of accumulated ' + 'cache in bytes) beyond which pruner, if running, ' + 'starts cleaning the images cache.')), + cfg.IntOpt('image_cache_stall_time', default=86400, # 24 hours + help=_('The amount of time to let an image remain in the ' + 'cache without being accessed.')), + cfg.StrOpt('image_cache_dir', + help=_('Base directory that the Image Cache uses.')), +] + +CONF = cfg.CONF +CONF.register_opts(image_cache_opts) + + +class ImageCache(object): + + """Provides an LRU cache for image data.""" + + def __init__(self): + self.init_driver() + + def init_driver(self): + """ + Create the driver for the cache + """ + driver_name = CONF.image_cache_driver + driver_module = (__name__ + '.drivers.' + driver_name + '.Driver') + try: + self.driver_class = importutils.import_class(driver_module) + LOG.info(_LI("Image cache loaded driver '%s'.") % + driver_name) + except ImportError as import_err: + LOG.warn(_LW("Image cache driver " + "'%(driver_name)s' failed to load. " + "Got error: '%(import_err)s."), + {'driver_name': driver_name, + 'import_err': import_err}) + + driver_module = __name__ + '.drivers.sqlite.Driver' + LOG.info(_LI("Defaulting to SQLite driver.")) + self.driver_class = importutils.import_class(driver_module) + self.configure_driver() + + def configure_driver(self): + """ + Configure the driver for the cache and, if it fails to configure, + fall back to using the SQLite driver which has no odd dependencies + """ + try: + self.driver = self.driver_class() + self.driver.configure() + except exception.BadDriverConfiguration as config_err: + driver_module = self.driver_class.__module__ + LOG.warn(_LW("Image cache driver " + "'%(driver_module)s' failed to configure. " + "Got error: '%(config_err)s"), + {'driver_module': driver_module, + 'config_err': config_err}) + LOG.info(_LI("Defaulting to SQLite driver.")) + default_module = __name__ + '.drivers.sqlite.Driver' + self.driver_class = importutils.import_class(default_module) + self.driver = self.driver_class() + self.driver.configure() + + def is_cached(self, image_id): + """ + Returns True if the image with the supplied ID has its image + file cached. + + :param image_id: Image ID + """ + return self.driver.is_cached(image_id) + + def is_queued(self, image_id): + """ + Returns True if the image identifier is in our cache queue. + + :param image_id: Image ID + """ + return self.driver.is_queued(image_id) + + def get_cache_size(self): + """ + Returns the total size in bytes of the image cache. + """ + return self.driver.get_cache_size() + + def get_hit_count(self, image_id): + """ + Return the number of hits that an image has + + :param image_id: Opaque image identifier + """ + return self.driver.get_hit_count(image_id) + + def get_cached_images(self): + """ + Returns a list of records about cached images. 
+ """ + return self.driver.get_cached_images() + + def delete_all_cached_images(self): + """ + Removes all cached image files and any attributes about the images + and returns the number of cached image files that were deleted. + """ + return self.driver.delete_all_cached_images() + + def delete_cached_image(self, image_id): + """ + Removes a specific cached image file and any attributes about the image + + :param image_id: Image ID + """ + self.driver.delete_cached_image(image_id) + + def delete_all_queued_images(self): + """ + Removes all queued image files and any attributes about the images + and returns the number of queued image files that were deleted. + """ + return self.driver.delete_all_queued_images() + + def delete_queued_image(self, image_id): + """ + Removes a specific queued image file and any attributes about the image + + :param image_id: Image ID + """ + self.driver.delete_queued_image(image_id) + + def prune(self): + """ + Removes all cached image files above the cache's maximum + size. Returns a tuple containing the total number of cached + files removed and the total size of all pruned image files. + """ + max_size = CONF.image_cache_max_size + current_size = self.driver.get_cache_size() + if max_size > current_size: + LOG.debug("Image cache has free space, skipping prune...") + return (0, 0) + + overage = current_size - max_size + LOG.debug("Image cache currently %(overage)d bytes over max " + "size. Starting prune to max size of %(max_size)d ", + {'overage': overage, 'max_size': max_size}) + + total_bytes_pruned = 0 + total_files_pruned = 0 + entry = self.driver.get_least_recently_accessed() + while entry and current_size > max_size: + image_id, size = entry + LOG.debug("Pruning '%(image_id)s' to free %(size)d bytes", + {'image_id': image_id, 'size': size}) + self.driver.delete_cached_image(image_id) + total_bytes_pruned = total_bytes_pruned + size + total_files_pruned = total_files_pruned + 1 + current_size = current_size - size + entry = self.driver.get_least_recently_accessed() + + LOG.debug("Pruning finished pruning. " + "Pruned %(total_files_pruned)d and " + "%(total_bytes_pruned)d.", + {'total_files_pruned': total_files_pruned, + 'total_bytes_pruned': total_bytes_pruned}) + return total_files_pruned, total_bytes_pruned + + def clean(self, stall_time=None): + """ + Cleans up any invalid or incomplete cached images. The cache driver + decides what that means... + """ + self.driver.clean(stall_time) + + def queue_image(self, image_id): + """ + This adds a image to be cache to the queue. + + If the image already exists in the queue or has already been + cached, we return False, True otherwise + + :param image_id: Image ID + """ + return self.driver.queue_image(image_id) + + def get_caching_iter(self, image_id, image_checksum, image_iter): + """ + Returns an iterator that caches the contents of an image + while the image contents are read through the supplied + iterator. 
+ + :param image_id: Image ID + :param image_checksum: checksum expected to be generated while + iterating over image data + :param image_iter: Iterator that will read image contents + """ + if not self.driver.is_cacheable(image_id): + return image_iter + + LOG.debug("Tee'ing image '%s' into cache", image_id) + + return self.cache_tee_iter(image_id, image_iter, image_checksum) + + def cache_tee_iter(self, image_id, image_iter, image_checksum): + try: + current_checksum = hashlib.md5() + + with self.driver.open_for_write(image_id) as cache_file: + for chunk in image_iter: + try: + cache_file.write(chunk) + finally: + current_checksum.update(chunk) + yield chunk + cache_file.flush() + + if (image_checksum and + image_checksum != current_checksum.hexdigest()): + msg = _("Checksum verification failed. Aborted " + "caching of image '%s'.") % image_id + raise exception.DaisyException(msg) + + except exception.DaisyException as e: + with excutils.save_and_reraise_exception(): + # image_iter has given us bad, (size_checked_iter has found a + # bad length), or corrupt data (checksum is wrong). + LOG.exception(utils.exception_to_str(e)) + except Exception as e: + LOG.exception(_LE("Exception encountered while tee'ing " + "image '%(image_id)s' into cache: %(error)s. " + "Continuing with response.") % + {'image_id': image_id, + 'error': utils.exception_to_str(e)}) + + # If no checksum provided continue responding even if + # caching failed. + for chunk in image_iter: + yield chunk + + def cache_image_iter(self, image_id, image_iter, image_checksum=None): + """ + Cache an image with supplied iterator. + + :param image_id: Image ID + :param image_file: Iterator retrieving image chunks + :param image_checksum: Checksum of image + + :retval True if image file was cached, False otherwise + """ + if not self.driver.is_cacheable(image_id): + return False + + for chunk in self.get_caching_iter(image_id, image_checksum, + image_iter): + pass + return True + + def cache_image_file(self, image_id, image_file): + """ + Cache an image file. + + :param image_id: Image ID + :param image_file: Image file to cache + + :retval True if image file was cached, False otherwise + """ + CHUNKSIZE = 64 * units.Mi + + return self.cache_image_iter(image_id, + utils.chunkiter(image_file, CHUNKSIZE)) + + def open_for_read(self, image_id): + """ + Open and yield file for reading the image file for an image + with supplied identifier. + + :note Upon successful reading of the image file, the image's + hit count will be incremented. + + :param image_id: Image ID + """ + return self.driver.open_for_read(image_id) + + def get_image_size(self, image_id): + """ + Return the size of the image file for an image with supplied + identifier. + + :param image_id: Image ID + """ + return self.driver.get_image_size(image_id) + + def get_queued_images(self): + """ + Returns a list of image IDs that are in the queue. The + list should be sorted by the time the image ID was inserted + into the queue. + """ + return self.driver.get_queued_images() diff --git a/code/daisy/daisy/image_cache/base.py b/code/daisy/daisy/image_cache/base.py new file mode 100755 index 00000000..0b56ac43 --- /dev/null +++ b/code/daisy/daisy/image_cache/base.py @@ -0,0 +1,21 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.image_cache import ImageCache + + +class CacheApp(object): + + def __init__(self): + self.cache = ImageCache() diff --git a/code/daisy/daisy/image_cache/cleaner.py b/code/daisy/daisy/image_cache/cleaner.py new file mode 100755 index 00000000..bbe45e54 --- /dev/null +++ b/code/daisy/daisy/image_cache/cleaner.py @@ -0,0 +1,27 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +""" +Cleans up any invalid cache entries +""" + +from daisy.image_cache import base + + +class Cleaner(base.CacheApp): + + def run(self): + self.cache.clean() diff --git a/code/daisy/daisy/image_cache/client.py b/code/daisy/daisy/image_cache/client.py new file mode 100755 index 00000000..f3c2a282 --- /dev/null +++ b/code/daisy/daisy/image_cache/client.py @@ -0,0 +1,133 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from oslo.serialization import jsonutils as json + +from daisy.common import client as base_client +from daisy.common import exception +from daisy import i18n + +_ = i18n._ + + +class CacheClient(base_client.BaseClient): + + DEFAULT_PORT = 9292 + DEFAULT_DOC_ROOT = '/v1' + + def delete_cached_image(self, image_id): + """ + Delete a specified image from the cache + """ + self.do_request("DELETE", "/cached_images/%s" % image_id) + return True + + def get_cached_images(self, **kwargs): + """ + Returns a list of images stored in the image cache. 
+ """ + res = self.do_request("GET", "/cached_images") + data = json.loads(res.read())['cached_images'] + return data + + def get_queued_images(self, **kwargs): + """ + Returns a list of images queued for caching + """ + res = self.do_request("GET", "/queued_images") + data = json.loads(res.read())['queued_images'] + return data + + def delete_all_cached_images(self): + """ + Delete all cached images + """ + res = self.do_request("DELETE", "/cached_images") + data = json.loads(res.read()) + num_deleted = data['num_deleted'] + return num_deleted + + def queue_image_for_caching(self, image_id): + """ + Queue an image for prefetching into cache + """ + self.do_request("PUT", "/queued_images/%s" % image_id) + return True + + def delete_queued_image(self, image_id): + """ + Delete a specified image from the cache queue + """ + self.do_request("DELETE", "/queued_images/%s" % image_id) + return True + + def delete_all_queued_images(self): + """ + Delete all queued images + """ + res = self.do_request("DELETE", "/queued_images") + data = json.loads(res.read()) + num_deleted = data['num_deleted'] + return num_deleted + + +def get_client(host, port=None, timeout=None, use_ssl=False, username=None, + password=None, tenant=None, + auth_url=None, auth_strategy=None, + auth_token=None, region=None, + is_silent_upload=False, insecure=False): + """ + Returns a new client Glance client object based on common kwargs. + If an option isn't specified falls back to common environment variable + defaults. + """ + + if auth_url or os.getenv('OS_AUTH_URL'): + force_strategy = 'keystone' + else: + force_strategy = None + + creds = { + 'username': username or + os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')), + 'password': password or + os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')), + 'tenant': tenant or + os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')), + 'auth_url': auth_url or + os.getenv('OS_AUTH_URL'), + 'strategy': force_strategy or + auth_strategy or + os.getenv('OS_AUTH_STRATEGY', 'noauth'), + 'region': region or + os.getenv('OS_REGION_NAME'), + } + + if creds['strategy'] == 'keystone' and not creds['auth_url']: + msg = _("--os_auth_url option or OS_AUTH_URL environment variable " + "required when keystone authentication strategy is enabled\n") + raise exception.ClientConfigurationError(msg) + + return CacheClient( + host=host, + port=port, + timeout=timeout, + use_ssl=use_ssl, + auth_token=auth_token or + os.getenv('OS_TOKEN'), + creds=creds, + insecure=insecure) diff --git a/code/daisy/daisy/image_cache/drivers/__init__.py b/code/daisy/daisy/image_cache/drivers/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/image_cache/drivers/base.py b/code/daisy/daisy/image_cache/drivers/base.py new file mode 100755 index 00000000..5b03bc7c --- /dev/null +++ b/code/daisy/daisy/image_cache/drivers/base.py @@ -0,0 +1,219 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Base attribute driver class +""" + +import os.path + +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +LOG = logging.getLogger(__name__) +_ = i18n._ + +CONF = cfg.CONF + + +class Driver(object): + + def configure(self): + """ + Configure the driver to use the stored configuration options + Any store that needs special configuration should implement + this method. If the store was not able to successfully configure + itself, it should raise `exception.BadDriverConfiguration` + """ + # Here we set up the various file-based image cache paths + # that we need in order to find the files in different states + # of cache management. + self.set_paths() + + def set_paths(self): + """ + Creates all necessary directories under the base cache directory + """ + + self.base_dir = CONF.image_cache_dir + if self.base_dir is None: + msg = _('Failed to read %s from config') % 'image_cache_dir' + LOG.error(msg) + driver = self.__class__.__module__ + raise exception.BadDriverConfiguration(driver_name=driver, + reason=msg) + + self.incomplete_dir = os.path.join(self.base_dir, 'incomplete') + self.invalid_dir = os.path.join(self.base_dir, 'invalid') + self.queue_dir = os.path.join(self.base_dir, 'queue') + + dirs = [self.incomplete_dir, self.invalid_dir, self.queue_dir] + + for path in dirs: + utils.safe_mkdirs(path) + + def get_cache_size(self): + """ + Returns the total size in bytes of the image cache. + """ + raise NotImplementedError + + def get_cached_images(self): + """ + Returns a list of records about cached images. + + The list of records shall be ordered by image ID and shall look like:: + + [ + { + 'image_id': , + 'hits': INTEGER, + 'last_modified': ISO_TIMESTAMP, + 'last_accessed': ISO_TIMESTAMP, + 'size': INTEGER + }, ... + ] + + """ + return NotImplementedError + + def is_cached(self, image_id): + """ + Returns True if the image with the supplied ID has its image + file cached. + + :param image_id: Image ID + """ + raise NotImplementedError + + def is_cacheable(self, image_id): + """ + Returns True if the image with the supplied ID can have its + image file cached, False otherwise. + + :param image_id: Image ID + """ + raise NotImplementedError + + def is_queued(self, image_id): + """ + Returns True if the image identifier is in our cache queue. + + :param image_id: Image ID + """ + raise NotImplementedError + + def delete_all_cached_images(self): + """ + Removes all cached image files and any attributes about the images + and returns the number of cached image files that were deleted. + """ + raise NotImplementedError + + def delete_cached_image(self, image_id): + """ + Removes a specific cached image file and any attributes about the image + + :param image_id: Image ID + """ + raise NotImplementedError + + def delete_all_queued_images(self): + """ + Removes all queued image files and any attributes about the images + and returns the number of queued image files that were deleted. + """ + raise NotImplementedError + + def delete_queued_image(self, image_id): + """ + Removes a specific queued image file and any attributes about the image + + :param image_id: Image ID + """ + raise NotImplementedError + + def queue_image(self, image_id): + """ + Puts an image identifier in a queue for caching. Return True + on successful add to the queue, False otherwise... 
+ + :param image_id: Image ID + """ + + def clean(self, stall_time=None): + """ + Dependent on the driver, clean up and destroy any invalid or incomplete + cached images + """ + raise NotImplementedError + + def get_least_recently_accessed(self): + """ + Return a tuple containing the image_id and size of the least recently + accessed cached file, or None if no cached files. + """ + raise NotImplementedError + + def open_for_write(self, image_id): + """ + Open a file for writing the image file for an image + with supplied identifier. + + :param image_id: Image ID + """ + raise NotImplementedError + + def open_for_read(self, image_id): + """ + Open and yield file for reading the image file for an image + with supplied identifier. + + :param image_id: Image ID + """ + raise NotImplementedError + + def get_image_filepath(self, image_id, cache_status='active'): + """ + This crafts an absolute path to a specific entry + + :param image_id: Image ID + :param cache_status: Status of the image in the cache + """ + if cache_status == 'active': + return os.path.join(self.base_dir, str(image_id)) + return os.path.join(self.base_dir, cache_status, str(image_id)) + + def get_image_size(self, image_id): + """ + Return the size of the image file for an image with supplied + identifier. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id) + return os.path.getsize(path) + + def get_queued_images(self): + """ + Returns a list of image IDs that are in the queue. The + list should be sorted by the time the image ID was inserted + into the queue. + """ + raise NotImplementedError diff --git a/code/daisy/daisy/image_cache/drivers/sqlite.py b/code/daisy/daisy/image_cache/drivers/sqlite.py new file mode 100755 index 00000000..af72e25c --- /dev/null +++ b/code/daisy/daisy/image_cache/drivers/sqlite.py @@ -0,0 +1,497 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cache driver that uses SQLite to store information about cached images +""" + +from __future__ import absolute_import +from contextlib import contextmanager +import os +import sqlite3 +import stat +import time + +from eventlet import sleep +from eventlet import timeout +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils + +from daisy.common import exception +from daisy import i18n +from daisy.image_cache.drivers import base + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +sqlite_opts = [ + cfg.StrOpt('image_cache_sqlite_db', default='cache.db', + help=_('The path to the sqlite file database that will be ' + 'used for image cache management.')), +] + +CONF = cfg.CONF +CONF.register_opts(sqlite_opts) + +DEFAULT_SQL_CALL_TIMEOUT = 2 + + +class SqliteConnection(sqlite3.Connection): + + """ + SQLite DB Connection handler that plays well with eventlet, + slightly modified from Swift's similar code. 
+ """ + + def __init__(self, *args, **kwargs): + self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT) + kwargs['timeout'] = 0 + sqlite3.Connection.__init__(self, *args, **kwargs) + + def _timeout(self, call): + with timeout.Timeout(self.timeout_seconds): + while True: + try: + return call() + except sqlite3.OperationalError as e: + if 'locked' not in str(e): + raise + sleep(0.05) + + def execute(self, *args, **kwargs): + return self._timeout(lambda: sqlite3.Connection.execute( + self, *args, **kwargs)) + + def commit(self): + return self._timeout(lambda: sqlite3.Connection.commit(self)) + + +def dict_factory(cur, row): + return dict( + ((col[0], row[idx]) for idx, col in enumerate(cur.description))) + + +class Driver(base.Driver): + + """ + Cache driver that uses xattr file tags and requires a filesystem + that has atimes set. + """ + + def configure(self): + """ + Configure the driver to use the stored configuration options + Any store that needs special configuration should implement + this method. If the store was not able to successfully configure + itself, it should raise `exception.BadDriverConfiguration` + """ + super(Driver, self).configure() + + # Create the SQLite database that will hold our cache attributes + self.initialize_db() + + def initialize_db(self): + db = CONF.image_cache_sqlite_db + self.db_path = os.path.join(self.base_dir, db) + try: + conn = sqlite3.connect(self.db_path, check_same_thread=False, + factory=SqliteConnection) + conn.executescript(""" + CREATE TABLE IF NOT EXISTS cached_images ( + image_id TEXT PRIMARY KEY, + last_accessed REAL DEFAULT 0.0, + last_modified REAL DEFAULT 0.0, + size INTEGER DEFAULT 0, + hits INTEGER DEFAULT 0, + checksum TEXT + ); + """) + conn.close() + except sqlite3.DatabaseError as e: + msg = _("Failed to initialize the image cache database. " + "Got error: %s") % e + LOG.error(msg) + raise exception.BadDriverConfiguration(driver_name='sqlite', + reason=msg) + + def get_cache_size(self): + """ + Returns the total size in bytes of the image cache. + """ + sizes = [] + for path in self.get_cache_files(self.base_dir): + if path == self.db_path: + continue + file_info = os.stat(path) + sizes.append(file_info[stat.ST_SIZE]) + return sum(sizes) + + def get_hit_count(self, image_id): + """ + Return the number of hits that an image has. + + :param image_id: Opaque image identifier + """ + if not self.is_cached(image_id): + return 0 + + hits = 0 + with self.get_db() as db: + cur = db.execute("""SELECT hits FROM cached_images + WHERE image_id = ?""", + (image_id,)) + hits = cur.fetchone()[0] + return hits + + def get_cached_images(self): + """ + Returns a list of records about cached images. + """ + LOG.debug("Gathering cached image entries.") + with self.get_db() as db: + cur = db.execute("""SELECT + image_id, hits, last_accessed, last_modified, size + FROM cached_images + ORDER BY image_id""") + cur.row_factory = dict_factory + return [r for r in cur] + + def is_cached(self, image_id): + """ + Returns True if the image with the supplied ID has its image + file cached. + + :param image_id: Image ID + """ + return os.path.exists(self.get_image_filepath(image_id)) + + def is_cacheable(self, image_id): + """ + Returns True if the image with the supplied ID can have its + image file cached, False otherwise. 
+ + :param image_id: Image ID + """ + # Make sure we're not already cached or caching the image + return not (self.is_cached(image_id) or + self.is_being_cached(image_id)) + + def is_being_cached(self, image_id): + """ + Returns True if the image with supplied id is currently + in the process of having its image file cached. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id, 'incomplete') + return os.path.exists(path) + + def is_queued(self, image_id): + """ + Returns True if the image identifier is in our cache queue. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id, 'queue') + return os.path.exists(path) + + def delete_all_cached_images(self): + """ + Removes all cached image files and any attributes about the images + """ + deleted = 0 + with self.get_db() as db: + for path in self.get_cache_files(self.base_dir): + delete_cached_file(path) + deleted += 1 + db.execute("""DELETE FROM cached_images""") + db.commit() + return deleted + + def delete_cached_image(self, image_id): + """ + Removes a specific cached image file and any attributes about the image + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id) + with self.get_db() as db: + delete_cached_file(path) + db.execute("""DELETE FROM cached_images WHERE image_id = ?""", + (image_id, )) + db.commit() + + def delete_all_queued_images(self): + """ + Removes all queued image files and any attributes about the images + """ + files = [f for f in self.get_cache_files(self.queue_dir)] + for file in files: + os.unlink(file) + return len(files) + + def delete_queued_image(self, image_id): + """ + Removes a specific queued image file and any attributes about the image + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id, 'queue') + if os.path.exists(path): + os.unlink(path) + + def clean(self, stall_time=None): + """ + Delete any image files in the invalid directory and any + files in the incomplete directory that are older than a + configurable amount of time. + """ + self.delete_invalid_files() + + if stall_time is None: + stall_time = CONF.image_cache_stall_time + + now = time.time() + older_than = now - stall_time + self.delete_stalled_files(older_than) + + def get_least_recently_accessed(self): + """ + Return a tuple containing the image_id and size of the least recently + accessed cached file, or None if no cached files. + """ + with self.get_db() as db: + cur = db.execute("""SELECT image_id FROM cached_images + ORDER BY last_accessed LIMIT 1""") + try: + image_id = cur.fetchone()[0] + except TypeError: + # There are no more cached images + return None + + path = self.get_image_filepath(image_id) + try: + file_info = os.stat(path) + size = file_info[stat.ST_SIZE] + except OSError: + size = 0 + return image_id, size + + @contextmanager + def open_for_write(self, image_id): + """ + Open a file for writing the image file for an image + with supplied identifier. + + :param image_id: Image ID + """ + incomplete_path = self.get_image_filepath(image_id, 'incomplete') + + def commit(): + with self.get_db() as db: + final_path = self.get_image_filepath(image_id) + LOG.debug("Fetch finished, moving " + "'%(incomplete_path)s' to '%(final_path)s'", + dict(incomplete_path=incomplete_path, + final_path=final_path)) + os.rename(incomplete_path, final_path) + + # Make sure that we "pop" the image from the queue... 
+ if self.is_queued(image_id): + os.unlink(self.get_image_filepath(image_id, 'queue')) + + filesize = os.path.getsize(final_path) + now = time.time() + + db.execute("""INSERT INTO cached_images + (image_id, last_accessed, last_modified, hits, size) + VALUES (?, ?, ?, 0, ?)""", + (image_id, now, now, filesize)) + db.commit() + + def rollback(e): + with self.get_db() as db: + if os.path.exists(incomplete_path): + invalid_path = self.get_image_filepath(image_id, 'invalid') + + LOG.warn(_LW("Fetch of cache file failed (%(e)s), rolling " + "back by moving '%(incomplete_path)s' to " + "'%(invalid_path)s'") % + {'e': e, + 'incomplete_path': incomplete_path, + 'invalid_path': invalid_path}) + os.rename(incomplete_path, invalid_path) + + db.execute("""DELETE FROM cached_images + WHERE image_id = ?""", (image_id, )) + db.commit() + + try: + with open(incomplete_path, 'wb') as cache_file: + yield cache_file + except Exception as e: + with excutils.save_and_reraise_exception(): + rollback(e) + else: + commit() + finally: + # if the generator filling the cache file neither raises an + # exception, nor completes fetching all data, neither rollback + # nor commit will have been called, so the incomplete file + # will persist - in that case remove it as it is unusable + # example: ^c from client fetch + if os.path.exists(incomplete_path): + rollback('incomplete fetch') + + @contextmanager + def open_for_read(self, image_id): + """ + Open and yield file for reading the image file for an image + with supplied identifier. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id) + with open(path, 'rb') as cache_file: + yield cache_file + now = time.time() + with self.get_db() as db: + db.execute("""UPDATE cached_images + SET hits = hits + 1, last_accessed = ? + WHERE image_id = ?""", + (now, image_id)) + db.commit() + + @contextmanager + def get_db(self): + """ + Returns a context manager that produces a database connection that + self-closes and calls rollback if an error occurs while using the + database connection + """ + conn = sqlite3.connect(self.db_path, check_same_thread=False, + factory=SqliteConnection) + conn.row_factory = sqlite3.Row + conn.text_factory = str + conn.execute('PRAGMA synchronous = NORMAL') + conn.execute('PRAGMA count_changes = OFF') + conn.execute('PRAGMA temp_store = MEMORY') + try: + yield conn + except sqlite3.DatabaseError as e: + msg = _LE("Error executing SQLite call. Got error: %s") % e + LOG.error(msg) + conn.rollback() + finally: + conn.close() + + def queue_image(self, image_id): + """ + This adds a image to be cache to the queue. + + If the image already exists in the queue or has already been + cached, we return False, True otherwise + + :param image_id: Image ID + """ + if self.is_cached(image_id): + msg = _LI("Not queueing image '%s'. Already cached.") % image_id + LOG.info(msg) + return False + + if self.is_being_cached(image_id): + msg = _LI("Not queueing image '%s'. Already being " + "written to cache") % image_id + LOG.info(msg) + return False + + if self.is_queued(image_id): + msg = _LI("Not queueing image '%s'. 
Already queued.") % image_id + LOG.info(msg) + return False + + path = self.get_image_filepath(image_id, 'queue') + + # Touch the file to add it to the queue + with open(path, "w"): + pass + + return True + + def delete_invalid_files(self): + """ + Removes any invalid cache entries + """ + for path in self.get_cache_files(self.invalid_dir): + os.unlink(path) + LOG.info(_LI("Removed invalid cache file %s") % path) + + def delete_stalled_files(self, older_than): + """ + Removes any incomplete cache entries older than a + supplied modified time. + + :param older_than: Files written to on or before this timestemp + will be deleted. + """ + for path in self.get_cache_files(self.incomplete_dir): + if os.path.getmtime(path) < older_than: + try: + os.unlink(path) + LOG.info(_LI("Removed stalled cache file %s") % path) + except Exception as e: + msg = (_LW("Failed to delete file %(path)s. " + "Got error: %(e)s"), + dict(path=path, e=e)) + LOG.warn(msg) + + def get_queued_images(self): + """ + Returns a list of image IDs that are in the queue. The + list should be sorted by the time the image ID was inserted + into the queue. + """ + files = [f for f in self.get_cache_files(self.queue_dir)] + items = [] + for path in files: + mtime = os.path.getmtime(path) + items.append((mtime, os.path.basename(path))) + + items.sort() + return [image_id for (modtime, image_id) in items] + + def get_cache_files(self, basepath): + """ + Returns cache files in the supplied directory + + :param basepath: Directory to look in for cache files + """ + for fname in os.listdir(basepath): + path = os.path.join(basepath, fname) + if path != self.db_path and os.path.isfile(path): + yield path + + +def delete_cached_file(path): + if os.path.exists(path): + LOG.debug("Deleting image cache file '%s'", path) + os.unlink(path) + else: + LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to" + " delete") % path) diff --git a/code/daisy/daisy/image_cache/drivers/xattr.py b/code/daisy/daisy/image_cache/drivers/xattr.py new file mode 100755 index 00000000..60f38580 --- /dev/null +++ b/code/daisy/daisy/image_cache/drivers/xattr.py @@ -0,0 +1,510 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cache driver that uses xattr file tags and requires a filesystem +that has atimes set. + +Assumptions +=========== + +1. Cache data directory exists on a filesytem that updates atime on + reads ('noatime' should NOT be set) + +2. Cache data directory exists on a filesystem that supports xattrs. + This is optional, but highly recommended since it allows us to + present ops with useful information pertaining to the cache, like + human readable filenames and statistics. + +3. `glance-prune` is scheduled to run as a periodic job via cron. This + is needed to run the LRU prune strategy to keep the cache size + within the limits set by the config file. 
+ + +Cache Directory Notes +===================== + +The image cache data directory contains the main cache path, where the +active cache entries and subdirectories for handling partial downloads +and errored-out cache images. + +The layout looks like: + +$image_cache_dir/ + entry1 + entry2 + ... + incomplete/ + invalid/ + queue/ +""" + +from __future__ import absolute_import +from contextlib import contextmanager +import errno +import os +import stat +import time + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +import xattr + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n +from daisy.image_cache.drivers import base + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + + +class Driver(base.Driver): + + """ + Cache driver that uses xattr file tags and requires a filesystem + that has atimes set. + """ + + def configure(self): + """ + Configure the driver to use the stored configuration options + Any store that needs special configuration should implement + this method. If the store was not able to successfully configure + itself, it should raise `exception.BadDriverConfiguration` + """ + # Here we set up the various file-based image cache paths + # that we need in order to find the files in different states + # of cache management. + self.set_paths() + + # We do a quick attempt to write a user xattr to a temporary file + # to check that the filesystem is even enabled to support xattrs + image_cache_dir = self.base_dir + fake_image_filepath = os.path.join(image_cache_dir, 'checkme') + with open(fake_image_filepath, 'wb') as fake_file: + fake_file.write("XXX") + fake_file.flush() + try: + set_xattr(fake_image_filepath, 'hits', '1') + except IOError as e: + if e.errno == errno.EOPNOTSUPP: + msg = (_("The device housing the image cache directory " + "%(image_cache_dir)s does not support xattr. It is" + " likely you need to edit your fstab and add the " + "user_xattr option to the appropriate line for the" + " device housing the cache directory.") % + {'image_cache_dir': image_cache_dir}) + LOG.error(msg) + raise exception.BadDriverConfiguration(driver_name="xattr", + reason=msg) + else: + # Cleanup after ourselves... + if os.path.exists(fake_image_filepath): + os.unlink(fake_image_filepath) + + def get_cache_size(self): + """ + Returns the total size in bytes of the image cache. + """ + sizes = [] + for path in get_all_regular_files(self.base_dir): + file_info = os.stat(path) + sizes.append(file_info[stat.ST_SIZE]) + return sum(sizes) + + def get_hit_count(self, image_id): + """ + Return the number of hits that an image has. + + :param image_id: Opaque image identifier + """ + if not self.is_cached(image_id): + return 0 + + path = self.get_image_filepath(image_id) + return int(get_xattr(path, 'hits', default=0)) + + def get_cached_images(self): + """ + Returns a list of records about cached images. 
+ """ + LOG.debug("Gathering cached image entries.") + entries = [] + for path in get_all_regular_files(self.base_dir): + image_id = os.path.basename(path) + + entry = {} + entry['image_id'] = image_id + + file_info = os.stat(path) + entry['last_modified'] = file_info[stat.ST_MTIME] + entry['last_accessed'] = file_info[stat.ST_ATIME] + entry['size'] = file_info[stat.ST_SIZE] + entry['hits'] = self.get_hit_count(image_id) + + entries.append(entry) + entries.sort() # Order by ID + return entries + + def is_cached(self, image_id): + """ + Returns True if the image with the supplied ID has its image + file cached. + + :param image_id: Image ID + """ + return os.path.exists(self.get_image_filepath(image_id)) + + def is_cacheable(self, image_id): + """ + Returns True if the image with the supplied ID can have its + image file cached, False otherwise. + + :param image_id: Image ID + """ + # Make sure we're not already cached or caching the image + return not (self.is_cached(image_id) or + self.is_being_cached(image_id)) + + def is_being_cached(self, image_id): + """ + Returns True if the image with supplied id is currently + in the process of having its image file cached. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id, 'incomplete') + return os.path.exists(path) + + def is_queued(self, image_id): + """ + Returns True if the image identifier is in our cache queue. + """ + path = self.get_image_filepath(image_id, 'queue') + return os.path.exists(path) + + def delete_all_cached_images(self): + """ + Removes all cached image files and any attributes about the images + """ + deleted = 0 + for path in get_all_regular_files(self.base_dir): + delete_cached_file(path) + deleted += 1 + return deleted + + def delete_cached_image(self, image_id): + """ + Removes a specific cached image file and any attributes about the image + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id) + delete_cached_file(path) + + def delete_all_queued_images(self): + """ + Removes all queued image files and any attributes about the images + """ + files = [f for f in get_all_regular_files(self.queue_dir)] + for file in files: + os.unlink(file) + return len(files) + + def delete_queued_image(self, image_id): + """ + Removes a specific queued image file and any attributes about the image + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id, 'queue') + if os.path.exists(path): + os.unlink(path) + + def get_least_recently_accessed(self): + """ + Return a tuple containing the image_id and size of the least recently + accessed cached file, or None if no cached files. + """ + stats = [] + for path in get_all_regular_files(self.base_dir): + file_info = os.stat(path) + stats.append((file_info[stat.ST_ATIME], # access time + file_info[stat.ST_SIZE], # size in bytes + path)) # absolute path + + if not stats: + return None + + stats.sort() + return os.path.basename(stats[0][2]), stats[0][1] + + @contextmanager + def open_for_write(self, image_id): + """ + Open a file for writing the image file for an image + with supplied identifier. 
+ + :param image_id: Image ID + """ + incomplete_path = self.get_image_filepath(image_id, 'incomplete') + + def set_attr(key, value): + set_xattr(incomplete_path, key, value) + + def commit(): + set_attr('hits', 0) + + final_path = self.get_image_filepath(image_id) + LOG.debug("Fetch finished, moving " + "'%(incomplete_path)s' to '%(final_path)s'", + dict(incomplete_path=incomplete_path, + final_path=final_path)) + os.rename(incomplete_path, final_path) + + # Make sure that we "pop" the image from the queue... + if self.is_queued(image_id): + LOG.debug("Removing image '%s' from queue after " + "caching it." % image_id) + os.unlink(self.get_image_filepath(image_id, 'queue')) + + def rollback(e): + set_attr('error', utils.exception_to_str(e)) + + invalid_path = self.get_image_filepath(image_id, 'invalid') + LOG.debug("Fetch of cache file failed (%(e)s), rolling back by " + "moving '%(incomplete_path)s' to " + "'%(invalid_path)s'" % + {'e': utils.exception_to_str(e), + 'incomplete_path': incomplete_path, + 'invalid_path': invalid_path}) + os.rename(incomplete_path, invalid_path) + + try: + with open(incomplete_path, 'wb') as cache_file: + yield cache_file + except Exception as e: + with excutils.save_and_reraise_exception(): + rollback(e) + else: + commit() + finally: + # if the generator filling the cache file neither raises an + # exception, nor completes fetching all data, neither rollback + # nor commit will have been called, so the incomplete file + # will persist - in that case remove it as it is unusable + # example: ^c from client fetch + if os.path.exists(incomplete_path): + rollback('incomplete fetch') + + @contextmanager + def open_for_read(self, image_id): + """ + Open and yield file for reading the image file for an image + with supplied identifier. + + :param image_id: Image ID + """ + path = self.get_image_filepath(image_id) + with open(path, 'rb') as cache_file: + yield cache_file + path = self.get_image_filepath(image_id) + inc_xattr(path, 'hits', 1) + + def queue_image(self, image_id): + """ + This adds a image to be cache to the queue. + + If the image already exists in the queue or has already been + cached, we return False, True otherwise + + :param image_id: Image ID + """ + if self.is_cached(image_id): + msg = _LI("Not queueing image '%s'. Already cached.") % image_id + LOG.info(msg) + return False + + if self.is_being_cached(image_id): + msg = _LI("Not queueing image '%s'. Already being " + "written to cache") % image_id + LOG.info(msg) + return False + + if self.is_queued(image_id): + msg = _LI("Not queueing image '%s'. Already queued.") % image_id + LOG.info(msg) + return False + + path = self.get_image_filepath(image_id, 'queue') + LOG.debug("Queueing image '%s'.", image_id) + + # Touch the file to add it to the queue + with open(path, "w"): + pass + + return True + + def get_queued_images(self): + """ + Returns a list of image IDs that are in the queue. The + list should be sorted by the time the image ID was inserted + into the queue. 
+ """ + files = [f for f in get_all_regular_files(self.queue_dir)] + items = [] + for path in files: + mtime = os.path.getmtime(path) + items.append((mtime, os.path.basename(path))) + + items.sort() + return [image_id for (modtime, image_id) in items] + + def _reap_old_files(self, dirpath, entry_type, grace=None): + now = time.time() + reaped = 0 + for path in get_all_regular_files(dirpath): + mtime = os.path.getmtime(path) + age = now - mtime + if not grace: + LOG.debug("No grace period, reaping '%(path)s'" + " immediately", {'path': path}) + delete_cached_file(path) + reaped += 1 + elif age > grace: + LOG.debug("Cache entry '%(path)s' exceeds grace period, " + "(%(age)i s > %(grace)i s)", + {'path': path, 'age': age, 'grace': grace}) + delete_cached_file(path) + reaped += 1 + + LOG.info(_LI("Reaped %(reaped)s %(entry_type)s cache entries"), + {'reaped': reaped, 'entry_type': entry_type}) + return reaped + + def reap_invalid(self, grace=None): + """Remove any invalid cache entries + + :param grace: Number of seconds to keep an invalid entry around for + debugging purposes. If None, then delete immediately. + """ + return self._reap_old_files(self.invalid_dir, 'invalid', grace=grace) + + def reap_stalled(self, grace=None): + """Remove any stalled cache entries + + :param grace: Number of seconds to keep an invalid entry around for + debugging purposes. If None, then delete immediately. + """ + return self._reap_old_files(self.incomplete_dir, 'stalled', + grace=grace) + + def clean(self, stall_time=None): + """ + Delete any image files in the invalid directory and any + files in the incomplete directory that are older than a + configurable amount of time. + """ + self.reap_invalid() + + if stall_time is None: + stall_time = CONF.image_cache_stall_time + + self.reap_stalled(stall_time) + + +def get_all_regular_files(basepath): + for fname in os.listdir(basepath): + path = os.path.join(basepath, fname) + if os.path.isfile(path): + yield path + + +def delete_cached_file(path): + if os.path.exists(path): + LOG.debug("Deleting image cache file '%s'" % path) + os.unlink(path) + else: + LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to" + " delete") % path) + + +def _make_namespaced_xattr_key(key, namespace='user'): + """ + Create a fully-qualified xattr-key by including the intended namespace. + + Namespacing differs among OSes[1]: + + FreeBSD: user, system + Linux: user, system, trusted, security + MacOS X: not needed + + Mac OS X won't break if we include a namespace qualifier, so, for + simplicity, we always include it. + + -- + [1] http://en.wikipedia.org/wiki/Extended_file_attributes + """ + namespaced_key = ".".join([namespace, key]) + return namespaced_key + + +def get_xattr(path, key, **kwargs): + """Return the value for a particular xattr + + If the key doesn't not exist, or xattrs aren't supported by the file + system then a KeyError will be raised, that is, unless you specify a + default using kwargs. + """ + namespaced_key = _make_namespaced_xattr_key(key) + try: + return xattr.getxattr(path, namespaced_key) + except IOError: + if 'default' in kwargs: + return kwargs['default'] + else: + raise + + +def set_xattr(path, key, value): + """Set the value of a specified xattr. + + If xattrs aren't supported by the file-system, we skip setting the value. + """ + namespaced_key = _make_namespaced_xattr_key(key) + xattr.setxattr(path, namespaced_key, str(value)) + + +def inc_xattr(path, key, n=1): + """ + Increment the value of an xattr (assuming it is an integer). 
+ + BEWARE, this code *does* have a RACE CONDITION, since the + read/update/write sequence is not atomic. + + Since the use-case for this function is collecting stats--not critical-- + the benefits of simple, lock-free code out-weighs the possibility of an + occasional hit not being counted. + """ + count = int(get_xattr(path, key)) + count += n + set_xattr(path, key, str(count)) diff --git a/code/daisy/daisy/image_cache/prefetcher.py b/code/daisy/daisy/image_cache/prefetcher.py new file mode 100755 index 00000000..9e009e89 --- /dev/null +++ b/code/daisy/daisy/image_cache/prefetcher.py @@ -0,0 +1,86 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Prefetches images into the Image Cache +""" + +import eventlet +import glance_store +from oslo_log import log as logging + +from daisy.common import exception +from daisy import context +from daisy import i18n +from daisy.image_cache import base +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) +_LI = i18n._LI +_LW = i18n._LW + + +class Prefetcher(base.CacheApp): + + def __init__(self): + super(Prefetcher, self).__init__() + registry.configure_registry_client() + registry.configure_registry_admin_creds() + + def fetch_image_into_cache(self, image_id): + ctx = context.RequestContext(is_admin=True, show_deleted=True) + + try: + image_meta = registry.get_image_metadata(ctx, image_id) + if image_meta['status'] != 'active': + LOG.warn(_LW("Image '%s' is not active. Not caching.") % + image_id) + return False + + except exception.NotFound: + LOG.warn(_LW("No metadata found for image '%s'") % image_id) + return False + + location = image_meta['location'] + image_data, image_size = glance_store.get_from_backend(location, + context=ctx) + LOG.debug("Caching image '%s'", image_id) + cache_tee_iter = self.cache.cache_tee_iter(image_id, image_data, + image_meta['checksum']) + # Image is tee'd into cache and checksum verified + # as we iterate + list(cache_tee_iter) + return True + + def run(self): + + images = self.cache.get_queued_images() + if not images: + LOG.debug("Nothing to prefetch.") + return True + + num_images = len(images) + LOG.debug("Found %d images to prefetch", num_images) + + pool = eventlet.GreenPool(num_images) + results = pool.imap(self.fetch_image_into_cache, images) + successes = sum([1 for r in results if r is True]) + if successes != num_images: + LOG.warn(_LW("Failed to successfully cache all " + "images in queue.")) + return False + + LOG.info(_LI("Successfully cached all %d images") % num_images) + return True diff --git a/code/daisy/daisy/image_cache/pruner.py b/code/daisy/daisy/image_cache/pruner.py new file mode 100755 index 00000000..5aff4df9 --- /dev/null +++ b/code/daisy/daisy/image_cache/pruner.py @@ -0,0 +1,26 @@ +# Copyright 2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Prunes the Image Cache +""" + +from daisy.image_cache import base + + +class Pruner(base.CacheApp): + + def run(self): + self.cache.prune() diff --git a/code/daisy/daisy/listener.py b/code/daisy/daisy/listener.py new file mode 100755 index 00000000..b44ee529 --- /dev/null +++ b/code/daisy/daisy/listener.py @@ -0,0 +1,90 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from oslo.config import cfg +from oslo import messaging +from oslo_log import log as logging +import stevedore + +from daisy import i18n +from daisy.openstack.common import service as os_service + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + + +class NotificationEndpoint(object): + + def __init__(self): + self.plugins = get_plugins() + self.notification_target_map = dict() + for plugin in self.plugins: + try: + event_list = plugin.obj.get_notification_supported_events() + for event in event_list: + self.notification_target_map[event.lower()] = plugin.obj + except Exception as e: + LOG.error(_LE("Failed to retrieve supported notification" + " events from search plugins " + "%(ext)s: %(e)s") % + {'ext': plugin.name, 'e': e}) + + def info(self, ctxt, publisher_id, event_type, payload, metadata): + event_type_l = event_type.lower() + if event_type_l in self.notification_target_map: + plugin = self.notification_target_map[event_type_l] + handler = plugin.get_notification_handler() + handler.process( + ctxt, + publisher_id, + event_type, + payload, + metadata) + + +class ListenerService(os_service.Service): + def __init__(self, *args, **kwargs): + super(ListenerService, self).__init__(*args, **kwargs) + self.listeners = [] + + def start(self): + super(ListenerService, self).start() + transport = messaging.get_transport(cfg.CONF) + targets = [ + messaging.Target(topic="notifications", exchange="daisy") + ] + endpoints = [ + NotificationEndpoint() + ] + listener = messaging.get_notification_listener( + transport, + targets, + endpoints) + listener.start() + self.listeners.append(listener) + + def stop(self): + for listener in self.listeners: + listener.stop() + listener.wait() + super(ListenerService, self).stop() + + +def get_plugins(): + namespace = 'daisy.search.index_backend' + ext_manager = stevedore.extension.ExtensionManager( + namespace, invoke_on_load=True) + return ext_manager.extensions diff --git a/code/daisy/daisy/locale/en_GB/LC_MESSAGES/glance-log-info.po b/code/daisy/daisy/locale/en_GB/LC_MESSAGES/glance-log-info.po new file mode 100755 index 00000000..942cb602 --- /dev/null +++ 
b/code/daisy/daisy/locale/en_GB/LC_MESSAGES/glance-log-info.po @@ -0,0 +1,416 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# +# Translators: +# Andi Chandler , 2014 +msgid "" +msgstr "" +"Project-Id-Version: Glance\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-03 06:02+0000\n" +"PO-Revision-Date: 2015-04-01 21:54+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/" +"glance/language/en_GB/)\n" +"Language: en_GB\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: glance/scrubber.py:459 +#, python-format +msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" +msgstr "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" + +#: glance/scrubber.py:473 +msgid "Daemon Shutdown on KeyboardInterrupt" +msgstr "Daemon Shutdown on KeyboardInterrupt" + +#: glance/scrubber.py:485 +#, python-format +msgid "Initializing scrubber with configuration: %s" +msgstr "Initializing scrubber with configuration: %s" + +#: glance/scrubber.py:558 +#, python-format +msgid "Scrubbing image %(id)s from %(count)d locations." +msgstr "Scrubbing image %(id)s from %(count)d locations." + +#: glance/scrubber.py:581 +#, python-format +msgid "Image %s has been deleted." +msgstr "" + +#: glance/scrubber.py:633 +#, python-format +msgid "Getting images deleted before %s" +msgstr "Getting images deleted before %s" + +#: glance/api/middleware/cache.py:61 +msgid "Initialized image cache middleware" +msgstr "Initialised image cache middleware" + +#: glance/api/middleware/cache_manage.py:74 +msgid "Initialized image cache management middleware" +msgstr "Initialised image cache management middleware" + +#: glance/api/middleware/gzip.py:36 +msgid "Initialized gzip middleware" +msgstr "Initialised gzip middleware" + +#: glance/api/v1/images.py:690 +#, python-format +msgid "Uploaded data of image %s from request payload successfully." +msgstr "" + +#: glance/api/v1/images.py:752 +msgid "Triggering asynchronous copy from external source" +msgstr "Triggering asynchronous copy from external source" + +#: glance/api/v1/upload_utils.py:126 +#, python-format +msgid "Cleaning up %s after exceeding the quota" +msgstr "Cleaning up %s after exceeding the quota" + +#: glance/api/v1/upload_utils.py:175 +#, python-format +msgid "" +"Image %s could not be found after upload. The image may have been deleted " +"during the upload." +msgstr "" +"Image %s could not be found after upload. The image may have been deleted " +"during the upload." 
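The xattr cache driver introduced earlier in this diff (code/daisy/daisy/image_cache/drivers/xattr.py) keeps per-image bookkeeping such as the hit counter in extended file attributes, always qualified with the "user." namespace. A minimal sketch of its module-level helpers follows, assuming the daisy package is importable and that the cache path below (purely illustrative) sits on an xattr-capable filesystem; this is illustration, not part of the change itself:

    # Sketch only: exercises the helpers defined in xattr.py above.
    from daisy.image_cache.drivers import xattr as xattr_driver

    cache_file = '/var/lib/daisy/image-cache/9f1c8e2a'   # hypothetical cache file path

    xattr_driver.set_xattr(cache_file, 'hits', 0)        # stored as the "user.hits" attribute
    xattr_driver.inc_xattr(cache_file, 'hits', 1)        # read-modify-write; not atomic, per its docstring
    hits = xattr_driver.get_xattr(cache_file, 'hits', default=b'0')
    print(int(hits))                                      # 1 if the attribute could be written

Note that get_xattr falls back to the supplied default when the attribute is missing or xattrs are unsupported, which is why a hit counter read this way can silently stay at its default value.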
+ +#: glance/api/v2/image_actions.py:51 +#, python-format +msgid "Image %s is deactivated" +msgstr "" + +#: glance/api/v2/image_actions.py:66 +#, python-format +msgid "Image %s is reactivated" +msgstr "" + +#: glance/async/flows/base_import.py:348 +#, python-format +msgid "%(task_id)s of %(task_type)s completed" +msgstr "" + +#: glance/cmd/replicator.py:372 +#, python-format +msgid "Storing: %s" +msgstr "Storing: %s" + +#: glance/cmd/replicator.py:445 +#, python-format +msgid "Considering: %s" +msgstr "Considering: %s" + +#: glance/cmd/replicator.py:471 glance/cmd/replicator.py:546 +#, python-format +msgid "Image %s metadata has changed" +msgstr "Image %s metadata has changed" + +#: glance/cmd/replicator.py:553 +#, python-format +msgid "Image %s is being synced" +msgstr "Image %s is being synced" + +#: glance/common/wsgi.py:308 glance/openstack/common/service.py:326 +#, python-format +msgid "Starting %d workers" +msgstr "Starting %d workers" + +#: glance/common/wsgi.py:321 +#, python-format +msgid "Removed dead child %s" +msgstr "" + +#: glance/common/wsgi.py:324 +#, python-format +msgid "Removed stale child %s" +msgstr "" + +#: glance/common/wsgi.py:336 +msgid "All workers have terminated. Exiting" +msgstr "All workers have terminated. Exiting" + +#: glance/common/wsgi.py:353 +msgid "Caught keyboard interrupt. Exiting." +msgstr "Caught keyboard interrupt. Exiting." + +#: glance/common/wsgi.py:432 +#, python-format +msgid "Child %d exiting normally" +msgstr "Child %d exiting normally" + +#: glance/common/wsgi.py:437 +#, python-format +msgid "Started child %s" +msgstr "Started child %s" + +#: glance/common/wsgi.py:466 +msgid "Starting single process server" +msgstr "Starting single process server" + +#: glance/common/artifacts/loader.py:131 glance/common/artifacts/loader.py:155 +#, python-format +msgid "Artifact %s has been successfully loaded" +msgstr "" + +#: glance/common/scripts/__init__.py:32 +#, python-format +msgid "" +"Loading known task scripts for task_id %(task_id)s of type %(task_type)s" +msgstr "" + +#: glance/common/scripts/image_import/main.py:41 +#, python-format +msgid "Task %(task_id)s beginning import execution." +msgstr "" + +#: glance/common/scripts/image_import/main.py:152 +#, python-format +msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" +msgstr "" + +#: glance/common/scripts/image_import/main.py:161 +#, python-format +msgid "Task %(task_id)s: Could not import image file %(image_data)s" +msgstr "" + +#: glance/db/simple/api.py:62 +#, python-format +msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" +msgstr "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" + +#: glance/db/simple/api.py:68 +#, python-format +msgid "Returning %(funcname)s: %(output)s" +msgstr "Returning %(funcname)s: %(output)s" + +#: glance/db/simple/api.py:2002 +#, python-format +msgid "Could not find artifact %s" +msgstr "" + +#: glance/db/simple/api.py:2006 +msgid "Unable to get deleted image" +msgstr "Unable to get deleted image" + +#: glance/db/sqlalchemy/metadata.py:152 +#, python-format +msgid "Table %s has been cleared" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:223 +#, python-format +msgid "Overwriting namespace %s" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:239 +#, python-format +msgid "Skipping namespace %s. It already exists in the database." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:330 +#, python-format +msgid "File %s loaded to database." 
+msgstr "" + +#: glance/db/sqlalchemy/metadata.py:332 +msgid "Metadata loading finished" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:441 +#, python-format +msgid "Namespace %(namespace)s saved in %(file)s" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:101 +#, python-format +msgid "creating table %(table)s" +msgstr "creating table %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:107 +#, python-format +msgid "dropping table %(table)s" +msgstr "dropping table %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:72 +msgid "" +"'metadata_encryption_key' was not specified in the config file or a config " +"file was not specified. This means that this migration is a NOOP." +msgstr "" +"'metadata_encryption_key' was not specified in the config file or a config " +"file was not specified. This means that this migration is a NOOP." + +#: glance/domain/__init__.py:406 +#, python-format +msgid "" +"Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" +msgstr "" + +#: glance/image_cache/__init__.py:71 +#, python-format +msgid "Image cache loaded driver '%s'." +msgstr "Image cache loaded driver '%s'." + +#: glance/image_cache/__init__.py:81 glance/image_cache/__init__.py:100 +msgid "Defaulting to SQLite driver." +msgstr "Defaulting to SQLite driver." + +#: glance/image_cache/prefetcher.py:85 +#, python-format +msgid "Successfully cached all %d images" +msgstr "Successfully cached all %d images" + +#: glance/image_cache/drivers/sqlite.py:414 +#: glance/image_cache/drivers/xattr.py:343 +#, python-format +msgid "Not queueing image '%s'. Already cached." +msgstr "Not queueing image '%s'. Already cached." + +#: glance/image_cache/drivers/sqlite.py:419 +#: glance/image_cache/drivers/xattr.py:348 +#, python-format +msgid "Not queueing image '%s'. Already being written to cache" +msgstr "Not queueing image '%s'. Already being written to cache" + +#: glance/image_cache/drivers/sqlite.py:425 +#: glance/image_cache/drivers/xattr.py:354 +#, python-format +msgid "Not queueing image '%s'. Already queued." +msgstr "Not queueing image '%s'. Already queued." 
+ +#: glance/image_cache/drivers/sqlite.py:443 +#, python-format +msgid "Removed invalid cache file %s" +msgstr "Removed invalid cache file %s" + +#: glance/image_cache/drivers/sqlite.py:457 +#, python-format +msgid "Removed stalled cache file %s" +msgstr "Removed stalled cache file %s" + +#: glance/image_cache/drivers/xattr.py:400 +#, python-format +msgid "Reaped %(reaped)s %(entry_type)s cache entries" +msgstr "Reaped %(reaped)s %(entry_type)s cache entries" + +#: glance/openstack/common/eventlet_backdoor.py:146 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d" + +#: glance/openstack/common/service.py:173 +#, python-format +msgid "Caught %s, exiting" +msgstr "Caught %s, exiting" + +#: glance/openstack/common/service.py:227 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Parent process has died unexpectedly, exiting" + +#: glance/openstack/common/service.py:258 +#, python-format +msgid "Child caught %s, exiting" +msgstr "Child caught %s, exiting" + +#: glance/openstack/common/service.py:297 +msgid "Forking too fast, sleeping" +msgstr "Forking too fast, sleeping" + +#: glance/openstack/common/service.py:316 +#, python-format +msgid "Started child %d" +msgstr "Started child %d" + +#: glance/openstack/common/service.py:343 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d killed by signal %(sig)d" + +#: glance/openstack/common/service.py:347 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s exited with status %(code)d" + +#: glance/openstack/common/service.py:382 +#, python-format +msgid "Caught %s, stopping children" +msgstr "Caught %s, stopping children" + +#: glance/openstack/common/service.py:391 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: glance/openstack/common/service.py:407 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Waiting on %d children to exit" + +#: glance/quota/__init__.py:330 +#, python-format +msgid "Cleaning up %s after exceeding the quota." +msgstr "Cleaning up %s after exceeding the quota." 
+ +#: glance/registry/api/v1/images.py:343 glance/registry/api/v1/images.py:386 +#: glance/registry/api/v1/images.py:491 +#, python-format +msgid "Image %(id)s not found" +msgstr "Image %(id)s not found" + +#: glance/registry/api/v1/images.py:349 glance/registry/api/v1/images.py:381 +#: glance/registry/api/v1/images.py:503 +#, python-format +msgid "Access denied to image %(id)s but returning 'not found'" +msgstr "Access denied to image %(id)s but returning 'not found'" + +#: glance/registry/api/v1/images.py:371 +#, python-format +msgid "Successfully deleted image %(id)s" +msgstr "Successfully deleted image %(id)s" + +#: glance/registry/api/v1/images.py:375 +#, python-format +msgid "Delete denied for public image %(id)s" +msgstr "Delete denied for public image %(id)s" + +#: glance/registry/api/v1/images.py:415 +#, python-format +msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" +msgstr "Rejecting image creation request for invalid image id '%(bad_id)s'" + +#: glance/registry/api/v1/images.py:428 +#, python-format +msgid "Successfully created image %(id)s" +msgstr "Successfully created image %(id)s" + +#: glance/registry/api/v1/images.py:482 +#, python-format +msgid "Updating metadata for image %(id)s" +msgstr "Updating metadata for image %(id)s" + +#: glance/registry/api/v1/images.py:497 +#, python-format +msgid "Update denied for public image %(id)s" +msgstr "Update denied for public image %(id)s" + +#: glance/registry/api/v1/members.py:198 +#, python-format +msgid "Successfully updated memberships for image %(id)s" +msgstr "Successfully updated memberships for image %(id)s" + +#: glance/registry/api/v1/members.py:271 +#, python-format +msgid "Successfully updated a membership for image %(id)s" +msgstr "Successfully updated a membership for image %(id)s" + +#: glance/registry/api/v1/members.py:320 +#, python-format +msgid "Successfully deleted a membership from image %(id)s" +msgstr "Successfully deleted a membership from image %(id)s" diff --git a/code/daisy/daisy/locale/fr/LC_MESSAGES/glance-log-info.po b/code/daisy/daisy/locale/fr/LC_MESSAGES/glance-log-info.po new file mode 100755 index 00000000..406ce3d9 --- /dev/null +++ b/code/daisy/daisy/locale/fr/LC_MESSAGES/glance-log-info.po @@ -0,0 +1,418 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# +# Translators: +# Maxime COQUEREL , 2014 +msgid "" +msgstr "" +"Project-Id-Version: Glance\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-20 11:09+0200\n" +"PO-Revision-Date: 2015-04-01 21:54+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: French (http://www.transifex.com/projects/p/glance/language/" +"fr/)\n" +"Language: fr\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: glance/scrubber.py:459 +#, python-format +msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" +msgstr "" +"D茅marrage du serveur d茅mon : wakeup_time=%(wakeup_time)s threads=%(threads)s" + +#: glance/scrubber.py:473 +msgid "Daemon Shutdown on KeyboardInterrupt" +msgstr "Arr锚t du serveur d茅mon sur KeyboardInterrupt" + +#: glance/scrubber.py:485 +#, python-format +msgid "Initializing scrubber with configuration: %s" +msgstr "" + +#: glance/scrubber.py:558 +#, python-format +msgid "Scrubbing image %(id)s from %(count)d locations." 
+msgstr "" + +#: glance/scrubber.py:581 +#, python-format +msgid "Image %s has been deleted." +msgstr "" + +#: glance/scrubber.py:633 +#, python-format +msgid "Getting images deleted before %s" +msgstr "Obtention des images supprim茅es avant %s" + +#: glance/api/middleware/cache.py:61 +msgid "Initialized image cache middleware" +msgstr "Middleware de cache d'image initialis茅" + +#: glance/api/middleware/cache_manage.py:74 +msgid "Initialized image cache management middleware" +msgstr "Middleware de gestion du cache d'image initialis茅" + +#: glance/api/middleware/gzip.py:36 +msgid "Initialized gzip middleware" +msgstr "" + +#: glance/api/v1/images.py:691 +#, python-format +msgid "Uploaded data of image %s from request payload successfully." +msgstr "" + +#: glance/api/v1/images.py:753 +msgid "Triggering asynchronous copy from external source" +msgstr "D茅clenchement de copie asynchrone depuis une source externe" + +#: glance/api/v1/upload_utils.py:126 +#, python-format +msgid "Cleaning up %s after exceeding the quota" +msgstr "" + +#: glance/api/v1/upload_utils.py:175 +#, python-format +msgid "" +"Image %s could not be found after upload. The image may have been deleted " +"during the upload." +msgstr "" + +#: glance/api/v2/image_actions.py:51 +#, python-format +msgid "Image %s is deactivated" +msgstr "" + +#: glance/api/v2/image_actions.py:66 +#, python-format +msgid "Image %s is reactivated" +msgstr "" + +#: glance/async/flows/base_import.py:348 +#, python-format +msgid "%(task_id)s of %(task_type)s completed" +msgstr "" + +#: glance/cmd/replicator.py:372 +#, python-format +msgid "Storing: %s" +msgstr "Stockage: %s" + +#: glance/cmd/replicator.py:445 +#, python-format +msgid "Considering: %s" +msgstr "consid茅rant : %s" + +#: glance/cmd/replicator.py:471 glance/cmd/replicator.py:546 +#, python-format +msgid "Image %s metadata has changed" +msgstr "" + +#: glance/cmd/replicator.py:553 +#, python-format +msgid "Image %s is being synced" +msgstr "" + +#: glance/common/wsgi.py:308 glance/openstack/common/service.py:326 +#, python-format +msgid "Starting %d workers" +msgstr "D茅marrage des workers %d" + +#: glance/common/wsgi.py:321 +#, python-format +msgid "Removed dead child %s" +msgstr "" + +#: glance/common/wsgi.py:324 +#, python-format +msgid "Removed stale child %s" +msgstr "" + +#: glance/common/wsgi.py:336 +msgid "All workers have terminated. Exiting" +msgstr "Tous les agents ont termin茅. Quittez" + +#: glance/common/wsgi.py:353 +msgid "Caught keyboard interrupt. Exiting." +msgstr "Interruption intercept茅e de clavier. Fermeture du programme en cours." + +#: glance/common/wsgi.py:432 +#, python-format +msgid "Child %d exiting normally" +msgstr "Sortie normale de l'enfant %d" + +#: glance/common/wsgi.py:437 +#, python-format +msgid "Started child %s" +msgstr "Enfant d茅marr茅 %s" + +#: glance/common/wsgi.py:466 +msgid "Starting single process server" +msgstr "D茅marrage de serveur de processus unique" + +#: glance/common/artifacts/loader.py:131 glance/common/artifacts/loader.py:155 +#, python-format +msgid "Artifact %s has been successfully loaded" +msgstr "" + +#: glance/common/scripts/__init__.py:32 +#, python-format +msgid "" +"Loading known task scripts for task_id %(task_id)s of type %(task_type)s" +msgstr "" + +#: glance/common/scripts/image_import/main.py:41 +#, python-format +msgid "Task %(task_id)s beginning import execution." 
+msgstr "" + +#: glance/common/scripts/image_import/main.py:152 +#, python-format +msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" +msgstr "" + +#: glance/common/scripts/image_import/main.py:161 +#, python-format +msgid "Task %(task_id)s: Could not import image file %(image_data)s" +msgstr "" + +#: glance/db/simple/api.py:62 +#, python-format +msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" +msgstr "Appel de %(funcname)s : args = %(args)s, kwargs = %(kwargs)s" + +#: glance/db/simple/api.py:68 +#, python-format +msgid "Returning %(funcname)s: %(output)s" +msgstr "Retour de %(funcname)s : %(output)s" + +#: glance/db/simple/api.py:2002 +#, python-format +msgid "Could not find artifact %s" +msgstr "" + +#: glance/db/simple/api.py:2006 +msgid "Unable to get deleted image" +msgstr "Impossible de supprimer l'image" + +#: glance/db/sqlalchemy/metadata.py:161 +#, python-format +msgid "Table %s has been cleared" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:232 +#, python-format +msgid "Overwriting namespace %s" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:248 +#, python-format +msgid "Skipping namespace %s. It already exists in the database." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:339 +#, python-format +msgid "File %s loaded to database." +msgstr "Fichier %s charg茅 dans la base de donn茅es." + +#: glance/db/sqlalchemy/metadata.py:341 +msgid "Metadata loading finished" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:450 +#, python-format +msgid "Namespace %(namespace)s saved in %(file)s" +msgstr "Namespace %(namespace)s est sauvegard茅 dans %(file)s" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:101 +#, python-format +msgid "creating table %(table)s" +msgstr "cr茅ation de la table %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:107 +#, python-format +msgid "dropping table %(table)s" +msgstr "suppression de la table %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:72 +msgid "" +"'metadata_encryption_key' was not specified in the config file or a config " +"file was not specified. This means that this migration is a NOOP." +msgstr "" +"L'茅l茅ment 'metadata_encryption_key' n'est pas sp茅cifi茅 dans le fichier " +"config ou bien aucun fichier config n'est sp茅cifi茅. Cela signifie que cette " +"migration est un NOOP." + +#: glance/domain/__init__.py:406 +#, python-format +msgid "" +"Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" +msgstr "" + +#: glance/image_cache/__init__.py:71 +#, python-format +msgid "Image cache loaded driver '%s'." +msgstr "Le cache d'image a charg茅 le pilote '%s'." + +#: glance/image_cache/__init__.py:81 glance/image_cache/__init__.py:100 +msgid "Defaulting to SQLite driver." +msgstr "Utilisation par d茅faut du pilote SQLite." + +#: glance/image_cache/prefetcher.py:85 +#, python-format +msgid "Successfully cached all %d images" +msgstr "Mise en cache r茅ussie de l'ensemble des %d images" + +#: glance/image_cache/drivers/sqlite.py:414 +#: glance/image_cache/drivers/xattr.py:343 +#, python-format +msgid "Not queueing image '%s'. Already cached." +msgstr "Aucune mise en file d'attente de l'image '%s'. D茅j脿 en cache." + +#: glance/image_cache/drivers/sqlite.py:419 +#: glance/image_cache/drivers/xattr.py:348 +#, python-format +msgid "Not queueing image '%s'. Already being written to cache" +msgstr "Aucune mise en file d'attente de l'image '%s'. 
D茅j脿 茅crite en cache" + +#: glance/image_cache/drivers/sqlite.py:425 +#: glance/image_cache/drivers/xattr.py:354 +#, python-format +msgid "Not queueing image '%s'. Already queued." +msgstr "Aucune mise en file d'attente de l'image '%s'. D茅j脿 en file d'attente." + +#: glance/image_cache/drivers/sqlite.py:443 +#, python-format +msgid "Removed invalid cache file %s" +msgstr "Fichier cache non valide supprim茅 %s" + +#: glance/image_cache/drivers/sqlite.py:457 +#, python-format +msgid "Removed stalled cache file %s" +msgstr "Fichier cache bloqu茅 supprim茅 %s" + +#: glance/image_cache/drivers/xattr.py:400 +#, python-format +msgid "Reaped %(reaped)s %(entry_type)s cache entries" +msgstr "%(reaped)s %(entry_type)s entr茅es de cache r茅gul茅es" + +#: glance/openstack/common/eventlet_backdoor.py:146 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet backdoor en 茅coute sur le port %(port)s for process %(pid)d" + +#: glance/openstack/common/service.py:173 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s intercept茅e, sortie" + +#: glance/openstack/common/service.py:227 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Le processus parent s'est soudainement achev茅 sans raison, sortie" + +#: glance/openstack/common/service.py:258 +#, python-format +msgid "Child caught %s, exiting" +msgstr "L'enfant a re莽u %s, sortie" + +#: glance/openstack/common/service.py:297 +msgid "Forking too fast, sleeping" +msgstr "Bifurcation trop rapide, en veille" + +#: glance/openstack/common/service.py:316 +#, python-format +msgid "Started child %d" +msgstr "D茅marrage du d茅pendant %d" + +#: glance/openstack/common/service.py:343 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d du d茅pendant annihil茅s par le signal %(sig)d" + +#: glance/openstack/common/service.py:347 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "%(pid)s du d茅pendant sortis avec le statut %(code)d" + +#: glance/openstack/common/service.py:382 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s intercept茅, arr锚tant les d茅pendants" + +#: glance/openstack/common/service.py:391 +msgid "Wait called after thread killed. Cleaning up." +msgstr "Pause demand茅e apr猫s suppression de thread. Nettoyage." + +#: glance/openstack/common/service.py:407 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "En attente de sortie des d茅pendants %d" + +#: glance/quota/__init__.py:330 +#, python-format +msgid "Cleaning up %s after exceeding the quota." 
+msgstr "" + +#: glance/registry/api/v1/images.py:343 glance/registry/api/v1/images.py:386 +#: glance/registry/api/v1/images.py:491 +#, python-format +msgid "Image %(id)s not found" +msgstr "Image %(id)s non trouv茅" + +#: glance/registry/api/v1/images.py:349 glance/registry/api/v1/images.py:381 +#: glance/registry/api/v1/images.py:503 +#, python-format +msgid "Access denied to image %(id)s but returning 'not found'" +msgstr "Acc猫s refus茅 脿 l'image %(id)s mais renvoi de 'non trouv茅'" + +#: glance/registry/api/v1/images.py:371 +#, python-format +msgid "Successfully deleted image %(id)s" +msgstr "La suppression de l'image %(id)s a abouti" + +#: glance/registry/api/v1/images.py:375 +#, python-format +msgid "Delete denied for public image %(id)s" +msgstr "Suppression refus茅e pour l'image publique %(id)s" + +#: glance/registry/api/v1/images.py:415 +#, python-format +msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" +msgstr "" +"Rejet de la demande de cr茅ation d'image pour l'ID image non valide " +"'%(bad_id)s'" + +#: glance/registry/api/v1/images.py:428 +#, python-format +msgid "Successfully created image %(id)s" +msgstr "La cr茅ation de l'image %(id)s a abouti" + +#: glance/registry/api/v1/images.py:482 +#, python-format +msgid "Updating metadata for image %(id)s" +msgstr "Mise 脿 jour des m茅tadonn茅es pour l'image %(id)s" + +#: glance/registry/api/v1/images.py:497 +#, python-format +msgid "Update denied for public image %(id)s" +msgstr "Mise 脿 jour refus茅e pour l'image publique %(id)s" + +#: glance/registry/api/v1/members.py:198 +#, python-format +msgid "Successfully updated memberships for image %(id)s" +msgstr "Mise 脿 jour des appartenance effectu茅e pour l'image %(id)s" + +#: glance/registry/api/v1/members.py:271 +#, python-format +msgid "Successfully updated a membership for image %(id)s" +msgstr "Mise 脿 jour d'une appartenance effectu茅e pour l'image %(id)s" + +#: glance/registry/api/v1/members.py:320 +#, python-format +msgid "Successfully deleted a membership from image %(id)s" +msgstr "La suppression d'une appartenance de l'image %(id)s a abouti" diff --git a/code/daisy/daisy/locale/glance-log-critical.pot b/code/daisy/daisy/locale/glance-log-critical.pot new file mode 100755 index 00000000..aae02761 --- /dev/null +++ b/code/daisy/daisy/locale/glance-log-critical.pot @@ -0,0 +1,19 @@ +# Translations template for daisy. +# Copyright (C) 2014 ORGANIZATION +# This file is distributed under the same license as the glance project. +# FIRST AUTHOR , 2014. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: glance 2014.2.dev41.gb7968cf\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2014-06-30 06:00+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + diff --git a/code/daisy/daisy/locale/glance-log-error.pot b/code/daisy/daisy/locale/glance-log-error.pot new file mode 100755 index 00000000..912aee59 --- /dev/null +++ b/code/daisy/daisy/locale/glance-log-error.pot @@ -0,0 +1,370 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# FIRST AUTHOR , 2015. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: glance 2015.1.dev42\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-03 06:02+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: glance/listener.py:40 +#, python-format +msgid "" +"Failed to retrieve supported notification events from search plugins " +"%(ext)s: %(e)s" +msgstr "" + +#: glance/location.py:403 +#, python-format +msgid "" +"Glance tried all active locations to get data for image %s but all have " +"failed." +msgstr "" + +#: glance/notifier.py:361 glance/api/common.py:95 +#, python-format +msgid "An error occurred during image.send notification: %(err)s" +msgstr "" + +#: glance/scrubber.py:168 +#, python-format +msgid "%s file can not be read." +msgstr "" + +#: glance/scrubber.py:191 +#, python-format +msgid "%s file can not be wrote." +msgstr "" + +#: glance/scrubber.py:517 +#, python-format +msgid "Can not %(op)s scrub jobs from queue: %(err)s" +msgstr "" + +#: glance/scrubber.py:618 +#, python-format +msgid "%s file can not be created." +msgstr "" + +#: glance/api/common.py:58 +#, python-format +msgid "" +"An error occurred reading from backend storage for image %(image_id)s: " +"%(err)s" +msgstr "" + +#: glance/api/common.py:64 +#, python-format +msgid "" +"Backend storage for image %(image_id)s disconnected after writing only " +"%(bytes_written)d bytes" +msgstr "" + +#: glance/api/common.py:123 +#, python-format +msgid "Invalid value for option user_storage_quota: %(users_quota)s" +msgstr "" + +#: glance/api/middleware/cache.py:173 +#, python-format +msgid "" +"Image cache contained image file for image '%s', however the registry did" +" not contain metadata for that image!" +msgstr "" + +#: glance/api/middleware/cache.py:272 +#, python-format +msgid "could not find %s" +msgstr "" + +#: glance/api/middleware/cache.py:291 +msgid "Checksum header is missing." +msgstr "" + +#: glance/api/v1/images.py:603 +#, python-format +msgid "Copy from external source '%(scheme)s' failed for image: %(image)s" +msgstr "" + +#: glance/api/v1/upload_utils.py:79 +#, python-format +msgid "Unable to kill image %(id)s: " +msgstr "" + +#: glance/api/v1/upload_utils.py:266 +#, python-format +msgid "Received HTTP error while uploading image %s" +msgstr "" + +#: glance/api/v2/image_data.py:60 +#, python-format +msgid "Unable to restore image %(image_id)s: %(e)s" +msgstr "" + +#: glance/api/v2/image_data.py:155 glance/api/v2/image_data.py:159 +msgid "Failed to upload image data due to HTTP error" +msgstr "" + +#: glance/api/v2/image_data.py:164 +msgid "Failed to upload image data due to internal error" +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:207 +#, python-format +msgid "Failed to delete namespace %(namespace)s " +msgstr "" + +#: glance/async/__init__.py:68 +msgid "" +"This execution of Tasks is not setup. Please consult the project " +"documentation for more information on the executors available." +msgstr "" + +#: glance/async/__init__.py:72 +msgid "Internal error occurred while trying to process task." 
+msgstr "" + +#: glance/async/taskflow_executor.py:130 +#, python-format +msgid "Failed to execute task %(task_id)s: %(exc)s" +msgstr "" + +#: glance/async/flows/base_import.py:341 +#, python-format +msgid "Task ID %s" +msgstr "" + +#: glance/async/flows/introspect.py:60 +#, python-format +msgid "Failed to execute introspection %(task_id)s: %(exc)s" +msgstr "" + +#: glance/cmd/index.py:48 +#, python-format +msgid "Failed to setup index extension %(ext)s: %(e)s" +msgstr "" + +#: glance/common/property_utils.py:84 +#, python-format +msgid "Couldn't find property protection file %(file)s: %(error)s." +msgstr "" + +#: glance/common/property_utils.py:91 +#, python-format +msgid "" +"Invalid value '%s' for 'property_protection_rule_format'. The permitted " +"values are 'roles' and 'policies'" +msgstr "" + +#: glance/common/property_utils.py:110 +#, python-format +msgid "" +"Multiple policies '%s' not allowed for a given operation. Policies can be" +" combined in the policy file" +msgstr "" + +#: glance/common/property_utils.py:123 +#, python-format +msgid "" +"Malformed property protection rule in [%(prop)s] %(op)s=%(perm)s: '@' and" +" '!' are mutually exclusive" +msgstr "" + +#: glance/common/property_utils.py:148 +#, python-format +msgid "Encountered a malformed property protection rule %(rule)s: %(error)s." +msgstr "" + +#: glance/common/rpc.py:186 +#, python-format +msgid "" +"RPC Call Error: %(val)s\n" +"%(tb)s" +msgstr "" + +#: glance/common/store_utils.py:71 +#, python-format +msgid "Failed to delete image %(image_id)s from store: %(exc)s" +msgstr "" + +#: glance/common/swift_store_utils.py:87 +#, python-format +msgid "swift config file %(conf_file)s:%(exc)s not found" +msgstr "" + +#: glance/common/swift_store_utils.py:101 +msgid "Invalid format of swift store config cfg" +msgstr "" + +#: glance/common/utils.py:115 +#, python-format +msgid "Error: cooperative_iter exception %s" +msgstr "" + +#: glance/common/utils.py:514 +msgid "" +"Error setting up the debug environment. Verify that the option " +"pydev_worker_debug_host is pointing to a valid hostname or IP on which a " +"pydev server is listening on the port indicated by " +"pydev_worker_debug_port." +msgstr "" + +#: glance/common/wsgi.py:332 +#, python-format +msgid "Not respawning child %d, cannot recover from termination" +msgstr "" + +#: glance/common/artifacts/loader.py:96 +#, python-format +msgid "Unable to load artifacts: %s" +msgstr "" + +#: glance/common/artifacts/loader.py:164 +#, python-format +msgid "Could not load plugin from %(module)s: %(msg)s" +msgstr "" + +#: glance/common/location_strategy/__init__.py:66 +#, python-format +msgid "Failed to load location strategy module %(module)s: %(e)s" +msgstr "" + +#: glance/common/scripts/__init__.py:40 +#, python-format +msgid "" +"This task type %(task_type)s is not supported by the current deployment " +"of daisy. Please refer the documentation provided by OpenStack or your " +"operator for more information." 
+msgstr "" + +#: glance/common/scripts/__init__.py:50 +#, python-format +msgid "Failed to save task %(task_id)s in DB as task_repo is %(task_repo)s" +msgstr "" + +#: glance/common/scripts/utils.py:48 +#, python-format +msgid "Task not found for task_id %s" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:346 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:532 +msgid "Cannot use this parameter with the operator IN" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:539 +#, python-format +msgid "Operator %s is not supported" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:185 +#, python-format +msgid "Json schema files not found in %s. Aborting." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:451 +msgid "" +"To use --prefer_new or --overwrite you need to combine of these options " +"with --merge option." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:456 +msgid "" +"Please provide no more than one option from this list: --prefer_new, " +"--overwrite" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:63 +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:97 +#, python-format +msgid "Invalid store uri for image: %(image_id)s. Details: %(reason)s" +msgstr "" + +#: glance/domain/__init__.py:413 +#, python-format +msgid "" +"Task [%(task_id)s] status failed to change from %(cur_status)s to " +"%(new_status)s" +msgstr "" + +#: glance/domain/__init__.py:521 +#, python-format +msgid "Failed to load the %s executor provided in the config." +msgstr "" + +#: glance/image_cache/__init__.py:271 +#, python-format +msgid "" +"Exception encountered while tee'ing image '%(image_id)s' into cache: " +"%(error)s. Continuing with response." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:398 +#, python-format +msgid "Error executing SQLite call. Got error: %s" +msgstr "" + +#: glance/openstack/common/loopingcall.py:95 +msgid "in fixed duration looping call" +msgstr "" + +#: glance/openstack/common/loopingcall.py:138 +msgid "in dynamic looping call" +msgstr "" + +#: glance/openstack/common/service.py:264 +msgid "Unhandled exception" +msgstr "" + +#: glance/registry/api/v1/images.py:136 +msgid "Unable to get images" +msgstr "" + +#: glance/registry/api/v1/images.py:354 +#, python-format +msgid "Unable to show image %s" +msgstr "" + +#: glance/registry/api/v1/images.py:390 +#, python-format +msgid "Unable to delete image %s" +msgstr "" + +#: glance/registry/api/v1/images.py:442 +#, python-format +msgid "Unable to create image %s" +msgstr "" + +#: glance/registry/api/v1/images.py:515 +#, python-format +msgid "Unable to update image %s" +msgstr "" + +#: glance/registry/client/v1/client.py:129 +#, python-format +msgid "Registry client request %(method)s %(action)s raised %(exc_name)s" +msgstr "" + +#: glance/search/api/v0_1/search.py:243 +#, python-format +msgid "Failed to retrieve RBAC filters from search plugin %(ext)s: %(e)s" +msgstr "" + +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:99 +#, python-format +msgid "Forbidden to create resource type. Reason: %(reason)s" +msgstr "" + diff --git a/code/daisy/daisy/locale/glance-log-info.pot b/code/daisy/daisy/locale/glance-log-info.pot new file mode 100755 index 00000000..4f8d8dc2 --- /dev/null +++ b/code/daisy/daisy/locale/glance-log-info.pot @@ -0,0 +1,408 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. 
+# FIRST AUTHOR , 2015. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: glance 2015.1.dev42\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-03 06:02+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: glance/scrubber.py:459 +#, python-format +msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" +msgstr "" + +#: glance/scrubber.py:473 +msgid "Daemon Shutdown on KeyboardInterrupt" +msgstr "" + +#: glance/scrubber.py:485 +#, python-format +msgid "Initializing scrubber with configuration: %s" +msgstr "" + +#: glance/scrubber.py:558 +#, python-format +msgid "Scrubbing image %(id)s from %(count)d locations." +msgstr "" + +#: glance/scrubber.py:581 +#, python-format +msgid "Image %s has been deleted." +msgstr "" + +#: glance/scrubber.py:633 +#, python-format +msgid "Getting images deleted before %s" +msgstr "" + +#: glance/api/middleware/cache.py:61 +msgid "Initialized image cache middleware" +msgstr "" + +#: glance/api/middleware/cache_manage.py:74 +msgid "Initialized image cache management middleware" +msgstr "" + +#: glance/api/middleware/gzip.py:36 +msgid "Initialized gzip middleware" +msgstr "" + +#: glance/api/v1/images.py:690 +#, python-format +msgid "Uploaded data of image %s from request payload successfully." +msgstr "" + +#: glance/api/v1/images.py:752 +msgid "Triggering asynchronous copy from external source" +msgstr "" + +#: glance/api/v1/upload_utils.py:126 +#, python-format +msgid "Cleaning up %s after exceeding the quota" +msgstr "" + +#: glance/api/v1/upload_utils.py:175 +#, python-format +msgid "" +"Image %s could not be found after upload. The image may have been deleted" +" during the upload." +msgstr "" + +#: glance/api/v2/image_actions.py:51 +#, python-format +msgid "Image %s is deactivated" +msgstr "" + +#: glance/api/v2/image_actions.py:66 +#, python-format +msgid "Image %s is reactivated" +msgstr "" + +#: glance/async/flows/base_import.py:348 +#, python-format +msgid "%(task_id)s of %(task_type)s completed" +msgstr "" + +#: glance/cmd/replicator.py:372 +#, python-format +msgid "Storing: %s" +msgstr "" + +#: glance/cmd/replicator.py:445 +#, python-format +msgid "Considering: %s" +msgstr "" + +#: glance/cmd/replicator.py:471 glance/cmd/replicator.py:546 +#, python-format +msgid "Image %s metadata has changed" +msgstr "" + +#: glance/cmd/replicator.py:553 +#, python-format +msgid "Image %s is being synced" +msgstr "" + +#: glance/common/wsgi.py:308 glance/openstack/common/service.py:326 +#, python-format +msgid "Starting %d workers" +msgstr "" + +#: glance/common/wsgi.py:321 +#, python-format +msgid "Removed dead child %s" +msgstr "" + +#: glance/common/wsgi.py:324 +#, python-format +msgid "Removed stale child %s" +msgstr "" + +#: glance/common/wsgi.py:336 +msgid "All workers have terminated. Exiting" +msgstr "" + +#: glance/common/wsgi.py:353 +msgid "Caught keyboard interrupt. Exiting." 
+msgstr "" + +#: glance/common/wsgi.py:432 +#, python-format +msgid "Child %d exiting normally" +msgstr "" + +#: glance/common/wsgi.py:437 +#, python-format +msgid "Started child %s" +msgstr "" + +#: glance/common/wsgi.py:466 +msgid "Starting single process server" +msgstr "" + +#: glance/common/artifacts/loader.py:131 glance/common/artifacts/loader.py:155 +#, python-format +msgid "Artifact %s has been successfully loaded" +msgstr "" + +#: glance/common/scripts/__init__.py:32 +#, python-format +msgid "Loading known task scripts for task_id %(task_id)s of type %(task_type)s" +msgstr "" + +#: glance/common/scripts/image_import/main.py:41 +#, python-format +msgid "Task %(task_id)s beginning import execution." +msgstr "" + +#: glance/common/scripts/image_import/main.py:152 +#, python-format +msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" +msgstr "" + +#: glance/common/scripts/image_import/main.py:161 +#, python-format +msgid "Task %(task_id)s: Could not import image file %(image_data)s" +msgstr "" + +#: glance/db/simple/api.py:62 +#, python-format +msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" +msgstr "" + +#: glance/db/simple/api.py:68 +#, python-format +msgid "Returning %(funcname)s: %(output)s" +msgstr "" + +#: glance/db/simple/api.py:2002 +#, python-format +msgid "Could not find artifact %s" +msgstr "" + +#: glance/db/simple/api.py:2006 +msgid "Unable to get deleted image" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:152 +#, python-format +msgid "Table %s has been cleared" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:223 +#, python-format +msgid "Overwriting namespace %s" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:239 +#, python-format +msgid "Skipping namespace %s. It already exists in the database." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:330 +#, python-format +msgid "File %s loaded to database." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:332 +msgid "Metadata loading finished" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:441 +#, python-format +msgid "Namespace %(namespace)s saved in %(file)s" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:101 +#, python-format +msgid "creating table %(table)s" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:107 +#, python-format +msgid "dropping table %(table)s" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:72 +msgid "" +"'metadata_encryption_key' was not specified in the config file or a " +"config file was not specified. This means that this migration is a NOOP." +msgstr "" + +#: glance/domain/__init__.py:406 +#, python-format +msgid "Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" +msgstr "" + +#: glance/image_cache/__init__.py:71 +#, python-format +msgid "Image cache loaded driver '%s'." +msgstr "" + +#: glance/image_cache/__init__.py:81 glance/image_cache/__init__.py:100 +msgid "Defaulting to SQLite driver." +msgstr "" + +#: glance/image_cache/prefetcher.py:85 +#, python-format +msgid "Successfully cached all %d images" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:414 +#: glance/image_cache/drivers/xattr.py:343 +#, python-format +msgid "Not queueing image '%s'. Already cached." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:419 +#: glance/image_cache/drivers/xattr.py:348 +#, python-format +msgid "Not queueing image '%s'. 
Already being written to cache" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:425 +#: glance/image_cache/drivers/xattr.py:354 +#, python-format +msgid "Not queueing image '%s'. Already queued." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:443 +#, python-format +msgid "Removed invalid cache file %s" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:457 +#, python-format +msgid "Removed stalled cache file %s" +msgstr "" + +#: glance/image_cache/drivers/xattr.py:400 +#, python-format +msgid "Reaped %(reaped)s %(entry_type)s cache entries" +msgstr "" + +#: glance/openstack/common/eventlet_backdoor.py:146 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "" + +#: glance/openstack/common/service.py:173 +#, python-format +msgid "Caught %s, exiting" +msgstr "" + +#: glance/openstack/common/service.py:227 +msgid "Parent process has died unexpectedly, exiting" +msgstr "" + +#: glance/openstack/common/service.py:258 +#, python-format +msgid "Child caught %s, exiting" +msgstr "" + +#: glance/openstack/common/service.py:297 +msgid "Forking too fast, sleeping" +msgstr "" + +#: glance/openstack/common/service.py:316 +#, python-format +msgid "Started child %d" +msgstr "" + +#: glance/openstack/common/service.py:343 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "" + +#: glance/openstack/common/service.py:347 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "" + +#: glance/openstack/common/service.py:382 +#, python-format +msgid "Caught %s, stopping children" +msgstr "" + +#: glance/openstack/common/service.py:391 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: glance/openstack/common/service.py:407 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "" + +#: glance/quota/__init__.py:330 +#, python-format +msgid "Cleaning up %s after exceeding the quota." 
+msgstr "" + +#: glance/registry/api/v1/images.py:343 glance/registry/api/v1/images.py:386 +#: glance/registry/api/v1/images.py:491 +#, python-format +msgid "Image %(id)s not found" +msgstr "" + +#: glance/registry/api/v1/images.py:349 glance/registry/api/v1/images.py:381 +#: glance/registry/api/v1/images.py:503 +#, python-format +msgid "Access denied to image %(id)s but returning 'not found'" +msgstr "" + +#: glance/registry/api/v1/images.py:371 +#, python-format +msgid "Successfully deleted image %(id)s" +msgstr "" + +#: glance/registry/api/v1/images.py:375 +#, python-format +msgid "Delete denied for public image %(id)s" +msgstr "" + +#: glance/registry/api/v1/images.py:415 +#, python-format +msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" +msgstr "" + +#: glance/registry/api/v1/images.py:428 +#, python-format +msgid "Successfully created image %(id)s" +msgstr "" + +#: glance/registry/api/v1/images.py:482 +#, python-format +msgid "Updating metadata for image %(id)s" +msgstr "" + +#: glance/registry/api/v1/images.py:497 +#, python-format +msgid "Update denied for public image %(id)s" +msgstr "" + +#: glance/registry/api/v1/members.py:198 +#, python-format +msgid "Successfully updated memberships for image %(id)s" +msgstr "" + +#: glance/registry/api/v1/members.py:271 +#, python-format +msgid "Successfully updated a membership for image %(id)s" +msgstr "" + +#: glance/registry/api/v1/members.py:320 +#, python-format +msgid "Successfully deleted a membership from image %(id)s" +msgstr "" + diff --git a/code/daisy/daisy/locale/glance-log-warning.pot b/code/daisy/daisy/locale/glance-log-warning.pot new file mode 100755 index 00000000..982f66fa --- /dev/null +++ b/code/daisy/daisy/locale/glance-log-warning.pot @@ -0,0 +1,308 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# FIRST AUTHOR , 2015. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: glance 2015.1.dev42\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-03 06:02+0000\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: glance/scrubber.py:218 +#, python-format +msgid "Failed to find image to delete: %s" +msgstr "" + +#: glance/scrubber.py:258 +#, python-format +msgid "%s directory does not exist." +msgstr "" + +#: glance/scrubber.py:583 +#, python-format +msgid "Unable to delete URI from image %s." +msgstr "" + +#: glance/api/common.py:171 +#, python-format +msgid "" +"User %(user)s attempted to upload an image of unknown size that will " +"exceed the quota. %(remaining)d bytes remaining." +msgstr "" + +#: glance/api/common.py:180 +#, python-format +msgid "" +"User %(user)s attempted to upload an image of size %(size)d that will " +"exceed the quota. %(remaining)d bytes remaining." +msgstr "" + +#: glance/api/middleware/version_negotiation.py:67 +msgid "Unknown version. Returning version choices." +msgstr "" + +#: glance/api/v1/images.py:704 +#, python-format +msgid "" +"Failed to activate image %s in registry. About to delete image bits from " +"store and update status to 'killed'." +msgstr "" + +#: glance/api/v2/images.py:945 +#, python-format +msgid "" +"Could not find schema properties file %s. 
Continuing without custom " +"properties" +msgstr "" + +#: glance/api/v2/tasks.py:72 +#, python-format +msgid "Forbidden to create task. Reason: %(reason)s" +msgstr "" + +#: glance/api/v2/tasks.py:110 +#, python-format +msgid "Failed to find task %(task_id)s. Reason: %(reason)s" +msgstr "" + +#: glance/api/v2/tasks.py:115 +#, python-format +msgid "Forbidden to get task %(task_id)s. Reason: %(reason)s" +msgstr "" + +#: glance/async/utils.py:62 +#, python-format +msgid "An optional task has failed, the failure was: %s" +msgstr "" + +#: glance/async/flows/convert.py:67 +#, python-format +msgid "" +"The conversion format is None, please add a value for it in the config " +"file for this task to work: %s" +msgstr "" + +#: glance/cmd/replicator.py:607 +#, python-format +msgid "" +"%(image_id)s: field %(key)s differs (source is %(master_value)s, " +"destination is %(slave_value)s)" +msgstr "" + +#: glance/cmd/replicator.py:620 +#, python-format +msgid "Image %s entirely missing from the destination" +msgstr "" + +#: glance/common/store_utils.py:65 +#, python-format +msgid "Failed to delete image %s in store from URI" +msgstr "" + +#: glance/common/wsgi.py:326 +#, python-format +msgid "Unrecognised child %s" +msgstr "" + +#: glance/common/artifacts/loader.py:125 +#, python-format +msgid "Can't load artifact %s: load disabled in config" +msgstr "" + +#: glance/common/artifacts/loader.py:151 +#, python-format +msgid "Can't load artifact %s: not in available_plugins list" +msgstr "" + +#: glance/common/scripts/image_import/main.py:158 +#, python-format +msgid "Task %(task_id)s failed with exception %(error)s" +msgstr "" + +#: glance/db/simple/api.py:387 +#, python-format +msgid "Could not find image %s" +msgstr "" + +#: glance/db/simple/api.py:392 +msgid "Unable to get deleted image" +msgstr "" + +#: glance/db/simple/api.py:396 +msgid "Unable to get unowned image" +msgstr "" + +#: glance/db/simple/api.py:909 +#, python-format +msgid "Could not find task %s" +msgstr "" + +#: glance/db/simple/api.py:914 +#, python-format +msgid "Unable to get deleted task %s" +msgstr "" + +#: glance/db/simple/api.py:1075 +#, python-format +msgid "Could not find task info %s" +msgstr "" + +#: glance/db/sqlalchemy/api.py:77 +msgid "Deadlock detected. Retrying..." +msgstr "" + +#: glance/db/sqlalchemy/api.py:117 +msgid "Attempted to modify image user did not own." +msgstr "" + +#: glance/db/sqlalchemy/api.py:332 +msgid "Id not in sort_keys; is sort_keys unique?" 
+msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:135 +msgid "Artifact with the specified type, name and version already exists" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:240 +#, python-format +msgid "Artifact with id=%s not found" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:243 +#, python-format +msgid "Artifact with id=%s is not accessible" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:423 +#, python-format +msgid "Show level %s is not supported in this operation" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:655 +#, python-format +msgid "" +"Artifact with the specified type, name and versions already has the " +"direct dependency=%s" +msgstr "" + +#: glance/db/sqlalchemy/artifacts.py:685 +#, python-format +msgid "" +"Artifact with the specified type, name and version already has the direct" +" dependency=%d" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:340 glance/db/sqlalchemy/metadata.py:348 +#: glance/db/sqlalchemy/metadata.py:357 +#, python-format +msgid "Duplicate entry for values: %s" +msgstr "" + +#: glance/db/sqlalchemy/metadef_api/tag.py:37 +#, python-format +msgid "Metadata tag not found for id %s" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:92 +#, python-format +msgid "Failed to decrypt location value for image %(image_id)s" +msgstr "" + +#: glance/domain/__init__.py:505 +msgid "The `eventlet` executor has been deprecated. Use `taskflow` instead." +msgstr "" + +#: glance/image_cache/__init__.py:74 +#, python-format +msgid "" +"Image cache driver '%(driver_name)s' failed to load. Got error: " +"'%(import_err)s." +msgstr "" + +#: glance/image_cache/__init__.py:95 +#, python-format +msgid "" +"Image cache driver '%(driver_module)s' failed to configure. Got error: " +"'%(config_err)s" +msgstr "" + +#: glance/image_cache/prefetcher.py:48 +#, python-format +msgid "Image '%s' is not active. Not caching." +msgstr "" + +#: glance/image_cache/prefetcher.py:53 +#, python-format +msgid "No metadata found for image '%s'" +msgstr "" + +#: glance/image_cache/prefetcher.py:81 +msgid "Failed to successfully cache all images in queue." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:333 +#, python-format +msgid "" +"Fetch of cache file failed (%(e)s), rolling back by moving " +"'%(incomplete_path)s' to '%(invalid_path)s'" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:459 +#, python-format +msgid "Failed to delete file %(path)s. Got error: %(e)s" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:496 +#: glance/image_cache/drivers/xattr.py:447 +#, python-format +msgid "Cached image file '%s' doesn't exist, unable to delete" +msgstr "" + +#: glance/openstack/common/loopingcall.py:87 +#, python-format +msgid "task %(func_name)r run outlasted interval by %(delay).2f sec" +msgstr "" + +#: glance/openstack/common/service.py:351 +#, python-format +msgid "pid %d not in child list" +msgstr "" + +#: glance/registry/api/v1/images.py:126 +#, python-format +msgid "Invalid marker. Image %(id)s could not be found." 
+msgstr "" + +#: glance/registry/api/v1/images.py:131 glance/registry/api/v1/members.py:76 +#: glance/registry/api/v1/members.py:111 glance/registry/api/v1/members.py:229 +#: glance/registry/api/v1/members.py:293 +#, python-format +msgid "Access denied to image %(id)s but returning 'not found'" +msgstr "" + +#: glance/registry/api/v1/members.py:118 glance/registry/api/v1/members.py:236 +#: glance/registry/api/v1/members.py:300 +#, python-format +msgid "User lacks permission to share image %(id)s" +msgstr "" + +#: glance/registry/api/v1/members.py:129 glance/registry/api/v1/members.py:146 +#: glance/registry/api/v1/members.py:249 +#, python-format +msgid "Invalid membership association specified for image %(id)s" +msgstr "" + +#: glance/registry/api/v1/members.py:339 +#, python-format +msgid "Member %(id)s not found" +msgstr "" + diff --git a/code/daisy/daisy/locale/glance.pot b/code/daisy/daisy/locale/glance.pot new file mode 100755 index 00000000..aa6f6674 --- /dev/null +++ b/code/daisy/daisy/locale/glance.pot @@ -0,0 +1,3116 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# FIRST AUTHOR , 2015. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: glance 2015.1.dev1.ge3bed85\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-20 11:09+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" + +#: glance/location.py:87 +msgid "Invalid location" +msgstr "" + +#: glance/location.py:307 +msgid "Invalid locations" +msgstr "" + +#: glance/location.py:313 +#, python-format +msgid "Original locations is not empty: %s" +msgstr "" + +#: glance/location.py:386 glance/common/exception.py:361 +msgid "No image data could be found" +msgstr "" + +#: glance/location.py:398 +#, python-format +msgid "Get image %(id)s data failed: %(err)s." +msgstr "" + +#: glance/notifier.py:380 glance/api/v1/upload_utils.py:222 +#: glance/api/v2/image_data.py:122 +#, python-format +msgid "Image storage media is full: %s" +msgstr "" + +#: glance/notifier.py:385 glance/api/v1/upload_utils.py:232 +#: glance/api/v2/image_data.py:146 +#, python-format +msgid "Insufficient permissions on image storage media: %s" +msgstr "" + +#: glance/notifier.py:390 +#, python-format +msgid "Cannot save data for image %(image_id)s: %(error)s" +msgstr "" + +#: glance/notifier.py:397 +#, python-format +msgid "Unable to upload duplicate image data for image%(image_id)s: %(error)s" +msgstr "" + +#: glance/notifier.py:404 +#, python-format +msgid "Not allowed to upload image data for image %(image_id)s: %(error)s" +msgstr "" + +#: glance/notifier.py:410 +#, python-format +msgid "" +"Image %(image_id)s could not be found after upload. 
The image may have " +"been deleted during the upload: %(error)s" +msgstr "" + +#: glance/notifier.py:418 +#, python-format +msgid "" +"Failed to upload image data for image %(image_id)s due to HTTP error: " +"%(error)s" +msgstr "" + +#: glance/notifier.py:425 +#, python-format +msgid "" +"Failed to upload image data for image %(image_id)s due to internal error:" +" %(error)s" +msgstr "" + +#: glance/schema.py:65 +#, python-format +msgid "custom properties (%(props)s) conflict with base properties" +msgstr "" + +#: glance/scrubber.py:45 +msgid "" +"Directory that the scrubber will use to track information about what to " +"delete. Make sure this is set in daisy-api.conf and glance-" +"scrubber.conf." +msgstr "" + +#: glance/scrubber.py:50 +msgid "The amount of time in seconds to delay before performing a delete." +msgstr "" + +#: glance/scrubber.py:53 +msgid "" +"A boolean that determines if the scrubber should clean up the files it " +"uses for taking data. Only one server in your deployment should be " +"designated the cleanup host." +msgstr "" + +#: glance/scrubber.py:58 +msgid "Turn on/off delayed delete." +msgstr "" + +#: glance/scrubber.py:60 +msgid "" +"Items must have a modified time that is older than this value in order to" +" be candidates for cleanup." +msgstr "" + +#: glance/scrubber.py:66 +msgid "Loop time between checking for new items to schedule for delete." +msgstr "" + +#: glance/scrubber.py:74 +msgid "" +"Run as a long-running process. When not specified (the default) run the " +"scrub operation once and then exits. When specified do not exit and run " +"scrub on wakeup_time interval as specified in the config." +msgstr "" + +#: glance/scrubber.py:594 +#, python-format +msgid "%s file is not exists." +msgstr "" + +#: glance/scrubber.py:599 +#, python-format +msgid "%s file contains conflicting cleanup timestamp." +msgstr "" + +#: glance/api/authorization.py:131 glance/api/authorization.py:142 +#, python-format +msgid "You cannot get image member for %s" +msgstr "" + +#: glance/api/authorization.py:150 +#, python-format +msgid "You cannot delete image member for %s" +msgstr "" + +#: glance/api/authorization.py:159 +#, python-format +msgid "You cannot add image member for %s" +msgstr "" + +#: glance/api/authorization.py:168 +#, python-format +msgid "You cannot update image member %s" +msgstr "" + +#: glance/api/authorization.py:187 +#, python-format +msgid "You are not permitted to create images owned by '%s'." +msgstr "" + +#: glance/api/authorization.py:205 +msgid "You are not permitted to create image members for the image." +msgstr "" + +#: glance/api/authorization.py:210 glance/api/authorization.py:401 +msgid "Public images do not have members." +msgstr "" + +#: glance/api/authorization.py:226 +#, python-format +msgid "You are not permitted to modify '%(attr)s' on this %(resource)s." +msgstr "" + +#: glance/api/authorization.py:236 +msgid "You are not permitted to modify locations for this image." +msgstr "" + +#: glance/api/authorization.py:260 +#, python-format +msgid "You are not permitted to modify '%s' on this image." +msgstr "" + +#: glance/api/authorization.py:264 +msgid "You are not permitted to modify this image." +msgstr "" + +#: glance/api/authorization.py:277 +msgid "You are not permitted to modify tags on this image." +msgstr "" + +#: glance/api/authorization.py:318 +msgid "You are not permitted to delete this image." +msgstr "" + +#: glance/api/authorization.py:329 +msgid "You are not permitted to upload data for this image." 
+msgstr "" + +#: glance/api/authorization.py:366 glance/api/authorization.py:370 +#: glance/api/authorization.py:374 +msgid "You are not permitted to set status on this task." +msgstr "" + +#: glance/api/authorization.py:433 +#, python-format +msgid "You are not permitted to create this task with owner as: %s" +msgstr "" + +#: glance/api/authorization.py:498 +msgid "You are not permitted to delete this namespace." +msgstr "" + +#: glance/api/authorization.py:502 +msgid "You are not permitted to update this namespace." +msgstr "" + +#: glance/api/authorization.py:528 +#, python-format +msgid "You are not permitted to create namespace owned by '%s'" +msgstr "" + +#: glance/api/authorization.py:587 +msgid "You are not permitted to delete this object." +msgstr "" + +#: glance/api/authorization.py:591 +msgid "You are not permitted to update this object." +msgstr "" + +#: glance/api/authorization.py:616 +#, python-format +msgid "You are not permitted to create object owned by '%s'" +msgstr "" + +#: glance/api/authorization.py:679 +msgid "You are not permitted to delete this meta_resource_type." +msgstr "" + +#: glance/api/authorization.py:705 +#, python-format +msgid "You are not permitted to create resource_type owned by '%s'" +msgstr "" + +#: glance/api/authorization.py:763 +msgid "You are not permitted to delete this property." +msgstr "" + +#: glance/api/authorization.py:767 +msgid "You are not permitted to update this property." +msgstr "" + +#: glance/api/authorization.py:792 +#, python-format +msgid "You are not permitted to create property owned by '%s'" +msgstr "" + +#: glance/api/authorization.py:850 +msgid "You are not permitted to delete this tag." +msgstr "" + +#: glance/api/authorization.py:854 +msgid "You are not permitted to update this tag." +msgstr "" + +#: glance/api/authorization.py:875 +msgid "Owner must be specified to create a tag." +msgstr "" + +#: glance/api/authorization.py:878 +#, python-format +msgid "You are not permitted to create a tag in the namespace owned by '%s'" +msgstr "" + +#: glance/api/common.py:69 +#, python-format +msgid "Corrupt image download for image %(image_id)s" +msgstr "" + +#: glance/api/policy.py:163 glance/quota/__init__.py:355 +#, python-format +msgid "Invalid locations: %s" +msgstr "" + +#: glance/api/versions.py:29 +msgid "" +"Public url to use for versions endpoint. The default is None, which will " +"use the request's host_url attribute to populate the URL base. If Glance " +"is operating behind a proxy, you will want to change this to represent " +"the proxy's URL." +msgstr "" + +#: glance/api/middleware/context.py:30 +msgid "" +"When true, this option sets the owner of an image to be the tenant. " +"Otherwise, the owner of the image will be the authenticated user issuing" +" the request." +msgstr "" + +#: glance/api/middleware/context.py:35 +msgid "Role used to identify an authenticated user as administrator." +msgstr "" + +#: glance/api/middleware/context.py:38 +msgid "" +"Allow unauthenticated users to access the API with read-only privileges. " +"This only applies when using ContextMiddleware." +msgstr "" + +#: glance/api/middleware/context.py:54 +msgid "Unable to retrieve request id from context" +msgstr "" + +#: glance/api/middleware/context.py:111 +msgid "Invalid service catalog json." 
+msgstr "" + +#: glance/api/middleware/version_negotiation.py:44 +#, python-format +msgid "Determining version of request: %(method)s %(path)s Accept: %(accept)s" +msgstr "" + +#: glance/api/v1/controller.py:64 +#, python-format +msgid "Image %s is deactivated" +msgstr "" + +#: glance/api/v1/controller.py:70 +#, python-format +msgid "Image %s is not active" +msgstr "" + +#: glance/api/v1/controller.py:92 +#, python-format +msgid "Store for image_id not found: %s" +msgstr "" + +#: glance/api/v1/images.py:76 +#, python-format +msgid "Invalid disk format '%s' for image." +msgstr "" + +#: glance/api/v1/images.py:81 +#, python-format +msgid "Invalid container format '%s' for image." +msgstr "" + +#: glance/api/v1/images.py:86 +#, python-format +msgid "Image name too long: %d" +msgstr "" + +#: glance/api/v1/images.py:97 +msgid "" +"Invalid mix of disk and container formats. When setting a disk or " +"container format to one of 'aki', 'ari', or 'ami', the container and disk" +" formats must match." +msgstr "" + +#: glance/api/v1/images.py:176 +#, python-format +msgid "" +"The limit has been exceeded on the number of allowed image properties. " +"Attempted: %(num)s, Maximum: %(quota)s" +msgstr "" + +#: glance/api/v1/images.py:198 glance/api/v1/images.py:242 +#: glance/api/v1/images.py:276 +#, python-format +msgid "Property '%s' is protected" +msgstr "" + +#: glance/api/v1/images.py:339 +msgid "This operation is currently not permitted on Glance images details." +msgstr "" + +#: glance/api/v1/images.py:390 +#, python-format +msgid "Bad value passed to filter %(filter)s got %(val)s" +msgstr "" + +#: glance/api/v1/images.py:426 +#, python-format +msgid "External sources are not supported: '%s'" +msgstr "" + +#: glance/api/v1/images.py:522 +#, python-format +msgid "Required store %s is invalid" +msgstr "" + +#: glance/api/v1/images.py:534 +#, python-format +msgid "Invalid location %s" +msgstr "" + +#: glance/api/v1/images.py:557 +#, python-format +msgid "An image with identifier %s already exists" +msgstr "" + +#: glance/api/v1/images.py:564 +#, python-format +msgid "Failed to reserve image. Got error: %s" +msgstr "" + +#: glance/api/v1/images.py:571 +msgid "Forbidden to reserve image." +msgstr "" + +#: glance/api/v1/images.py:615 +msgid "Content-Type must be application/octet-stream" +msgstr "" + +#: glance/api/v1/images.py:671 +#, python-format +msgid "Failed to activate image. Got error: %s" +msgstr "" + +#: glance/api/v1/images.py:742 +msgid "It's invalid to provide multiple image sources." +msgstr "" + +#: glance/api/v1/images.py:775 +#, python-format +msgid "" +"Provided image size must match the stored image size. (provided size: " +"%(ps)d, stored size: %(ss)d)" +msgstr "" + +#: glance/api/v1/images.py:899 +msgid "Forbidden to update deleted image." +msgstr "" + +#: glance/api/v1/images.py:910 +#, python-format +msgid "Forbidden to modify '%s' of active image." +msgstr "" + +#: glance/api/v1/images.py:928 +msgid "Cannot upload to an unqueued image" +msgstr "" + +#: glance/api/v1/images.py:945 glance/common/scripts/utils.py:91 +#, python-format +msgid "Invalid location: %s" +msgstr "" + +#: glance/api/v1/images.py:952 +msgid "Attempted to update Location field for an image not in queued status." +msgstr "" + +#: glance/api/v1/images.py:992 glance/registry/api/v1/images.py:486 +#, python-format +msgid "Failed to update image metadata. 
Got error: %s" +msgstr "" + +#: glance/api/v1/images.py:999 +#, python-format +msgid "Failed to find image to update: %s" +msgstr "" + +#: glance/api/v1/images.py:1006 +#, python-format +msgid "Forbidden to update image: %s" +msgstr "" + +#: glance/api/v1/images.py:1014 +msgid "Image operation conflicts" +msgstr "" + +#: glance/api/v1/images.py:1045 +msgid "Image is protected" +msgstr "" + +#: glance/api/v1/images.py:1052 +#, python-format +msgid "Forbidden to delete a %s image." +msgstr "" + +#: glance/api/v1/images.py:1059 glance/api/v2/image_members.py:78 +#: glance/api/v2/image_members.py:122 glance/api/v2/image_members.py:160 +#: glance/api/v2/image_members.py:189 glance/api/v2/image_members.py:207 +#: glance/api/v2/image_tags.py:51 glance/api/v2/image_tags.py:78 +#, python-format +msgid "Image %s not found." +msgstr "" + +#: glance/api/v1/images.py:1094 +#, python-format +msgid "Failed to find image to delete: %s" +msgstr "" + +#: glance/api/v1/images.py:1101 +#, python-format +msgid "Forbidden to delete image: %s" +msgstr "" + +#: glance/api/v1/images.py:1108 glance/api/v2/images.py:230 +#, python-format +msgid "Image %(id)s could not be deleted because it is in use: %(exc)s" +msgstr "" + +#: glance/api/v1/images.py:1131 +#, python-format +msgid "Store for scheme %s not found" +msgstr "" + +#: glance/api/v1/images.py:1170 glance/api/v1/upload_utils.py:242 +#, python-format +msgid "Denying attempt to upload image larger than %d bytes." +msgstr "" + +#: glance/api/v1/members.py:42 glance/registry/api/v1/members.py:36 +msgid "No authenticated user" +msgstr "" + +#: glance/api/v1/members.py:54 +#, python-format +msgid "Image with identifier %s has been deleted." +msgstr "" + +#: glance/api/v1/members.py:77 +#, python-format +msgid "Image with identifier %s not found" +msgstr "" + +#: glance/api/v1/members.py:81 +msgid "Unauthorized image access" +msgstr "" + +#: glance/api/v1/members.py:118 glance/common/exception.py:303 +#, python-format +msgid "" +"The limit has been exceeded on the number of allowed image members for " +"this image. Attempted: %(attempted)s, Maximum: %(maximum)s" +msgstr "" + +#: glance/api/v1/upload_utils.py:134 +#, python-format +msgid "" +"Supplied %(attr)s (%(supplied)s) and %(attr)s generated from uploaded " +"image (%(actual)s) did not match. Setting image status to 'killed'." +msgstr "" + +#: glance/api/v1/upload_utils.py:191 glance/api/v2/image_data.py:100 +msgid "Error in store configuration. Adding images to store is disabled." +msgstr "" + +#: glance/api/v1/upload_utils.py:200 +#, python-format +msgid "Attempt to upload duplicate image: %s" +msgstr "" + +#: glance/api/v1/upload_utils.py:212 +#, python-format +msgid "Forbidden upload attempt: %s" +msgstr "" + +#: glance/api/v1/upload_utils.py:252 glance/api/v2/images.py:155 +#, python-format +msgid "Denying attempt to upload image because it exceeds the quota: %s" +msgstr "" + +#: glance/api/v1/upload_utils.py:273 +msgid "Client disconnected before sending all data to backend" +msgstr "" + +#: glance/api/v1/upload_utils.py:281 +#, python-format +msgid "Failed to upload image %s" +msgstr "" + +#: glance/api/v2/image_data.py:77 +#, python-format +msgid "" +"Image %s could not be found after upload. The image may have been deleted" +" during the upload, cleaning up the chunks uploaded." 
+msgstr "" + +#: glance/api/v2/image_data.py:130 +#, python-format +msgid "Image exceeds the storage quota: %s" +msgstr "" + +#: glance/api/v2/image_data.py:138 +#, python-format +msgid "The incoming image is too large: %s" +msgstr "" + +#: glance/api/v2/image_data.py:173 +msgid "" +"The requested image has been deactivated. Image data download is " +"forbidden." +msgstr "" + +#: glance/api/v2/image_members.py:82 +#, python-format +msgid "Not allowed to create members for image %s." +msgstr "" + +#: glance/api/v2/image_members.py:86 +#, python-format +msgid "Member %(member_id)s is duplicated for image %(image_id)s" +msgstr "" + +#: glance/api/v2/image_members.py:92 +#, python-format +msgid "Image member limit exceeded for image %(id)s: %(e)s:" +msgstr "" + +#: glance/api/v2/image_members.py:126 +#, python-format +msgid "Not allowed to update members for image %s." +msgstr "" + +#: glance/api/v2/image_members.py:130 +#, python-format +msgid "Incorrect request: %s" +msgstr "" + +#: glance/api/v2/image_members.py:164 +#, python-format +msgid "Not allowed to list members for image %s." +msgstr "" + +#: glance/api/v2/image_members.py:211 +#, python-format +msgid "Not allowed to delete members for image %s." +msgstr "" + +#: glance/api/v2/image_members.py:224 glance/api/v2/images.py:334 +#: glance/api/v2/metadef_namespaces.py:396 glance/api/v2/metadef_objects.py:228 +#: glance/api/v2/metadef_properties.py:176 +#: glance/api/v2/metadef_resource_types.py:147 +#: glance/api/v2/metadef_tags.py:260 glance/api/v2/tasks.py:137 +#: glance/search/api/v0_1/search.py:113 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:148 +msgid "Body expected in request." +msgstr "" + +#: glance/api/v2/image_members.py:235 +msgid "Member to be added not specified" +msgstr "" + +#: glance/api/v2/image_members.py:238 +msgid "Member can't be empty" +msgstr "" + +#: glance/api/v2/image_members.py:247 +msgid "Status not specified" +msgstr "" + +#: glance/api/v2/image_members.py:302 +msgid "An identifier for the image member (tenantId)" +msgstr "" + +#: glance/api/v2/image_members.py:306 glance/api/v2/images.py:787 +msgid "An identifier for the image" +msgstr "" + +#: glance/api/v2/image_members.py:312 +msgid "Date and time of image member creation" +msgstr "" + +#: glance/api/v2/image_members.py:319 +msgid "Date and time of last modification of image member" +msgstr "" + +#: glance/api/v2/image_members.py:324 +msgid "The status of this image member" +msgstr "" + +#: glance/api/v2/image_tags.py:55 +#, python-format +msgid "Not allowed to update tags for image %s." +msgstr "" + +#: glance/api/v2/image_tags.py:59 +#, python-format +msgid "Could not update image: %s" +msgstr "" + +#: glance/api/v2/image_tags.py:63 +#, python-format +msgid "Image tag limit exceeded for image %(id)s: %(e)s:" +msgstr "" + +#: glance/api/v2/image_tags.py:82 +#, python-format +msgid "Not allowed to delete tags for image %s." +msgstr "" + +#: glance/api/v2/images.py:179 glance/api/v2/images.py:212 +#, python-format +msgid "Property %s does not exist." +msgstr "" + +#: glance/api/v2/images.py:193 +#, python-format +msgid "Property %s already present." +msgstr "" + +#: glance/api/v2/images.py:207 +#, python-format +msgid "Property %s may not be removed." +msgstr "" + +#: glance/api/v2/images.py:225 +#, python-format +msgid "Failed to find image %(image_id)s to delete" +msgstr "" + +#: glance/api/v2/images.py:251 +msgid "Cannot replace locations from a non-empty list to a non-empty list." 
+msgstr "" + +#: glance/api/v2/images.py:275 +msgid "Invalid position for adding a location." +msgstr "" + +#: glance/api/v2/images.py:291 +msgid "Invalid position for removing a location." +msgstr "" + +#: glance/api/v2/images.py:342 glance/api/v2/images.py:448 +#: glance/api/v2/metadef_namespaces.py:404 glance/api/v2/metadef_objects.py:292 +#: glance/api/v2/metadef_properties.py:184 +#: glance/api/v2/metadef_resource_types.py:155 +#: glance/api/v2/metadef_tags.py:337 glance/search/api/v0_1/search.py:121 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:156 +#, python-format +msgid "Attribute '%s' is read-only." +msgstr "" + +#: glance/api/v2/images.py:373 glance/api/v2/images.py:395 +#, python-format +msgid "Unable to find '%s' in JSON Schema change" +msgstr "" + +#: glance/api/v2/images.py:381 +msgid "" +"Operation objects must contain only one member named \"add\", \"remove\"," +" or \"replace\"." +msgstr "" + +#: glance/api/v2/images.py:386 +msgid "" +"Operation objects must contain exactly one member named \"add\", " +"\"remove\", or \"replace\"." +msgstr "" + +#: glance/api/v2/images.py:423 +#, python-format +msgid "Pointer `%s` does not start with \"/\"." +msgstr "" + +#: glance/api/v2/images.py:426 +#, python-format +msgid "Pointer `%s` contains adjacent \"/\"." +msgstr "" + +#: glance/api/v2/images.py:429 +#, python-format +msgid "Pointer `%s` end with \"/\"." +msgstr "" + +#: glance/api/v2/images.py:432 +#, python-format +msgid "Pointer `%s` does not contains valid token." +msgstr "" + +#: glance/api/v2/images.py:435 +#, python-format +msgid "Pointer `%s` contains \"~\" not part of a recognized escape sequence." +msgstr "" + +#: glance/api/v2/images.py:441 +#, python-format +msgid "Operation \"%s\" requires a member named \"value\"." +msgstr "" + +#: glance/api/v2/images.py:451 +#, python-format +msgid "Attribute '%s' is reserved." +msgstr "" + +#: glance/api/v2/images.py:481 +#, python-format +msgid "Invalid JSON pointer for this resource: '/%s'" +msgstr "" + +#: glance/api/v2/images.py:493 +msgid "Unrecognized JSON Schema draft version" +msgstr "" + +#: glance/api/v2/images.py:515 +msgid "Request body must be a JSON array of operation objects." +msgstr "" + +#: glance/api/v2/images.py:520 +msgid "Operations must be JSON objects." +msgstr "" + +#: glance/api/v2/images.py:543 glance/api/v2/metadef_namespaces.py:451 +#: glance/api/v2/metadef_tags.py:284 glance/api/v2/tasks.py:173 +#: glance/registry/api/v1/images.py:267 glance/search/api/v0_1/search.py:165 +msgid "limit param must be an integer" +msgstr "" + +#: glance/api/v2/images.py:547 glance/api/v2/metadef_namespaces.py:455 +#: glance/api/v2/metadef_tags.py:288 glance/api/v2/tasks.py:177 +#: glance/registry/api/v1/images.py:270 glance/search/api/v0_1/search.py:169 +msgid "limit param must be positive" +msgstr "" + +#: glance/api/v2/images.py:554 +#, python-format +msgid "" +"Invalid sort key: %(sort_key)s. It must be one of the following: " +"%(available)s." 
+msgstr "" + +#: glance/api/v2/images.py:564 glance/api/v2/metadef_namespaces.py:433 +#: glance/api/v2/metadef_objects.py:274 glance/api/v2/metadef_tags.py:266 +#: glance/api/v2/tasks.py:143 +#, python-format +msgid "Invalid sort direction: %s" +msgstr "" + +#: glance/api/v2/images.py:571 +#, python-format +msgid "Invalid status: %s" +msgstr "" + +#: glance/api/v2/images.py:580 glance/api/v2/metadef_namespaces.py:442 +#: glance/api/v2/metadef_objects.py:283 glance/api/v2/metadef_tags.py:275 +#, python-format +msgid "Invalid visibility value: %s" +msgstr "" + +#: glance/api/v2/images.py:584 +msgid "The \"changes-since\" filter is no longer available on v2." +msgstr "" + +#: glance/api/v2/images.py:603 +msgid "Old and new sorting syntax cannot be combined" +msgstr "" + +#: glance/api/v2/images.py:634 +msgid "Number of sort dirs does not match the number of sort keys" +msgstr "" + +#: glance/api/v2/images.py:793 +msgid "Descriptive name for the image" +msgstr "" + +#: glance/api/v2/images.py:798 +msgid "Status of the image (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:804 +msgid "Scope of image accessibility" +msgstr "" + +#: glance/api/v2/images.py:809 +msgid "If true, image will not be deletable." +msgstr "" + +#: glance/api/v2/images.py:813 +msgid "md5 hash of image contents. (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:818 +msgid "Owner of the image" +msgstr "" + +#: glance/api/v2/images.py:823 +msgid "Size of image file in bytes (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:827 +msgid "Virtual size of image in bytes (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:831 +msgid "Format of the container" +msgstr "" + +#: glance/api/v2/images.py:836 +msgid "Format of the disk" +msgstr "" + +#: glance/api/v2/images.py:841 +msgid "Date and time of image registration (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:849 +msgid "Date and time of the last image modification (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:855 +msgid "List of strings related to the image" +msgstr "" + +#: glance/api/v2/images.py:863 +msgid "URL to access the image file kept in external store (READ-ONLY)" +msgstr "" + +#: glance/api/v2/images.py:868 +msgid "Amount of ram (in MB) required to boot image." +msgstr "" + +#: glance/api/v2/images.py:872 +msgid "Amount of disk space (in GB) required to boot image." +msgstr "" + +#: glance/api/v2/images.py:902 +msgid "A set of URLs to access the image file kept in external store" +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:664 +msgid "The unique namespace text." +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:669 +msgid "The user friendly name for the namespace. Used by UI if available." +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:675 +msgid "Provides a user friendly description of the namespace." +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:681 +msgid "Scope of namespace accessibility." +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:686 +msgid "If true, namespace will not be deletable." +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:690 +msgid "Owner of the namespace." 
+msgstr "" + +#: glance/api/v2/metadef_namespaces.py:695 +msgid "Date and time of namespace creation (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_namespaces.py:701 +msgid "Date and time of the last namespace modification (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_objects.py:187 +msgid "Date and time of object creation (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_objects.py:193 +msgid "Date and time of the last object modification (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_properties.py:95 +#, python-format +msgid "" +"Property %(property_name)s does not start with the expected resource type" +" association prefix of '%(prefix)s'." +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:128 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:129 +#, python-format +msgid "Failed to find resource type %(resourcetype)s to delete" +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:201 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:203 +msgid "" +"Resource type names should be aligned with Heat resource types whenever " +"possible: " +"http://docs.openstack.org/developer/heat/template_guide/openstack.html" +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:209 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:211 +msgid "" +"Specifies the prefix to use for the given resource type. Any properties " +"in the namespace should be prefixed with this prefix when being applied " +"to the specified resource type. Must include prefix separator (e.g. a " +"colon :)." +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:218 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:220 +msgid "" +"Some resource types allow more than one key / value pair per instance. " +"For example, Cinder allows user and image metadata on volumes. Only the " +"image properties metadata is evaluated by Nova (scheduling or drivers). " +"This property allows a namespace target to remove the ambiguity." +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:229 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:231 +msgid "Date and time of resource type association (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_resource_types.py:235 +#: glance/tests/functional/v2/test_metadef_resourcetypes.py:237 +msgid "" +"Date and time of the last resource type association modification (READ-" +"ONLY)" +msgstr "" + +#: glance/api/v2/metadef_tags.py:188 +msgid "Date and time of tag creation (READ-ONLY)" +msgstr "" + +#: glance/api/v2/metadef_tags.py:194 +msgid "Date and time of the last tag modification (READ-ONLY)" +msgstr "" + +#: glance/api/v2/tasks.py:123 +msgid "" +"This operation is currently not permitted on Glance Tasks. They are auto " +"deleted after reaching the time based on their expires_at property." 
+msgstr "" + +#: glance/api/v2/tasks.py:152 +#, python-format +msgid "Invalid status value: %s" +msgstr "" + +#: glance/api/v2/tasks.py:158 +#, python-format +msgid "Invalid type value: %s" +msgstr "" + +#: glance/api/v2/tasks.py:165 glance/registry/api/v1/images.py:279 +msgid "Invalid marker format" +msgstr "" + +#: glance/api/v2/tasks.py:186 +#, python-format +msgid "Task '%s' is required" +msgstr "" + +#: glance/api/v2/tasks.py:309 +msgid "An identifier for the task" +msgstr "" + +#: glance/api/v2/tasks.py:310 +msgid "" +"^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0" +"-9a-fA-F]){12}$" +msgstr "" + +#: glance/api/v2/tasks.py:315 +msgid "The type of task represented by this content" +msgstr "" + +#: glance/api/v2/tasks.py:322 +msgid "The current status of this task" +msgstr "" + +#: glance/api/v2/tasks.py:332 +msgid "The parameters required by task, JSON blob" +msgstr "" + +#: glance/api/v2/tasks.py:336 +msgid "The result of current task, JSON blob" +msgstr "" + +#: glance/api/v2/tasks.py:340 +msgid "An identifier for the owner of this task" +msgstr "" + +#: glance/api/v2/tasks.py:344 +msgid "" +"Human-readable informative message only included when appropriate " +"(usually on failure)" +msgstr "" + +#: glance/api/v2/tasks.py:349 +msgid "Datetime when this resource would be subject to removal" +msgstr "" + +#: glance/api/v2/tasks.py:354 +msgid "Datetime when this resource was created" +msgstr "" + +#: glance/api/v2/tasks.py:358 +msgid "Datetime when this resource was updated" +msgstr "" + +#: glance/async/taskflow_executor.py:42 +msgid "The mode in which the engine will run. Can be 'serial' or 'parallel'." +msgstr "" + +#: glance/async/taskflow_executor.py:46 +msgid "" +"The number of parallel activities executed at the same time by the " +"engine. The value can be greater than one when the engine mode is " +"'parallel'." +msgstr "" + +#: glance/async/flows/base_import.py:94 +#, python-format +msgid "" +"%(task_id)s of %(task_type)s not configured properly. Missing work dir: " +"%(work_dir)s" +msgstr "" + +#: glance/async/flows/base_import.py:125 +#, python-format +msgid "" +"%(task_id)s of %(task_type)s not configured properly. Could not load the " +"filesystem store" +msgstr "" + +#: glance/async/flows/convert.py:35 +msgid "" +"The format to which images will be automatically converted. Can be " +"'qcow2' or 'raw'." +msgstr "" + +#: glance/cmd/__init__.py:41 +#, python-format +msgid "" +"It appears that the eventlet module has been imported prior to setting " +"%s='yes'. It is currently necessary to disable eventlet.greendns if using" +" ipv6 since eventlet.greendns currently breaks with ipv6 addresses. " +"Please ensure that eventlet is not imported prior to this being set." +msgstr "" + +#: glance/cmd/control.py:106 +#, python-format +msgid "%(serv)s appears to already be running: %(pid)s" +msgstr "" + +#: glance/cmd/control.py:110 +#, python-format +msgid "Removing stale pid file %s" +msgstr "" + +#: glance/cmd/control.py:119 +msgid "Unable to increase file descriptor limit. Running as non-root?" +msgstr "" + +#: glance/cmd/control.py:166 +#, python-format +msgid "%(verb)sing %(serv)s with %(conf)s" +msgstr "" + +#: glance/cmd/control.py:169 +#, python-format +msgid "%(verb)sing %(serv)s" +msgstr "" + +#: glance/cmd/control.py:181 +#, python-format +msgid "unable to launch %(serv)s. Got error: %(e)s" +msgstr "" + +#: glance/cmd/control.py:212 +#, python-format +msgid "%(serv)s (pid %(pid)s) is running..." 
+msgstr "" + +#: glance/cmd/control.py:215 +#, python-format +msgid "%s is stopped" +msgstr "" + +#: glance/cmd/control.py:231 +#, python-format +msgid "" +"Unable to create pid file %(pid)s. Running as non-root?\n" +"Falling back to a temp file, you can stop %(service)s service using:\n" +" %(file)s %(server)s stop --pid-file %(fb)s" +msgstr "" + +#: glance/cmd/control.py:248 +#, python-format +msgid "Reload of %(serv)s not supported" +msgstr "" + +#: glance/cmd/control.py:256 +#, python-format +msgid "Server %(serv)s is stopped" +msgstr "" + +#: glance/cmd/control.py:261 +#, python-format +msgid "Reloading %(serv)s (pid %(pid)s) with signal(%(sig)s)" +msgstr "" + +#: glance/cmd/control.py:265 glance/cmd/control.py:287 +#, python-format +msgid "Process %d not running" +msgstr "" + +#: glance/cmd/control.py:283 +#, python-format +msgid "Stopping %(serv)s (pid %(pid)s) with signal(%(sig)s)" +msgstr "" + +#: glance/cmd/control.py:294 +#, python-format +msgid "Waited 15 seconds for pid %(pid)s (%(file)s) to die; giving up" +msgstr "" + +#: glance/cmd/control.py:297 +#, python-format +msgid "%s is already stopped" +msgstr "" + +#: glance/cmd/control.py:374 +#, python-format +msgid "Suppressed respawn as %(serv)s was %(rsn)s." +msgstr "" + +#: glance/cmd/replicator.py:107 +#, python-format +msgid "" +"The image %s is already present on the slave, but our check for it did " +"not find it. This indicates that we do not have permissions to see all " +"the images on the slave server." +msgstr "" + +#: glance/cmd/replicator.py:327 glance/cmd/replicator.py:359 +#: glance/cmd/replicator.py:431 glance/cmd/replicator.py:507 +#: glance/cmd/replicator.py:578 +msgid "Too few arguments." +msgstr "" + +#: glance/cmd/replicator.py:343 +#, python-format +msgid "Total size is %(size)d bytes across %(img_count)d images" +msgstr "" + +#: glance/cmd/replicator.py:692 +#, python-format +msgid "Unknown command: %s" +msgstr "" + +#: glance/common/auth.py:189 glance/common/auth.py:229 +#, python-format +msgid "Unexpected response: %s" +msgstr "" + +#: glance/common/auth.py:257 +#, python-format +msgid "Unknown auth strategy '%s'" +msgstr "" + +#: glance/common/auth.py:278 +#, python-format +msgid "Encountered service with no \"type\": %s" +msgstr "" + +#: glance/common/client.py:245 +msgid "" +"You have selected to use SSL in connecting, and you have supplied a cert," +" however you have failed to supply either a key_file parameter or set the" +" GLANCE_CLIENT_KEY_FILE environ variable" +msgstr "" + +#: glance/common/client.py:253 +msgid "" +"You have selected to use SSL in connecting, and you have supplied a key, " +"however you have failed to supply either a cert_file parameter or set the" +" GLANCE_CLIENT_CERT_FILE environ variable" +msgstr "" + +#: glance/common/client.py:262 +#, python-format +msgid "The key file you specified %s does not exist" +msgstr "" + +#: glance/common/client.py:269 +#, python-format +msgid "The cert file you specified %s does not exist" +msgstr "" + +#: glance/common/client.py:276 +#, python-format +msgid "The CA file you specified %s does not exist" +msgstr "" + +#: glance/common/client.py:400 +#, python-format +msgid "Constructed URL: %s" +msgstr "" + +#: glance/common/config.py:40 +msgid "" +"Partial name of a pipeline in your paste configuration file with the " +"service name removed. For example, if your paste section name is " +"[pipeline:daisy-api-keystone] use the value \"keystone\"" +msgstr "" + +#: glance/common/config.py:46 +msgid "Name of the paste configuration file." 
+msgstr "" + +#: glance/common/config.py:51 +msgid "Supported values for the 'container_format' image attribute" +msgstr "" + +#: glance/common/config.py:58 +msgid "Supported values for the 'disk_format' image attribute" +msgstr "" + +#: glance/common/config.py:66 +msgid "Time in hours for which a task lives after, either succeeding or failing" +msgstr "" + +#: glance/common/config.py:72 +msgid "Specifies which task executor to be used to run the task scripts." +msgstr "" + +#: glance/common/config.py:76 +msgid "" +"Work dir for asynchronous task operations. The directory set here will be" +" used to operate over images - normally before they are imported in the " +"destination store. When providing work dir, make sure enough space is " +"provided for concurrent tasks to run efficiently without running out of " +"space. A rough estimation can be done by multiplying the number of " +"`max_workers` - or the N of workers running - by an average image size " +"(e.g 500MB). The image size estimation should be done based on the " +"average size in your deployment. Note that depending on the tasks running" +" you may need to multiply this number by some factor depending on what " +"the task does. For example, you may want to double the available size if " +"image conversion is enabled. All this being said, remember these are just" +" estimations and you should do them based on the worst case scenario and " +"be prepared to act in case they were wrong." +msgstr "" + +#: glance/common/config.py:97 +msgid "" +"Whether to allow users to specify image properties beyond what the image " +"schema provides" +msgstr "" + +#: glance/common/config.py:100 +msgid "" +"Maximum number of image members per image. Negative values evaluate to " +"unlimited." +msgstr "" + +#: glance/common/config.py:103 +msgid "" +"Maximum number of properties allowed on an image. Negative values " +"evaluate to unlimited." +msgstr "" + +#: glance/common/config.py:106 +msgid "" +"Maximum number of tags allowed on an image. Negative values evaluate to " +"unlimited." +msgstr "" + +#: glance/common/config.py:109 +msgid "" +"Maximum number of locations allowed on an image. Negative values evaluate" +" to unlimited." +msgstr "" + +#: glance/common/config.py:112 +msgid "Python module path of data access API" +msgstr "" + +#: glance/common/config.py:114 +msgid "" +"Default value for the number of items returned by a request if not " +"specified explicitly in the request" +msgstr "" + +#: glance/common/config.py:117 +msgid "Maximum permissible number of items that could be returned by a request" +msgstr "" + +#: glance/common/config.py:120 +msgid "" +"Whether to include the backend image storage location in image " +"properties. Revealing storage location can be a security risk, so use " +"this setting with caution!" +msgstr "" + +#: glance/common/config.py:125 +msgid "" +"Whether to include the backend image locations in image properties. For " +"example, if using the file system store a URL of " +"\"file:///path/to/image\" will be returned to the user in the " +"'direct_url' meta-data field. Revealing storage location can be a " +"security risk, so use this setting with caution! The overrides " +"show_image_direct_url." +msgstr "" + +#: glance/common/config.py:134 +msgid "" +"Maximum size of image a user can upload in bytes. Defaults to " +"1099511627776 bytes (1 TB).WARNING: this value should only be increased " +"after careful consideration and must be set to a value under 8 EB " +"(9223372036854775808)." 
+msgstr "" + +#: glance/common/config.py:140 +msgid "" +"Set a system wide quota for every user. This value is the total capacity " +"that a user can use across all storage systems. A value of 0 means " +"unlimited.Optional unit can be specified for the value. Accepted units " +"are B, KB, MB, GB and TB representing Bytes, KiloBytes, MegaBytes, " +"GigaBytes and TeraBytes respectively. If no unit is specified then Bytes " +"is assumed. Note that there should not be any space between value and " +"unit and units are case sensitive." +msgstr "" + +#: glance/common/config.py:150 +msgid "Deploy the v1 OpenStack Images API." +msgstr "" + +#: glance/common/config.py:152 +msgid "Deploy the v2 OpenStack Images API." +msgstr "" + +#: glance/common/config.py:154 +msgid "Deploy the v1 OpenStack Registry API." +msgstr "" + +#: glance/common/config.py:156 +msgid "Deploy the v2 OpenStack Registry API." +msgstr "" + +#: glance/common/config.py:158 +msgid "The hostname/IP of the pydev process listening for debug connections" +msgstr "" + +#: glance/common/config.py:161 +msgid "The port on which a pydev process is listening for connections." +msgstr "" + +#: glance/common/config.py:164 +msgid "" +"AES key for encrypting store 'location' metadata. This includes, if used," +" Swift or S3 credentials. Should be set to a random string of length 16, " +"24 or 32 bytes" +msgstr "" + +#: glance/common/config.py:169 +msgid "" +"Digest algorithm which will be used for digital signature; the default is" +" sha1 the default in Kilo for a smooth upgrade process, and it will be " +"updated with sha256 in next release(L). Use the command \"openssl list-" +"message-digest-algorithms\" to get the available algorithms supported by " +"the version of OpenSSL on the platform. Examples are \"sha1\", " +"\"sha256\", \"sha512\", etc." +msgstr "" + +#: glance/common/config.py:237 +#, python-format +msgid "Unable to locate paste config file for %s." +msgstr "" + +#: glance/common/config.py:276 +#, python-format +msgid "" +"Unable to load %(app_name)s from configuration file %(conf_file)s.\n" +"Got: %(e)r" +msgstr "" + +#: glance/common/exception.py:42 +msgid "An unknown exception occurred" +msgstr "" + +#: glance/common/exception.py:67 +#, python-format +msgid "Missing required credential: %(required)s" +msgstr "" + +#: glance/common/exception.py:71 +#, python-format +msgid "" +"Incorrect auth strategy, expected \"%(expected)s\" but received " +"\"%(received)s\"" +msgstr "" + +#: glance/common/exception.py:76 +msgid "An object with the specified identifier was not found." +msgstr "" + +#: glance/common/exception.py:80 +msgid "The Store URI was malformed." +msgstr "" + +#: glance/common/exception.py:84 +msgid "An object with the same identifier already exists." +msgstr "" + +#: glance/common/exception.py:88 +msgid "An object with the same identifier is currently being operated on." +msgstr "" + +#: glance/common/exception.py:93 +#, python-format +msgid "" +"The size of the data %(image_size)s will exceed the limit. %(remaining)s " +"bytes remaining." +msgstr "" + +#: glance/common/exception.py:98 +#, python-format +msgid "Connect error/bad request to Auth service at URL %(url)s." +msgstr "" + +#: glance/common/exception.py:102 +#, python-format +msgid "Auth service at URL %(url)s not found." +msgstr "" + +#: glance/common/exception.py:106 +msgid "Authorization failed." +msgstr "" + +#: glance/common/exception.py:110 +msgid "You are not authenticated." 
+msgstr "" + +#: glance/common/exception.py:114 +#, python-format +msgid "Image upload problem: %s" +msgstr "" + +#: glance/common/exception.py:118 glance/common/exception.py:122 +#: glance/common/exception.py:407 +msgid "You are not authorized to complete this action." +msgstr "" + +#: glance/common/exception.py:126 +#, python-format +msgid "Image %(image_id)s is protected and cannot be deleted." +msgstr "" + +#: glance/common/exception.py:130 +#, python-format +msgid "" +"Metadata definition namespace %(namespace)s is protected and cannot be " +"deleted." +msgstr "" + +#: glance/common/exception.py:135 +#, python-format +msgid "" +"Metadata definition property %(property_name)s is protected and cannot be" +" deleted." +msgstr "" + +#: glance/common/exception.py:140 +#, python-format +msgid "" +"Metadata definition object %(object_name)s is protected and cannot be " +"deleted." +msgstr "" + +#: glance/common/exception.py:145 +#, python-format +msgid "" +"Metadata definition resource-type-association %(resource_type)s is " +"protected and cannot be deleted." +msgstr "" + +#: glance/common/exception.py:150 +#, python-format +msgid "" +"Metadata definition resource-type %(resource_type_name)s is a seeded-" +"system type and cannot be deleted." +msgstr "" + +#: glance/common/exception.py:155 +#, python-format +msgid "Metadata definition tag %(tag_name)s is protected and cannot be deleted." +msgstr "" + +#: glance/common/exception.py:160 +msgid "Data supplied was not valid." +msgstr "" + +#: glance/common/exception.py:164 +msgid "Sort key supplied was not valid." +msgstr "" + +#: glance/common/exception.py:168 +msgid "Sort direction supplied was not valid." +msgstr "" + +#: glance/common/exception.py:172 +msgid "Invalid configuration in property protection file." +msgstr "" + +#: glance/common/exception.py:176 +msgid "Invalid configuration in glance-swift conf file." +msgstr "" + +#: glance/common/exception.py:180 +msgid "Unable to filter using the specified range." +msgstr "" + +#: glance/common/exception.py:184 +#, python-format +msgid "Invalid value for option %(option)s: %(value)s" +msgstr "" + +#: glance/common/exception.py:188 +#, python-format +msgid "Attribute '%(property)s' is read-only." +msgstr "" + +#: glance/common/exception.py:192 +#, python-format +msgid "Attribute '%(property)s' is reserved." +msgstr "" + +#: glance/common/exception.py:196 +#, python-format +msgid "Redirecting to %(uri)s for authorization." +msgstr "" + +#: glance/common/exception.py:200 +msgid "There was an error connecting to a server" +msgstr "" + +#: glance/common/exception.py:204 +msgid "There was an error configuring the client." +msgstr "" + +#: glance/common/exception.py:208 +#, python-format +msgid "" +"The request returned a 302 Multiple Choices. This generally means that " +"you have not included a version indicator in a request URI.\n" +"\n" +"The body of response returned:\n" +"%(body)s" +msgstr "" + +#: glance/common/exception.py:214 +#, python-format +msgid "" +"The request returned a 413 Request Entity Too Large. This generally means" +" that rate limiting or a quota threshold was breached.\n" +"\n" +"The response body:\n" +"%(body)s" +msgstr "" + +#: glance/common/exception.py:225 +msgid "" +"The request returned 503 Service Unavailable. This generally occurs on " +"service overload or other transient outage." +msgstr "" + +#: glance/common/exception.py:236 +msgid "The request returned 500 Internal Server Error." 
+msgstr "" + +#: glance/common/exception.py:240 +#, python-format +msgid "" +"The request returned an unexpected status: %(status)s.\n" +"\n" +"The response body:\n" +"%(body)s" +msgstr "" + +#: glance/common/exception.py:245 +#, python-format +msgid "Invalid content type %(content_type)s" +msgstr "" + +#: glance/common/exception.py:249 +#, python-format +msgid "Registry was not configured correctly on API server. Reason: %(reason)s" +msgstr "" + +#: glance/common/exception.py:254 +#, python-format +msgid "" +"Driver %(driver_name)s could not be configured correctly. Reason: " +"%(reason)s" +msgstr "" + +#: glance/common/exception.py:259 +#, python-format +msgid "Maximum redirects (%(redirects)s) was exceeded." +msgstr "" + +#: glance/common/exception.py:263 +msgid "Received invalid HTTP redirect." +msgstr "" + +#: glance/common/exception.py:267 +msgid "Response from Keystone does not contain a Glance endpoint." +msgstr "" + +#: glance/common/exception.py:271 +#, python-format +msgid "" +"Multiple 'image' service matches for region %(region)s. This generally " +"means that a region is required and you have not supplied one." +msgstr "" + +#: glance/common/exception.py:277 +#, python-format +msgid "Server worker creation failed: %(reason)s." +msgstr "" + +#: glance/common/exception.py:281 +#, python-format +msgid "Unable to load schema: %(reason)s" +msgstr "" + +#: glance/common/exception.py:285 +#, python-format +msgid "Provided object does not match schema '%(schema)s': %(reason)s" +msgstr "" + +#: glance/common/exception.py:290 +#, python-format +msgid "Provided header feature is unsupported: %(feature)s" +msgstr "" + +#: glance/common/exception.py:294 +msgid "" +"The image cannot be deleted because it is in use through the backend " +"store outside of daisy." +msgstr "" + +#: glance/common/exception.py:299 +msgid "The provided image is too large." +msgstr "" + +#: glance/common/exception.py:309 +#, python-format +msgid "" +"The limit has been exceeded on the number of allowed image properties. " +"Attempted: %(attempted)s, Maximum: %(maximum)s" +msgstr "" + +#: glance/common/exception.py:314 +#, python-format +msgid "" +"The limit has been exceeded on the number of allowed image tags. " +"Attempted: %(attempted)s, Maximum: %(maximum)s" +msgstr "" + +#: glance/common/exception.py:319 +#, python-format +msgid "" +"The limit has been exceeded on the number of allowed image locations. " +"Attempted: %(attempted)s, Maximum: %(maximum)s" +msgstr "" + +#: glance/common/exception.py:324 +msgid "System SIGHUP signal received." 
+msgstr "" + +#: glance/common/exception.py:328 +#, python-format +msgid "%(cls)s exception was raised in the last rpc call: %(val)s" +msgstr "" + +#: glance/common/exception.py:332 +msgid "An unknown task exception occurred" +msgstr "" + +#: glance/common/exception.py:336 +msgid "Task was not configured properly" +msgstr "" + +#: glance/common/exception.py:340 +#, python-format +msgid "Task with the given id %(task_id)s was not found" +msgstr "" + +#: glance/common/exception.py:344 +#, python-format +msgid "Provided status of task is unsupported: %(status)s" +msgstr "" + +#: glance/common/exception.py:348 +#, python-format +msgid "Provided type of task is unsupported: %(type)s" +msgstr "" + +#: glance/common/exception.py:352 +#, python-format +msgid "Status transition from %(cur_status)s to %(new_status)s is not allowed" +msgstr "" + +#: glance/common/exception.py:357 +#, python-format +msgid "The location %(location)s already exists" +msgstr "" + +#: glance/common/exception.py:365 +#, python-format +msgid "Invalid value '%(value)s' for parameter '%(param)s': %(extra_msg)s" +msgstr "" + +#: glance/common/exception.py:370 +#, python-format +msgid "" +"Image status transition from %(cur_status)s to %(new_status)s is not " +"allowed" +msgstr "" + +#: glance/common/exception.py:375 +#, python-format +msgid "The metadata definition namespace=%(namespace_name)s already exists." +msgstr "" + +#: glance/common/exception.py:380 +#, python-format +msgid "" +"A metadata definition object with name=%(object_name)s already exists in " +"namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:385 +#, python-format +msgid "" +"A metadata definition property with name=%(property_name)s already exists" +" in namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:390 +#, python-format +msgid "" +"A metadata definition resource-type with name=%(resource_type_name)s " +"already exists." +msgstr "" + +#: glance/common/exception.py:395 +#, python-format +msgid "" +"The metadata definition resource-type association of resource-" +"type=%(resource_type_name)s to namespace=%(namespace_name)s already " +"exists." +msgstr "" + +#: glance/common/exception.py:402 +#, python-format +msgid "" +"A metadata tag with name=%(name)s already exists in " +"namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:411 +#, python-format +msgid "" +"The metadata definition %(record_type)s with name=%(record_name)s not " +"deleted. Other records still refer to it." +msgstr "" + +#: glance/common/exception.py:417 +#, python-format +msgid "Metadata definition namespace=%(namespace_name)swas not found." +msgstr "" + +#: glance/common/exception.py:422 +#, python-format +msgid "" +"The metadata definition object with name=%(object_name)s was not found in" +" namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:428 +#, python-format +msgid "" +"The metadata definition property with name=%(property_name)s was not " +"found in namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:434 +#, python-format +msgid "" +"The metadata definition resource-type with name=%(resource_type_name)s, " +"was not found." +msgstr "" + +#: glance/common/exception.py:439 +#, python-format +msgid "" +"The metadata definition resource-type association of resource-" +"type=%(resource_type_name)s to namespace=%(namespace_name)s, was not " +"found." 
+msgstr "" + +#: glance/common/exception.py:446 +#, python-format +msgid "" +"The metadata definition tag with name=%(name)s was not found in " +"namespace=%(namespace_name)s." +msgstr "" + +#: glance/common/exception.py:452 +#, python-format +msgid "Version is invalid: %(reason)s" +msgstr "" + +#: glance/common/exception.py:456 +msgid "Invalid property definition" +msgstr "" + +#: glance/common/exception.py:460 +msgid "Invalid type definition" +msgstr "" + +#: glance/common/exception.py:464 +#, python-format +msgid "Property '%(name)s' may not have value '%(val)s': %(msg)s" +msgstr "" + +#: glance/common/exception.py:474 +#, python-format +msgid "Artifact with id=%(id)s was not found" +msgstr "" + +#: glance/common/exception.py:478 +#, python-format +msgid "Artifact with id=%(id)s is not accessible" +msgstr "" + +#: glance/common/exception.py:482 +msgid "Artifact with the specified type, name and version already exists" +msgstr "" + +#: glance/common/exception.py:487 +#, python-format +msgid "Artifact cannot change state from %(source)s to %(target)s" +msgstr "" + +#: glance/common/exception.py:491 +#, python-format +msgid "" +"Artifact with the specified type, name and version already has the direct" +" dependency=%(dep)s" +msgstr "" + +#: glance/common/exception.py:496 +#, python-format +msgid "" +"Artifact with the specified type, name and version already has the " +"transitive dependency=%(dep)s" +msgstr "" + +#: glance/common/exception.py:501 +#, python-format +msgid "Operator %(op)s is not supported" +msgstr "" + +#: glance/common/exception.py:505 +#, python-format +msgid "Show level %(shl)s is not supported in this operation" +msgstr "" + +#: glance/common/exception.py:509 +#, python-format +msgid "Property's %(prop)s value has not been found" +msgstr "" + +#: glance/common/exception.py:513 +#, python-format +msgid "Artifact has no property %(prop)s" +msgstr "" + +#: glance/common/exception.py:517 +#, python-format +msgid "Cannot use this parameter with the operator %(op)s" +msgstr "" + +#: glance/common/exception.py:521 +#, python-format +msgid "Cannot load artifact '%(name)s'" +msgstr "" + +#: glance/common/exception.py:525 +#, python-format +msgid "Plugin name '%(plugin)s' should match artifact typename '%(name)s'" +msgstr "" + +#: glance/common/exception.py:530 +#, python-format +msgid "No plugin for '%(name)s' has been loaded" +msgstr "" + +#: glance/common/exception.py:534 +#, python-format +msgid "Artifact type with name '%(name)s' and version '%(version)s' is not known" +msgstr "" + +#: glance/common/exception.py:539 +#, python-format +msgid "Artifact state cannot be changed from %(curr)s to %(to)s" +msgstr "" + +#: glance/common/exception.py:543 +msgid "Invalid jsonpatch request" +msgstr "" + +#: glance/common/exception.py:547 +#, python-format +msgid "The provided body %(body)s is invalid under given schema: %(schema)s" +msgstr "" + +#: glance/common/exception.py:552 +#, python-format +msgid "The provided path '%(path)s' is invalid: %(explanation)s" +msgstr "" + +#: glance/common/jsonpatchvalidator.py:101 +msgid "" +"Json path should start with a '/', end with no '/', no 2 subsequent '/' " +"are allowed." +msgstr "" + +#: glance/common/jsonpatchvalidator.py:105 +msgid "" +"Pointer contains '~' which is not part of a recognized escape sequence " +"[~0, ~1]." +msgstr "" + +#: glance/common/property_utils.py:39 +msgid "" +"The location of the property protection file.This file contains the rules" +" for property protections and the roles/policies associated with it. 
If " +"this config value is not specified, by default, property protections " +"won't be enforced. If a value is specified and the file is not found, " +"then the daisy-api service will not start." +msgstr "" + +#: glance/common/property_utils.py:49 +msgid "" +"This config value indicates whether \"roles\" or \"policies\" are used in" +" the property protection file." +msgstr "" + +#: glance/common/property_utils.py:136 +#, python-format +msgid "" +"Property protection on operation %(operation)s for rule %(rule)s is not " +"found. No role will be allowed to perform this operation." +msgstr "" + +#: glance/common/rpc.py:143 +msgid "Request must be a list of commands" +msgstr "" + +#: glance/common/rpc.py:148 +#, python-format +msgid "Bad Command: %s" +msgstr "" + +#: glance/common/rpc.py:155 +#, python-format +msgid "Wrong command structure: %s" +msgstr "" + +#: glance/common/rpc.py:164 +msgid "Command not found" +msgstr "" + +#: glance/common/semver_db.py:76 +#, python-format +msgid "Version component is too large (%d max)" +msgstr "" + +#: glance/common/semver_db.py:124 +#, python-format +msgid "Prerelease numeric component is too large (%d characters max)" +msgstr "" + +#: glance/common/store_utils.py:34 glance/registry/client/__init__.py:56 +msgid "" +"Whether to pass through the user token when making requests to the " +"registry." +msgstr "" + +#: glance/common/swift_store_utils.py:33 +msgid "" +"The reference to the default swift account/backing store parameters to " +"use for adding new images." +msgstr "" + +#: glance/common/swift_store_utils.py:36 +msgid "" +"The address where the Swift authentication service is " +"listening.(deprecated)" +msgstr "" + +#: glance/common/swift_store_utils.py:39 +msgid "" +"The user to authenticate against the Swift authentication service " +"(deprecated)" +msgstr "" + +#: glance/common/swift_store_utils.py:42 +msgid "" +"Auth key for the user authenticating against the Swift authentication " +"service. (deprecated)" +msgstr "" + +#: glance/common/swift_store_utils.py:45 +msgid "The config file that has the swift account(s)configs." +msgstr "" + +#: glance/common/utils.py:299 +#, python-format +msgid "Bad header: %(header_name)s" +msgstr "" + +#: glance/common/utils.py:313 +#, python-format +msgid "Cannot convert image %(key)s '%(value)s' to an integer." +msgstr "" + +#: glance/common/utils.py:320 +#, python-format +msgid "Image %(key)s must be >= 0 ('%(value)s' specified)." +msgstr "" + +#: glance/common/utils.py:549 +#, python-format +msgid "" +"There is a problem with your %(error_key_name)s %(error_filename)s. " +"Please verify it. Error: %(ioe)s" +msgstr "" + +#: glance/common/utils.py:556 +#, python-format +msgid "" +"There is a problem with your %(error_key_name)s %(error_filename)s. " +"Please verify it. OpenSSL error: %(ce)s" +msgstr "" + +#: glance/common/utils.py:577 +#, python-format +msgid "" +"There is a problem with your key pair. Please verify that cert " +"%(cert_file)s and key %(key_file)s belong together. OpenSSL error %(ce)s" +msgstr "" + +#: glance/common/utils.py:634 +#, python-format +msgid "Host and port \"%s\" is not valid." +msgstr "" + +#: glance/common/utils.py:637 +#, python-format +msgid "Port \"%s\" is not valid." +msgstr "" + +#: glance/common/utils.py:646 +#, python-format +msgid "Host \"%s\" is not valid." +msgstr "" + +#: glance/common/utils.py:649 +#, python-format +msgid "" +"%s Please specify a host:port pair, where host is an IPv4 address, IPv6 " +"address, hostname, or FQDN. 
If using an IPv6 address, enclose it in " +"brackets separately from the port (i.e., \"[fe80::a:b:c]:9876\")." +msgstr "" + +#: glance/common/utils.py:696 +msgid "Property names can't contain 4 byte unicode." +msgstr "" + +#: glance/common/utils.py:699 +#, python-format +msgid "%s can't contain 4 byte unicode characters." +msgstr "" + +#: glance/common/utils.py:708 +msgid "Param values can't contain 4 byte unicode." +msgstr "" + +#: glance/common/wsgi.py:61 +msgid "" +"Address to bind the server. Useful when selecting a particular network " +"interface." +msgstr "" + +#: glance/common/wsgi.py:64 +msgid "The port on which the server will listen." +msgstr "" + +#: glance/common/wsgi.py:69 +msgid "The backlog value that will be used when creating the TCP listener socket." +msgstr "" + +#: glance/common/wsgi.py:72 +msgid "" +"The value for the socket option TCP_KEEPIDLE. This is the time in " +"seconds that the connection must be idle before TCP starts sending " +"keepalive probes." +msgstr "" + +#: glance/common/wsgi.py:75 +msgid "CA certificate file to use to verify connecting clients." +msgstr "" + +#: glance/common/wsgi.py:77 +msgid "Certificate file to use when starting API server securely." +msgstr "" + +#: glance/common/wsgi.py:79 +msgid "Private key file to use when starting API server securely." +msgstr "" + +#: glance/common/wsgi.py:85 +msgid "" +"The number of child process workers that will be created to service " +"requests. The default will be equal to the number of CPUs available." +msgstr "" + +#: glance/common/wsgi.py:89 +msgid "" +"Maximum line size of message headers to be accepted. max_header_line may " +"need to be increased when using large tokens (typically those generated " +"by the Keystone v3 API with big service catalogs" +msgstr "" + +#: glance/common/wsgi.py:94 +msgid "" +"If False, server will return the header \"Connection: close\", If True, " +"server will return \"Connection: Keep-Alive\" in its responses. In order " +"to close the client socket connection explicitly after the response is " +"sent and read successfully by the client, you simply have to set this " +"option to False when you create a wsgi server." +msgstr "" + +#: glance/common/wsgi.py:106 +msgid "If False fully disable profiling feature." +msgstr "" + +#: glance/common/wsgi.py:108 +msgid "If False doesn't trace SQL requests." +msgstr "" + +#: glance/common/wsgi.py:178 +msgid "" +"When running server in SSL mode, you must specify both a cert_file and " +"key_file option value in your configuration file" +msgstr "" + +#: glance/common/wsgi.py:195 +#, python-format +msgid "Could not bind to %(host)s:%(port)s after trying for 30 seconds" +msgstr "" + +#: glance/common/wsgi.py:210 +msgid "eventlet 'poll' nor 'selects' hubs are available on this platform" +msgstr "" + +#: glance/common/wsgi.py:752 +#, python-format +msgid "Malformed Content-Range header: %s" +msgstr "" + +#: glance/common/wsgi.py:785 +msgid "Malformed JSON in request body." 
+msgstr "" + +#: glance/common/artifacts/declarative.py:63 +msgid "Not a valid value type" +msgstr "" + +#: glance/common/artifacts/declarative.py:96 +msgid "Default value is invalid" +msgstr "" + +#: glance/common/artifacts/declarative.py:110 +#: glance/common/artifacts/declarative.py:716 +msgid "Value is required" +msgstr "" + +#: glance/common/artifacts/declarative.py:133 +msgid "Invalid item type specification" +msgstr "" + +#: glance/common/artifacts/declarative.py:136 +msgid "List definitions may hot have defaults" +msgstr "" + +#: glance/common/artifacts/declarative.py:151 +msgid "Cannot specify 'min_size' explicitly" +msgstr "" + +#: glance/common/artifacts/declarative.py:156 +msgid "Cannot specify 'max_size' explicitly" +msgstr "" + +#: glance/common/artifacts/declarative.py:181 +msgid "List size is less than minimum" +msgstr "" + +#: glance/common/artifacts/declarative.py:190 +msgid "List size is greater than maximum" +msgstr "" + +#: glance/common/artifacts/declarative.py:205 +msgid "Items have to be unique" +msgstr "" + +#: glance/common/artifacts/declarative.py:253 +msgid "Invalid dict property type specification" +msgstr "" + +#: glance/common/artifacts/declarative.py:259 +msgid "Invalid dict property type" +msgstr "" + +#: glance/common/artifacts/declarative.py:273 +msgid "Dictionary contains unexpected key(s)" +msgstr "" + +#: glance/common/artifacts/declarative.py:289 +msgid "Dictionary size is less than minimum" +msgstr "" + +#: glance/common/artifacts/declarative.py:299 +msgid "Dictionary size is greater than maximum" +msgstr "" + +#: glance/common/artifacts/declarative.py:366 +msgid "Custom validators list should contain tuples '(function, message)'" +msgstr "" + +#: glance/common/artifacts/declarative.py:383 +#, python-format +msgid "Allowed values %s are invalid under given validators" +msgstr "" + +#: glance/common/artifacts/declarative.py:390 +msgid "Is not allowed value" +msgstr "" + +#: glance/common/artifacts/declarative.py:408 +msgid "Dependency relations cannot be mutable" +msgstr "" + +#: glance/common/artifacts/declarative.py:467 +msgid "Attempt to set readonly property" +msgstr "" + +#: glance/common/artifacts/declarative.py:475 +msgid "Attempt to set value of immutable property" +msgstr "" + +#: glance/common/artifacts/declarative.py:561 +msgid "Type version has to be a valid semver string" +msgstr "" + +#: glance/common/artifacts/declarative.py:569 +#, python-format +msgid "%(attribute)s is required" +msgstr "" + +#: glance/common/artifacts/declarative.py:574 +#, python-format +msgid "%(attribute)s have to be string" +msgstr "" + +#: glance/common/artifacts/declarative.py:577 +#, python-format +msgid "%(attribute)s may not be longer than %(length)i" +msgstr "" + +#: glance/common/artifacts/declarative.py:581 +#, python-format +msgid "%(attribute)s may not be shorter than %(length)i" +msgstr "" + +#: glance/common/artifacts/declarative.py:585 +#, python-format +msgid "%(attribute)s should match pattern %(pattern)s" +msgstr "" + +#: glance/common/artifacts/declarative.py:629 +msgid "Cannot declare artifact property with reserved name 'metadata'" +msgstr "" + +#: glance/common/artifacts/declarative.py:658 +msgid "Unable to modify collection in immutable or readonly property" +msgstr "" + +#: glance/common/artifacts/definitions.py:75 +msgid "Max string length may not exceed 255 characters" +msgstr "" + +#: glance/common/artifacts/definitions.py:78 +msgid "Length is greater than maximum" +msgstr "" + +#: glance/common/artifacts/definitions.py:89 +msgid "Min 
string length may not be negative" +msgstr "" + +#: glance/common/artifacts/definitions.py:93 +msgid "Length is less than minimum" +msgstr "" + +#: glance/common/artifacts/definitions.py:105 +msgid "Does not match pattern" +msgstr "" + +#: glance/common/artifacts/definitions.py:162 +#: glance/common/artifacts/definitions.py:214 +#: glance/common/artifacts/definitions.py:264 +msgid "Value is less than minimum" +msgstr "" + +#: glance/common/artifacts/definitions.py:173 +#: glance/common/artifacts/definitions.py:225 +#: glance/common/artifacts/definitions.py:275 +msgid "Value is greater than maximum" +msgstr "" + +#: glance/common/artifacts/definitions.py:316 +msgid "Array property can't have item_type=Array" +msgstr "" + +#: glance/common/artifacts/definitions.py:388 +msgid "Unable to specify artifact type explicitly" +msgstr "" + +#: glance/common/artifacts/definitions.py:391 +msgid "Unable to specify artifact type version explicitly" +msgstr "" + +#: glance/common/artifacts/definitions.py:430 +msgid "Unable to specify version if multiple types are possible" +msgstr "" + +#: glance/common/artifacts/definitions.py:445 +msgid "Invalid referenced type" +msgstr "" + +#: glance/common/artifacts/definitions.py:448 +msgid "Unable to specify version if type is not specified" +msgstr "" + +#: glance/common/artifacts/definitions.py:466 +msgid "Invalid reference list specification" +msgstr "" + +#: glance/common/artifacts/definitions.py:532 +msgid "Blob size is not set" +msgstr "" + +#: glance/common/artifacts/definitions.py:536 +msgid "File too large" +msgstr "" + +#: glance/common/artifacts/definitions.py:540 +msgid "File too small" +msgstr "" + +#: glance/common/artifacts/definitions.py:545 +msgid "Too few locations" +msgstr "" + +#: glance/common/artifacts/definitions.py:550 +msgid "Too many locations" +msgstr "" + +#: glance/common/artifacts/loader.py:36 +msgid "" +"When false, no artifacts can be loaded regardless of available_plugins. " +"When true, artifacts can be loaded." +msgstr "" + +#: glance/common/artifacts/loader.py:40 +msgid "" +"A list of artifacts that are allowed in the format name or name-version. " +"Empty list means that any artifact can be loaded." +msgstr "" + +#: glance/common/artifacts/serialization.py:191 +#, python-format +msgid "Blob %(name)s may not have multiple values" +msgstr "" + +#: glance/common/artifacts/serialization.py:212 +#, python-format +msgid "Relation %(name)s may not have multiple values" +msgstr "" + +#: glance/common/location_strategy/__init__.py:30 +msgid "" +"This value sets what strategy will be used to determine the image " +"location order. Currently two strategies are packaged with Glance " +"'location_order' and 'store_type'." +msgstr "" + +#: glance/common/location_strategy/__init__.py:57 +#, python-format +msgid "" +"%(strategy)s is registered as a module twice. %(module)s is not being " +"used." +msgstr "" + +#: glance/common/location_strategy/__init__.py:82 +#, python-format +msgid "" +"Invalid location_strategy option: %(name)s. The valid strategy option(s) " +"is(are): %(strategies)s" +msgstr "" + +#: glance/common/location_strategy/store_type.py:29 +msgid "" +"The store names to use to get store preference order. The name must be " +"registered by one of the stores defined by the 'stores' config option. " +"This option will be applied when you using 'store_type' option as image " +"location strategy defined by the 'location_strategy' config option." 
+msgstr "" + +#: glance/common/scripts/utils.py:65 +#, python-format +msgid "Input does not contain '%(key)s' field" +msgstr "" + +#: glance/common/scripts/utils.py:99 +msgid "" +"File based imports are not allowed. Please use a non-local source of " +"image data." +msgstr "" + +#: glance/common/scripts/utils.py:108 +#, python-format +msgid "" +"The given uri is not valid. Please specify a valid uri from the following" +" list of supported uri %(supported)s" +msgstr "" + +#: glance/common/scripts/image_import/main.py:108 +#, python-format +msgid "" +"The Image %(image_id)s object being created by this task %(task_id)s, is " +"no longer in valid status for further processing." +msgstr "" + +#: glance/contrib/plugins/image_artifact/v2/image.py:41 +msgid "Either a file or a legacy_image_id has to be specified" +msgstr "" + +#: glance/contrib/plugins/image_artifact/v2/image.py:46 +msgid "Both file and legacy_image_id may not be specified at the same time" +msgstr "" + +#: glance/contrib/plugins/image_artifact/v2/image.py:60 +msgid "Unable to get legacy image" +msgstr "" + +#: glance/contrib/plugins/image_artifact/v2/image.py:74 +msgid "Legacy image was not found" +msgstr "" + +#: glance/db/__init__.py:72 glance/db/__init__.py:182 glance/db/__init__.py:194 +#, python-format +msgid "No image found with ID %s" +msgstr "" + +#: glance/db/__init__.py:253 +#, python-format +msgid "" +"The target member %(member_id)s is already associated with image " +"%(image_id)s." +msgstr "" + +#: glance/db/__init__.py:270 +#, python-format +msgid "The specified member %s could not be found" +msgstr "" + +#: glance/db/__init__.py:348 glance/db/__init__.py:369 +#: glance/db/__init__.py:386 +#, python-format +msgid "Could not find task %s" +msgstr "" + +#: glance/db/__init__.py:433 +#, python-format +msgid "Could not find namespace %s" +msgstr "" + +#: glance/db/__init__.py:455 glance/db/__init__.py:465 +#: glance/db/__init__.py:475 glance/db/__init__.py:485 +#, python-format +msgid "The specified namespace %s could not be found" +msgstr "" + +#: glance/db/__init__.py:576 +#, python-format +msgid "Could not find metadata object %s" +msgstr "" + +#: glance/db/__init__.py:599 +#, python-format +msgid "The specified metadata object %s could not be found" +msgstr "" + +#: glance/db/__init__.py:692 +#, python-format +msgid "The specified resource type %s could not be found " +msgstr "" + +#: glance/db/__init__.py:739 +#, python-format +msgid "Could not find property %s" +msgstr "" + +#: glance/db/__init__.py:761 +#, python-format +msgid "The specified property %s could not be found" +msgstr "" + +#: glance/db/__init__.py:825 +#, python-format +msgid "Could not find metadata tag %s" +msgstr "" + +#: glance/db/__init__.py:851 +#, python-format +msgid "The specified metadata tag %s could not be found" +msgstr "" + +#: glance/db/simple/api.py:294 glance/db/sqlalchemy/api.py:466 +msgid "Unable to filter on a range with a non-numeric value." +msgstr "" + +#: glance/db/simple/api.py:488 glance/db/sqlalchemy/api.py:1115 +msgid "Image id is required." 
+msgstr "" + +#: glance/db/simple/api.py:548 glance/db/sqlalchemy/api.py:818 +#, python-format +msgid "The location data has an invalid ID: %d" +msgstr "" + +#: glance/db/simple/api.py:568 glance/db/simple/api.py:594 +#: glance/db/sqlalchemy/api.py:838 glance/db/sqlalchemy/api.py:864 +#, python-format +msgid "No location found with ID %(loc)s from image %(img)s" +msgstr "" + +#: glance/db/simple/api.py:578 +msgid "" +"The status of deleted image location can only be set to 'pending_delete' " +"or 'deleted'." +msgstr "" + +#: glance/db/simple/api.py:921 +#, python-format +msgid "Forbidding request, task %s is not visible" +msgstr "" + +#: glance/db/simple/api.py:1037 +msgid "Task does not exist" +msgstr "" + +#: glance/db/simple/api.py:1140 +#: glance/db/sqlalchemy/metadef_api/namespace.py:256 +#, python-format +msgid "" +"Invalid update. It would result in a duplicate metadata definition " +"namespace with the same name of %s" +msgstr "" + +#: glance/db/simple/api.py:1161 +#: glance/db/sqlalchemy/metadef_api/namespace.py:90 +#, python-format +msgid "Metadata definition namespace not found for id=%s" +msgstr "" + +#: glance/db/simple/api.py:1170 glance/db/simple/api.py:2128 +#: glance/db/sqlalchemy/metadef_api/namespace.py:100 +#: glance/db/sqlalchemy/metadef_api/namespace.py:124 +#, python-format +msgid "Forbidding request, metadata definition namespace=%s is not visible." +msgstr "" + +#: glance/db/simple/api.py:1295 glance/db/sqlalchemy/metadef_api/object.py:38 +#, python-format +msgid "Metadata definition object not found for id=%s" +msgstr "" + +#: glance/db/simple/api.py:1378 +#, python-format +msgid "" +"Invalid update. It would result in a duplicate metadata definition object" +" with the same name=%(name)s in namespace=%(namespace_name)s." +msgstr "" + +#: glance/db/simple/api.py:1507 +#: glance/db/sqlalchemy/metadef_api/property.py:128 +#, python-format +msgid "" +"Invalid update. It would result in a duplicate metadata definition " +"property with the same name=%(name)s in namespace=%(namespace_name)s." +msgstr "" + +#: glance/db/simple/api.py:1551 glance/db/sqlalchemy/metadef_api/property.py:40 +#, python-format +msgid "Metadata definition property not found for id=%s" +msgstr "" + +#: glance/db/simple/api.py:1761 +#, python-format +msgid "Metadata definition tag not found for id=%s" +msgstr "" + +#: glance/db/sqlalchemy/api.py:118 +msgid "You do not own this image" +msgstr "" + +#: glance/db/sqlalchemy/api.py:395 +msgid "Unknown sort direction, must be 'desc' or 'asc'" +msgstr "" + +#: glance/db/sqlalchemy/api.py:767 +#: glance/tests/unit/v2/test_registry_client.py:583 +#, python-format +msgid "" +"cannot transition from %(current)s to %(next)s in update (wanted " +"from_state=%(from)s)" +msgstr "" + +#: glance/db/sqlalchemy/api.py:847 +msgid "" +"The status of deleted image location can only be set to 'pending_delete' " +"or 'deleted'" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:46 +msgid "Path to the directory where json metadata files are stored" +msgstr "" + +#: glance/db/sqlalchemy/metadef_api/object.py:118 +#, python-format +msgid "" +"Invalid update. It would result in a duplicate metadata definition object" +" with the same name=%(name)s in namespace=%(namespace_name)s." 
+msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:91 +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:158 +msgid "" +"URI cannot contain more than one occurrence of a scheme.If you have " +"specified a URI like " +"swift://user:pass@http://authurl.com/v1/container/obj, you need to change" +" it to use the swift+http:// scheme, like so: " +"swift+http://user:pass@authurl.com/v1/container/obj" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:128 +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:194 +#, python-format +msgid "Badly formed credentials '%(creds)s' in Swift URI" +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:140 +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:206 +msgid "Badly formed credentials in Swift URI." +msgstr "" + +#: glance/db/sqlalchemy/migrate_repo/versions/015_quote_swift_credentials.py:157 +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:223 +#, python-format +msgid "Badly formed S3 URI: %(uri)s" +msgstr "" + +#: glance/domain/__init__.py:67 +#, python-format +msgid "new_image() got unexpected keywords %s" +msgstr "" + +#: glance/domain/__init__.py:139 +#, python-format +msgid "__init__() got unexpected keyword argument '%s'" +msgstr "" + +#: glance/domain/__init__.py:161 +#, python-format +msgid "Property %s must be set prior to saving data." +msgstr "" + +#: glance/domain/__init__.py:164 +#, python-format +msgid "Properties %s must be set prior to saving data." +msgstr "" + +#: glance/domain/__init__.py:181 +msgid "Visibility must be either \"public\" or \"private\"" +msgstr "" + +#: glance/domain/__init__.py:200 +msgid "Attribute container_format can be only replaced for a queued image." +msgstr "" + +#: glance/domain/__init__.py:212 +msgid "Attribute disk_format can be only replaced for a queued image." +msgstr "" + +#: glance/domain/__init__.py:224 glance/domain/__init__.py:237 +msgid "Cannot be a negative value" +msgstr "" + +#: glance/domain/__init__.py:261 +#, python-format +msgid "Not allowed to deactivate image in status '%s'" +msgstr "" + +#: glance/domain/__init__.py:275 +#, python-format +msgid "Not allowed to reactivate image in status '%s'" +msgstr "" + +#: glance/domain/__init__.py:330 +msgid "Status must be \"pending\", \"accepted\" or \"rejected\"." +msgstr "" + +#: glance/image_cache/__init__.py:40 +msgid "The driver to use for image cache management." +msgstr "" + +#: glance/image_cache/__init__.py:42 +msgid "" +"The upper limit (the maximum size of accumulated cache in bytes) beyond " +"which pruner, if running, starts cleaning the images cache." +msgstr "" + +#: glance/image_cache/__init__.py:46 +msgid "" +"The amount of time to let an image remain in the cache without being " +"accessed." +msgstr "" + +#: glance/image_cache/__init__.py:49 +msgid "Base directory that the Image Cache uses." +msgstr "" + +#: glance/image_cache/__init__.py:261 +#, python-format +msgid "Checksum verification failed. Aborted caching of image '%s'." 
+msgstr "" + +#: glance/image_cache/client.py:121 +msgid "" +"--os_auth_url option or OS_AUTH_URL environment variable required when " +"keystone authentication strategy is enabled\n" +msgstr "" + +#: glance/image_cache/drivers/base.py:56 +#, python-format +msgid "Failed to read %s from config" +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:45 +msgid "" +"The path to the sqlite file database that will be used for image cache " +"management." +msgstr "" + +#: glance/image_cache/drivers/sqlite.py:127 +#, python-format +msgid "Failed to initialize the image cache database. Got error: %s" +msgstr "" + +#: glance/image_cache/drivers/xattr.py:110 +#, python-format +msgid "" +"The device housing the image cache directory %(image_cache_dir)s does not" +" support xattr. It is likely you need to edit your fstab and add the " +"user_xattr option to the appropriate line for the device housing the " +"cache directory." +msgstr "" + +#: glance/registry/__init__.py:28 +msgid "Address to find the registry server." +msgstr "" + +#: glance/registry/__init__.py:30 +msgid "Port the registry server is listening on." +msgstr "" + +#: glance/registry/api/v1/images.py:128 glance/registry/api/v1/images.py:133 +msgid "Invalid marker. Image could not be found." +msgstr "" + +#: glance/registry/api/v1/images.py:237 +msgid "Unrecognized changes-since value" +msgstr "" + +#: glance/registry/api/v1/images.py:242 +msgid "protected must be True, or False" +msgstr "" + +#: glance/registry/api/v1/images.py:289 +#, python-format +msgid "Unsupported sort_key. Acceptable values: %s" +msgstr "" + +#: glance/registry/api/v1/images.py:298 +#, python-format +msgid "Unsupported sort_dir. Acceptable values: %s" +msgstr "" + +#: glance/registry/api/v1/images.py:324 +msgid "is_public must be None, True, or False" +msgstr "" + +#: glance/registry/api/v1/images.py:418 +msgid "Invalid image id format" +msgstr "" + +#: glance/registry/api/v1/images.py:433 +#, python-format +msgid "Image with identifier %s already exists!" +msgstr "" + +#: glance/registry/api/v1/images.py:437 +#, python-format +msgid "Failed to add image metadata. Got error: %s" +msgstr "" + +#: glance/registry/api/v1/members.py:70 glance/registry/api/v1/members.py:105 +#: glance/registry/api/v1/members.py:223 glance/registry/api/v1/members.py:287 +#, python-format +msgid "Image %(id)s not found" +msgstr "" + +#: glance/registry/api/v1/members.py:121 glance/registry/api/v1/members.py:239 +#: glance/registry/api/v1/members.py:303 +msgid "No permission to share that image" +msgstr "" + +#: glance/registry/api/v1/members.py:132 glance/registry/api/v1/members.py:149 +#: glance/registry/api/v1/members.py:252 +#, python-format +msgid "Invalid membership association: %s" +msgstr "" + +#: glance/registry/api/v1/members.py:316 glance/registry/api/v1/members.py:341 +msgid "Membership could not be found." +msgstr "" + +#: glance/registry/api/v2/rpc.py:44 +#, python-format +msgid "Registry service can't use %s" +msgstr "" + +#: glance/registry/client/__init__.py:24 +msgid "" +"The protocol to use for communication with the registry server. Either " +"http or https." +msgstr "" + +#: glance/registry/client/__init__.py:27 +msgid "" +"The path to the key file to use in SSL connections to the registry " +"server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE " +"environment variable to a filepath of the key file" +msgstr "" + +#: glance/registry/client/__init__.py:32 +msgid "" +"The path to the cert file to use in SSL connections to the registry " +"server, if any. 
Alternately, you may set the GLANCE_CLIENT_CERT_FILE " +"environment variable to a filepath of the CA cert file" +msgstr "" + +#: glance/registry/client/__init__.py:37 +msgid "" +"The path to the certifying authority cert file to use in SSL connections " +"to the registry server, if any. Alternately, you may set the " +"GLANCE_CLIENT_CA_FILE environment variable to a filepath of the CA cert " +"file." +msgstr "" + +#: glance/registry/client/__init__.py:43 +msgid "" +"When using SSL in connections to the registry server, do not require " +"validation via a certifying authority. This is the registry's equivalent " +"of specifying --insecure on the command line using glanceclient for the " +"API." +msgstr "" + +#: glance/registry/client/__init__.py:49 +msgid "" +"The period of time, in seconds, that the API server will wait for a " +"registry request to complete. A value of 0 implies no timeout." +msgstr "" + +#: glance/registry/client/__init__.py:59 +msgid "" +"The administrators user name. If \"use_user_token\" is not in effect, " +"then admin credentials can be specified." +msgstr "" + +#: glance/registry/client/__init__.py:63 +msgid "" +"The administrators password. If \"use_user_token\" is not in effect, then" +" admin credentials can be specified." +msgstr "" + +#: glance/registry/client/__init__.py:67 +msgid "" +"The tenant name of the administrative user. If \"use_user_token\" is not " +"in effect, then admin tenant name can be specified." +msgstr "" + +#: glance/registry/client/__init__.py:71 +msgid "" +"The URL to the keystone service. If \"use_user_token\" is not in effect " +"and using keystone auth, then URL of keystone can be specified." +msgstr "" + +#: glance/registry/client/__init__.py:76 +msgid "" +"The strategy to use for authentication. If \"use_user_token\" is not in " +"effect, then auth strategy can be specified." +msgstr "" + +#: glance/registry/client/__init__.py:80 +msgid "" +"The region for the authentication service. If \"use_user_token\" is not " +"in effect and using keystone auth, then region name can be specified." +msgstr "" + +#: glance/registry/client/v1/api.py:35 +msgid "" +"Whether to pass through headers containing user and tenant information " +"when making requests to the registry. This allows the registry to use the" +" context middleware without keystonemiddleware's auth_token middleware, " +"removing calls to the keystone auth service. It is recommended that when " +"using this option, secure communication between glance api and glance " +"registry is ensured by means other than auth_token middleware." +msgstr "" + +#: glance/registry/client/v1/api.py:80 glance/registry/client/v2/api.py:62 +msgid "Configuration option was not valid" +msgstr "" + +#: glance/registry/client/v1/api.py:84 glance/registry/client/v2/api.py:66 +msgid "Could not find required configuration option" +msgstr "" + +#: glance/search/api/v0_1/search.py:134 +#, python-format +msgid "Index '%s' is not supported." +msgstr "" + +#: glance/search/api/v0_1/search.py:143 +#, python-format +msgid "Document type '%s' is not supported." 
+msgstr "" + +#: glance/search/api/v0_1/search.py:152 +msgid "offset param must be an integer" +msgstr "" + +#: glance/search/api/v0_1/search.py:156 +msgid "offset param must be positive" +msgstr "" + +#: glance/search/api/v0_1/search.py:176 +msgid "actions param cannot be empty" +msgstr "" + +#: glance/search/api/v0_1/search.py:196 +#, python-format +msgid "Invalid action type: '%s'" +msgstr "" + +#: glance/search/api/v0_1/search.py:200 +#, python-format +msgid "Action type '%s' requires data or script param." +msgstr "" + +#: glance/search/api/v0_1/search.py:204 +#, python-format +msgid "Action type '%s' requires ID of the document." +msgstr "" + +#: glance/search/api/v0_1/search.py:327 +msgid "Action index is missing and no default index has been set." +msgstr "" + +#: glance/search/api/v0_1/search.py:332 +msgid "Action document type is missing and no default type has been set." +msgstr "" + +#: glance/tests/unit/test_migrations.py:621 +#, python-format +msgid "location: %s data lost" +msgstr "" + diff --git a/code/daisy/daisy/locale/pt_BR/LC_MESSAGES/glance-log-info.po b/code/daisy/daisy/locale/pt_BR/LC_MESSAGES/glance-log-info.po new file mode 100755 index 00000000..a4e0b59c --- /dev/null +++ b/code/daisy/daisy/locale/pt_BR/LC_MESSAGES/glance-log-info.po @@ -0,0 +1,424 @@ +# Translations template for daisy. +# Copyright (C) 2015 ORGANIZATION +# This file is distributed under the same license as the glance project. +# +# Translators: +# Rodrigo Felix de Almeida , 2014 +msgid "" +msgstr "" +"Project-Id-Version: Glance\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2015-04-03 06:02+0000\n" +"PO-Revision-Date: 2015-04-01 21:54+0000\n" +"Last-Translator: openstackjenkins \n" +"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" +"glance/language/pt_BR/)\n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 1.3\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: glance/scrubber.py:459 +#, python-format +msgid "Starting Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" +msgstr "Iniciando Daemon: wakeup_time=%(wakeup_time)s threads=%(threads)s" + +#: glance/scrubber.py:473 +msgid "Daemon Shutdown on KeyboardInterrupt" +msgstr "Encerrando o daemon em KeyboardInterrupt" + +#: glance/scrubber.py:485 +#, python-format +msgid "Initializing scrubber with configuration: %s" +msgstr "Inicializando scrubber com configura莽茫o: %s" + +#: glance/scrubber.py:558 +#, python-format +msgid "Scrubbing image %(id)s from %(count)d locations." +msgstr "Limpando imagem %(id)s dos locais %(count)d." + +#: glance/scrubber.py:581 +#, python-format +msgid "Image %s has been deleted." +msgstr "Imagem %s foi exclu铆da." + +#: glance/scrubber.py:633 +#, python-format +msgid "Getting images deleted before %s" +msgstr "Obtendo imagens exclu铆das antes de %s" + +#: glance/api/middleware/cache.py:61 +msgid "Initialized image cache middleware" +msgstr "Middleware do cache de imagem inicializado" + +#: glance/api/middleware/cache_manage.py:74 +msgid "Initialized image cache management middleware" +msgstr "Middleware de gerenciamento do cache de imagem inicializado" + +#: glance/api/middleware/gzip.py:36 +msgid "Initialized gzip middleware" +msgstr "Inicializado middleware gzip" + +#: glance/api/v1/images.py:690 +#, python-format +msgid "Uploaded data of image %s from request payload successfully." +msgstr "Enviados com sucesso dados da imagem %s da requisi莽茫o de carga 煤til." 
+ +#: glance/api/v1/images.py:752 +msgid "Triggering asynchronous copy from external source" +msgstr "Acionando cópia assíncrona da origem externa" + +#: glance/api/v1/upload_utils.py:126 +#, python-format +msgid "Cleaning up %s after exceeding the quota" +msgstr "Limpando %s após exceder a quota" + +#: glance/api/v1/upload_utils.py:175 +#, python-format +msgid "" +"Image %s could not be found after upload. The image may have been deleted " +"during the upload." +msgstr "" +"Imagem %s não pôde ser encontrada após o envio. A imagem pode ter sido " +"excluída durante o envio." + +#: glance/api/v2/image_actions.py:51 +#, python-format +msgid "Image %s is deactivated" +msgstr "" + +#: glance/api/v2/image_actions.py:66 +#, python-format +msgid "Image %s is reactivated" +msgstr "" + +#: glance/async/flows/base_import.py:348 +#, python-format +msgid "%(task_id)s of %(task_type)s completed" +msgstr "" + +#: glance/cmd/replicator.py:372 +#, python-format +msgid "Storing: %s" +msgstr "Armazenando: %s" + +#: glance/cmd/replicator.py:445 +#, python-format +msgid "Considering: %s" +msgstr "Considerando: %s" + +#: glance/cmd/replicator.py:471 glance/cmd/replicator.py:546 +#, python-format +msgid "Image %s metadata has changed" +msgstr "Metadados da Imagem %s mudaram" + +#: glance/cmd/replicator.py:553 +#, python-format +msgid "Image %s is being synced" +msgstr "Imagem %s está sendo sincronizada" + +#: glance/common/wsgi.py:308 glance/openstack/common/service.py:326 +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabalhadores" + +#: glance/common/wsgi.py:321 +#, python-format +msgid "Removed dead child %s" +msgstr "" + +#: glance/common/wsgi.py:324 +#, python-format +msgid "Removed stale child %s" +msgstr "" + +#: glance/common/wsgi.py:336 +msgid "All workers have terminated. Exiting" +msgstr "Todos os trabalhadores foram finalizados. Saindo" + +#: glance/common/wsgi.py:353 +msgid "Caught keyboard interrupt. Exiting." +msgstr "Interrupção da captura de teclado. Saindo." + +#: glance/common/wsgi.py:432 +#, python-format +msgid "Child %d exiting normally" +msgstr "Filho %d saindo normalmente" + +#: glance/common/wsgi.py:437 +#, python-format +msgid "Started child %s" +msgstr "Filho iniciado %s" + +#: glance/common/wsgi.py:466 +msgid "Starting single process server" +msgstr "Iniciando servidor de processo único" + +#: glance/common/artifacts/loader.py:131 glance/common/artifacts/loader.py:155 +#, python-format +msgid "Artifact %s has been successfully loaded" +msgstr "" + +#: glance/common/scripts/__init__.py:32 +#, python-format +msgid "" +"Loading known task scripts for task_id %(task_id)s of type %(task_type)s" +msgstr "" +"Carregando scripts conhecidos de tarefas para task_id %(task_id)s de tipo " +"%(task_type)s" + +#: glance/common/scripts/image_import/main.py:41 +#, python-format +msgid "Task %(task_id)s beginning import execution." +msgstr "Tarefa %(task_id)s iniciando execução da importação."
+ +#: glance/common/scripts/image_import/main.py:152 +#, python-format +msgid "Task %(task_id)s: Got image data uri %(data_uri)s to be imported" +msgstr "" +"Tarefa %(task_id)s: Obtidos dados de uri de imagem %(data_uri)s a ser " +"importada" + +#: glance/common/scripts/image_import/main.py:161 +#, python-format +msgid "Task %(task_id)s: Could not import image file %(image_data)s" +msgstr "" +"Tarefa %(task_id)s: Não pôde importar o arquivo de imagem %(image_data)s" + +#: glance/db/simple/api.py:62 +#, python-format +msgid "Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" +msgstr "Chamando %(funcname)s: args=%(args)s, kwargs=%(kwargs)s" + +#: glance/db/simple/api.py:68 +#, python-format +msgid "Returning %(funcname)s: %(output)s" +msgstr "Retornando %(funcname)s: %(output)s" + +#: glance/db/simple/api.py:2002 +#, python-format +msgid "Could not find artifact %s" +msgstr "" + +#: glance/db/simple/api.py:2006 +msgid "Unable to get deleted image" +msgstr "Não é possível obter a imagem excluída" + +#: glance/db/sqlalchemy/metadata.py:152 +#, python-format +msgid "Table %s has been cleared" +msgstr "Tabela %s foi limpa" + +#: glance/db/sqlalchemy/metadata.py:223 +#, python-format +msgid "Overwriting namespace %s" +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:239 +#, python-format +msgid "Skipping namespace %s. It already exists in the database." +msgstr "" + +#: glance/db/sqlalchemy/metadata.py:330 +#, python-format +msgid "File %s loaded to database." +msgstr "Arquivo %s carregado no banco de dados." + +#: glance/db/sqlalchemy/metadata.py:332 +msgid "Metadata loading finished" +msgstr "Carregamento de metadados finalizado" + +#: glance/db/sqlalchemy/metadata.py:441 +#, python-format +msgid "Namespace %(namespace)s saved in %(file)s" +msgstr "Namespace %(namespace)s salvo em %(file)s" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:101 +#, python-format +msgid "creating table %(table)s" +msgstr "criando tabela %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/schema.py:107 +#, python-format +msgid "dropping table %(table)s" +msgstr "descartando tabela %(table)s" + +#: glance/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py:72 +msgid "" +"'metadata_encryption_key' was not specified in the config file or a config " +"file was not specified. This means that this migration is a NOOP." +msgstr "" +"'metadata_encryption_key' não foi especificado no arquivo de configuração ou " +"um arquivo de configuração não foi especificado. Isso significa que essa " +"migração é um NOOP." + +#: glance/domain/__init__.py:406 +#, python-format +msgid "" +"Task [%(task_id)s] status changing from %(cur_status)s to %(new_status)s" +msgstr "" + +#: glance/image_cache/__init__.py:71 +#, python-format +msgid "Image cache loaded driver '%s'." +msgstr "O cache de imagem carregou o driver '%s'." + +#: glance/image_cache/__init__.py:81 glance/image_cache/__init__.py:100 +msgid "Defaulting to SQLite driver." +msgstr "Padronizando para o driver SQLite." + +#: glance/image_cache/prefetcher.py:85 +#, python-format +msgid "Successfully cached all %d images" +msgstr "Armazenadas em cache com êxito todas as %d imagens" + +#: glance/image_cache/drivers/sqlite.py:414 +#: glance/image_cache/drivers/xattr.py:343 +#, python-format +msgid "Not queueing image '%s'. Already cached." +msgstr "Não enfileirando imagem %s'. Já armazenada em cache." + +#: glance/image_cache/drivers/sqlite.py:419 +#: glance/image_cache/drivers/xattr.py:348 +#, python-format +msgid "Not queueing image '%s'. 
Already being written to cache" +msgstr "Não enfileirando imagem %s'. Já está sendo gravada no cache" + +#: glance/image_cache/drivers/sqlite.py:425 +#: glance/image_cache/drivers/xattr.py:354 +#, python-format +msgid "Not queueing image '%s'. Already queued." +msgstr "Não enfileirando a imagem '%s'. Já enfileirada." + +#: glance/image_cache/drivers/sqlite.py:443 +#, python-format +msgid "Removed invalid cache file %s" +msgstr "Arquivo de cache inválido removido %s" + +#: glance/image_cache/drivers/sqlite.py:457 +#, python-format +msgid "Removed stalled cache file %s" +msgstr "Arquivo de cache paralisado removido %s" + +#: glance/image_cache/drivers/xattr.py:400 +#, python-format +msgid "Reaped %(reaped)s %(entry_type)s cache entries" +msgstr "%(reaped)s %(entry_type)s entradas de cache coletadas" + +#: glance/openstack/common/eventlet_backdoor.py:146 +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" + +#: glance/openstack/common/service.py:173 +#, python-format +msgid "Caught %s, exiting" +msgstr "%s capturadas, saindo" + +#: glance/openstack/common/service.py:227 +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processo pai saiu inesperadamente, saindo" + +#: glance/openstack/common/service.py:258 +#, python-format +msgid "Child caught %s, exiting" +msgstr "Filho capturado %s, terminando" + +#: glance/openstack/common/service.py:297 +msgid "Forking too fast, sleeping" +msgstr "Bifurcação muito rápida, suspendendo" + +#: glance/openstack/common/service.py:316 +#, python-format +msgid "Started child %d" +msgstr "Filho %d iniciado" + +#: glance/openstack/common/service.py:343 +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "%(pid)d filho eliminado pelo sinal %(sig)d" + +#: glance/openstack/common/service.py:347 +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Filho %(pid)s encerrando com status %(code)d" + +#: glance/openstack/common/service.py:382 +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s capturado, parando filhos" + +#: glance/openstack/common/service.py:391 +msgid "Wait called after thread killed. Cleaning up." +msgstr "" + +#: glance/openstack/common/service.py:407 +#, python-format +msgid "Waiting on %d children to exit" +msgstr "Aguardando em %d filhos para sair" + +#: glance/quota/__init__.py:330 +#, python-format +msgid "Cleaning up %s after exceeding the quota." +msgstr "Realizando limpeza %s após exceder a cota."
+ +#: glance/registry/api/v1/images.py:343 glance/registry/api/v1/images.py:386 +#: glance/registry/api/v1/images.py:491 +#, python-format +msgid "Image %(id)s not found" +msgstr "Imagem %(id)s não localizada" + +#: glance/registry/api/v1/images.py:349 glance/registry/api/v1/images.py:381 +#: glance/registry/api/v1/images.py:503 +#, python-format +msgid "Access denied to image %(id)s but returning 'not found'" +msgstr "Acesso negado à imagem %(id)s, mas retornando 'não localizado'" + +#: glance/registry/api/v1/images.py:371 +#, python-format +msgid "Successfully deleted image %(id)s" +msgstr "Imagem excluída com êxito %(id)s" + +#: glance/registry/api/v1/images.py:375 +#, python-format +msgid "Delete denied for public image %(id)s" +msgstr "Exclusão negada para imagem pública %(id)s" + +#: glance/registry/api/v1/images.py:415 +#, python-format +msgid "Rejecting image creation request for invalid image id '%(bad_id)s'" +msgstr "" +"Rejeitando solicitação de criação de imagem para o ID de imagem inválido " +"'%(bad_id)s'" + +#: glance/registry/api/v1/images.py:428 +#, python-format +msgid "Successfully created image %(id)s" +msgstr "Imagem criada com êxito %(id)s" + +#: glance/registry/api/v1/images.py:482 +#, python-format +msgid "Updating metadata for image %(id)s" +msgstr "Atualizando metadados para a imagem %(id)s" + +#: glance/registry/api/v1/images.py:497 +#, python-format +msgid "Update denied for public image %(id)s" +msgstr "Atualização negada para imagem pública %(id)s" + +#: glance/registry/api/v1/members.py:198 +#, python-format +msgid "Successfully updated memberships for image %(id)s" +msgstr "Associações atualizadas com êxito para a imagem %(id)s" + +#: glance/registry/api/v1/members.py:271 +#, python-format +msgid "Successfully updated a membership for image %(id)s" +msgstr "Atualizada com êxito uma associação para a imagem %(id)s" + +#: glance/registry/api/v1/members.py:320 +#, python-format +msgid "Successfully deleted a membership from image %(id)s" +msgstr "Excluída com êxito uma associação da imagem %(id)s" diff --git a/code/daisy/daisy/location.py b/code/daisy/daisy/location.py new file mode 100755 index 00000000..35b9c21f --- /dev/null +++ b/code/daisy/daisy/location.py @@ -0,0 +1,431 @@ +# Copyright 2014 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +import collections +import copy + +import glance_store as store +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils + +from daisy.common import exception +from daisy.common import utils +import daisy.domain.proxy +from daisy import i18n + + +_ = i18n._ +_LE = i18n._LE + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +class ImageRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, image_repo, context, store_api, store_utils): + self.context = context + self.store_api = store_api + proxy_kwargs = {'context': context, 'store_api': store_api, + 'store_utils': store_utils} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def _set_acls(self, image): + public = image.visibility == 'public' + member_ids = [] + if image.locations and not public: + member_repo = image.get_member_repo() + member_ids = [m.member_id for m in member_repo.list()] + for location in image.locations: + self.store_api.set_acls(location['url'], public=public, + read_tenants=member_ids, + context=self.context) + + def add(self, image): + result = super(ImageRepoProxy, self).add(image) + self._set_acls(image) + return result + + def save(self, image, from_state=None): + result = super(ImageRepoProxy, self).save(image, from_state=from_state) + self._set_acls(image) + return result + + +def _check_location_uri(context, store_api, store_utils, uri): + """Check if an image location is valid. + + :param context: Glance request context + :param store_api: store API module + :param store_utils: store utils module + :param uri: location's uri string + """ + + is_ok = True + try: + # NOTE(zhiyan): Some stores return zero when it catch exception + is_ok = (store_utils.validate_external_location(uri) and + store_api.get_size_from_backend(uri, context=context) > 0) + except (store.UnknownScheme, store.NotFound): + is_ok = False + if not is_ok: + reason = _('Invalid location') + raise exception.BadStoreUri(message=reason) + + +def _check_image_location(context, store_api, store_utils, location): + _check_location_uri(context, store_api, store_utils, location['url']) + store_api.check_location_metadata(location['metadata']) + + +def _set_image_size(context, image, locations): + if not image.size: + for location in locations: + size_from_backend = store.get_size_from_backend( + location['url'], context=context) + + if size_from_backend: + # NOTE(flwang): This assumes all locations have the same size + image.size = size_from_backend + break + + +def _count_duplicated_locations(locations, new): + """ + To calculate the count of duplicated locations for new one. 
+ + :param locations: The exiting image location set + :param new: The new image location + :returns: The count of duplicated locations + """ + + ret = 0 + for loc in locations: + if loc['url'] == new['url'] and loc['metadata'] == new['metadata']: + ret += 1 + return ret + + +class ImageFactoryProxy(daisy.domain.proxy.ImageFactory): + def __init__(self, factory, context, store_api, store_utils): + self.context = context + self.store_api = store_api + self.store_utils = store_utils + proxy_kwargs = {'context': context, 'store_api': store_api, + 'store_utils': store_utils} + super(ImageFactoryProxy, self).__init__(factory, + proxy_class=ImageProxy, + proxy_kwargs=proxy_kwargs) + + def new_image(self, **kwargs): + locations = kwargs.get('locations', []) + for loc in locations: + _check_image_location(self.context, + self.store_api, + self.store_utils, + loc) + loc['status'] = 'active' + if _count_duplicated_locations(locations, loc) > 1: + raise exception.DuplicateLocation(location=loc['url']) + return super(ImageFactoryProxy, self).new_image(**kwargs) + + +class StoreLocations(collections.MutableSequence): + """ + The proxy for store location property. It takes responsibility for: + 1. Location uri correctness checking when adding a new location. + 2. Remove the image data from the store when a location is removed + from an image. + """ + def __init__(self, image_proxy, value): + self.image_proxy = image_proxy + if isinstance(value, list): + self.value = value + else: + self.value = list(value) + + def append(self, location): + # NOTE(flaper87): Insert this + # location at the very end of + # the value list. + self.insert(len(self.value), location) + + def extend(self, other): + if isinstance(other, StoreLocations): + locations = other.value + else: + locations = list(other) + + for location in locations: + self.append(location) + + def insert(self, i, location): + _check_image_location(self.image_proxy.context, + self.image_proxy.store_api, + self.image_proxy.store_utils, + location) + location['status'] = 'active' + if _count_duplicated_locations(self.value, location) > 0: + raise exception.DuplicateLocation(location=location['url']) + + self.value.insert(i, location) + _set_image_size(self.image_proxy.context, + self.image_proxy, + [location]) + + def pop(self, i=-1): + location = self.value.pop(i) + try: + self.image_proxy.store_utils.delete_image_location_from_backend( + self.image_proxy.context, + self.image_proxy.image.image_id, + location) + except Exception: + with excutils.save_and_reraise_exception(): + self.value.insert(i, location) + return location + + def count(self, location): + return self.value.count(location) + + def index(self, location, *args): + return self.value.index(location, *args) + + def remove(self, location): + if self.count(location): + self.pop(self.index(location)) + else: + self.value.remove(location) + + def reverse(self): + self.value.reverse() + + # Mutable sequence, so not hashable + __hash__ = None + + def __getitem__(self, i): + return self.value.__getitem__(i) + + def __setitem__(self, i, location): + _check_image_location(self.image_proxy.context, + self.image_proxy.store_api, + self.image_proxy.store_utils, + location) + location['status'] = 'active' + self.value.__setitem__(i, location) + _set_image_size(self.image_proxy.context, + self.image_proxy, + [location]) + + def __delitem__(self, i): + location = None + try: + location = self.value.__getitem__(i) + except Exception: + return self.value.__delitem__(i) + 
self.image_proxy.store_utils.delete_image_location_from_backend( + self.image_proxy.context, + self.image_proxy.image.image_id, + location) + self.value.__delitem__(i) + + def __delslice__(self, i, j): + i = max(i, 0) + j = max(j, 0) + locations = [] + try: + locations = self.value.__getslice__(i, j) + except Exception: + return self.value.__delslice__(i, j) + for location in locations: + self.image_proxy.store_utils.delete_image_location_from_backend( + self.image_proxy.context, + self.image_proxy.image.image_id, + location) + self.value.__delitem__(i) + + def __iadd__(self, other): + self.extend(other) + return self + + def __contains__(self, location): + return location in self.value + + def __len__(self): + return len(self.value) + + def __cast(self, other): + if isinstance(other, StoreLocations): + return other.value + else: + return other + + def __cmp__(self, other): + return cmp(self.value, self.__cast(other)) + + def __iter__(self): + return iter(self.value) + + def __copy__(self): + return type(self)(self.image_proxy, self.value) + + def __deepcopy__(self, memo): + # NOTE(zhiyan): Only copy location entries, others can be reused. + value = copy.deepcopy(self.value, memo) + self.image_proxy.image.locations = value + return type(self)(self.image_proxy, value) + + +def _locations_proxy(target, attr): + """ + Make a location property proxy on the image object. + + :param target: the image object on which to add the proxy + :param attr: the property proxy we want to hook + """ + def get_attr(self): + value = getattr(getattr(self, target), attr) + return StoreLocations(self, value) + + def set_attr(self, value): + if not isinstance(value, (list, StoreLocations)): + reason = _('Invalid locations') + raise exception.BadStoreUri(message=reason) + ori_value = getattr(getattr(self, target), attr) + if ori_value != value: + # NOTE(zhiyan): Enforced locations list was previously empty list. + if len(ori_value) > 0: + raise exception.Invalid(_('Original locations is not empty: ' + '%s') % ori_value) + # NOTE(zhiyan): Check locations are all valid. 
+ for location in value: + _check_image_location(self.context, + self.store_api, + self.store_utils, + location) + location['status'] = 'active' + if _count_duplicated_locations(value, location) > 1: + raise exception.DuplicateLocation(location=location['url']) + _set_image_size(self.context, getattr(self, target), value) + return setattr(getattr(self, target), attr, list(value)) + + def del_attr(self): + value = getattr(getattr(self, target), attr) + while len(value): + self.store_utils.delete_image_location_from_backend( + self.context, + self.image.image_id, + value[0]) + del value[0] + setattr(getattr(self, target), attr, value) + return delattr(getattr(self, target), attr) + + return property(get_attr, set_attr, del_attr) + + +class ImageProxy(daisy.domain.proxy.Image): + + locations = _locations_proxy('image', 'locations') + + def __init__(self, image, context, store_api, store_utils): + self.image = image + self.context = context + self.store_api = store_api + self.store_utils = store_utils + proxy_kwargs = { + 'context': context, + 'image': self, + 'store_api': store_api, + } + super(ImageProxy, self).__init__( + image, member_repo_proxy_class=ImageMemberRepoProxy, + member_repo_proxy_kwargs=proxy_kwargs) + + def delete(self): + self.image.delete() + if self.image.locations: + for location in self.image.locations: + self.store_utils.delete_image_location_from_backend( + self.context, + self.image.image_id, + location) + + def set_data(self, data, size=None): + if size is None: + size = 0 # NOTE(markwash): zero -> unknown size + location, size, checksum, loc_meta = self.store_api.add_to_backend( + CONF, + self.image.image_id, + utils.LimitingReader(utils.CooperativeReader(data), + CONF.image_size_cap), + size, + context=self.context) + self.image.locations = [{'url': location, 'metadata': loc_meta, + 'status': 'active'}] + self.image.size = size + self.image.checksum = checksum + self.image.status = 'active' + + def get_data(self, offset=0, chunk_size=None): + if not self.image.locations: + raise store.NotFound(_("No image data could be found")) + err = None + for loc in self.image.locations: + try: + data, size = self.store_api.get_from_backend( + loc['url'], + offset=offset, + chunk_size=chunk_size, + context=self.context) + + return data + except Exception as e: + LOG.warn(_('Get image %(id)s data failed: ' + '%(err)s.') % {'id': self.image.image_id, + 'err': utils.exception_to_str(e)}) + err = e + # tried all locations + LOG.error(_LE('Glance tried all active locations to get data for ' + 'image %s but all have failed.') % self.image.image_id) + raise err + + +class ImageMemberRepoProxy(daisy.domain.proxy.Repo): + def __init__(self, repo, image, context, store_api): + self.repo = repo + self.image = image + self.context = context + self.store_api = store_api + super(ImageMemberRepoProxy, self).__init__(repo) + + def _set_acls(self): + public = self.image.visibility == 'public' + if self.image.locations and not public: + member_ids = [m.member_id for m in self.repo.list()] + for location in self.image.locations: + self.store_api.set_acls(location['url'], public=public, + read_tenants=member_ids, + context=self.context) + + def add(self, member): + super(ImageMemberRepoProxy, self).add(member) + self._set_acls() + + def remove(self, member): + super(ImageMemberRepoProxy, self).remove(member) + self._set_acls() diff --git a/code/daisy/daisy/notifier.py b/code/daisy/daisy/notifier.py new file mode 100755 index 00000000..a90a61fa --- /dev/null +++ b/code/daisy/daisy/notifier.py @@ 
-0,0 +1,820 @@ +# Copyright 2011, OpenStack Foundation +# Copyright 2012, Red Hat, Inc. +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +import glance_store +from oslo_config import cfg +from oslo_log import log as logging +import oslo_messaging +from oslo_utils import excutils +from oslo_utils import timeutils +import six +import webob + +from daisy.common import exception +from daisy.common import utils +from daisy.domain import proxy as domain_proxy +from daisy import i18n + +_ = i18n._ +_LE = i18n._LE + +notifier_opts = [ + cfg.StrOpt('default_publisher_id', default="image.localhost", + help='Default publisher_id for outgoing notifications.'), + cfg.ListOpt('disabled_notifications', default=[], + help='List of disabled notifications. A notification can be ' + 'given either as a notification type to disable a single ' + 'event, or as a notification group prefix to disable all ' + 'events within a group. Example: if this config option ' + 'is set to ["image.create", "metadef_namespace"], then ' + '"image.create" notification will not be sent after ' + 'image is created and none of the notifications for ' + 'metadefinition namespaces will be sent.'), +] + +CONF = cfg.CONF +CONF.register_opts(notifier_opts) + +LOG = logging.getLogger(__name__) + +_ALIASES = { + 'daisy.openstack.common.rpc.impl_kombu': 'rabbit', + 'daisy.openstack.common.rpc.impl_qpid': 'qpid', + 'daisy.openstack.common.rpc.impl_zmq': 'zmq', +} + + +def get_transport(): + return oslo_messaging.get_transport(CONF, aliases=_ALIASES) + + +class Notifier(object): + """Uses a notification strategy to send out messages about events.""" + + def __init__(self): + publisher_id = CONF.default_publisher_id + self._transport = get_transport() + self._notifier = oslo_messaging.Notifier(self._transport, + publisher_id=publisher_id) + + def warn(self, event_type, payload): + self._notifier.warn({}, event_type, payload) + + def info(self, event_type, payload): + self._notifier.info({}, event_type, payload) + + def error(self, event_type, payload): + self._notifier.error({}, event_type, payload) + + +def _get_notification_group(notification): + return notification.split('.', 1)[0] + + +def _is_notification_enabled(notification): + disabled_notifications = CONF.disabled_notifications + notification_group = _get_notification_group(notification) + + notifications = (notification, notification_group) + for disabled_notification in disabled_notifications: + if disabled_notification in notifications: + return False + + return True + + +def _send_notification(notify, notification_type, payload): + if _is_notification_enabled(notification_type): + notify(notification_type, payload) + + +def format_image_notification(image): + """ + Given a daisy.domain.Image object, return a dictionary of relevant + notification information. We purposely do not include 'location' + as it may contain credentials. 
+ """ + return { + 'id': image.image_id, + 'name': image.name, + 'status': image.status, + 'created_at': timeutils.isotime(image.created_at), + 'updated_at': timeutils.isotime(image.updated_at), + 'min_disk': image.min_disk, + 'min_ram': image.min_ram, + 'protected': image.protected, + 'checksum': image.checksum, + 'owner': image.owner, + 'disk_format': image.disk_format, + 'container_format': image.container_format, + 'size': image.size, + 'is_public': image.visibility == 'public', + 'properties': dict(image.extra_properties), + 'tags': list(image.tags), + 'deleted': False, + 'deleted_at': None, + } + + +def format_task_notification(task): + # NOTE(nikhil): input is not passed to the notifier payload as it may + # contain sensitive info. + return { + 'id': task.task_id, + 'type': task.type, + 'status': task.status, + 'result': None, + 'owner': task.owner, + 'message': None, + 'expires_at': timeutils.isotime(task.expires_at), + 'created_at': timeutils.isotime(task.created_at), + 'updated_at': timeutils.isotime(task.updated_at), + 'deleted': False, + 'deleted_at': None, + } + + +def format_metadef_namespace_notification(metadef_namespace): + return { + 'namespace': metadef_namespace.namespace, + 'namespace_old': metadef_namespace.namespace, + 'display_name': metadef_namespace.display_name, + 'protected': metadef_namespace.protected, + 'visibility': metadef_namespace.visibility, + 'owner': metadef_namespace.owner, + 'description': metadef_namespace.description, + 'created_at': timeutils.isotime(metadef_namespace.created_at), + 'updated_at': timeutils.isotime(metadef_namespace.updated_at), + 'deleted': False, + 'deleted_at': None, + } + + +def format_metadef_object_notification(metadef_object): + object_properties = metadef_object.properties or {} + properties = [] + for name, prop in six.iteritems(object_properties): + object_property = _format_metadef_object_property(name, prop) + properties.append(object_property) + + return { + 'namespace': metadef_object.namespace, + 'name': metadef_object.name, + 'name_old': metadef_object.name, + 'properties': properties, + 'required': metadef_object.required, + 'description': metadef_object.description, + 'created_at': timeutils.isotime(metadef_object.created_at), + 'updated_at': timeutils.isotime(metadef_object.updated_at), + 'deleted': False, + 'deleted_at': None, + } + + +def _format_metadef_object_property(name, metadef_property): + return { + 'name': name, + 'type': metadef_property.type or None, + 'title': metadef_property.title or None, + 'description': metadef_property.description or None, + 'default': metadef_property.default or None, + 'minimum': metadef_property.minimum or None, + 'maximum': metadef_property.maximum or None, + 'enum': metadef_property.enum or None, + 'pattern': metadef_property.pattern or None, + 'minLength': metadef_property.minLength or None, + 'maxLength': metadef_property.maxLength or None, + 'confidential': metadef_property.confidential or None, + 'items': metadef_property.items or None, + 'uniqueItems': metadef_property.uniqueItems or None, + 'minItems': metadef_property.minItems or None, + 'maxItems': metadef_property.maxItems or None, + 'additionalItems': metadef_property.additionalItems or None, + } + + +def format_metadef_property_notification(metadef_property): + schema = metadef_property.schema + + return { + 'namespace': metadef_property.namespace, + 'name': metadef_property.name, + 'name_old': metadef_property.name, + 'type': schema.get('type'), + 'title': schema.get('title'), + 'description': 
schema.get('description'), + 'default': schema.get('default'), + 'minimum': schema.get('minimum'), + 'maximum': schema.get('maximum'), + 'enum': schema.get('enum'), + 'pattern': schema.get('pattern'), + 'minLength': schema.get('minLength'), + 'maxLength': schema.get('maxLength'), + 'confidential': schema.get('confidential'), + 'items': schema.get('items'), + 'uniqueItems': schema.get('uniqueItems'), + 'minItems': schema.get('minItems'), + 'maxItems': schema.get('maxItems'), + 'additionalItems': schema.get('additionalItems'), + 'deleted': False, + 'deleted_at': None, + } + + +def format_metadef_resource_type_notification(metadef_resource_type): + return { + 'namespace': metadef_resource_type.namespace, + 'name': metadef_resource_type.name, + 'name_old': metadef_resource_type.name, + 'prefix': metadef_resource_type.prefix, + 'properties_target': metadef_resource_type.properties_target, + 'created_at': timeutils.isotime(metadef_resource_type.created_at), + 'updated_at': timeutils.isotime(metadef_resource_type.updated_at), + 'deleted': False, + 'deleted_at': None, + } + + +def format_metadef_tag_notification(metadef_tag): + return { + 'namespace': metadef_tag.namespace, + 'name': metadef_tag.name, + 'name_old': metadef_tag.name, + 'created_at': timeutils.isotime(metadef_tag.created_at), + 'updated_at': timeutils.isotime(metadef_tag.updated_at), + 'deleted': False, + 'deleted_at': None, + } + + +class NotificationBase(object): + def get_payload(self, obj): + return {} + + def send_notification(self, notification_id, obj, extra_payload=None): + payload = self.get_payload(obj) + if extra_payload is not None: + payload.update(extra_payload) + + _send_notification(self.notifier.info, notification_id, payload) + + +@six.add_metaclass(abc.ABCMeta) +class NotificationProxy(NotificationBase): + def __init__(self, repo, context, notifier): + self.repo = repo + self.context = context + self.notifier = notifier + + super_class = self.get_super_class() + super_class.__init__(self, repo) + + @abc.abstractmethod + def get_super_class(self): + pass + + +@six.add_metaclass(abc.ABCMeta) +class NotificationRepoProxy(NotificationBase): + def __init__(self, repo, context, notifier): + self.repo = repo + self.context = context + self.notifier = notifier + proxy_kwargs = {'context': self.context, 'notifier': self.notifier} + + proxy_class = self.get_proxy_class() + super_class = self.get_super_class() + super_class.__init__(self, repo, proxy_class, proxy_kwargs) + + @abc.abstractmethod + def get_super_class(self): + pass + + @abc.abstractmethod + def get_proxy_class(self): + pass + + +@six.add_metaclass(abc.ABCMeta) +class NotificationFactoryProxy(object): + def __init__(self, factory, context, notifier): + kwargs = {'context': context, 'notifier': notifier} + + proxy_class = self.get_proxy_class() + super_class = self.get_super_class() + super_class.__init__(self, factory, proxy_class, kwargs) + + @abc.abstractmethod + def get_super_class(self): + pass + + @abc.abstractmethod + def get_proxy_class(self): + pass + + +class ImageProxy(NotificationProxy, domain_proxy.Image): + def get_super_class(self): + return domain_proxy.Image + + def get_payload(self, obj): + return format_image_notification(obj) + + def _format_image_send(self, bytes_sent): + return { + 'bytes_sent': bytes_sent, + 'image_id': self.repo.image_id, + 'owner_id': self.repo.owner, + 'receiver_tenant_id': self.context.tenant, + 'receiver_user_id': self.context.user, + } + + def _get_chunk_data_iterator(self, data, chunk_size=None): + sent = 0 + for 
chunk in data: + yield chunk + sent += len(chunk) + + if sent != (chunk_size or self.repo.size): + notify = self.notifier.error + else: + notify = self.notifier.info + + try: + _send_notification(notify, 'image.send', + self._format_image_send(sent)) + except Exception as err: + msg = (_LE("An error occurred during image.send" + " notification: %(err)s") % {'err': err}) + LOG.error(msg) + + def get_data(self, offset=0, chunk_size=None): + # Due to the need of evaluating subsequent proxies, this one + # should return a generator, the call should be done before + # generator creation + data = self.repo.get_data(offset=offset, chunk_size=chunk_size) + return self._get_chunk_data_iterator(data, chunk_size=chunk_size) + + def set_data(self, data, size=None): + self.send_notification('image.prepare', self.repo) + + notify_error = self.notifier.error + try: + self.repo.set_data(data, size) + except glance_store.StorageFull as e: + msg = (_("Image storage media is full: %s") % + utils.exception_to_str(e)) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg) + except glance_store.StorageWriteDenied as e: + msg = (_("Insufficient permissions on image storage media: %s") + % utils.exception_to_str(e)) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPServiceUnavailable(explanation=msg) + except ValueError as e: + msg = (_("Cannot save data for image %(image_id)s: %(error)s") % + {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPBadRequest( + explanation=utils.exception_to_str(e)) + except exception.Duplicate as e: + msg = (_("Unable to upload duplicate image data for image" + "%(image_id)s: %(error)s") % + {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPConflict(explanation=msg) + except exception.Forbidden as e: + msg = (_("Not allowed to upload image data for image %(image_id)s:" + " %(error)s") % {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPForbidden(explanation=msg) + except exception.NotFound as e: + msg = (_("Image %(image_id)s could not be found after upload." 
+ " The image may have been deleted during the upload:" + " %(error)s") % {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + raise webob.exc.HTTPNotFound(explanation=utils.exception_to_str(e)) + except webob.exc.HTTPError as e: + with excutils.save_and_reraise_exception(): + msg = (_("Failed to upload image data for image %(image_id)s" + " due to HTTP error: %(error)s") % + {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + except Exception as e: + with excutils.save_and_reraise_exception(): + msg = (_("Failed to upload image data for image %(image_id)s " + "due to internal error: %(error)s") % + {'image_id': self.repo.image_id, + 'error': utils.exception_to_str(e)}) + _send_notification(notify_error, 'image.upload', msg) + else: + self.send_notification('image.upload', self.repo) + self.send_notification('image.activate', self.repo) + + +class ImageFactoryProxy(NotificationFactoryProxy, domain_proxy.ImageFactory): + def get_super_class(self): + return domain_proxy.ImageFactory + + def get_proxy_class(self): + return ImageProxy + + +class ImageRepoProxy(NotificationRepoProxy, domain_proxy.Repo): + def get_super_class(self): + return domain_proxy.Repo + + def get_proxy_class(self): + return ImageProxy + + def get_payload(self, obj): + return format_image_notification(obj) + + def save(self, image, from_state=None): + super(ImageRepoProxy, self).save(image, from_state=from_state) + self.send_notification('image.update', image) + + def add(self, image): + super(ImageRepoProxy, self).add(image) + self.send_notification('image.create', image) + + def remove(self, image): + super(ImageRepoProxy, self).remove(image) + self.send_notification('image.delete', image, extra_payload={ + 'deleted': True, 'deleted_at': timeutils.isotime() + }) + + +class TaskProxy(NotificationProxy, domain_proxy.Task): + def get_super_class(self): + return domain_proxy.Task + + def get_payload(self, obj): + return format_task_notification(obj) + + def begin_processing(self): + super(TaskProxy, self).begin_processing() + self.send_notification('task.processing', self.repo) + + def succeed(self, result): + super(TaskProxy, self).succeed(result) + self.send_notification('task.success', self.repo) + + def fail(self, message): + super(TaskProxy, self).fail(message) + self.send_notification('task.failure', self.repo) + + def run(self, executor): + super(TaskProxy, self).run(executor) + self.send_notification('task.run', self.repo) + + +class TaskFactoryProxy(NotificationFactoryProxy, domain_proxy.TaskFactory): + def get_super_class(self): + return domain_proxy.TaskFactory + + def get_proxy_class(self): + return TaskProxy + + +class TaskRepoProxy(NotificationRepoProxy, domain_proxy.TaskRepo): + def get_super_class(self): + return domain_proxy.TaskRepo + + def get_proxy_class(self): + return TaskProxy + + def get_payload(self, obj): + return format_task_notification(obj) + + def add(self, task): + result = super(TaskRepoProxy, self).add(task) + self.send_notification('task.create', task) + return result + + def remove(self, task): + result = super(TaskRepoProxy, self).remove(task) + self.send_notification('task.delete', task, extra_payload={ + 'deleted': True, 'deleted_at': timeutils.isotime() + }) + return result + + +class TaskStubProxy(NotificationProxy, domain_proxy.TaskStub): + def get_super_class(self): + return domain_proxy.TaskStub + + +class 
TaskStubRepoProxy(NotificationRepoProxy, domain_proxy.TaskStubRepo): + def get_super_class(self): + return domain_proxy.TaskStubRepo + + def get_proxy_class(self): + return TaskStubProxy + + +class MetadefNamespaceProxy(NotificationProxy, domain_proxy.MetadefNamespace): + def get_super_class(self): + return domain_proxy.MetadefNamespace + + +class MetadefNamespaceFactoryProxy(NotificationFactoryProxy, + domain_proxy.MetadefNamespaceFactory): + def get_super_class(self): + return domain_proxy.MetadefNamespaceFactory + + def get_proxy_class(self): + return MetadefNamespaceProxy + + +class MetadefNamespaceRepoProxy(NotificationRepoProxy, + domain_proxy.MetadefNamespaceRepo): + def get_super_class(self): + return domain_proxy.MetadefNamespaceRepo + + def get_proxy_class(self): + return MetadefNamespaceProxy + + def get_payload(self, obj): + return format_metadef_namespace_notification(obj) + + def save(self, metadef_namespace): + name = getattr(metadef_namespace, '_old_namespace', + metadef_namespace.namespace) + result = super(MetadefNamespaceRepoProxy, self).save(metadef_namespace) + self.send_notification( + 'metadef_namespace.update', metadef_namespace, + extra_payload={ + 'namespace_old': name, + }) + return result + + def add(self, metadef_namespace): + result = super(MetadefNamespaceRepoProxy, self).add(metadef_namespace) + self.send_notification('metadef_namespace.create', metadef_namespace) + return result + + def remove(self, metadef_namespace): + result = super(MetadefNamespaceRepoProxy, self).remove( + metadef_namespace) + self.send_notification( + 'metadef_namespace.delete', metadef_namespace, + extra_payload={'deleted': True, 'deleted_at': timeutils.isotime()} + ) + return result + + def remove_objects(self, metadef_namespace): + result = super(MetadefNamespaceRepoProxy, self).remove_objects( + metadef_namespace) + self.send_notification('metadef_namespace.delete_objects', + metadef_namespace) + return result + + def remove_properties(self, metadef_namespace): + result = super(MetadefNamespaceRepoProxy, self).remove_properties( + metadef_namespace) + self.send_notification('metadef_namespace.delete_properties', + metadef_namespace) + return result + + def remove_tags(self, metadef_namespace): + result = super(MetadefNamespaceRepoProxy, self).remove_tags( + metadef_namespace) + self.send_notification('metadef_namespace.delete_tags', + metadef_namespace) + return result + + +class MetadefObjectProxy(NotificationProxy, domain_proxy.MetadefObject): + def get_super_class(self): + return domain_proxy.MetadefObject + + +class MetadefObjectFactoryProxy(NotificationFactoryProxy, + domain_proxy.MetadefObjectFactory): + def get_super_class(self): + return domain_proxy.MetadefObjectFactory + + def get_proxy_class(self): + return MetadefObjectProxy + + +class MetadefObjectRepoProxy(NotificationRepoProxy, + domain_proxy.MetadefObjectRepo): + def get_super_class(self): + return domain_proxy.MetadefObjectRepo + + def get_proxy_class(self): + return MetadefObjectProxy + + def get_payload(self, obj): + return format_metadef_object_notification(obj) + + def save(self, metadef_object): + name = getattr(metadef_object, '_old_name', metadef_object.name) + result = super(MetadefObjectRepoProxy, self).save(metadef_object) + self.send_notification( + 'metadef_object.update', metadef_object, + extra_payload={ + 'namespace': metadef_object.namespace.namespace, + 'name_old': name, + }) + return result + + def add(self, metadef_object): + result = super(MetadefObjectRepoProxy, self).add(metadef_object) + 
self.send_notification('metadef_object.create', metadef_object) + return result + + def remove(self, metadef_object): + result = super(MetadefObjectRepoProxy, self).remove(metadef_object) + self.send_notification( + 'metadef_object.delete', metadef_object, + extra_payload={ + 'deleted': True, + 'deleted_at': timeutils.isotime(), + 'namespace': metadef_object.namespace.namespace + } + ) + return result + + +class MetadefPropertyProxy(NotificationProxy, domain_proxy.MetadefProperty): + def get_super_class(self): + return domain_proxy.MetadefProperty + + +class MetadefPropertyFactoryProxy(NotificationFactoryProxy, + domain_proxy.MetadefPropertyFactory): + def get_super_class(self): + return domain_proxy.MetadefPropertyFactory + + def get_proxy_class(self): + return MetadefPropertyProxy + + +class MetadefPropertyRepoProxy(NotificationRepoProxy, + domain_proxy.MetadefPropertyRepo): + def get_super_class(self): + return domain_proxy.MetadefPropertyRepo + + def get_proxy_class(self): + return MetadefPropertyProxy + + def get_payload(self, obj): + return format_metadef_property_notification(obj) + + def save(self, metadef_property): + name = getattr(metadef_property, '_old_name', metadef_property.name) + result = super(MetadefPropertyRepoProxy, self).save(metadef_property) + self.send_notification( + 'metadef_property.update', metadef_property, + extra_payload={ + 'namespace': metadef_property.namespace.namespace, + 'name_old': name, + }) + return result + + def add(self, metadef_property): + result = super(MetadefPropertyRepoProxy, self).add(metadef_property) + self.send_notification('metadef_property.create', metadef_property) + return result + + def remove(self, metadef_property): + result = super(MetadefPropertyRepoProxy, self).remove(metadef_property) + self.send_notification( + 'metadef_property.delete', metadef_property, + extra_payload={ + 'deleted': True, + 'deleted_at': timeutils.isotime(), + 'namespace': metadef_property.namespace.namespace + } + ) + return result + + +class MetadefResourceTypeProxy(NotificationProxy, + domain_proxy.MetadefResourceType): + def get_super_class(self): + return domain_proxy.MetadefResourceType + + +class MetadefResourceTypeFactoryProxy(NotificationFactoryProxy, + domain_proxy.MetadefResourceTypeFactory): + def get_super_class(self): + return domain_proxy.MetadefResourceTypeFactory + + def get_proxy_class(self): + return MetadefResourceTypeProxy + + +class MetadefResourceTypeRepoProxy(NotificationRepoProxy, + domain_proxy.MetadefResourceTypeRepo): + def get_super_class(self): + return domain_proxy.MetadefResourceTypeRepo + + def get_proxy_class(self): + return MetadefResourceTypeProxy + + def get_payload(self, obj): + return format_metadef_resource_type_notification(obj) + + def add(self, md_resource_type): + result = super(MetadefResourceTypeRepoProxy, self).add( + md_resource_type) + self.send_notification('metadef_resource_type.create', + md_resource_type) + return result + + def remove(self, md_resource_type): + result = super(MetadefResourceTypeRepoProxy, self).remove( + md_resource_type) + self.send_notification( + 'metadef_resource_type.delete', md_resource_type, + extra_payload={ + 'deleted': True, + 'deleted_at': timeutils.isotime(), + 'namespace': md_resource_type.namespace.namespace + } + ) + return result + + +class MetadefTagProxy(NotificationProxy, domain_proxy.MetadefTag): + def get_super_class(self): + return domain_proxy.MetadefTag + + +class MetadefTagFactoryProxy(NotificationFactoryProxy, + domain_proxy.MetadefTagFactory): + def 
get_super_class(self): + return domain_proxy.MetadefTagFactory + + def get_proxy_class(self): + return MetadefTagProxy + + +class MetadefTagRepoProxy(NotificationRepoProxy, domain_proxy.MetadefTagRepo): + def get_super_class(self): + return domain_proxy.MetadefTagRepo + + def get_proxy_class(self): + return MetadefTagProxy + + def get_payload(self, obj): + return format_metadef_tag_notification(obj) + + def save(self, metadef_tag): + name = getattr(metadef_tag, '_old_name', metadef_tag.name) + result = super(MetadefTagRepoProxy, self).save(metadef_tag) + self.send_notification( + 'metadef_tag.update', metadef_tag, + extra_payload={ + 'namespace': metadef_tag.namespace.namespace, + 'name_old': name, + }) + return result + + def add(self, metadef_tag): + result = super(MetadefTagRepoProxy, self).add(metadef_tag) + self.send_notification('metadef_tag.create', metadef_tag) + return result + + def add_tags(self, metadef_tags): + result = super(MetadefTagRepoProxy, self).add_tags(metadef_tags) + for metadef_tag in metadef_tags: + self.send_notification('metadef_tag.create', metadef_tag) + + return result + + def remove(self, metadef_tag): + result = super(MetadefTagRepoProxy, self).remove(metadef_tag) + self.send_notification( + 'metadef_tag.delete', metadef_tag, + extra_payload={ + 'deleted': True, + 'deleted_at': timeutils.isotime(), + 'namespace': metadef_tag.namespace.namespace + } + ) + return result diff --git a/code/daisy/daisy/openstack/__init__.py b/code/daisy/daisy/openstack/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/openstack/common/README b/code/daisy/daisy/openstack/common/README new file mode 100755 index 00000000..04a61664 --- /dev/null +++ b/code/daisy/daisy/openstack/common/README @@ -0,0 +1,16 @@ +oslo-incubator +-------------- + +A number of modules from oslo-incubator are imported into this project. +You can clone the oslo-incubator repository using the following url: + + git://git.openstack.org/openstack/oslo-incubator + +These modules are "incubating" in oslo-incubator and are kept in sync +with the help of oslo-incubator's update.py script. See: + + https://wiki.openstack.org/wiki/Oslo#Syncing_Code_from_Incubator + +The copy of the code should never be directly modified here. Please +always update oslo-incubator first and then run the script to copy +the changes across. diff --git a/code/daisy/daisy/openstack/common/__init__.py b/code/daisy/daisy/openstack/common/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/openstack/common/_i18n.py b/code/daisy/daisy/openstack/common/_i18n.py new file mode 100755 index 00000000..2822a054 --- /dev/null +++ b/code/daisy/daisy/openstack/common/_i18n.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. 
+ +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +try: + import oslo_i18n + + # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the + # application name when this module is synced into the separate + # repository. It is OK to have more than one translation function + # using the same domain, since there will still only be one message + # catalog. + _translators = oslo_i18n.TranslatorFactory(domain='glance') + + # The primary translation function using the well-known name "_" + _ = _translators.primary + + # Translators for log levels. + # + # The abbreviated names are meant to reflect the usual use of a short + # name like '_'. The "L" is for "log" and the other letter comes from + # the level. + _LI = _translators.log_info + _LW = _translators.log_warning + _LE = _translators.log_error + _LC = _translators.log_critical +except ImportError: + # NOTE(dims): Support for cases where a project wants to use + # code from oslo-incubator, but is not ready to be internationalized + # (like tempest) + _ = _LI = _LW = _LE = _LC = lambda x: x diff --git a/code/daisy/daisy/openstack/common/eventlet_backdoor.py b/code/daisy/daisy/openstack/common/eventlet_backdoor.py new file mode 100755 index 00000000..4eae75eb --- /dev/null +++ b/code/daisy/daisy/openstack/common/eventlet_backdoor.py @@ -0,0 +1,151 @@ +# Copyright (c) 2012 OpenStack Foundation. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import copy +import errno +import gc +import logging +import os +import pprint +import socket +import sys +import traceback + +import eventlet.backdoor +import greenlet +from oslo_config import cfg + +from daisy.openstack.common._i18n import _LI + +help_for_backdoor_port = ( + "Acceptable values are 0, , and :, where 0 results " + "in listening on a random tcp port number; results in listening " + "on the specified port number (and not enabling backdoor if that port " + "is in use); and : results in listening on the smallest " + "unused port number within the specified range of port numbers. The " + "chosen port is displayed in the service's log file.") +eventlet_backdoor_opts = [ + cfg.StrOpt('backdoor_port', + help="Enable eventlet backdoor. %s" % help_for_backdoor_port) +] + +CONF = cfg.CONF +CONF.register_opts(eventlet_backdoor_opts) +LOG = logging.getLogger(__name__) + + +def list_opts(): + """Entry point for oslo-config-generator. + """ + return [(None, copy.deepcopy(eventlet_backdoor_opts))] + + +class EventletBackdoorConfigValueError(Exception): + def __init__(self, port_range, help_msg, ex): + msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' + '%(help)s' % + {'range': port_range, 'ex': ex, 'help': help_msg}) + super(EventletBackdoorConfigValueError, self).__init__(msg) + self.port_range = port_range + + +def _dont_use_this(): + print("Don't use this, just disconnect instead") + + +def _find_objects(t): + return [o for o in gc.get_objects() if isinstance(o, t)] + + +def _print_greenthreads(): + for i, gt in enumerate(_find_objects(greenlet.greenlet)): + print(i, gt) + traceback.print_stack(gt.gr_frame) + print() + + +def _print_nativethreads(): + for threadId, stack in sys._current_frames().items(): + print(threadId) + traceback.print_stack(stack) + print() + + +def _parse_port_range(port_range): + if ':' not in port_range: + start, end = port_range, port_range + else: + start, end = port_range.split(':', 1) + try: + start, end = int(start), int(end) + if end < start: + raise ValueError + return start, end + except ValueError as ex: + raise EventletBackdoorConfigValueError(port_range, ex, + help_for_backdoor_port) + + +def _listen(host, start_port, end_port, listen_func): + try_port = start_port + while True: + try: + return listen_func((host, try_port)) + except socket.error as exc: + if (exc.errno != errno.EADDRINUSE or + try_port >= end_port): + raise + try_port += 1 + + +def initialize_if_enabled(): + backdoor_locals = { + 'exit': _dont_use_this, # So we don't exit the entire process + 'quit': _dont_use_this, # So we don't exit the entire process + 'fo': _find_objects, + 'pgt': _print_greenthreads, + 'pnt': _print_nativethreads, + } + + if CONF.backdoor_port is None: + return None + + start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) + + # NOTE(johannes): The standard sys.displayhook will print the value of + # the last expression and set it to __builtin__._, which overwrites + # the __builtin__._ that gettext sets. Let's switch to using pprint + # since it won't interact poorly with gettext, and it's easier to + # read the output too. + def displayhook(val): + if val is not None: + pprint.pprint(val) + sys.displayhook = displayhook + + sock = _listen('localhost', start_port, end_port, eventlet.listen) + + # In the case of backdoor port being zero, a port number is assigned by + # listen(). In any case, pull the port number out here. + port = sock.getsockname()[1] + LOG.info( + _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % + {'port': port, 'pid': os.getpid()} + ) + eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, + locals=backdoor_locals) + return port diff --git a/code/daisy/daisy/openstack/common/fileutils.py b/code/daisy/daisy/openstack/common/fileutils.py new file mode 100755 index 00000000..9097c35d --- /dev/null +++ b/code/daisy/daisy/openstack/common/fileutils.py @@ -0,0 +1,149 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
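The eventlet_backdoor module above interprets backdoor_port as either 0 (bind a random free port), a single port number, or a "start:end" range in which the first unused port is taken. A minimal standalone sketch of that interpretation, not using the module's own _parse_port_range helper:

```python
# Sketch of how eventlet_backdoor's backdoor_port setting is interpreted.
# The real module passes the resolved range to eventlet.listen() and spawns
# eventlet.backdoor.backdoor_server on the first port that binds.

def parse_port_range(port_range):
    """Accept "0", "3075" or "3000:3100" and return (start, end)."""
    if ':' not in port_range:
        start = end = int(port_range)
    else:
        start, end = (int(p) for p in port_range.split(':', 1))
    if end < start:
        raise ValueError("end of port range is smaller than start")
    return start, end

print(parse_port_range("0"))          # (0, 0): bind a random free port
print(parse_port_range("3075"))       # (3075, 3075): fixed port
print(parse_port_range("3000:3100"))  # first unused port in the range
```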
+ +import contextlib +import errno +import logging +import os +import stat +import tempfile + +from oslo_utils import excutils + +LOG = logging.getLogger(__name__) + +_FILE_CACHE = {} +DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO + + +def ensure_tree(path, mode=DEFAULT_MODE): + """Create a directory (and any ancestor directories required) + + :param path: Directory to create + :param mode: Directory creation permissions + """ + try: + os.makedirs(path, mode) + except OSError as exc: + if exc.errno == errno.EEXIST: + if not os.path.isdir(path): + raise + else: + raise + + +def read_cached_file(filename, force_reload=False): + """Read from a file if it has been modified. + + :param force_reload: Whether to reload the file. + :returns: A tuple with a boolean specifying if the data is fresh + or not. + """ + global _FILE_CACHE + + if force_reload: + delete_cached_file(filename) + + reloaded = False + mtime = os.path.getmtime(filename) + cache_info = _FILE_CACHE.setdefault(filename, {}) + + if not cache_info or mtime > cache_info.get('mtime', 0): + LOG.debug("Reloading cached file %s" % filename) + with open(filename) as fap: + cache_info['data'] = fap.read() + cache_info['mtime'] = mtime + reloaded = True + return (reloaded, cache_info['data']) + + +def delete_cached_file(filename): + """Delete cached file if present. + + :param filename: filename to delete + """ + global _FILE_CACHE + + if filename in _FILE_CACHE: + del _FILE_CACHE[filename] + + +def delete_if_exists(path, remove=os.unlink): + """Delete a file, but ignore file not found error. + + :param path: File to delete + :param remove: Optional function to remove passed path + """ + + try: + remove(path) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + +@contextlib.contextmanager +def remove_path_on_error(path, remove=delete_if_exists): + """Protect code that wants to operate on PATH atomically. + Any exception will cause PATH to be removed. + + :param path: File to work with + :param remove: Optional function to remove passed path + """ + + try: + yield + except Exception: + with excutils.save_and_reraise_exception(): + remove(path) + + +def file_open(*args, **kwargs): + """Open file + + see built-in open() documentation for more details + + Note: The reason this is kept in a separate module is to easily + be able to provide a stub module that doesn't alter system + state at all (for unit tests) + """ + return open(*args, **kwargs) + + +def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): + """Create temporary file or use existing file. + + This util is needed for creating temporary file with + specified content, suffix and prefix. If path is not None, + it will be used for writing content. If the path doesn't + exist it'll be created. + + :param content: content for temporary file. + :param path: same as parameter 'dir' for mkstemp + :param suffix: same as parameter 'suffix' for mkstemp + :param prefix: same as parameter 'prefix' for mkstemp + + For example: it can be used in database tests for creating + configuration files. + """ + if path: + ensure_tree(path) + + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) + try: + os.write(fd, content) + finally: + os.close(fd) + return path diff --git a/code/daisy/daisy/openstack/common/local.py b/code/daisy/daisy/openstack/common/local.py new file mode 100755 index 00000000..0819d5b9 --- /dev/null +++ b/code/daisy/daisy/openstack/common/local.py @@ -0,0 +1,45 @@ +# Copyright 2011 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Local storage of variables using weak references""" + +import threading +import weakref + + +class WeakLocal(threading.local): + def __getattribute__(self, attr): + rval = super(WeakLocal, self).__getattribute__(attr) + if rval: + # NOTE(mikal): this bit is confusing. What is stored is a weak + # reference, not the value itself. We therefore need to lookup + # the weak reference and return the inner value here. + rval = rval() + return rval + + def __setattr__(self, attr, value): + value = weakref.ref(value) + return super(WeakLocal, self).__setattr__(attr, value) + + +# NOTE(mikal): the name "store" should be deprecated in the future +store = WeakLocal() + +# A "weak" store uses weak references and allows an object to fall out of scope +# when it falls out of scope in the code that uses the thread local storage. A +# "strong" store will hold a reference to the object so that it never falls out +# of scope. +weak_store = WeakLocal() +strong_store = threading.local() diff --git a/code/daisy/daisy/openstack/common/loopingcall.py b/code/daisy/daisy/openstack/common/loopingcall.py new file mode 100755 index 00000000..655f2a29 --- /dev/null +++ b/code/daisy/daisy/openstack/common/loopingcall.py @@ -0,0 +1,147 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import sys +import time + +from eventlet import event +from eventlet import greenthread + +from daisy.openstack.common._i18n import _LE, _LW + +LOG = logging.getLogger(__name__) + +# NOTE(zyluo): This lambda function was declared to avoid mocking collisions +# with time.time() called in the standard logging module +# during unittests. +_ts = lambda: time.time() + + +class LoopingCallDone(Exception): + """Exception to break out and stop a LoopingCallBase. + + The poll-function passed to LoopingCallBase can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. 
+ + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCallBase.wait() + + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCallBase.wait() should return.""" + self.retvalue = retvalue + + +class LoopingCallBase(object): + def __init__(self, f=None, *args, **kw): + self.args = args + self.kw = kw + self.f = f + self._running = False + self.done = None + + def stop(self): + self._running = False + + def wait(self): + return self.done.wait() + + +class FixedIntervalLoopingCall(LoopingCallBase): + """A fixed interval looping call.""" + + def start(self, interval, initial_delay=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + start = _ts() + self.f(*self.args, **self.kw) + end = _ts() + if not self._running: + break + delay = end - start - interval + if delay > 0: + LOG.warn(_LW('task %(func_name)r run outlasted ' + 'interval by %(delay).2f sec'), + {'func_name': self.f, 'delay': delay}) + greenthread.sleep(-delay if delay < 0 else 0) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in fixed duration looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn_n(_inner) + return self.done + + +class DynamicLoopingCall(LoopingCallBase): + """A looping call which sleeps until the next known event. + + The function called should return how long to sleep for before being + called again. + """ + + def start(self, initial_delay=None, periodic_interval_max=None): + self._running = True + done = event.Event() + + def _inner(): + if initial_delay: + greenthread.sleep(initial_delay) + + try: + while self._running: + idle = self.f(*self.args, **self.kw) + if not self._running: + break + + if periodic_interval_max is not None: + idle = min(idle, periodic_interval_max) + LOG.debug('Dynamic looping call %(func_name)r sleeping ' + 'for %(idle).02f seconds', + {'func_name': self.f, 'idle': idle}) + greenthread.sleep(idle) + except LoopingCallDone as e: + self.stop() + done.send(e.retvalue) + except Exception: + LOG.exception(_LE('in dynamic looping call')) + done.send_exception(*sys.exc_info()) + return + else: + done.send(True) + + self.done = done + + greenthread.spawn(_inner) + return self.done diff --git a/code/daisy/daisy/openstack/common/service.py b/code/daisy/daisy/openstack/common/service.py new file mode 100755 index 00000000..ccf76172 --- /dev/null +++ b/code/daisy/daisy/openstack/common/service.py @@ -0,0 +1,495 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
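The looping-call helpers just defined drive periodic work on green threads: FixedIntervalLoopingCall re-runs a callable every fixed interval, while DynamicLoopingCall sleeps for whatever number of seconds the callable returns. A usage sketch, assuming the module is importable as daisy.openstack.common.loopingcall and eventlet is installed:

```python
from daisy.openstack.common import loopingcall

def heartbeat():
    # Raising loopingcall.LoopingCallDone(retval) here would end the loop
    # and make wait() return retval.
    print("still alive")

timer = loopingcall.FixedIntervalLoopingCall(heartbeat)
timer.start(interval=5, initial_delay=2)  # spawns the worker greenthread
timer.wait()  # blocks until stop() is called or LoopingCallDone is raised
```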
+ +"""Generic Node base class for all workers that run on hosts.""" + +import errno +import logging +import os +import random +import signal +import sys +import time + +try: + # Importing just the symbol here because the io module does not + # exist in Python 2.6. + from io import UnsupportedOperation # noqa +except ImportError: + # Python 2.6 + UnsupportedOperation = None + +import eventlet +from eventlet import event +from oslo_config import cfg + +from daisy.openstack.common import eventlet_backdoor +from daisy.openstack.common._i18n import _LE, _LI, _LW +from daisy.openstack.common import systemd +from daisy.openstack.common import threadgroup + + +CONF = cfg.CONF +LOG = logging.getLogger(__name__) + + +def _sighup_supported(): + return hasattr(signal, 'SIGHUP') + + +def _is_daemon(): + # The process group for a foreground process will match the + # process group of the controlling terminal. If those values do + # not match, or ioctl() fails on the stdout file handle, we assume + # the process is running in the background as a daemon. + # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics + try: + is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) + except OSError as err: + if err.errno == errno.ENOTTY: + # Assume we are a daemon because there is no terminal. + is_daemon = True + else: + raise + except UnsupportedOperation: + # Could not get the fileno for stdout, so we must be a daemon. + is_daemon = True + return is_daemon + + +def _is_sighup_and_daemon(signo): + if not (_sighup_supported() and signo == signal.SIGHUP): + # Avoid checking if we are a daemon, because the signal isn't + # SIGHUP. + return False + return _is_daemon() + + +def _signo_to_signame(signo): + signals = {signal.SIGTERM: 'SIGTERM', + signal.SIGINT: 'SIGINT'} + if _sighup_supported(): + signals[signal.SIGHUP] = 'SIGHUP' + return signals[signo] + + +def _set_signals_handler(handler): + signal.signal(signal.SIGTERM, handler) + signal.signal(signal.SIGINT, handler) + if _sighup_supported(): + signal.signal(signal.SIGHUP, handler) + + +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self.services = Services() + self.backdoor_port = eventlet_backdoor.initialize_if_enabled() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + service.backdoor_port = self.backdoor_port + self.services.add(service) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + self.services.stop() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + self.services.wait() + + def restart(self): + """Reload config files and restart service. 
+ + :returns: None + + """ + cfg.CONF.reload_config_files() + self.services.restart() + + +class SignalExit(SystemExit): + def __init__(self, signo, exccode=1): + super(SignalExit, self).__init__(exccode) + self.signo = signo + + +class ServiceLauncher(Launcher): + def _handle_signal(self, signo, frame): + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + raise SignalExit(signo) + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _wait_for_exit_or_signal(self, ready_callback=None): + status = None + signo = 0 + + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, logging.DEBUG) + + try: + if ready_callback: + ready_callback() + super(ServiceLauncher, self).wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + finally: + self.stop() + + return status, signo + + def wait(self, ready_callback=None): + systemd.notify_once() + while True: + self.handle_signal() + status, signo = self._wait_for_exit_or_signal(ready_callback) + if not _is_sighup_and_daemon(signo): + return status + self.restart() + + +class ServiceWrapper(object): + def __init__(self, service, workers): + self.service = service + self.workers = workers + self.children = set() + self.forktimes = [] + + +class ProcessLauncher(object): + def __init__(self): + """Constructor.""" + + self.children = {} + self.sigcaught = None + self.running = True + rfd, self.writepipe = os.pipe() + self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') + self.handle_signal() + + def handle_signal(self): + _set_signals_handler(self._handle_signal) + + def _handle_signal(self, signo, frame): + self.sigcaught = signo + self.running = False + + # Allow the process to be killed again and die from natural causes + _set_signals_handler(signal.SIG_DFL) + + def _pipe_watcher(self): + # This will block until the write end is closed when the parent + # dies unexpectedly + self.readpipe.read() + + LOG.info(_LI('Parent process has died unexpectedly, exiting')) + + sys.exit(1) + + def _child_process_handle_signal(self): + # Setup child signal handlers differently + def _sigterm(*args): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + raise SignalExit(signal.SIGTERM) + + def _sighup(*args): + signal.signal(signal.SIGHUP, signal.SIG_DFL) + raise SignalExit(signal.SIGHUP) + + signal.signal(signal.SIGTERM, _sigterm) + if _sighup_supported(): + signal.signal(signal.SIGHUP, _sighup) + # Block SIGINT and let the parent send us a SIGTERM + signal.signal(signal.SIGINT, signal.SIG_IGN) + + def _child_wait_for_exit_or_signal(self, launcher): + status = 0 + signo = 0 + + # NOTE(johannes): All exceptions are caught to ensure this + # doesn't fallback into the loop spawning children. It would + # be bad for a child to spawn more children. 
+ try: + launcher.wait() + except SignalExit as exc: + signame = _signo_to_signame(exc.signo) + LOG.info(_LI('Child caught %s, exiting'), signame) + status = exc.code + signo = exc.signo + except SystemExit as exc: + status = exc.code + except BaseException: + LOG.exception(_LE('Unhandled exception')) + status = 2 + finally: + launcher.stop() + + return status, signo + + def _child_process(self, service): + self._child_process_handle_signal() + + # Reopen the eventlet hub to make sure we don't share an epoll + # fd with parent and/or siblings, which would be bad + eventlet.hubs.use_hub() + + # Close write to ensure only parent has it open + os.close(self.writepipe) + # Create greenthread to watch for parent to close pipe + eventlet.spawn_n(self._pipe_watcher) + + # Reseed random number generator + random.seed() + + launcher = Launcher() + launcher.launch_service(service) + return launcher + + def _start_child(self, wrap): + if len(wrap.forktimes) > wrap.workers: + # Limit ourselves to one process a second (over the period of + # number of workers * 1 second). This will allow workers to + # start up quickly but ensure we don't fork off children that + # die instantly too quickly. + if time.time() - wrap.forktimes[0] < wrap.workers: + LOG.info(_LI('Forking too fast, sleeping')) + time.sleep(1) + + wrap.forktimes.pop(0) + + wrap.forktimes.append(time.time()) + + pid = os.fork() + if pid == 0: + launcher = self._child_process(wrap.service) + while True: + self._child_process_handle_signal() + status, signo = self._child_wait_for_exit_or_signal(launcher) + if not _is_sighup_and_daemon(signo): + break + launcher.restart() + + os._exit(status) + + LOG.info(_LI('Started child %d'), pid) + + wrap.children.add(pid) + self.children[pid] = wrap + + return pid + + def launch_service(self, service, workers=1): + wrap = ServiceWrapper(service, workers) + + LOG.info(_LI('Starting %d workers'), wrap.workers) + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def _wait_child(self): + try: + # Block while any of child processes have exited + pid, status = os.waitpid(0, 0) + if not pid: + return None + except OSError as exc: + if exc.errno not in (errno.EINTR, errno.ECHILD): + raise + return None + + if os.WIFSIGNALED(status): + sig = os.WTERMSIG(status) + LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), + dict(pid=pid, sig=sig)) + else: + code = os.WEXITSTATUS(status) + LOG.info(_LI('Child %(pid)s exited with status %(code)d'), + dict(pid=pid, code=code)) + + if pid not in self.children: + LOG.warning(_LW('pid %d not in child list'), pid) + return None + + wrap = self.children.pop(pid) + wrap.children.remove(pid) + return wrap + + def _respawn_children(self): + while self.running: + wrap = self._wait_child() + if not wrap: + continue + while self.running and len(wrap.children) < wrap.workers: + self._start_child(wrap) + + def wait(self): + """Loop waiting on children to die and respawning as necessary.""" + + systemd.notify_once() + LOG.debug('Full set of CONF:') + CONF.log_opt_values(LOG, logging.DEBUG) + + try: + while True: + self.handle_signal() + self._respawn_children() + # No signal means that stop was called. Don't clean up here. 
+ if not self.sigcaught: + return + + signame = _signo_to_signame(self.sigcaught) + LOG.info(_LI('Caught %s, stopping children'), signame) + if not _is_sighup_and_daemon(self.sigcaught): + break + + for pid in self.children: + os.kill(pid, signal.SIGHUP) + self.running = True + self.sigcaught = None + except eventlet.greenlet.GreenletExit: + LOG.info(_LI("Wait called after thread killed. Cleaning up.")) + + self.stop() + + def stop(self): + """Terminate child processes and wait on each.""" + self.running = False + for pid in self.children: + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + + # Wait for children to die + if self.children: + LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) + while self.children: + self._wait_child() + + +class Service(object): + """Service object for binaries running on hosts.""" + + def __init__(self, threads=1000): + self.tg = threadgroup.ThreadGroup(threads) + + # signal that the service is done shutting itself down: + self._done = event.Event() + + def reset(self): + # NOTE(Fengqian): docs for Event.reset() recommend against using it + self._done = event.Event() + + def start(self): + pass + + def stop(self, graceful=False): + self.tg.stop(graceful) + self.tg.wait() + # Signal that service cleanup is done: + if not self._done.ready(): + self._done.send() + + def wait(self): + self._done.wait() + + +class Services(object): + + def __init__(self): + self.services = [] + self.tg = threadgroup.ThreadGroup() + self.done = event.Event() + + def add(self, service): + self.services.append(service) + self.tg.add_thread(self.run_service, service, self.done) + + def stop(self): + # wait for graceful shutdown of services: + for service in self.services: + service.stop() + service.wait() + + # Each service has performed cleanup, now signal that the run_service + # wrapper threads can now die: + if not self.done.ready(): + self.done.send() + + # reap threads: + self.tg.stop() + + def wait(self): + self.tg.wait() + + def restart(self): + self.stop() + self.done = event.Event() + for restart_service in self.services: + restart_service.reset() + self.tg.add_thread(self.run_service, restart_service, self.done) + + @staticmethod + def run_service(service, done): + """Service start wrapper. + + :param service: service to run + :param done: event to wait on until a shutdown is triggered + :returns: None + + """ + service.start() + done.wait() + + +def launch(service, workers=1): + if workers is None or workers == 1: + launcher = ServiceLauncher() + launcher.launch_service(service) + else: + launcher = ProcessLauncher() + launcher.launch_service(service, workers=workers) + + return launcher diff --git a/code/daisy/daisy/openstack/common/systemd.py b/code/daisy/daisy/openstack/common/systemd.py new file mode 100755 index 00000000..36243b34 --- /dev/null +++ b/code/daisy/daisy/openstack/common/systemd.py @@ -0,0 +1,105 @@ +# Copyright 2012-2014 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
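The service module above gives every long-running worker the same lifecycle: a Service owns a ThreadGroup, launch() picks ServiceLauncher for a single worker or ProcessLauncher for several forked children, and wait() blocks until SIGTERM or SIGINT arrives (SIGHUP triggers a restart where supported). A sketch of how a consumer typically wires this up; MyService and its tick callback are illustrative and not part of the patch:

```python
from daisy.openstack.common import service

class MyService(service.Service):
    def start(self):
        # Register a periodic task on the thread group owned by Service.
        self.tg.add_timer(10, self._tick)

    def _tick(self):
        print("tick")

if __name__ == '__main__':
    launcher = service.launch(MyService(), workers=1)  # ServiceLauncher path
    launcher.wait()  # runs until the process receives SIGTERM or SIGINT
```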
+ +""" +Helper module for systemd service readiness notification. +""" + +import logging +import os +import socket +import sys + + +LOG = logging.getLogger(__name__) + + +def _abstractify(socket_name): + if socket_name.startswith('@'): + # abstract namespace socket + socket_name = '\0%s' % socket_name[1:] + return socket_name + + +def _sd_notify(unset_env, msg): + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + sock.connect(_abstractify(notify_socket)) + sock.sendall(msg) + if unset_env: + del os.environ['NOTIFY_SOCKET'] + except EnvironmentError: + LOG.debug("Systemd notification failed", exc_info=True) + finally: + sock.close() + + +def notify(): + """Send notification to Systemd that service is ready. + + For details see + http://www.freedesktop.org/software/systemd/man/sd_notify.html + """ + _sd_notify(False, 'READY=1') + + +def notify_once(): + """Send notification once to Systemd that service is ready. + + Systemd sets NOTIFY_SOCKET environment variable with the name of the + socket listening for notifications from services. + This method removes the NOTIFY_SOCKET environment variable to ensure + notification is sent only once. + """ + _sd_notify(True, 'READY=1') + + +def onready(notify_socket, timeout): + """Wait for systemd style notification on the socket. + + :param notify_socket: local socket address + :type notify_socket: string + :param timeout: socket timeout + :type timeout: float + :returns: 0 service ready + 1 service not ready + 2 timeout occurred + """ + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.settimeout(timeout) + sock.bind(_abstractify(notify_socket)) + try: + msg = sock.recv(512) + except socket.timeout: + return 2 + finally: + sock.close() + if 'READY=1' in msg: + return 0 + else: + return 1 + + +if __name__ == '__main__': + # simple CLI for testing + if len(sys.argv) == 1: + notify() + elif len(sys.argv) >= 2: + timeout = float(sys.argv[1]) + notify_socket = os.getenv('NOTIFY_SOCKET') + if notify_socket: + retval = onready(notify_socket, timeout) + sys.exit(retval) diff --git a/code/daisy/daisy/openstack/common/threadgroup.py b/code/daisy/daisy/openstack/common/threadgroup.py new file mode 100755 index 00000000..4561d2c5 --- /dev/null +++ b/code/daisy/daisy/openstack/common/threadgroup.py @@ -0,0 +1,149 @@ +# Copyright 2012 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import logging +import threading + +import eventlet +from eventlet import greenpool + +from daisy.openstack.common import loopingcall + + +LOG = logging.getLogger(__name__) + + +def _thread_done(gt, *args, **kwargs): + """Callback function to be passed to GreenThread.link() when we spawn() + Calls the :class:`ThreadGroup` to notify if. + + """ + kwargs['group'].thread_done(kwargs['thread']) + + +class Thread(object): + """Wrapper around a greenthread, that holds a reference to the + :class:`ThreadGroup`. 
The Thread will notify the :class:`ThreadGroup` when + it has done so it can be removed from the threads list. + """ + def __init__(self, thread, group): + self.thread = thread + self.thread.link(_thread_done, group=group, thread=self) + + def stop(self): + self.thread.kill() + + def wait(self): + return self.thread.wait() + + def link(self, func, *args, **kwargs): + self.thread.link(func, *args, **kwargs) + + +class ThreadGroup(object): + """The point of the ThreadGroup class is to: + + * keep track of timers and greenthreads (making it easier to stop them + when need be). + * provide an easy API to add timers. + """ + def __init__(self, thread_pool_size=10): + self.pool = greenpool.GreenPool(thread_pool_size) + self.threads = [] + self.timers = [] + + def add_dynamic_timer(self, callback, initial_delay=None, + periodic_interval_max=None, *args, **kwargs): + timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) + timer.start(initial_delay=initial_delay, + periodic_interval_max=periodic_interval_max) + self.timers.append(timer) + + def add_timer(self, interval, callback, initial_delay=None, + *args, **kwargs): + pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) + pulse.start(interval=interval, + initial_delay=initial_delay) + self.timers.append(pulse) + + def add_thread(self, callback, *args, **kwargs): + gt = self.pool.spawn(callback, *args, **kwargs) + th = Thread(gt, self) + self.threads.append(th) + return th + + def thread_done(self, thread): + self.threads.remove(thread) + + def _stop_threads(self): + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + # don't kill the current thread. + continue + try: + x.stop() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + + def stop_timers(self): + for x in self.timers: + try: + x.stop() + except Exception as ex: + LOG.exception(ex) + self.timers = [] + + def stop(self, graceful=False): + """stop function has the option of graceful=True/False. + + * In case of graceful=True, wait for all threads to be finished. + Never kill threads. + * In case of graceful=False, kill threads immediately. + """ + self.stop_timers() + if graceful: + # In case of graceful=True, wait for all threads to be + # finished, never kill threads + self.wait() + else: + # In case of graceful=False(Default), kill threads + # immediately + self._stop_threads() + + def wait(self): + for x in self.timers: + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) + current = threading.current_thread() + + # Iterate over a copy of self.threads so thread_done doesn't + # modify the list while we're iterating + for x in self.threads[:]: + if x is current: + continue + try: + x.wait() + except eventlet.greenlet.GreenletExit: + pass + except Exception as ex: + LOG.exception(ex) diff --git a/code/daisy/daisy/opts.py b/code/daisy/daisy/opts.py new file mode 100755 index 00000000..ed53540e --- /dev/null +++ b/code/daisy/daisy/opts.py @@ -0,0 +1,142 @@ +# Copyright (c) 2014 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
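Editor's note — a short, hedged sketch of how the two helpers above are typically combined: a ThreadGroup runs periodic timers and worker greenthreads, and systemd.notify_once() reports readiness once startup work has been scheduled (it is a no-op unless NOTIFY_SOCKET is set). The callback names and the 60-second interval are illustrative, not taken from this patch.

    from daisy.openstack.common import systemd
    from daisy.openstack.common import threadgroup

    def _heartbeat():
        pass  # periodic work would go here

    tg = threadgroup.ThreadGroup(thread_pool_size=10)
    tg.add_timer(60, _heartbeat)    # FixedIntervalLoopingCall every 60s
    tg.add_thread(lambda: None)     # one-off worker greenthread
    systemd.notify_once()           # tell systemd the service is ready

    # On shutdown: graceful=True waits for threads to finish,
    # graceful=False kills them immediately.
    tg.stop(graceful=True)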
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +__all__ = [ + 'list_api_opts', + 'list_registry_opts', + 'list_scrubber_opts', + 'list_cache_opts', + 'list_manage_opts' +] + +import copy +import itertools + +import daisy.api.middleware.context +import daisy.api.versions +import daisy.common.config +import daisy.common.location_strategy +import daisy.common.location_strategy.store_type +import daisy.common.property_utils +import daisy.common.rpc +import daisy.common.wsgi +import daisy.image_cache +import daisy.image_cache.drivers.sqlite +import daisy.notifier +import daisy.registry +import daisy.registry.client +import daisy.registry.client.v1.api +import daisy.scrubber + + +_api_opts = [ + (None, list(itertools.chain( + daisy.api.middleware.context.context_opts, + daisy.api.versions.versions_opts, + daisy.common.config.common_opts, + daisy.common.location_strategy.location_strategy_opts, + daisy.common.property_utils.property_opts, + daisy.common.rpc.rpc_opts, + daisy.common.wsgi.bind_opts, + daisy.common.wsgi.eventlet_opts, + daisy.common.wsgi.socket_opts, + daisy.image_cache.drivers.sqlite.sqlite_opts, + daisy.image_cache.image_cache_opts, + daisy.notifier.notifier_opts, + daisy.registry.registry_addr_opts, + daisy.registry.client.registry_client_ctx_opts, + daisy.registry.client.registry_client_opts, + daisy.registry.client.v1.api.registry_client_ctx_opts, + daisy.scrubber.scrubber_opts))), + ('image_format', daisy.common.config.image_format_opts), + ('task', daisy.common.config.task_opts), + ('store_type_location_strategy', + daisy.common.location_strategy.store_type.store_type_opts), + ('paste_deploy', daisy.common.config.paste_deploy_opts) +] +_registry_opts = [ + (None, list(itertools.chain( + daisy.api.middleware.context.context_opts, + daisy.common.config.common_opts, + daisy.common.wsgi.bind_opts, + daisy.common.wsgi.socket_opts, + daisy.common.wsgi.eventlet_opts))), + ('paste_deploy', daisy.common.config.paste_deploy_opts) +] +_scrubber_opts = [ + (None, list(itertools.chain( + daisy.common.config.common_opts, + daisy.scrubber.scrubber_opts, + daisy.scrubber.scrubber_cmd_opts, + daisy.scrubber.scrubber_cmd_cli_opts, + daisy.registry.client.registry_client_ctx_opts, + daisy.registry.registry_addr_opts))), +] +_cache_opts = [ + (None, list(itertools.chain( + daisy.common.config.common_opts, + daisy.image_cache.drivers.sqlite.sqlite_opts, + daisy.image_cache.image_cache_opts, + daisy.registry.registry_addr_opts, + daisy.registry.client.registry_client_ctx_opts))), +] +_manage_opts = [ + (None, []) +] + + +def list_api_opts(): + """Return a list of oslo_config options available in Glance API service. + + Each element of the list is a tuple. The first element is the name of the + group under which the list of elements in the second element will be + registered. A group name of None corresponds to the [DEFAULT] group in + config files. + + This function is also discoverable via the 'daisy.api' entry point + under the 'oslo_config.opts' namespace. + + The purpose of this is to allow tools like the Oslo sample config file + generator to discover the options exposed to users by daisy. 
+ + :returns: a list of (group_name, opts) tuples + """ + + return [(g, copy.deepcopy(o)) for g, o in _api_opts] + + +def list_registry_opts(): + """Return a list of oslo_config options available in Glance Registry + service. + """ + return [(g, copy.deepcopy(o)) for g, o in _registry_opts] + + +def list_scrubber_opts(): + """Return a list of oslo_config options available in Glance Scrubber + service. + """ + return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts] + + +def list_cache_opts(): + """Return a list of oslo_config options available in Glance Cache + service. + """ + return [(g, copy.deepcopy(o)) for g, o in _cache_opts] + + +def list_manage_opts(): + """Return a list of oslo_config options available in Glance manage.""" + return [(g, copy.deepcopy(o)) for g, o in _manage_opts] diff --git a/code/daisy/daisy/orchestration/__init__.py b/code/daisy/daisy/orchestration/__init__.py new file mode 100755 index 00000000..8b137891 --- /dev/null +++ b/code/daisy/daisy/orchestration/__init__.py @@ -0,0 +1 @@ + diff --git a/code/daisy/daisy/orchestration/manager.py b/code/daisy/daisy/orchestration/manager.py new file mode 100755 index 00000000..0ffa269a --- /dev/null +++ b/code/daisy/daisy/orchestration/manager.py @@ -0,0 +1,177 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
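Editor's note — the list_*_opts() helpers above exist so that option-discovery tools (such as the Oslo sample-config generator, via the 'oslo_config.opts' entry points mentioned in the docstring) can enumerate daisy's configuration options. A hedged sketch of consuming them directly, assuming a full daisy installation so the module imports at the top of opts.py resolve:

    from daisy import opts

    for group, options in opts.list_api_opts():
        section = group or 'DEFAULT'   # a group of None maps to [DEFAULT]
        for opt in options:
            print('[%s] %s (default: %r)' % (section, opt.name, opt.default))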
+ +""" +/orchestration for tecs API +""" +import copy +import subprocess +import time + +import traceback +import webob.exc +from oslo_config import cfg +from oslo_log import log as logging +from webob.exc import HTTPBadRequest +from webob.exc import HTTPForbidden +from webob.exc import HTTPServerError +from webob.exc import HTTPNotFound +import threading +from threading import Thread + +from daisy import i18n +from daisy import notifier + +from daisy.common import exception +from daisyclient.v1.client import Client +from eventlet import greenthread +import eventlet.timeout +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class OrchestrationManager(): + + def __init__(self, *args, **kwargs): + """Load orchestration options and initialization.""" + pass + + @staticmethod + def find_auto_scale_cluster(): + try: + daisy_version = 1.0 + daisy_endpoint="http://127.0.0.1:19292" + daisy_client = Client(version=daisy_version, endpoint=daisy_endpoint) + orchestrationManager = OrchestrationManager() + cluster_meta={'auto_scale':'1'} + params = {'filters':cluster_meta} + clusters_gen = daisy_client.clusters.list(**params) + clusters = [cluster.to_dict() for cluster in clusters_gen if cluster.auto_scale == 1 ] + if clusters: + cluster_id = clusters[0]['id'] + params = {'filters':''} + hosts_gen = daisy_client.hosts.list(**params) + init_hosts = [host.to_dict() for host in hosts_gen if host.os_status =="init" or host.os_status == "install-failed"] + if not init_hosts: + LOG.info("no init or install-failed host") + return {"status":"no init host"} + + params = {'filters':{'cluster_id':cluster_id}} + roles_gen = daisy_client.roles.list(**params) + roles_in_cluster = [role.to_dict() for role in roles_gen] + roles = [role for role in roles_in_cluster if role['name'] =="CONTROLLER_HA" and role['status'] == "active"] + if not roles: + LOG.info("no active CONTROLLER_HA role") + return {"status":"no active CONTROLLER_HA role"} + for host in init_hosts: + if host['status'] == "init": + host_info = daisy_client.hosts.get(host['id']) + if hasattr(host_info, "interfaces"): + scale_host_info = orchestrationManager.set_scale_host_interface(cluster_id, host_info,daisy_client) + if scale_host_info: + host_meta ={'name':scale_host_info.name,'os_version':scale_host_info.os_version_file, 'root_lv_size':scale_host_info.root_lv_size,'swap_lv_size':scale_host_info.swap_lv_size, 'role':['COMPUTER'], 'cluster':cluster_id, 'interfaces':scale_host_info.interfaces } + daisy_client.hosts.update(host['id'],**host_meta) + else: + LOG.error("can not set scale host") + return {"status":"no scale host"} + + else: + LOG.info("not interfaces in host %s" % host['id']) + raise HTTPNotFound("not interfaces in host %s" % host['id']) + orchestrationManager._os_tecs_install(cluster_id, daisy_client) + except exception.Invalid as e: + LOG.exception(e.message) + + + def _os_tecs_install(self, cluster_id, daisy_client): + try: + install_meta = {'cluster_id':cluster_id} + daisy_client.install.install(**install_meta) + LOG.info("install cluster %s" %cluster_id) + except exception.Invalid as e: + LOG.error("install error:%s" % e.message) + + def get_active_compute(self, cluster_id, daisy_client): + host_meta={'cluster_id':cluster_id} + host_meta['filters'] = host_meta + host_list_generator = daisy_client.hosts.list(**host_meta) + active_compute_list = [] + host_list = [host for host in host_list_generator if hasattr(host,"role_status") and host.role_status == "active"] + for host in host_list: + host_info = daisy_client.hosts.get(host.id) + 
if hasattr(host_info,"role") and "COMPUTER" in host_info.role and hasattr(host_info,"interfaces"): + active_compute_list.append(host_info) + return active_compute_list + + def set_scale_host_interface(self, cluster_id, host_info, daisy_client): + compute_list = [] + active_compute_host = None + compute_list = self.get_active_compute(cluster_id, daisy_client) + if compute_list and hasattr(host_info,"interfaces"): + active_compute_host = self.check_isomorphic_host(compute_list, host_info.interfaces) + if not active_compute_host: + LOG.info("%s not isomorphic host" % host_info.name) + return None + host_info.os_version_file = active_compute_host.os_version_file + host_info.root_lv_size = active_compute_host.root_lv_size + host_info.swap_lv_size = active_compute_host.swap_lv_size + host_info.name="computer-" + host_info.name[-12:] + else: + LOG.error("no active compute node in cluster") + return None + + if active_compute_host: + for interface in host_info.interfaces: + for compute_interface in active_compute_host.interfaces: + if interface['pci'] == compute_interface['pci'] and compute_interface.has_key("assigned_networks"): + for assigned_network in compute_interface['assigned_networks']: + assigned_network['ip'] = '' + interface['assigned_networks'] = compute_interface['assigned_networks'] + interface['name'] = compute_interface['name'] + interface['netmask'] = compute_interface['netmask'] + interface['gateway'] = compute_interface['gateway'] + interface['mode'] = compute_interface['mode'] + for compute_interface in active_compute_host.interfaces: + for assigned_network in compute_interface['assigned_networks']: + assigned_network['ip'] = '' + compute_interface['host_id'] = host_info.id + if compute_interface['type'] == "bond": + interfaces = [interface for interface in host_info.interfaces if interface['name'] == compute_interface['name']] + if not interfaces: + host_info.interfaces.append(compute_interface) + return host_info + + def check_isomorphic_host(self, compute_list, new_interfaces): + for compute_host in compute_list: + new_interface_count = len([interface for interface in new_interfaces if interface['type'] =="ether"]) + compute_interface_count = len([interface for interface in compute_host.interfaces if interface['type'] =="ether"]) + if new_interface_count != compute_interface_count: + continue + is_isomorphic = False + for interface in new_interfaces: + if interface['type'] != "ether": + continue + for compute_interface in compute_host.interfaces: + if interface['pci'] == compute_interface['pci'] and interface['max_speed'] == compute_interface['max_speed']: + is_isomorphic = True + elif interface['pci'] == compute_interface['pci'] and interface['max_speed'] != compute_interface['max_speed']: + is_isomorphic = False + break + if not is_isomorphic: + break + if is_isomorphic: + return compute_host + return False + diff --git a/code/daisy/daisy/quota/__init__.py b/code/daisy/daisy/quota/__init__.py new file mode 100755 index 00000000..6f91d96f --- /dev/null +++ b/code/daisy/daisy/quota/__init__.py @@ -0,0 +1,368 @@ +# Copyright 2013, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
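Editor's note — find_auto_scale_cluster() above is a polling entry point: it lists clusters with auto_scale set, looks for hosts in the "init" or "install-failed" state, copies interface/OS settings from an isomorphic active compute node, and then triggers an install. How it is scheduled is not shown in this patch; a hedged sketch of one plausible wiring, using the ThreadGroup timer API from the same series:

    from daisy.openstack.common import threadgroup
    from daisy.orchestration.manager import OrchestrationManager

    # Poll for scale-out candidates periodically; the 60-second interval
    # is an illustrative value, not one taken from this patch.
    tg = threadgroup.ThreadGroup()
    tg.add_timer(60, OrchestrationManager.find_auto_scale_cluster)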
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +import glance_store as store +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import excutils +import six + +import daisy.api.common +import daisy.common.exception as exception +from daisy.common import utils +import daisy.domain +import daisy.domain.proxy +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI +CONF = cfg.CONF +CONF.import_opt('image_member_quota', 'daisy.common.config') +CONF.import_opt('image_property_quota', 'daisy.common.config') +CONF.import_opt('image_tag_quota', 'daisy.common.config') + + +def _enforce_image_tag_quota(tags): + if CONF.image_tag_quota < 0: + # If value is negative, allow unlimited number of tags + return + + if not tags: + return + + if len(tags) > CONF.image_tag_quota: + raise exception.ImageTagLimitExceeded(attempted=len(tags), + maximum=CONF.image_tag_quota) + + +def _calc_required_size(context, image, locations): + required_size = None + if image.size: + required_size = image.size * len(locations) + else: + for location in locations: + size_from_backend = None + try: + size_from_backend = store.get_size_from_backend( + location['url'], context=context) + except (store.UnknownScheme, store.NotFound): + pass + if size_from_backend: + required_size = size_from_backend * len(locations) + break + return required_size + + +def _enforce_image_location_quota(image, locations, is_setter=False): + if CONF.image_location_quota < 0: + # If value is negative, allow unlimited number of locations + return + + attempted = len(image.locations) + len(locations) + attempted = attempted if not is_setter else len(locations) + maximum = CONF.image_location_quota + if attempted > maximum: + raise exception.ImageLocationLimitExceeded(attempted=attempted, + maximum=maximum) + + +class ImageRepoProxy(daisy.domain.proxy.Repo): + + def __init__(self, image_repo, context, db_api, store_utils): + self.image_repo = image_repo + self.db_api = db_api + proxy_kwargs = {'context': context, 'db_api': db_api, + 'store_utils': store_utils} + super(ImageRepoProxy, self).__init__(image_repo, + item_proxy_class=ImageProxy, + item_proxy_kwargs=proxy_kwargs) + + def _enforce_image_property_quota(self, attempted): + if CONF.image_property_quota < 0: + # If value is negative, allow unlimited number of properties + return + + maximum = CONF.image_property_quota + if attempted > maximum: + kwargs = {'attempted': attempted, 'maximum': maximum} + exc = exception.ImagePropertyLimitExceeded(**kwargs) + LOG.debug(six.text_type(exc)) + raise exc + + def save(self, image, from_state=None): + if image.added_new_properties(): + self._enforce_image_property_quota(len(image.extra_properties)) + return super(ImageRepoProxy, self).save(image, from_state=from_state) + + def add(self, image): + self._enforce_image_property_quota(len(image.extra_properties)) + return super(ImageRepoProxy, self).add(image) + + +class ImageFactoryProxy(daisy.domain.proxy.ImageFactory): + def __init__(self, factory, context, db_api, store_utils): + proxy_kwargs = {'context': context, 'db_api': db_api, + 'store_utils': store_utils} + 
super(ImageFactoryProxy, self).__init__(factory, + proxy_class=ImageProxy, + proxy_kwargs=proxy_kwargs) + + def new_image(self, **kwargs): + tags = kwargs.pop('tags', set([])) + _enforce_image_tag_quota(tags) + return super(ImageFactoryProxy, self).new_image(tags=tags, **kwargs) + + +class QuotaImageTagsProxy(object): + + def __init__(self, orig_set): + if orig_set is None: + orig_set = set([]) + self.tags = orig_set + + def add(self, item): + self.tags.add(item) + _enforce_image_tag_quota(self.tags) + + def __cast__(self, *args, **kwargs): + return self.tags.__cast__(*args, **kwargs) + + def __contains__(self, *args, **kwargs): + return self.tags.__contains__(*args, **kwargs) + + def __eq__(self, other): + return self.tags == other + + def __iter__(self, *args, **kwargs): + return self.tags.__iter__(*args, **kwargs) + + def __len__(self, *args, **kwargs): + return self.tags.__len__(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self.tags, name) + + +class ImageMemberFactoryProxy(daisy.domain.proxy.ImageMembershipFactory): + + def __init__(self, member_factory, context, db_api, store_utils): + self.db_api = db_api + self.context = context + proxy_kwargs = {'context': context, 'db_api': db_api, + 'store_utils': store_utils} + super(ImageMemberFactoryProxy, self).__init__( + member_factory, + image_proxy_class=ImageProxy, + image_proxy_kwargs=proxy_kwargs) + + def _enforce_image_member_quota(self, image): + if CONF.image_member_quota < 0: + # If value is negative, allow unlimited number of members + return + + current_member_count = self.db_api.image_member_count(self.context, + image.image_id) + attempted = current_member_count + 1 + maximum = CONF.image_member_quota + if attempted > maximum: + raise exception.ImageMemberLimitExceeded(attempted=attempted, + maximum=maximum) + + def new_image_member(self, image, member_id): + self._enforce_image_member_quota(image) + return super(ImageMemberFactoryProxy, self).new_image_member(image, + member_id) + + +class QuotaImageLocationsProxy(object): + + def __init__(self, image, context, db_api): + self.image = image + self.context = context + self.db_api = db_api + self.locations = image.locations + + def __cast__(self, *args, **kwargs): + return self.locations.__cast__(*args, **kwargs) + + def __contains__(self, *args, **kwargs): + return self.locations.__contains__(*args, **kwargs) + + def __delitem__(self, *args, **kwargs): + return self.locations.__delitem__(*args, **kwargs) + + def __delslice__(self, *args, **kwargs): + return self.locations.__delslice__(*args, **kwargs) + + def __eq__(self, other): + return self.locations == other + + def __getitem__(self, *args, **kwargs): + return self.locations.__getitem__(*args, **kwargs) + + def __iadd__(self, other): + if not hasattr(other, '__iter__'): + raise TypeError() + self._check_user_storage_quota(other) + return self.locations.__iadd__(other) + + def __iter__(self, *args, **kwargs): + return self.locations.__iter__(*args, **kwargs) + + def __len__(self, *args, **kwargs): + return self.locations.__len__(*args, **kwargs) + + def __setitem__(self, key, value): + return self.locations.__setitem__(key, value) + + def count(self, *args, **kwargs): + return self.locations.count(*args, **kwargs) + + def index(self, *args, **kwargs): + return self.locations.index(*args, **kwargs) + + def pop(self, *args, **kwargs): + return self.locations.pop(*args, **kwargs) + + def remove(self, *args, **kwargs): + return self.locations.remove(*args, **kwargs) + + def reverse(self, *args, **kwargs): 
+ return self.locations.reverse(*args, **kwargs) + + def _check_user_storage_quota(self, locations): + required_size = _calc_required_size(self.context, + self.image, + locations) + daisy.api.common.check_quota(self.context, + required_size, + self.db_api) + _enforce_image_location_quota(self.image, locations) + + def __copy__(self): + return type(self)(self.image, self.context, self.db_api) + + def __deepcopy__(self, memo): + # NOTE(zhiyan): Only copy location entries, others can be reused. + self.image.locations = copy.deepcopy(self.locations, memo) + return type(self)(self.image, self.context, self.db_api) + + def append(self, object): + self._check_user_storage_quota([object]) + return self.locations.append(object) + + def insert(self, index, object): + self._check_user_storage_quota([object]) + return self.locations.insert(index, object) + + def extend(self, iter): + self._check_user_storage_quota(iter) + return self.locations.extend(iter) + + +class ImageProxy(daisy.domain.proxy.Image): + + def __init__(self, image, context, db_api, store_utils): + self.image = image + self.context = context + self.db_api = db_api + self.store_utils = store_utils + super(ImageProxy, self).__init__(image) + self.orig_props = set(image.extra_properties.keys()) + + def set_data(self, data, size=None): + remaining = daisy.api.common.check_quota( + self.context, size, self.db_api, image_id=self.image.image_id) + if remaining is not None: + # NOTE(jbresnah) we are trying to enforce a quota, put a limit + # reader on the data + data = utils.LimitingReader(data, remaining) + try: + self.image.set_data(data, size=size) + except exception.ImageSizeLimitExceeded: + raise exception.StorageQuotaFull(image_size=size, + remaining=remaining) + + # NOTE(jbresnah) If two uploads happen at the same time and neither + # properly sets the size attribute[1] then there is a race condition + # that will allow for the quota to be broken[2]. Thus we must recheck + # the quota after the upload and thus after we know the size. + # + # Also, when an upload doesn't set the size properly then the call to + # check_quota above returns None and so utils.LimitingReader is not + # used above. Hence the store (e.g. filesystem store) may have to + # download the entire file before knowing the actual file size. Here + # also we need to check for the quota again after the image has been + # downloaded to the store. + # + # [1] For e.g. when using chunked transfers the 'Content-Length' + # header is not set. + # [2] For e.g.: + # - Upload 1 does not exceed quota but upload 2 exceeds quota. + # Both uploads are to different locations + # - Upload 2 completes before upload 1 and writes image.size. + # - Immediately, upload 1 completes and (over)writes image.size + # with the smaller size. + # - Now, to glance, image has not exceeded quota but, in + # reality, the quota has been exceeded. 
+ + try: + daisy.api.common.check_quota( + self.context, self.image.size, self.db_api, + image_id=self.image.image_id) + except exception.StorageQuotaFull: + with excutils.save_and_reraise_exception(): + LOG.info(_LI('Cleaning up %s after exceeding the quota.') + % self.image.image_id) + self.store_utils.safe_delete_from_backend( + self.context, self.image.image_id, self.image.locations[0]) + + @property + def tags(self): + return QuotaImageTagsProxy(self.image.tags) + + @tags.setter + def tags(self, value): + _enforce_image_tag_quota(value) + self.image.tags = value + + @property + def locations(self): + return QuotaImageLocationsProxy(self.image, + self.context, + self.db_api) + + @locations.setter + def locations(self, value): + _enforce_image_location_quota(self.image, value, is_setter=True) + + if not isinstance(value, (list, QuotaImageLocationsProxy)): + raise exception.Invalid(_('Invalid locations: %s') % value) + + required_size = _calc_required_size(self.context, + self.image, + value) + + daisy.api.common.check_quota( + self.context, required_size, self.db_api, + image_id=self.image.image_id) + self.image.locations = value + + def added_new_properties(self): + current_props = set(self.image.extra_properties.keys()) + return bool(current_props.difference(self.orig_props)) diff --git a/code/daisy/daisy/registry/__init__.py b/code/daisy/daisy/registry/__init__.py new file mode 100755 index 00000000..d71c9898 --- /dev/null +++ b/code/daisy/daisy/registry/__init__.py @@ -0,0 +1,34 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Registry API +""" + +from oslo_config import cfg + +from daisy import i18n + +_ = i18n._ + +registry_addr_opts = [ + cfg.StrOpt('registry_host', default='0.0.0.0', + help=_('Address to find the registry server.')), + cfg.IntOpt('registry_port', default=19191, + help=_('Port the registry server is listening on.')), +] + +CONF = cfg.CONF +CONF.register_opts(registry_addr_opts) diff --git a/code/daisy/daisy/registry/api/__init__.py b/code/daisy/daisy/registry/api/__init__.py new file mode 100755 index 00000000..4a66e0c1 --- /dev/null +++ b/code/daisy/daisy/registry/api/__init__.py @@ -0,0 +1,37 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
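Editor's note — the registry_addr_opts registered in daisy/registry/__init__.py above tell the API service where to reach the registry server (defaults: 0.0.0.0 and port 19191). A hedged sketch of reading them once the module has been imported and the options registered on the global CONF object:

    from oslo_config import cfg
    import daisy.registry  # noqa: importing registers registry_addr_opts on CONF

    CONF = cfg.CONF
    registry_url = 'http://%s:%d' % (CONF.registry_host, CONF.registry_port)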
+ +from oslo_config import cfg + +from daisy.common import wsgi +from daisy.registry.api import v1 +from daisy.registry.api import v2 + +CONF = cfg.CONF +CONF.import_opt('enable_v1_registry', 'daisy.common.config') +CONF.import_opt('enable_v2_registry', 'daisy.common.config') + + +class API(wsgi.Router): + """WSGI entry point for all Registry requests.""" + + def __init__(self, mapper): + mapper = mapper or wsgi.APIMapper() + if CONF.enable_v1_registry: + v1.init(mapper) + if CONF.enable_v2_registry: + v2.init(mapper) + + super(API, self).__init__(mapper) diff --git a/code/daisy/daisy/registry/api/v1/__init__.py b/code/daisy/daisy/registry/api/v1/__init__.py new file mode 100755 index 00000000..35177c28 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/__init__.py @@ -0,0 +1,449 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.common import wsgi +from daisy.registry.api.v1 import members +from daisy.registry.api.v1 import hosts +from daisy.registry.api.v1 import config_files +from daisy.registry.api.v1 import config_sets +from daisy.registry.api.v1 import configs + +from daisy.registry.api.v1 import networks +from daisy.registry.api.v1 import disk_array +from daisy.registry.api.v1 import template + +def init(mapper): + + members_resource = members.create_resource() + + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + controller=members_resource, + action="add_cluster_host", + conditions={'method': ['PUT']}) + + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + controller=members_resource, + action="delete_cluster_host", + conditions={'method': ['DELETE']}) + mapper.connect("/clusters/{cluster_id}/nodes/{host_id}", + controller=members_resource, + action="get_cluster_hosts", + conditions={'method': ['GET']}) + mapper.connect("/clusters/{cluster_id}/nodes", + controller=members_resource, + action="get_cluster_hosts", + conditions={'method': ['GET']}) + mapper.connect("/multi_clusters/nodes/{host_id}", + controller=members_resource, + action="get_host_clusters", + conditions={'method': ['GET']}) + + hosts_resource = hosts.create_resource() + + mapper.connect("/nodes", + controller=hosts_resource, + action="add_host", + conditions={'method': ['POST']}) + + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action="delete_host", + conditions={'method': ['DELETE']}) + + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action="update_host", + conditions={'method': ['PUT']}) + + mapper.connect("/nodes", + controller=hosts_resource, + action="detail_host", + conditions={'method': ['GET']}) + + mapper.connect("/nodes/{id}", + controller=hosts_resource, + action="get_host", + conditions=dict(method=["GET"])) + + mapper.connect("/discover/nodes", + controller=hosts_resource, + action="add_discover_host", + conditions={'method': ['POST']}) + mapper.connect("/discover/nodes", + controller=hosts_resource, + action="detail_discover_host", + conditions={'method': ['GET']}) + 
mapper.connect("/discover/nodes/{id}", + controller=hosts_resource, + action="update_discover_host", + conditions={'method': ['PUT']}) + + mapper.connect("/discover/nodes/{discover_host_id}", + controller=hosts_resource, + action="get_discover_host", + conditions=dict(method=["GET"])) + + mapper.connect("/discover/nodes/{id}", + controller=hosts_resource, + action="delete_discover_host", + conditions={'method': ['DELETE']}) + + mapper.connect("/host-interface", + controller=hosts_resource, + action="get_host_interface", + conditions=dict(method=["GET"])) + mapper.connect("/interfaces/{interface_id}/network/{network_id}", + controller=hosts_resource, + action="get_assigned_network", + conditions=dict(method=["GET"])) + mapper.connect("/host-interfaces", + controller=hosts_resource, + action="get_all_host_interfaces", + conditions=dict(method=["PUT"])) + + mapper.connect("/clusters", + controller=hosts_resource, + action="add_cluster", + conditions={'method': ['POST']}) + + mapper.connect("/clusters/{id}", + controller=hosts_resource, + action="update_cluster", + conditions={'method': ['PUT']}) + + mapper.connect("/clusters/{id}", + controller=hosts_resource, + action="delete_cluster", + conditions={'method': ['DELETE']}) + + mapper.connect("/clusters", + controller=hosts_resource, + action='detail_cluster', + conditions={'method': ['GET']}) + + mapper.connect("/clusters/{id}", + controller=hosts_resource, + action="get_cluster", + conditions=dict(method=["GET"])) + + + mapper.connect("/components", + controller=hosts_resource, + action="add_component", + conditions={'method': ['POST']}) + mapper.connect("/components/{id}", + controller=hosts_resource, + action="delete_component", + conditions={'method': ['DELETE']}) + mapper.connect("/components/detail", + controller=hosts_resource, + action='detail_component', + conditions={'method': ['GET']}) + mapper.connect("/components/{id}", + controller=hosts_resource, + action="get_component", + conditions=dict(method=["GET"])) + mapper.connect("/components/{id}", + controller=hosts_resource, + action="update_component", + conditions={'method': ['PUT']}) + + mapper.connect("/services", + controller=hosts_resource, + action="add_service", + conditions={'method': ['POST']}) + mapper.connect("/services/{id}", + controller=hosts_resource, + action="delete_service", + conditions={'method': ['DELETE']}) + mapper.connect("/services/detail", + controller=hosts_resource, + action='detail_service', + conditions={'method': ['GET']}) + mapper.connect("/services/{id}", + controller=hosts_resource, + action="get_service", + conditions=dict(method=["GET"])) + mapper.connect("/services/{id}", + controller=hosts_resource, + action="update_service", + conditions={'method': ['PUT']}) + + mapper.connect("/roles", + controller=hosts_resource, + action="add_role", + conditions={'method': ['POST']}) + mapper.connect("/roles/{id}", + controller=hosts_resource, + action="delete_role", + conditions={'method': ['DELETE']}) + mapper.connect("/roles/detail", + controller=hosts_resource, + action='detail_role', + conditions={'method': ['GET']}) + mapper.connect("/roles/{id}", + controller=hosts_resource, + action="get_role", + conditions=dict(method=["GET"])) + mapper.connect("/roles/{id}", + controller=hosts_resource, + action="update_role", + conditions={'method': ['PUT']}) + mapper.connect("/roles/{id}/services", + controller=hosts_resource, + action="role_services", + conditions={'method': ['GET']}) + mapper.connect("/roles/{id}/hosts", + controller=hosts_resource, + 
action="host_roles", + conditions={'method': ['GET']}) + mapper.connect("/roles/{id}/hosts", + controller=hosts_resource, + action="delete_role_hosts", + conditions={'method': ['DELETE']}) + mapper.connect("/roles/{id}/hosts", + controller=hosts_resource, + action="update_role_hosts", + conditions={'method': ['PUT']}) + + config_files_resource = config_files.create_resource() + + mapper.connect("/config_files", + controller=config_files_resource, + action="add_config_file", + conditions={'method': ['POST']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="delete_config_file", + conditions={'method': ['DELETE']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="update_config_file", + conditions={'method': ['PUT']}) + + mapper.connect("/config_files/detail", + controller=config_files_resource, + action="detail_config_file", + conditions={'method': ['GET']}) + + mapper.connect("/config_files/{id}", + controller=config_files_resource, + action="get_config_file", + conditions=dict(method=["GET"])) + + config_sets_resource = config_sets.create_resource() + + mapper.connect("/config_sets", + controller=config_sets_resource, + action="add_config_set", + conditions={'method': ['POST']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="delete_config_set", + conditions={'method': ['DELETE']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="update_config_set", + conditions={'method': ['PUT']}) + + mapper.connect("/config_sets/detail", + controller=config_sets_resource, + action="detail_config_set", + conditions={'method': ['GET']}) + + mapper.connect("/config_sets/{id}", + controller=config_sets_resource, + action="get_config_set", + conditions=dict(method=["GET"])) + + configs_resource = configs.create_resource() + + mapper.connect("/configs", + controller=configs_resource, + action="add_config", + conditions={'method': ['POST']}) + + mapper.connect("/configs/{id}", + controller=configs_resource, + action="delete_config", + conditions={'method': ['DELETE']}) + + mapper.connect("/configs/{id}", + controller=configs_resource, + action="update_config", + conditions={'method': ['PUT']}) + + mapper.connect("/configs/update_config_by_role_hosts", + controller=configs_resource, + action="update_config_by_role_hosts", + conditions={'method': ['POST']}) + + mapper.connect("/configs/detail", + controller=configs_resource, + action="detail_config", + conditions={'method': ['GET']}) + + mapper.connect("/configs/{id}", + controller=configs_resource, + action="get_config", + conditions=dict(method=["GET"])) + + networks_resource = networks.create_resource() + + mapper.connect("/clusters/{id}/networks", + controller=networks_resource, + action="detail_network", + conditions={'method': ['GET']}) + + mapper.connect("/networks", + controller=networks_resource, + action="get_all_networks", + conditions={'method': ['GET']}) + + # mapper.resource('network', 'networks',controller=networks_resource, + # collection={'update_phyname_of_network':'POST', 'add_network':"POST"}, + # member={'get_network':'GET', 'update_network':'PUT', 'delete_network':'DELETE'}) + + mapper.connect("/networks", + controller=networks_resource, + action="add_network", + conditions={'method': ['POST']}) + + mapper.connect("/networks/{network_id}", + controller=networks_resource, + action="delete_network", + conditions={'method': ['DELETE']}) + + mapper.connect("/networks/{network_id}", + 
controller=networks_resource, + action="update_network", + conditions={'method': ['PUT']}) + + mapper.connect("/networks/{id}", + controller=networks_resource, + action="get_network", + conditions=dict(method=["GET"])) + + mapper.connect("/networks/update_phyname_of_network", + controller=networks_resource, + action="update_phyname_of_network", + conditions=dict(method=["POST"])) + + config_interface_resource = hosts.create_resource() + + mapper.connect("/config_interface", + controller=config_interface_resource, + action="config_interface", + conditions={'method': ['POST']}) + + array_resource = disk_array.create_resource() + mapper.connect("/service_disk", + controller=array_resource, + action='service_disk_add', + conditions={'method': ['POST']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_update', + conditions={'method': ['PUT']}) + mapper.connect("/service_disk/list", + controller=array_resource, + action='service_disk_list', + conditions={'method': ['GET']}) + mapper.connect("/service_disk/{id}", + controller=array_resource, + action='service_disk_detail', + conditions={'method': ['GET']}) + + mapper.connect("/cinder_volume", + controller=array_resource, + action='cinder_volume_add', + conditions={'method': ['POST']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_update', + conditions={'method': ['PUT']}) + mapper.connect("/cinder_volume/list", + controller=array_resource, + action='cinder_volume_list', + conditions={'method': ['GET']}) + mapper.connect("/cinder_volume/{id}", + controller=array_resource, + action='cinder_volume_detail', + conditions={'method': ['GET']}) + + template_resource = template.create_resource() + mapper.connect("/template", + controller=template_resource, + action='template_add', + conditions={'method': ['POST']}) + mapper.connect("/template/{template_id}", + controller=template_resource, + action='template_update', + conditions={'method': ['PUT']}) + mapper.connect("/template/{template_id}", + controller=template_resource, + action='template_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/template/list", + controller=template_resource, + action='template_list', + conditions={'method': ['GET']}) + mapper.connect("/template/{template_id}", + controller=template_resource, + action='template_detail', + conditions={'method': ['GET']}) + + mapper.connect("/host_template", + controller=template_resource, + action='host_template_add', + conditions={'method': ['POST']}) + mapper.connect("/host_template/{template_id}", + controller=template_resource, + action='host_template_update', + conditions={'method': ['PUT']}) + mapper.connect("/host_template/{template_id}", + controller=template_resource, + action='host_template_delete', + conditions={'method': ['DELETE']}) + mapper.connect("/host_template/list", + controller=template_resource, + action='host_template_list', + conditions={'method': ['GET']}) + mapper.connect("/host_template/{template_id}", + controller=template_resource, + action='host_template_detail', + conditions={'method': ['GET']}) + +class API(wsgi.Router): + """WSGI entry point for all Registry requests.""" + + def __init__(self, mapper): + mapper = mapper or 
wsgi.APIMapper() + + init(mapper) + + super(API, self).__init__(mapper) diff --git a/code/daisy/daisy/registry/api/v1/config_files.py b/code/daisy/daisy/registry/api/v1/config_files.py new file mode 100755 index 00000000..f4899bf7 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/config_files.py @@ -0,0 +1,377 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'container_format'] + +SUPPORTED_SORT_KEYS = ('name', 'container_format', + 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_config_files(self, context, filters, **params): + """Get config_files, wrapping in exception if necessary.""" + try: + return self.db_api.config_file_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Config_file %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Config_file could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to config_file %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. config_file could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get config_files")) + raise + + def detail_config_file(self, req): + """Return a filtered list of public, non-deleted config_files in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(config_files=[config_file_list]) + + Where config_file_list is a sequence of mappings containing + all config_file model fields. + """ + params = self._get_query_params(req) + + config_files = self._get_config_files(req.context, **params) + + return dict(config_files=config_files) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. 
+ + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of config_files + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. 
Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def add_config_file(self, req, body): + """Registers a new config_file with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the config_file + + :retval Returns the newly-created config_file information as a mapping, + which will include the newly-created config_file's internal id + in the 'id' field + """ + + config_file_data = body["config_file"] + + config_file_id = config_file_data.get('id') + + if config_file_id and not utils.is_uuid_like(config_file_id): + msg = _LI("Rejecting config_file creation request for invalid config_file " + "id '%(bad_id)s'") % {'bad_id': config_file_id} + LOG.info(msg) + msg = _("Invalid config_file id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + config_file_data = self.db_api.config_file_add(req.context, config_file_data) + + msg = (_LI("Successfully created config_file %s") % + config_file_data["id"]) + LOG.info(msg) + if 'config_file' not in config_file_data: + config_file_data = dict(config_file=config_file_data) + return config_file_data + except exception.Duplicate: + msg = _("config_file with identifier %s already exists!") % config_file_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add config_file metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create config_file %s"), config_file_id) + raise + + @utils.mutating + def delete_config_file(self, req, id): + """Deletes an existing config_file with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. 
+ """ + try: + deleted_config_file = self.db_api.config_file_destroy(req.context, id) + msg = _LI("Successfully deleted config_file %(id)s") % {'id': id} + LOG.info(msg) + return dict(config_file=deleted_config_file) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public config_file %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_file %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("config_file %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete config_file %s") % id) + raise + + @utils.mutating + def get_config_file(self, req, id): + """Return data about the given config_file id.""" + try: + config_file_data = self.db_api.config_file_get(req.context, id) + msg = "Successfully retrieved config_file %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("config_file %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_file %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show config_file %s") % id) + raise + if 'config_file' not in config_file_data: + config_file_data = dict(config_file=config_file_data) + return config_file_data + + @utils.mutating + def update_config_file(self, req, id, body): + """Updates an existing config_file with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + config_file_data = body['config_file'] + try: + updated_config_file = self.db_api.config_file_update(req.context, id, config_file_data) + + msg = _LI("Updating metadata for config_file %(id)s") % {'id': id} + LOG.info(msg) + if 'config_file' not in updated_config_file: + config_file_data = dict(config_file=updated_config_file) + return config_file_data + except exception.Invalid as e: + msg = (_("Failed to update config_file metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("config_file %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config_file not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for config_file %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_file %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config_file not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='config_file operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update config_file %s") % id) + raise + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/config_sets.py b/code/daisy/daisy/registry/api/v1/config_sets.py new file mode 100755 index 00000000..61ec8637 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/config_sets.py @@ -0,0 +1,384 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'container_format'] + +SUPPORTED_SORT_KEYS = ('name', 'container_format', + 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_config_sets(self, context, filters, **params): + """Get config_sets, wrapping in exception if necessary.""" + try: + return self.db_api.config_set_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Config_set %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Config_set could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to config_set %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. config_set could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get config_sets")) + raise + + def detail_config_set(self, req): + """Return a filtered list of public, non-deleted config_sets in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(config_sets=[config_set_list]) + + Where config_set_list is a sequence of mappings containing + all config_set model fields. + """ + params = self._get_query_params(req) + + config_sets = self._get_config_sets(req.context, **params) + + return dict(config_sets=config_sets) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. + + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of config_sets + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in 
SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def add_config_set(self, req, body): + """Registers a new config_set with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the config_set + + :retval Returns the newly-created config_set information as a mapping, + which will include the newly-created config_set's internal id + in the 'id' field + """ + + config_set_data = body["config_set"] + + config_set_id = config_set_data.get('id') + + if config_set_id and not utils.is_uuid_like(config_set_id): + msg = _LI("Rejecting config_set creation request for invalid config_set " + "id '%(bad_id)s'") % {'bad_id': config_set_id} + LOG.info(msg) + msg = _("Invalid config_set id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + config_set_data = self.db_api.config_set_add(req.context, config_set_data) + + msg = (_LI("Successfully created config_set %s") % + config_set_data["id"]) + LOG.info(msg) + if 'config_set' not in config_set_data: + config_set_data = dict(config_set=config_set_data) + return config_set_data + except exception.Duplicate: + msg = _("config_set with identifier %s already exists!") % config_set_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add config_set metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create config_set %s"), config_set_id) + raise + + @utils.mutating + def delete_config_set(self, req, id): + """Deletes an existing config_set with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. 
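+
+        A minimal sketch of the success payload, assuming only that the
+        record carries an 'id' field (other fields illustrative)::
+
+            {"config_set": {"id": "<uuid>", ...}}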
+ """ + try: + deleted_config_set = self.db_api.config_set_destroy(req.context, id) + msg = _LI("Successfully deleted config_set %(id)s") % {'id': id} + LOG.info(msg) + return dict(config_set=deleted_config_set) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public config_set %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_set %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("config_set %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete config_set %s") % id) + raise + + @utils.mutating + def get_config_set(self, req, id): + """Return data about the given config_set id.""" + try: + config_set_data = self.db_api.config_set_get(req.context, id) + msg = "Successfully retrieved config_set %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("config_set %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_set %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show config_set %s") % id) + raise + if 'config_set' not in config_set_data: + config_set_data = dict(config_set=config_set_data) + config_items = self.db_api._config_item_get_by_config_set_id(req.context, id) + config = [] + for config_item in config_items: + config_inf = self.db_api.config_get(req.context, config_item['config_id']) + config.append(config_inf) + if config: + config_set_data['config_set']['config'] = config + return config_set_data + + @utils.mutating + def update_config_set(self, req, id, body): + """Updates an existing config_set with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + config_set_data = body['config_set'] + try: + updated_config_set = self.db_api.config_set_update(req.context, id, config_set_data) + + msg = _LI("Updating metadata for config_set %(id)s") % {'id': id} + LOG.info(msg) + if 'config_set' not in updated_config_set: + config_set_data = dict(config_set=updated_config_set) + return config_set_data + except exception.Invalid as e: + msg = (_("Failed to update config_set metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("config_set %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config_set not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for config_set %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config_set %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config_set not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='config_set operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update config_set %s") % id) + raise + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/configs.py b/code/daisy/daisy/registry/api/v1/configs.py new file mode 100755 index 00000000..9172b3b8 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/configs.py @@ -0,0 +1,381 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name','container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'container_format'] + +SUPPORTED_SORT_KEYS = ('name', 'container_format', + 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_configs(self, context, filters, **params): + """Get configs, wrapping in exception if necessary.""" + try: + return self.db_api.config_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Config %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Config could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to config %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. config could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get configs")) + raise + + def detail_config(self, req): + """Return a filtered list of public, non-deleted configs in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(configs=[config_list]) + + Where config_list is a sequence of mappings containing + all config model fields. + """ + params = self._get_query_params(req) + + configs = self._get_configs(req.context, **params) + + return dict(configs=configs) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. + + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of configs + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) 
+ msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def add_config(self, req, body): + """Registers a new config with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the config + + :retval Returns the newly-created config information as a mapping, + which will include the newly-created config's internal id + in the 'id' field + """ + + config_data = body["config"] + + config_id = config_data.get('id') + + if config_id and not utils.is_uuid_like(config_id): + msg = _LI("Rejecting config creation request for invalid config " + "id '%(bad_id)s'") % {'bad_id': config_id} + LOG.info(msg) + msg = _("Invalid config id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + config_data = self.db_api.config_add(req.context, config_data) + + msg = (_LI("Successfully created config %s") % + config_data["id"]) + LOG.info(msg) + if 'config' not in config_data: + config_data = dict(config=config_data) + return config_data + except exception.Duplicate: + msg = _("config with identifier %s already exists!") % config_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add config metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create config %s"), config_id) + raise + + @utils.mutating + def delete_config(self, req, id): + """Deletes an existing config with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. 
+ """ + try: + deleted_config = self.db_api.config_destroy(req.context, id) + msg = _LI("Successfully deleted config %(id)s") % {'id': id} + LOG.info(msg) + return dict(config=deleted_config) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public config %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("config %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete config %s") % id) + raise + + @utils.mutating + def get_config(self, req, id): + """Return data about the given config id.""" + try: + config_data = self.db_api.config_get(req.context, id) + msg = "Successfully retrieved config %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("config %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show config %s") % id) + raise + if 'config' not in config_data: + config_data = dict(config=config_data) + return config_data + + @utils.mutating + def update_config_by_role_hosts(self, req, body): + return self.db_api.update_config_by_role_hosts(req.context, body['configs']) + + @utils.mutating + def update_config(self, req, id, body): + """Updates an existing config with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + config_data = body['config'] + try: + updated_config = self.db_api.config_update(req.context, id, config_data) + + msg = _LI("Updating metadata for config %(id)s") % {'id': id} + LOG.info(msg) + if 'config' not in updated_config: + config_data = dict(config=updated_config) + return config_data + except exception.Invalid as e: + msg = (_("Failed to update config metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("config %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for config %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to config %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='config not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='config operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update config %s") % id) + raise + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/disk_array.py b/code/daisy/daisy/registry/api/v1/disk_array.py new file mode 100755 index 00000000..23683960 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/disk_array.py @@ -0,0 +1,575 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', + 'disk_format', 'container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'status', 'role_id', 'container_format', 'disk_format', + 'min_ram', 'min_disk', 'size_min', 'size_max', + 'changes-since', 'protected'] + +SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'cluster_id') +SUPPORTED_SORT_KEYS = ('name','role_id', 'status', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('role_id','limit', 'marker', 'sort_key', 'sort_dir') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. 
+ + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of service_disks + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. 
Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def service_disk_add(self, req, body): + """Registers a new service_disk with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the service_disk + + :retval Returns the newly-created service_disk information as a mapping, + which will include the newly-created service_disk's internal id + in the 'id' field + """ + + service_disk_data = body["service_disk"] + + id = service_disk_data.get('id') + + # role = service_disk_data.get('role') + # add id and role + # if role + # self.db_api.get_role(req.context,role) + + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting service_disk creation request for invalid service_disk " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid service_disk id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + service_disk_data = self.db_api.service_disk_add(req.context, service_disk_data) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully created node %s") % + service_disk_data["id"]) + LOG.info(msg) + if 'service_disk' not in service_disk_data: + service_disk_data = dict(service_disk=service_disk_data) + return service_disk_data + except exception.Duplicate: + msg = _("node with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add node metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create node %s"), id) + raise + + @utils.mutating + def service_disk_delete(self, req, id): + """Deletes an existing service_disk with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_service_disk = self.db_api.service_disk_destroy(req.context, id) + msg = _LI("Successfully deleted service_disk %(id)s") % {'id': id} + LOG.info(msg) + return dict(service_disk=deleted_service_disk) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public service_disk %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to service_disk %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("service_disk %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete service_disk %s") % id) + raise + + + @utils.mutating + def service_disk_update(self, req, id, body): + """Updates an existing service_disk with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + service_disk_data = body['service_disk'] + try: + updated_service_disk = self.db_api.service_disk_update(req.context, id, service_disk_data) + + msg = _LI("Updating metadata for service_disk %(id)s") % {'id': id} + LOG.info(msg) + if 'service_disk' not in updated_service_disk: + service_disk_data = dict(service_disk=updated_service_disk) + return service_disk_data + except exception.Invalid as e: + msg = (_("Failed to update service_disk metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("service_disk %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='service_disk not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public service_disk %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='service_disk operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update service_disk %s") % id) + raise + + + @utils.mutating + def service_disk_detail(self, req, id): + """Return data about the given service_disk id.""" + try: + service_disk_data = self.db_api.service_disk_detail(req.context, id) + msg = "Successfully retrieved service_disk %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("service_disk %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to service_disk %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show service_disk %s") % id) + raise + if 'service_disk' not in service_disk_data: + service_disk_data = dict(service_disk=service_disk_data) + return service_disk_data + + def _list_service_disks(self, context, filters, params): + """Get service_disks, wrapping in exception if necessary.""" + try: + return self.db_api.service_disk_list(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. service_disk %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. service_disk could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to service_disk %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. service_disk could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get service_disks")) + raise + + def service_disk_list(self, req): + """Return a filtered list of public, non-deleted service_disks in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(service_disks=[service_disk_list]) + + Where service_disk_list is a sequence of mappings containing + all service_disk model fields. 
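+
+        The same role_id/limit/marker/sort_key/sort_dir parameters parsed
+        by _get_query_params above are honoured, so a typical reply is of
+        the form (records illustrative)::
+
+            {"service_disks": [{"id": "<uuid>", "role_id": "<uuid>", ...}]}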
+ """ + params = self._get_query_params(req) + filters = params.pop('filters') + service_disks = self._list_service_disks(req.context, filters, params) + return dict(service_disks=service_disks) + + @utils.mutating + def cinder_volume_add(self, req, body): + """Registers a new cinder_volume with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the cinder_volume + + :retval Returns the newly-created cinder_volume information as a mapping, + which will include the newly-created cinder_volume's internal id + in the 'id' field + """ + + cinder_volume_data = body["cinder_volume"] + + id = cinder_volume_data.get('id') + + # role = service_disk_data.get('role') + # add id and role + # if role + # self.db_api.get_role(req.context,role) + + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting cinder_volume creation request for invalid cinder_volume " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid cinder_volume id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + cinder_volume_data = self.db_api.cinder_volume_add(req.context, cinder_volume_data) + msg = (_LI("Successfully created cinder_volume %s") % + cinder_volume_data["id"]) + LOG.info(msg) + if 'cinder_volume' not in cinder_volume_data: + cinder_volume_data = dict(cinder_volume=cinder_volume_data) + return cinder_volume_data + except exception.Duplicate: + msg = _("cinder_volume with identifier %s already exists!") % id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add cinder_volume metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create cinder_volume %s"), id) + raise + + @utils.mutating + def cinder_volume_delete(self, req, id): + """Deletes an existing cinder_volume with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_cinder_volume = self.db_api.cinder_volume_destroy(req.context, id) + msg = _LI("Successfully deleted cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + LOG.info(msg) + return dict(cinder_volume=deleted_cinder_volume) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cinder_volume %(id)s but returning" + " 'not found'") % {'cinder_volume_id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("cinder_volume %(cinder_volume_id)s not found") % {'cinder_volume_id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete cinder_volume %s") % id) + raise + + @utils.mutating + def cinder_volume_update(self, req, id, body): + """Updates an existing cinder_volume with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + cinder_volume_data = body['cinder_volume'] + try: + updated_cinder_volume = self.db_api.cinder_volume_update(req.context, id, cinder_volume_data) + + msg = _LI("Updating metadata for cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + LOG.info(msg) + if 'cinder_volume' not in updated_cinder_volume: + cinder_volume_data = dict(cinder_volume=updated_cinder_volume) + return cinder_volume_data + except exception.Invalid as e: + msg = (_("Failed to update cinder_volume metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("cinder_volume %(cinder_volume_id)s not found") % {'cinder_volume_id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='cinder_volume not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public cinder_volume %(cinder_volume_id)s") % {'cinder_volume_id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='cinder_volume operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update cinder_volume %s") % id) + raise + + @utils.mutating + def cinder_volume_detail(self, req, id): + """Return data about the given cinder_volume id.""" + try: + cinder_volume_data = self.db_api.cinder_volume_detail(req.context, id) + msg = "Successfully retrieved cinder_volume %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("cinder_volume %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cinder_volume %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show cinder_volume %s") % id) + raise + if 'cinder_volume' not in cinder_volume_data: + cinder_volume_data = dict(cinder_volume=cinder_volume_data) + return cinder_volume_data + + def _list_cinder_volumes(self, context, filters, params): + """Get cinder_volumes, wrapping in exception if necessary.""" + try: + return self.db_api.cinder_volume_list(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. cinder_volume %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. cinder_volume could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to cinder_volume %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
cinder_volume could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get cinder_volumes")) + raise + + def cinder_volume_list(self, req): + """Return a filtered list of public, non-deleted cinder_volumes in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(cinder_volumes=[cinder_volume_list]) + + Where cinder_volume_list is a sequence of mappings containing + all service_disk model fields. + """ + params = self._get_query_params(req) + filters = params.pop('filters') + cinder_volumes = self._list_cinder_volumes(req.context, filters, params) + + return dict(cinder_volumes=cinder_volumes) + + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/hosts.py b/code/daisy/daisy/registry/api/v1/hosts.py new file mode 100755 index 00000000..50eef9c3 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/hosts.py @@ -0,0 +1,1687 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n +from ironicclient import client as ironic_client + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', + 'disk_format', 'container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'status','id','cluster_id' , 'auto_scale', 'container_format', 'disk_format', + + 'changes-since', 'protected'] + +SUPPORTED_SORT_KEYS = ('name', 'status', 'cluster_id', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'cluster_id') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + self.ironicclient = self.get_ironicclient() + + @staticmethod + def get_ironicclient(): # pragma: no cover + """Get Ironic client instance.""" + args = {'os_auth_token': 'fake', + 'ironic_url':'http://127.0.0.1:6385/v1'} + return ironic_client.get_client(1, **args) + + def _get_hosts(self, context, filters, **params): + """Get hosts, wrapping in exception if necessary.""" + try: + return self.db_api.host_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Host %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Host could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to host %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. Host could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get hosts")) + raise + + def _get_clusters(self, context, filters, **params): + """Get clusters, wrapping in exception if necessary.""" + try: + return self.db_api.cluster_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Cluster %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Cluster could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to cluster %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. Cluster could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get clusters")) + raise + + def detail_host(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(nodes=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + + nodes = self._get_hosts(req.context, **params) + + return dict(nodes=nodes) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. + + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of hosts + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: 
+ raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def add_host(self, req, body): + """Registers a new host with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + host_data = body["host"] + + host_id = host_data.get('id') + + if host_id and not utils.is_uuid_like(host_id): + msg = _LI("Rejecting host creation request for invalid host " + "id '%(bad_id)s'") % {'bad_id': host_id} + LOG.info(msg) + msg = _("Invalid host id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + if host_id is None: + host_data = self.db_api.host_add(req.context, host_data) + else: + host_data = self.db_api.host_update(req.context, host_id, host_data) + #host_data = dict(host=make_image_dict(host_data)) + msg = (_LI("Successfully created node %s") % + host_data["id"]) + LOG.info(msg) + if 'host' not in host_data: + host_data = dict(host=host_data) + return host_data + except exception.Duplicate: + msg = _("node with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add node metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create node %s"), host_id) + raise + + @utils.mutating + def delete_host(self, req, id): + """Deletes an existing host with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. 
+ """ + try: + deleted_host = self.db_api.host_destroy(req.context, id) + msg = _LI("Successfully deleted host %(id)s") % {'id': id} + LOG.info(msg) + members = self.db_api.cluster_host_member_find(req.context, + host_id=id) + if members: + for member in members: + self.db_api.cluster_host_member_delete(req.context, member['id']) + + self.db_api.role_host_member_delete(req.context, host_id=id) + return dict(host=deleted_host) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public host %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to host %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete host %s") % id) + raise + + @utils.mutating + def get_host(self, req, id): + """Return data about the given node id.""" + os_version_dict = {} + try: + host_data = self.db_api.host_get(req.context, id) + if utils.is_uuid_like(host_data.os_version_id): + version = self.db_api.get_os_version(req.context, host_data.os_version_id) + if version: + os_version_dict['name'] = version.name + os_version_dict['id'] = version.id + os_version_dict['desc'] = version.description + msg = "Successfully retrieved host %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to host %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show host %s") % id) + raise + host_interface = self.db_api.get_host_interface(req.context, id) + + role_name=[] + if host_data.status == "with-role": + host_roles=self.db_api.role_host_member_get(req.context,None,id) + for host_role in host_roles: + role_info=self.db_api.role_get(req.context, host_role.role_id) + role_name.append(role_info['name']) + host_cluster=self.db_api.cluster_host_member_find(req.context, None,id) + if host_cluster: + cluster_info = self.db_api.cluster_get(req.context, host_cluster[0]['cluster_id']) + cluster_name = cluster_info['name'] + else: + cluster_name = None + if 'host' not in host_data: + host_data = dict(host=host_data) + if host_interface: + host_data['host']['interfaces'] = host_interface + if os_version_dict: + host_data['host']['os_version'] = os_version_dict + if role_name: + host_data['host']['role']=role_name + if cluster_name: + host_data['host']['cluster']=cluster_name + + host_deploy_network = [hi for hi in host_interface if hi['is_deployment']] + if host_deploy_network: + try: + host_obj = self.ironicclient.physical_node.get(host_deploy_network[0]['mac']) + host_hardware_config = dict([(f, getattr(host_obj, f, '')) for f in ['system', 'memory', 'cpu', 'disks', 'interfaces']]) + host_data['host']['system'] = host_hardware_config['system'] + host_data['host']['memory'] = host_hardware_config['memory'] + host_data['host']['cpu'] = host_hardware_config['cpu'] + host_data['host']['disks'] = host_hardware_config['disks'] + if host_interface: + for interface in host_interface: + for ironic_interface in 
host_hardware_config['interfaces'].values():
+                            if interface['mac'] == ironic_interface['mac'] and \
+                                    interface['pci'] == ironic_interface['pci']:
+                                interface['state'] = ironic_interface['state']
+                                interface['max_speed'] = ironic_interface['max_speed']
+                                interface['current_speed'] = ironic_interface['current_speed']
+                                # interface['pci'] = ironic_interface['pci']
+                    host_data['host']['interfaces'] = host_interface
+            except Exception:
+                LOG.exception(_LE("Unable to find ironic data %s") % Exception)
+
+        return host_data
+
+    @utils.mutating
+    def get_host_interface(self, req, body):
+        orig_interfaces = list(eval(body['interfaces']))
+        for orig_interface in orig_interfaces:
+            host_interface = self.db_api.get_host_interface_mac(req.context, orig_interface['mac'])
+        return host_interface
+
+    @utils.mutating
+    def get_all_host_interfaces(self, req, body):
+        """Return all_host_interfaces about the given filter."""
+        filters = body['filters']
+
+        try:
+            host_interfaces = self.db_api.host_interfaces_get_all(req.context, filters)
+            return host_interfaces
+        except exception.NotFound:
+            LOG.warn(_LW("Host interfaces matching filters %s could not be "
+                         "found.") % filters)
+            msg = _("Host interfaces could not be found.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except exception.Forbidden:
+            LOG.warn(_LW("Access denied to host interfaces matching filters %s "
+                         "but returning 'not found'") % filters)
+            msg = _("Host interfaces could not be found.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except Exception:
+            LOG.exception(_LE("Unable to list host interfaces"))
+            raise
+
+    @utils.mutating
+    def get_assigned_network(self, req, interface_id, network_id):
+        try:
+            host_assigned_network = self.db_api.get_assigned_network(req.context,
+                                                                     interface_id, network_id)
+        except exception.NotFound:
+            LOG.warn(_LW("Invalid marker. Assigned_network with interface %(interface_id)s and network %(network_id)s"
+                         " could not be found.") % {'interface_id': interface_id, 'network_id': network_id})
+            msg = _("Invalid marker. Assigned_network could not be found.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except exception.Forbidden:
+            LOG.warn(_LW("Access denied for assigned_network with interface %(interface_id)s "
+                         "and network %(network_id)s") % {'interface_id': interface_id, 'network_id': network_id})
+            msg = _("Invalid marker. Assigned_network denied to get.")
+            raise exc.HTTPBadRequest(explanation=msg)
+        except Exception:
+            LOG.exception(_LE("Unable to get assigned_network"))
+            raise
+        return host_assigned_network
+
+    @utils.mutating
+    def add_discover_host(self, req, body):
+        """Registers a new host with the registry.
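+
+        The payload is wrapped under a 'discover_host' key; when an 'id'
+        is supplied it must be a UUID and the existing record is updated
+        rather than created. A minimal sketch (other fields illustrative)::
+
+            {"discover_host": {"id": "<uuid, optional>", ...}}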
+ + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + discover_host_data = body["discover_host"] + discover_host_id = discover_host_data.get('id') + + if discover_host_id and not utils.is_uuid_like(discover_host_id): + msg = _LI("Rejecting host creation request for invalid host " + "id '%(bad_id)s'") % {'bad_id': discover_host_id} + LOG.info(msg) + msg = _("Invalid host id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + if discover_host_id is None: + discover_host_data = self.db_api.discover_host_add(req.context, discover_host_data) + else: + discover_host_data = self.db_api.discover_host_update(req.context, discover_host_id, discover_host_data) + #host_data = dict(host=make_image_dict(host_data)) + msg = (_LI("Successfully created node %s") % + discover_host_data["id"]) + LOG.info(msg) + if 'discover_host' not in discover_host_data: + discover_host_data = dict(discover_host = discover_host_data) + return discover_host_data + except exception.Duplicate: + msg = _("node with identifier %s already exists!") % discover_host_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add node metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create node %s"), discover_host_id) + raise + + @utils.mutating + def delete_discover_host(self, req, id): + """Deletes an existing discover host with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_host = self.db_api.discover_host_destroy(req.context, id) + msg = _LI("Successfully deleted host %(id)s") % {'id': id} + return dict(discover_host=deleted_host) + except exception.Forbidden: + msg = _LI("Access denied to host %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete host %s") % id) + raise + + def detail_discover_host(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(nodes=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + try: + nodes = self.db_api.discover_host_get_all(req.context, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Host %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Host could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to host %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Host could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get hosts")) + raise + + return dict(nodes=nodes) + + @utils.mutating + def update_discover_host(self, req, id, body): + ''' + ''' + discover_host_data = body["discover_host"] + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting host creation request for invalid host " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid host id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + updated_host = self.db_api.discover_host_update(req.context, id, discover_host_data) + msg = _LI("Updating metadata for host %(id)s") % {'id': id} + LOG.info(msg) + if 'discover_host' not in updated_host: + host_data = dict(discover_host=updated_host) + return host_data + except exception.Invalid as e: + msg = (_("Failed to update host metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Host not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public host %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Host operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update host %s") % id) + raise + + def get_discover_host(self, req, discover_host_id): + ''' + ''' + if discover_host_id and not utils.is_uuid_like(discover_host_id): + msg = _LI("Rejecting host creation request for invalid host " + "id '%(bad_id)s'") % {'bad_id': discover_host_id} + LOG.info(msg) + msg = _("Invalid host id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + host_detail_info = self.db_api.get_discover_host_detail(req.context, discover_host_id) + msg = _LI("Updating metadata for host %(id)s") % {'id': discover_host_id} + LOG.info(msg) + if 'discover_host' not in host_detail_info: + host_data = dict(discover_host=host_detail_info) + LOG.info("host_data: %s" % host_data) + return host_data + except exception.Invalid as e: + msg = (_("Failed to update host metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': discover_host_id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Host not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public host %(id)s") % {'id': discover_host_id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Host operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update host %s") % discover_host_id) + raise + + @utils.mutating + def add_cluster(self, req, body): + """Registers a new host with the registry. 
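# A small standalone sketch of the add-or-update dispatch used by
# add_discover_host above: a body without an 'id' creates a new discovered
# host, a body carrying an 'id' updates the existing record, and the result
# is wrapped under the 'discover_host' key. The db_add/db_update callables
# are stand-ins for the real db_api methods.
def add_or_update_discover_host(body, db_add, db_update):
    data = body["discover_host"]
    host_id = data.get('id')
    if host_id is None:
        result = db_add(data)               # first report from this host
    else:
        result = db_update(host_id, data)   # refresh an existing record
    return {'discover_host': result}

# Example with trivial stand-ins:
# add_or_update_discover_host(
#     {'discover_host': {'ip': '10.0.0.5'}},
#     db_add=lambda d: dict(d, id='uuid-1'),
#     db_update=lambda host_id, d: dict(d, id=host_id))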
+ + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + cluster_data = body["cluster"] + + cluster_id = cluster_data.get('id') + + if cluster_id and not utils.is_uuid_like(cluster_id): + msg = _LI("Rejecting host creation request for invalid cluster " + "id '%(bad_id)s'") % {'bad_id': cluster_id} + LOG.info(msg) + msg = _("Invalid cluster id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + cluster_data = self.db_api.cluster_add(req.context, cluster_data) + msg = (_LI("Successfully created cluster %s") % + cluster_data["id"]) + LOG.info(msg) + if 'cluster' not in cluster_data: + cluster_data = dict(cluster=cluster_data) + return cluster_data + except exception.Duplicate: + msg = _("cluster with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add cluster metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create cluster %s"), cluster_id) + raise + + @utils.mutating + def delete_cluster(self, req, id): + """Deletes an existing cluster with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_cluster = self.db_api.cluster_destroy(req.context, id) + msg = _LI("Successfully deleted cluster %(id)s") % {'id': id} + LOG.info(msg) + # Look up an existing membership + members = self.db_api.cluster_host_member_find(req.context, + cluster_id=id) + if members: + for member in members: + self.db_api.cluster_host_member_delete(req.context, member['id']) + + return dict(cluster=deleted_cluster) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public cluster %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("cluster %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete cluster %s") % id) + raise + + @utils.mutating + def get_cluster(self, req, id): + """Return data about the given cluster id.""" + try: + cluster_data = self.db_api.cluster_get(req.context, id) + msg = "Successfully retrieved cluster %(id)s" % {'id': id} + LOG.debug(msg) + networking_parameters = {} + networking_parameters['gre_id_range'] = [cluster_data['gre_id_start'],cluster_data['gre_id_end']] + networking_parameters['vlan_range'] = [cluster_data['vlan_start'],cluster_data['vlan_end']] + networking_parameters['vni_range'] = [cluster_data['vni_start'],cluster_data['vni_end']] + networking_parameters['net_l23_provider'] = cluster_data['net_l23_provider'] + networking_parameters['base_mac'] = cluster_data['base_mac'] + networking_parameters['segmentation_type'] = cluster_data['segmentation_type'] + networking_parameters['public_vip'] = cluster_data['public_vip'] + cluster_data['networking_parameters'] = 
networking_parameters + except exception.NotFound: + msg = _LI("cluster %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show cluster %s") % id) + raise + cluster_host_member_list = [] + cluster_network_member_list = [] + cluster_id = id + cluster_host_member = self.db_api.cluster_host_member_find(req.context,cluster_id) + if len(cluster_host_member) > 0: + for cluster_host in list(cluster_host_member): + cluster_host_member_list.append(cluster_host['host_id']) + cluster_data['nodes'] = cluster_host_member_list + + cluster_network_member = self.db_api.network_get_all(req.context,cluster_id) + if len(cluster_network_member) > 0: + for cluster_network in list(cluster_network_member): + cluster_network_member_list.append(cluster_network['id']) + cluster_data['networks'] = cluster_network_member_list + + logic_networks = self.db_api.get_logic_network(req.context,id) + cluster_data['logic_networks'] = logic_networks + + routers = self.db_api.router_get(req.context,cluster_id) + cluster_data['routers'] = routers + return cluster_data + + def detail_cluster(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(hosts=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + cluster_host_member_list = [] + cluster_network_member_list = [] + + clusters = self._get_clusters(req.context, **params) + for cluster in clusters: + cluster_id = cluster['id'] + cluster_host_member = self.db_api.cluster_host_member_find(req.context,cluster_id) + if len(cluster_host_member) > 0: + for cluster_host in list(cluster_host_member): + cluster_host_member_list.append(cluster_host['host_id']) + cluster['nodes'] = cluster_host_member_list + + cluster_network_member = self.db_api.network_get_all(req.context,cluster_id) + if len(cluster_network_member) > 0: + for cluster_network in list(cluster_network_member): + cluster_network_member_list.append(cluster_network['id']) + cluster['networks'] = cluster_network_member_list + + return dict(clusters=clusters) + + @utils.mutating + def add_component(self, req, body): + """Registers a new host with the registry. 
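# get_cluster above folds the flat cluster columns into a nested
# 'networking_parameters' mapping before returning the cluster. A minimal
# sketch of that transformation over a plain dict, using the same column
# names as the handler:
def build_networking_parameters(cluster):
    return {
        'gre_id_range': [cluster['gre_id_start'], cluster['gre_id_end']],
        'vlan_range': [cluster['vlan_start'], cluster['vlan_end']],
        'vni_range': [cluster['vni_start'], cluster['vni_end']],
        'net_l23_provider': cluster['net_l23_provider'],
        'base_mac': cluster['base_mac'],
        'segmentation_type': cluster['segmentation_type'],
        'public_vip': cluster['public_vip'],
    }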
+ + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + component_data = body["component"] + + component_id = component_data.get('id') + + if component_id and not utils.is_uuid_like(component_id): + msg = _LI("Rejecting host creation request for invalid component " + "id '%(bad_id)s'") % {'bad_id': component_id} + LOG.info(msg) + msg = _("Invalid component id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + component_data = self.db_api.component_add(req.context, component_data) + #host_data = dict(host=make_image_dict(host_data)) + msg = (_LI("Successfully created component %s") % + component_data["id"]) + LOG.info(msg) + if 'component' not in component_data: + component_data = dict(component=component_data) + return component_data + except exception.Duplicate: + msg = _("component with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add component metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create component %s"), component_id) + raise + + @utils.mutating + def delete_component(self, req, id): + """Deletes an existing component with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_component = self.db_api.component_destroy(req.context, id) + msg = _LI("Successfully deleted component %(id)s") % {'id': id} + LOG.info(msg) + return dict(component=deleted_component) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public component %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to component %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Component %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete component %s") % id) + raise + + def _get_components(self, context, filters, **params): + """Get components, wrapping in exception if necessary.""" + try: + return self.db_api.component_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Project %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to component %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get components")) + raise + + @utils.mutating + def get_component(self, req, id): + """Return data about the given component id.""" + try: + component_data = self.db_api.component_get(req.context, id) + msg = "Successfully retrieved component %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("component %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to component %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show component %s") % id) + raise + if 'component' not in component_data: + component_data = dict(component=component_data) + return component_data + + def detail_component(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(hosts=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + + components = self._get_components(req.context, **params) + + return dict(components=components) + + @utils.mutating + def update_component(self, req, id, body): + """Updates an existing component with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + component_data = body['component'] + try: + updated_component = self.db_api.component_update(req.context, id, component_data) + + msg = _LI("Updating metadata for component %(id)s") % {'id': id} + LOG.info(msg) + if 'component' not in updated_component: + component_data = dict(component=updated_component) + return component_data + except exception.Invalid as e: + msg = (_("Failed to update component metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Component %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Component not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public component %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to component %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Component not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Component operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update component %s") % id) + raise + + + @utils.mutating + def add_service(self, req, body): + """Registers a new host with the registry. 
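# The handlers in this controller consistently return their payload wrapped
# under a type key ({'component': {...}}, {'cluster': {...}}, ...) unless it
# is already wrapped. A tiny sketch of that convention as a helper; the
# helper itself is illustrative, the handlers above inline the check.
def wrap_payload(kind, data):
    """Wrap data under `kind` unless it already carries that key."""
    if kind not in data:
        return {kind: data}
    return data

# wrap_payload('component', {'id': 'c1', 'name': 'compute'})
# -> {'component': {'id': 'c1', 'name': 'compute'}}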
+ + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + service_data = body["service"] + + service_id = service_data.get('id') + + if service_id and not utils.is_uuid_like(service_id): + msg = _LI("Rejecting host creation request for invalid service " + "id '%(bad_id)s'") % {'bad_id': service_id} + LOG.info(msg) + msg = _("Invalid service id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + print service_data + service_data = self.db_api.service_add(req.context, service_data) + #host_data = dict(host=make_image_dict(host_data)) + msg = (_LI("Successfully created service %s") % + service_data["id"]) + LOG.info(msg) + if 'service' not in service_data: + service_data = dict(service=service_data) + return service_data + except exception.Duplicate: + msg = _("service with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add service metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create service %s"), service_id) + raise + + @utils.mutating + def delete_service(self, req, id): + """Deletes an existing service with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_service = self.db_api.service_destroy(req.context, id) + msg = _LI("Successfully deleted service %(id)s") % {'id': id} + LOG.info(msg) + return dict(service=deleted_service) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public service %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to service %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Service %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete service %s") % id) + raise + + def _get_services(self, context, filters, **params): + """Get services, wrapping in exception if necessary.""" + try: + return self.db_api.service_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Project %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to service %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get services")) + raise + + @utils.mutating + def get_service(self, req, id): + """Return data about the given service id.""" + try: + service_data = self.db_api.service_get(req.context, id) + msg = "Successfully retrieved service %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("service %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to service %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show service %s") % id) + raise + if 'service' not in service_data: + service_data = dict(service=service_data) + return service_data + + def detail_service(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(hosts=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + + services = self._get_services(req.context, **params) + + return dict(services=services) + + @utils.mutating + def update_service(self, req, id, body): + """Updates an existing service with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + service_data = body['service'] + try: + updated_service = self.db_api.service_update(req.context, id, service_data) + + msg = _LI("Updating metadata for service %(id)s") % {'id': id} + LOG.info(msg) + if 'service' not in updated_service: + service_data = dict(service=updated_service) + return service_data + except exception.Invalid as e: + msg = (_("Failed to update service metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Service %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Service not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public service %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to service %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Service not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Service operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update service %s") % id) + raise + + + @utils.mutating + def add_role(self, req, body): + """Registers a new host with the registry. 
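# The component/service/role handlers above all follow the same exception
# ladder around the db_api calls: Invalid becomes 400, Duplicate 409,
# NotFound 404, and in most handlers Forbidden is deliberately reported as
# 404 so callers cannot probe for records they are not allowed to see.
# A compact sketch of that mapping (exception names as strings; the real
# classes live in daisy.common.exception):
REGISTRY_ERROR_STATUS = {
    'Invalid': 400,     # malformed metadata in the request body
    'Duplicate': 409,   # identifier already exists
    'NotFound': 404,    # no such record
    'Forbidden': 404,   # masked on purpose: "not found" instead of 403
}

def status_for(exc_name):
    """HTTP status the handlers above would produce for a db_api error."""
    return REGISTRY_ERROR_STATUS.get(exc_name, 500)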
+ + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + + role_data = body["role"] + + role_id = role_data.get('id') + + if role_id and not utils.is_uuid_like(role_id): + msg = _LI("Rejecting host creation request for invalid role " + "id '%(bad_id)s'") % {'bad_id': role_id} + LOG.info(msg) + msg = _("Invalid role id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + print role_data + role_data = self.db_api.role_add(req.context, role_data) + #host_data = dict(host=make_image_dict(host_data)) + msg = (_LI("Successfully created role %s") % + role_data["id"]) + LOG.info(msg) + if 'role' not in role_data: + role_data = dict(role=role_data) + return role_data + except exception.Duplicate: + msg = _("role with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add role metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create role %s"), role_id) + raise + + @utils.mutating + def delete_role(self, req, id): + """Deletes an existing role with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_role = self.db_api.role_destroy(req.context, id) + msg = _LI("Successfully deleted role %(id)s") % {'id': id} + LOG.info(msg) + return dict(role=deleted_role) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public role %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to role %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Role %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete role %s") % id) + raise + + def _get_roles(self, context, filters, **params): + """Get roles, wrapping in exception if necessary.""" + try: + return self.db_api.role_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Project %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to role %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
Project could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get roles")) + raise + + @utils.mutating + def get_role(self, req, id): + """Return data about the given role id.""" + try: + role_data = self.db_api.role_get(req.context, id) + msg = "Successfully retrieved role %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("role %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to role %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show role %s") % id) + raise + role_services = self.db_api.role_services_get(req.context,id) + service_name = [] + for role_service in role_services: + service_info = self.db_api.service_get(req.context, role_service['service_id']) + service_name.append(service_info['name']) + if 'role' not in role_data: + role_data = dict(role=role_data) + if service_name: + role_data['role']['service_name'] = service_name + return role_data + + def detail_role(self, req): + """Return a filtered list of public, non-deleted hosts in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(hosts=[host_list]) + + Where host_list is a sequence of mappings containing + all host model fields. + """ + params = self._get_query_params(req) + + roles = self._get_roles(req.context, **params) + + return dict(roles=roles) + + @utils.mutating + def update_role(self, req, id, body): + """Updates an existing role with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + role_data = body['role'] + try: + updated_role = self.db_api.role_update(req.context, id, role_data) + + msg = _LI("Updating metadata for role %(id)s") % {'id': id} + LOG.info(msg) + if 'role' not in updated_role: + role_data = dict(role=updated_role) + return role_data + except exception.Invalid as e: + msg = (_("Failed to update role metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Role %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Role not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public role %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to role %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Role not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Role operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update role %s") % id) + raise + + @utils.mutating + def role_services(self, req, id): + """Return service list of the role.""" + try: + role_data = self.db_api.role_services_get(req.context, id) + msg = "Successfully retrieved services of role %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("role %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access services of role %(id)s denied but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show services of role %s") % id) + raise + if 'role' not in role_data: + role_data = dict(role=role_data) + return role_data + + @utils.mutating + def update_host(self, req, id, body): + """Updates an existing host with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + host_data = body['host'] + try: + updated_host = self.db_api.host_update(req.context, id, host_data) + + msg = _LI("Updating metadata for host %(id)s") % {'id': id} + LOG.info(msg) + if 'host' not in updated_host: + host_data = dict(host=updated_host) + return host_data + except exception.Invalid as e: + msg = (_("Failed to update host metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Host %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Host not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public host %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Host operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update host %s") % id) + raise + + + @utils.mutating + def update_cluster(self, req, id, body): + """Updates an existing cluster with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + cluster_data = body['cluster'] + try: + updated_cluster = self.db_api.cluster_update(req.context, id, cluster_data) + + msg = _LI("Updating metadata for cluster %(id)s") % {'id': id} + LOG.info(msg) + if 'cluster' not in updated_cluster: + cluster_data = dict(cluster=updated_cluster) + return cluster_data + except exception.Invalid as e: + msg = (_("Failed to update cluster metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("cluster %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='cluster not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public cluster %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='cluster not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='cluster operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update cluster %s") % id) + raise + + @utils.mutating + def host_roles(self, req, id): + """Return host list in the host_roles.""" + try: + role_data = self.db_api.get_host_roles(req.context, id) + msg = "Successfully retrieved host of role %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("role %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access host of role %(id)s denied but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show host of role %s") % id) + raise + if 'role' not in role_data: + role_data = dict(role=role_data) + return role_data + + @utils.mutating + def delete_role_hosts(self, req, id): + """Return host list in the host_roles.""" + try: + role_data = self.db_api.role_host_destroy(req.context, id) + msg = "Successfully retrieved host of role %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("role %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access host of role %(id)s denied but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show host of role %s") % id) + raise + if 'role' not in role_data: + role_data = dict(role=role_data) + return role_data + + @utils.mutating + def update_role_hosts(self, req, id, body): + """Return role hosts list in the host_roles.""" + role_data = body['role'] + try: + updated_role = self.db_api.role_host_update(req.context, id, role_data) + + msg = _LI("Updating metadata for role_host id %(id)s") % {'id': id} + return updated_role + except exception.Invalid 
as e: + msg = (_("Failed to update role host metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("HostRole %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='HostRole not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public host_role %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to host_role %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Role not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='HostRole operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update host_role %s") % id) + raise + + @utils.mutating + def config_interface(self, req, body): + """Registers a new config_interface with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the host + + :retval Returns the newly-created host information as a mapping, + which will include the newly-created host's internal id + in the 'id' field + """ + config_interface_meta=body + cluster_id = config_interface_meta.get('cluster-id') + role_name=config_interface_meta.get('role-name') + try: + config_interface_meta = self.db_api.config_interface(req.context, config_interface_meta) + except exception.Invalid as e: + msg = (_("Failed to add role metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + if 'config_interface_meta' not in config_interface_meta: + config_interface_meta = dict(config_interface_meta=config_interface_meta) + return config_interface_meta + +def _limit_locations(image): + locations = image.pop('locations', []) + image['location_data'] = locations + image['location'] = None + for loc in locations: + if loc['status'] == 'active': + image['location'] = loc['url'] + break + +def make_image_dict(image): + """Create a dict representation of an image which we can use to + serialize the image. + """ + + def _fetch_attrs(d, attrs): + return dict([(a, d[a]) for a in attrs + if a in d.keys()]) + + # TODO(sirp): should this be a dict, or a list of dicts? + # A plain dict is more convenient, but list of dicts would provide + # access to created_at, etc + properties = dict((p['name'], p['value']) + for p in image['properties'] if not p['deleted']) + + image_dict = _fetch_attrs(image, daisy.db.IMAGE_ATTRS) + image_dict['properties'] = properties + _limit_locations(image_dict) + + return image_dict + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/images.py b/code/daisy/daisy/registry/api/v1/images.py new file mode 100755 index 00000000..d15a8f93 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/images.py @@ -0,0 +1,555 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', + 'disk_format', 'container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', + 'min_ram', 'min_disk', 'size_min', 'size_max', + 'changes-since', 'protected'] + +SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + + +def _normalize_image_location_for_db(image_data): + """ + This function takes the legacy locations field and the newly added + location_data field from the image_data values dictionary which flows + over the wire between the registry and API servers and converts it + into the location_data format only which is then consumable by the + Image object. + + :param image_data: a dict of values representing information in the image + :return: a new image data dict + """ + if 'locations' not in image_data and 'location_data' not in image_data: + image_data['locations'] = None + return image_data + + locations = image_data.pop('locations', []) + location_data = image_data.pop('location_data', []) + + location_data_dict = {} + for l in locations: + location_data_dict[l] = {} + for l in location_data: + location_data_dict[l['url']] = {'metadata': l['metadata'], + 'status': l['status'], + # Note(zhiyan): New location has no ID. + 'id': l['id'] if 'id' in l else None} + + # NOTE(jbresnah) preserve original order. 
tests assume original order, + # should that be defined functionality + ordered_keys = locations[:] + for ld in location_data: + if ld['url'] not in ordered_keys: + ordered_keys.append(ld['url']) + + location_data = [] + for loc in ordered_keys: + data = location_data_dict[loc] + if data: + location_data.append({'url': loc, + 'metadata': data['metadata'], + 'status': data['status'], + 'id': data['id']}) + else: + location_data.append({'url': loc, + 'metadata': {}, + 'status': 'active', + 'id': None}) + + image_data['locations'] = location_data + return image_data + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_images(self, context, filters, **params): + """Get images, wrapping in exception if necessary.""" + # NOTE(markwash): for backwards compatibility, is_public=True for + # admins actually means "treat me as if I'm not an admin and show me + # all my images" + if context.is_admin and params.get('is_public') is True: + params['admin_as_user'] = True + del params['is_public'] + try: + return self.db_api.image_get_all(context, filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Image %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Image could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to image %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. Image could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get images")) + raise + + def index(self, req): + """Return a basic filtered list of public, non-deleted images + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(images=[image_list]) + + Where image_list is a sequence of mappings:: + + { + 'id': , + 'name': , + 'size': , + 'disk_format': , + 'container_format': , + 'checksum': + } + """ + params = self._get_query_params(req) + images = self._get_images(req.context, **params) + + results = [] + for image in images: + result = {} + for field in DISPLAY_FIELDS_IN_INDEX: + result[field] = image[field] + results.append(result) + + LOG.debug("Returning image list") + return dict(images=results) + + def detail(self, req): + """Return a filtered list of public, non-deleted images in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(images=[image_list]) + + Where image_list is a sequence of mappings containing + all image model fields. + """ + params = self._get_query_params(req) + + images = self._get_images(req.context, **params) + image_dicts = [make_image_dict(i) for i in images] + LOG.debug("Returning detailed image list") + return dict(images=image_dicts) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. 
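# _normalize_image_location_for_db above merges the legacy 'locations' list
# (bare URLs) and the newer 'location_data' entries into a single ordered
# 'locations' list of dicts before the record reaches the Image object.
# An illustration of the shape it produces; the URLs are made up:
legacy_style_image = {
    'locations': ['swift://acct/cont/img-1'],
    'location_data': [{'url': 'file:///tmp/img-2',
                       'metadata': {}, 'status': 'active', 'id': None}],
}
# After normalization the location information is carried solely as:
# {'locations': [
#     {'url': 'swift://acct/cont/img-1',
#      'metadata': {}, 'status': 'active', 'id': None},
#     {'url': 'file:///tmp/img-2',
#      'metadata': {}, 'status': 'active', 'id': None},
# ]}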
+ + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of images + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + if req.context.is_admin: + # Only admin gets to look for non-public images + params['is_public'] = self._get_is_public(req) + + for key, value in params.items(): + if value is None: + del params[key] + + # Fix for LP Bug #1132294 + # Ensure all shared images are returned in v1 + params['member_status'] = 'all' + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. 
Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _get_is_public(self, req): + """Parse is_public into something usable.""" + is_public = req.params.get('is_public', None) + + if is_public is None: + # NOTE(vish): This preserves the default value of showing only + # public images. + return True + elif is_public.lower() == 'none': + return None + + value = self._get_bool(is_public) + if value is None: + raise exc.HTTPBadRequest(_("is_public must be None, True, or " + "False")) + + return value + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + def show(self, req, id): + """Return data about the given image id.""" + try: + image = self.db_api.image_get(req.context, id) + msg = "Successfully retrieved image %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("Image %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to image %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show image %s") % id) + raise + + return dict(image=make_image_dict(image)) + + @utils.mutating + def delete(self, req, id): + """Deletes an existing image with the registry. + + :param req: wsgi Request object + :param id: The opaque internal identifier for the image + + :retval Returns 200 if delete was successful, a fault if not. On + success, the body contains the deleted image information as a mapping. + """ + try: + deleted_image = self.db_api.image_destroy(req.context, id) + msg = _LI("Successfully deleted image %(id)s") % {'id': id} + LOG.info(msg) + return dict(image=make_image_dict(deleted_image)) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public image %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to image %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Image %(id)s not found") % {'id': id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete image %s") % id) + raise + + @utils.mutating + def create(self, req, body): + """Registers a new image with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the image + + :retval Returns the newly-created image information as a mapping, + which will include the newly-created image's internal id + in the 'id' field + """ + image_data = body['image'] + + # Ensure the image has a status set + image_data.setdefault('status', 'active') + + # Set up the image owner + if not req.context.is_admin or 'owner' not in image_data: + image_data['owner'] = req.context.owner + + image_id = image_data.get('id') + if image_id and not utils.is_uuid_like(image_id): + msg = _LI("Rejecting image creation request for invalid image " + "id '%(bad_id)s'") % {'bad_id': image_id} + LOG.info(msg) + msg = _("Invalid image id format") + return exc.HTTPBadRequest(explanation=msg) + + if 'location' in image_data: + image_data['locations'] = [image_data.pop('location')] + + try: + image_data = _normalize_image_location_for_db(image_data) + image_data = self.db_api.image_create(req.context, image_data) + image_data = dict(image=make_image_dict(image_data)) + msg = (_LI("Successfully created image %(id)s") % + image_data['image']) + LOG.info(msg) + return image_data + except exception.Duplicate: + msg = _("Image with identifier %s already exists!") % image_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add image metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create image %s"), image_id) + raise + + @utils.mutating + def update(self, req, id, body): + """Updates an existing image with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + image_data = body['image'] + from_state = body.get('from_state', None) + + # Prohibit modification of 'owner' + if not req.context.is_admin and 'owner' in image_data: + del image_data['owner'] + + if 'location' in image_data: + image_data['locations'] = [image_data.pop('location')] + + purge_props = req.headers.get("X-Glance-Registry-Purge-Props", "false") + try: + LOG.debug("Updating image %(id)s with metadata: %(image_data)r", + {'id': id, + 'image_data': dict((k, v) for k, v in image_data.items() + if k != 'locations')}) + image_data = _normalize_image_location_for_db(image_data) + if purge_props == "true": + purge_props = True + else: + purge_props = False + + updated_image = self.db_api.image_update(req.context, id, + image_data, + purge_props=purge_props, + from_state=from_state) + + msg = _LI("Updating metadata for image %(id)s") % {'id': id} + LOG.info(msg) + return dict(image=make_image_dict(updated_image)) + except exception.Invalid as e: + msg = (_("Failed to update image metadata. 
" + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Image %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Image not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public image %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to image %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Image not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Image operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update image %s") % id) + raise + + +def _limit_locations(image): + locations = image.pop('locations', []) + image['location_data'] = locations + image['location'] = None + for loc in locations: + if loc['status'] == 'active': + image['location'] = loc['url'] + break + + +def make_image_dict(image): + """Create a dict representation of an image which we can use to + serialize the image. + """ + + def _fetch_attrs(d, attrs): + return dict([(a, d[a]) for a in attrs + if a in d.keys()]) + + # TODO(sirp): should this be a dict, or a list of dicts? + # A plain dict is more convenient, but list of dicts would provide + # access to created_at, etc + properties = dict((p['name'], p['value']) + for p in image['properties'] if not p['deleted']) + + image_dict = _fetch_attrs(image, daisy.db.IMAGE_ATTRS) + image_dict['properties'] = properties + _limit_locations(image_dict) + + return image_dict + + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/members.py b/code/daisy/daisy/registry/api/v1/members.py new file mode 100755 index 00000000..fb62529b --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/members.py @@ -0,0 +1,447 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +import webob.exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LI = i18n._LI +_LW = i18n._LW + + +class Controller(object): + def __init__(self): + self.db_api = daisy.db.get_api() + + def get_cluster_hosts(self, req, cluster_id, host_id=None): + """ + Get the members of an cluster. 
+ """ + try: + self.db_api.cluster_get(req.context, cluster_id) + except exception.NotFound: + msg = _("Project %(id)s not found") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + members = self.db_api.cluster_host_member_find(req.context, cluster_id=cluster_id, host_id=host_id) + msg = "Returning member list for cluster %(id)s" % {'id': cluster_id} + LOG.debug(msg) + return dict(members=make_member_list(members, + host_id='host_id')) + + @utils.mutating + def update_all(self, req, image_id, body): + """ + Replaces the members of the image with those specified in the + body. The body is a dict with the following format:: + + {"memberships": [ + {"member_id": , + ["can_share": [True|False]]}, ... + ]} + """ + self._check_can_access_image_members(req.context) + + # Make sure the image exists + try: + image = self.db_api.image_get(req.context, image_id) + except exception.NotFound: + msg = _("Image %(id)s not found") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to image %(id)s but returning" + " 'not found'") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Can they manipulate the membership? + if not self.is_image_sharable(req.context, image): + msg = (_LW("User lacks permission to share image %(id)s") % + {'id': image_id}) + LOG.warn(msg) + msg = _("No permission to share that image") + raise webob.exc.HTTPForbidden(msg) + + # Get the membership list + try: + memb_list = body['memberships'] + except Exception as e: + # Malformed entity... + msg = _LW("Invalid membership association specified for " + "image %(id)s") % {'id': image_id} + LOG.warn(msg) + msg = (_("Invalid membership association: %s") % + utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=msg) + + add = [] + existing = {} + # Walk through the incoming memberships + for memb in memb_list: + try: + datum = dict(image_id=image['id'], + member=memb['member_id'], + can_share=None) + except Exception as e: + # Malformed entity... + msg = _LW("Invalid membership association specified for " + "image %(id)s") % {'id': image_id} + LOG.warn(msg) + msg = (_("Invalid membership association: %s") % + utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=msg) + + # Figure out what can_share should be + if 'can_share' in memb: + datum['can_share'] = bool(memb['can_share']) + + # Try to find the corresponding membership + members = self.db_api.image_member_find(req.context, + image_id=datum['image_id'], + member=datum['member']) + try: + member = members[0] + except IndexError: + # Default can_share + datum['can_share'] = bool(datum['can_share']) + add.append(datum) + else: + # Are we overriding can_share? + if datum['can_share'] is None: + datum['can_share'] = members[0]['can_share'] + + existing[member['id']] = { + 'values': datum, + 'membership': member, + } + + # We now have a filtered list of memberships to add and + # memberships to modify. Let's start by walking through all + # the existing image memberships... 
+ existing_members = self.db_api.image_member_find(req.context, + image_id=image['id']) + for member in existing_members: + if member['id'] in existing: + # Just update the membership in place + update = existing[member['id']]['values'] + self.db_api.image_member_update(req.context, + member['id'], + update) + else: + # Outdated one; needs to be deleted + self.db_api.image_member_delete(req.context, member['id']) + + # Now add the non-existent ones + for memb in add: + self.db_api.image_member_create(req.context, memb) + + # Make an appropriate result + msg = (_LI("Successfully updated memberships for image %(id)s") % + {'id': image_id}) + LOG.info(msg) + return webob.exc.HTTPNoContent() + + @utils.mutating + def update(self, req, image_id, id, body=None): + """ + Adds a membership to the image, or updates an existing one. + If a body is present, it is a dict with the following format:: + + {"member": { + "can_share": [True|False] + }} + + If "can_share" is provided, the member's ability to share is + set accordingly. If it is not provided, existing memberships + remain unchanged and new memberships default to False. + """ + self._check_can_access_image_members(req.context) + + # Make sure the image exists + try: + image = self.db_api.image_get(req.context, image_id) + except exception.NotFound: + msg = _("Image %(id)s not found") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to image %(id)s but returning" + " 'not found'") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Can they manipulate the membership? + if not self.is_image_sharable(req.context, image): + msg = (_LW("User lacks permission to share image %(id)s") % + {'id': image_id}) + LOG.warn(msg) + msg = _("No permission to share that image") + raise webob.exc.HTTPForbidden(msg) + + # Determine the applicable can_share value + can_share = None + if body: + try: + can_share = bool(body['member']['can_share']) + except Exception as e: + # Malformed entity... + msg = _LW("Invalid membership association specified for " + "image %(id)s") % {'id': image_id} + LOG.warn(msg) + msg = (_("Invalid membership association: %s") % + utils.exception_to_str(e)) + raise webob.exc.HTTPBadRequest(explanation=msg) + + # Look up an existing membership... + members = self.db_api.image_member_find(req.context, + image_id=image_id, + member=id) + if members: + if can_share is not None: + values = dict(can_share=can_share) + self.db_api.image_member_update(req.context, + members[0]['id'], + values) + else: + values = dict(image_id=image['id'], member=id, + can_share=bool(can_share)) + self.db_api.image_member_create(req.context, values) + + msg = (_LI("Successfully updated a membership for image %(id)s") % + {'id': image_id}) + LOG.info(msg) + return webob.exc.HTTPNoContent() + + @utils.mutating + def delete(self, req, image_id, id): + """ + Removes a membership from the image. 
+ """ + self._check_can_access_image_members(req.context) + + # Make sure the image exists + try: + image = self.db_api.image_get(req.context, image_id) + except exception.NotFound: + msg = _("Image %(id)s not found") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to image %(id)s but returning" + " 'not found'") % {'id': image_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Can they manipulate the membership? + if not self.is_image_sharable(req.context, image): + msg = (_LW("User lacks permission to share image %(id)s") % + {'id': image_id}) + LOG.warn(msg) + msg = _("No permission to share that image") + raise webob.exc.HTTPForbidden(msg) + + # Look up an existing membership + members = self.db_api.image_member_find(req.context, + image_id=image_id, + member=id) + if members: + self.db_api.image_member_delete(req.context, members[0]['id']) + else: + msg = ("%(id)s is not a member of image %(image_id)s" % + {'id': id, 'image_id': image_id}) + LOG.debug(msg) + msg = _("Membership could not be found.") + raise webob.exc.HTTPNotFound(explanation=msg) + + # Make an appropriate result + msg = (_LI("Successfully deleted a membership from image %(id)s") % + {'id': image_id}) + LOG.info(msg) + return webob.exc.HTTPNoContent() + + @utils.mutating + def add_cluster_host(self, req, cluster_id, host_id, body=None): + """ + Adds a host to cluster. + """ + + # Make sure the cluster exists + try: + cluster = self.db_api.cluster_get(req.context, cluster_id) + except exception.NotFound: + msg = _("Project %(id)s not found") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Make sure the host exists + try: + host = self.db_api.host_get(req.context, host_id) + except exception.NotFound: + msg = _("Host %(id)s not found") % {'id': host_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to host %(id)s but returning" + " 'not found'") % {'id': host_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Look up an existing membership... + members = self.db_api.cluster_host_member_find(req.context, + cluster_id=cluster_id, + host_id=host_id) + if members: + msg = (_LI("Project %(cluster_id)s has host %(id)s membership already!") % + {'cluster_id': image_id,'host_id': host_id}) + else: + values = dict(cluster_id=cluster_id, host_id=host_id) + self.db_api.cluster_host_member_create(req.context, values) + + msg = (_LI("Successfully added a host %(host_id)s to cluster %(cluster_id)s") % + {'host_id':host_id,'cluster_id': cluster_id}) + LOG.info(msg) + return webob.exc.HTTPNoContent() + + @utils.mutating + def delete_cluster_host(self, req, cluster_id, host_id): + """ + Removes a host from cluster. 
+ """ + # Make sure the cluster exists + try: + cluster = self.db_api.cluster_get(req.context, cluster_id) + except exception.NotFound: + msg = _("Project %(id)s not found") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': cluster_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Make sure the host exists + try: + host = self.db_api.host_get(req.context, host_id) + except exception.NotFound: + msg = _("Host %(id)s not found") % {'id': host_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound(msg) + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LW("Access denied to host %(id)s but returning" + " 'not found'") % {'id': host_id} + LOG.warn(msg) + raise webob.exc.HTTPNotFound() + + # Look up an existing membership + members = self.db_api.cluster_host_member_find(req.context, + cluster_id=cluster_id, + host_id=host_id) + if members: + self.db_api.cluster_host_member_delete(req.context, members[0]['id']) + else: + msg = ("%(host_id)s is not a member of cluster %(cluster_id)s" % + {'host_id': host_id, 'cluster_id': cluster_id}) + LOG.debug(msg) + msg = _("Membership could not be found.") + raise webob.exc.HTTPNotFound(explanation=msg) + + # Make an appropriate result + msg = (_LI("Successfully deleted a host %(host_id)s from cluster %(cluster_id)s") % + {'host_id': host_id, 'cluster_id': cluster_id}) + LOG.info(msg) + return webob.exc.HTTPNoContent() + + def default(self, req, *args, **kwargs): + """This will cover the missing 'show' and 'create' actions""" + LOG.debug("The method %s is not allowed for this resource" % + req.environ['REQUEST_METHOD']) + raise webob.exc.HTTPMethodNotAllowed( + headers=[('Allow', 'PUT, DELETE')]) + + def get_host_clusters(self, req, host_id): + """ + Retrieves clusters shared with the given host. + """ + try: + members = self.db_api.cluster_host_member_find(req.context, host_id=host_id) + except exception.NotFound: + msg = _LW("Host %(id)s not found") % {'id': host_id} + LOG.warn(msg) + msg = _("Membership could not be found.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + msg = "Returning list of clusters shared with host %(id)s" % {'id': host_id} + LOG.debug(msg) + return dict(multi_clusters=make_member_list(members, + cluster_id='cluster_id')) + + +def make_member_list(members, **attr_map): + """ + Create a dict representation of a list of members which we can use + to serialize the members list. Keyword arguments map the names of + optional attributes to include to the database attribute. + """ + + def _fetch_memb(memb, attr_map): + return dict([(k, memb[v]) + for k, v in attr_map.items() if v in memb.keys()]) + + # Return the list of members with the given attribute mapping + return [_fetch_memb(memb, attr_map) for memb in members] + + +def create_resource(): + """Image members resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/networks.py b/code/daisy/daisy/registry/api/v1/networks.py new file mode 100755 index 00000000..df2dfde4 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/networks.py @@ -0,0 +1,481 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW + +CONF = cfg.CONF + +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size', + 'disk_format', 'container_format', + 'checksum'] + +SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format', + 'min_ram', 'min_disk', 'size_min', 'size_max', + 'changes-since', 'protected'] + +SUPPORTED_SORT_KEYS = ('name', 'status', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +SUPPORTED_SORT_DIRS = ('asc', 'desc') + +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir') + + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_networks(self, context,cluster_id, filters=None, **params): + """Get networks, wrapping in exception if necessary.""" + try: + return self.db_api.network_get_all(context, cluster_id,filters=filters, + **params) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. Network %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. Network could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to network %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. Network could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to get networks")) + raise + + def update_phyname_of_network(self, req, body): + try: + self.db_api.update_phyname_of_network(req.context, body) + return {} + except exception.NotFound: + raise exc.HTTPServerError( + explanation="Update database for phyname of network table failed!") + + def get_all_networks(self, req): + params = self._get_query_params(req) + try: + networks = self.db_api.network_get_all(req.context,**params) + except Exception: + raise exc.HTTPServerError(explanation="Get all networks failed") + + return networks + + + def detail_network(self, req, id): + """Return a filtered list of public, non-deleted networks in detail + + :param req: the Request object coming from the wsgi layer + :retval a mapping of the following form:: + + dict(networks=[network_list]) + + Where network_list is a sequence of mappings containing + all network model fields. + """ + params = self._get_query_params(req) + networks = self._get_networks(req.context, id ,**params) + + return dict(networks=networks) + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. 
+ + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of networks + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. 
Acceptable values: %s") % (_keys,)
+            raise exc.HTTPBadRequest(explanation=msg)
+        return sort_dir
+
+    def _get_bool(self, value):
+        value = value.lower()
+        if value == 'true' or value == '1':
+            return True
+        elif value == 'false' or value == '0':
+            return False
+
+        return None
+
+    def _parse_deleted_filter(self, req):
+        """Parse deleted into something usable."""
+        deleted = req.params.get('deleted')
+        if deleted is None:
+            return None
+        return strutils.bool_from_string(deleted)
+
+    @utils.mutating
+    def add_network(self, req, body):
+        """Registers a new network with the registry.
+
+        :param req: wsgi Request object
+        :param body: Dictionary of information about the network
+
+        :retval Returns the newly-created network information as a mapping,
+                which will include the newly-created network's internal id
+                in the 'id' field
+        """
+        network_data = body["network"]
+
+        network_id = network_data.get('id')
+
+        # role = network_data.get('role')
+        # add network_id and role
+        # if role
+        # self.db_api.get_role(req.context,role)
+
+        if network_id and not utils.is_uuid_like(network_id):
+            msg = _LI("Rejecting network creation request for invalid network "
+                      "id '%(bad_id)s'") % {'bad_id': network_id}
+            LOG.info(msg)
+            msg = _("Invalid network id format")
+            return exc.HTTPBadRequest(explanation=msg)
+
+        try:
+            network_data = self.db_api.network_add(req.context, network_data)
+            # network_data = dict(network=make_image_dict(network_data))
+            msg = (_LI("Successfully created network %s") %
+                   network_data["id"])
+            LOG.info(msg)
+            if 'network' not in network_data:
+                network_data = dict(network=network_data)
+            return network_data
+        except exception.Duplicate:
+            msg = _("network with identifier %s already exists!") % network_id
+            LOG.warn(msg)
+            return exc.HTTPConflict(msg)
+        except exception.Invalid as e:
+            msg = (_("Failed to add network metadata. "
+                     "Got error: %s") % utils.exception_to_str(e))
+            LOG.error(msg)
+            return exc.HTTPBadRequest(msg)
+        except Exception:
+            LOG.exception(_LE("Unable to create network %s"), network_id)
+            raise
+
+    @utils.mutating
+    def delete_network(self, req, network_id):
+        """Deletes an existing network from the registry.
+
+        :param req: wsgi Request object
+        :param network_id: The opaque internal identifier for the network
+
+        :retval Returns 200 if delete was successful, a fault if not. On
+                success, the body contains the deleted network information as a mapping.
+ """ + try: + deleted_network = self.db_api.network_destroy(req.context, network_id) + msg = _LI("Successfully deleted network %(network_id)s") % {'network_id': network_id} + LOG.info(msg) + return dict(network=deleted_network) + except exception.ForbiddenPublicImage: + msg = _LI("Delete denied for public network %(network_id)s") % {'network_id': network_id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to network %(id)s but returning" + " 'not found'") % {'network_id': network_id} + LOG.info(msg) + return exc.HTTPNotFound() + except exception.NotFound: + msg = _LI("Network %(network_id)s not found") % {'network_id': network_id} + LOG.info(msg) + return exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to delete network %s") % id) + raise + + @utils.mutating + def get_network(self, req, id): + """Return data about the given network id.""" + try: + network_data = self.db_api.network_get(req.context, id) + msg = "Successfully retrieved network %(id)s" % {'id': id} + LOG.debug(msg) + except exception.NotFound: + msg = _LI("Network %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to network %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound() + except Exception: + LOG.exception(_LE("Unable to show network %s") % id) + raise + if 'network' not in network_data: + network_data = dict(network=network_data) + return network_data + + + + @utils.mutating + def update_network(self, req, network_id, body): + """Updates an existing network with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + network_data = body['network'] + try: + updated_network = self.db_api.network_update(req.context, network_id, network_data) + + msg = _LI("Updating metadata for network %(network_id)s") % {'network_id': network_id} + LOG.info(msg) + if 'network' not in updated_network: + network_data = dict(network=updated_network) + return network_data + except exception.Invalid as e: + msg = (_("Failed to update network metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("Network %(network_id)s not found") % {'network_id': network_id} + LOG.info(msg) + raise exc.HTTPNotFound(body='Network not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public network %(network_id)s") % {'network_id': network_id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + raise + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='Network operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update network %s") % network_id) + raise + + + @utils.mutating + def update_cluster(self, req, id, body): + """Updates an existing cluster with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the image + :param id: The opaque internal identifier for the image + + :retval Returns the updated image information as a mapping, + """ + cluster_data = body['cluster'] + try: + updated_cluster = self.db_api.cluster_update(req.context, id, cluster_data) + + msg = _LI("Updating metadata for cluster %(id)s") % {'id': id} + LOG.info(msg) + if 'cluster' not in updated_cluster: + cluster_data = dict(cluster=updated_cluster) + return cluster_data + except exception.Invalid as e: + msg = (_("Failed to update cluster metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except exception.NotFound: + msg = _LI("cluster %(id)s not found") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='cluster not found', + request=req, + content_type='text/plain') + except exception.ForbiddenPublicImage: + msg = _LI("Update denied for public cluster %(id)s") % {'id': id} + LOG.info(msg) + raise exc.HTTPForbidden() + except exception.Forbidden: + # If it's private and doesn't belong to them, don't let on + # that it exists + msg = _LI("Access denied to cluster %(id)s but returning" + " 'not found'") % {'id': id} + LOG.info(msg) + raise exc.HTTPNotFound(body='cluster not found', + request=req, + content_type='text/plain') + except exception.Conflict as e: + LOG.info(utils.exception_to_str(e)) + raise exc.HTTPConflict(body='cluster operation conflicts', + request=req, + content_type='text/plain') + except Exception: + LOG.exception(_LE("Unable to update cluster %s") % id) + raise + + +def _limit_locations(image): + locations = image.pop('locations', []) + image['location_data'] = locations + image['location'] = None + for loc in locations: + if loc['status'] == 'active': + image['location'] = loc['url'] + break + +def make_image_dict(image): + """Create a dict representation of an image which we can use to + serialize the image. + """ + + def _fetch_attrs(d, attrs): + return dict([(a, d[a]) for a in attrs + if a in d.keys()]) + + # TODO(sirp): should this be a dict, or a list of dicts? + # A plain dict is more convenient, but list of dicts would provide + # access to created_at, etc + properties = dict((p['name'], p['value']) + for p in image['properties'] if not p['deleted']) + + image_dict = _fetch_attrs(image, daisy.db.IMAGE_ATTRS) + image_dict['properties'] = properties + _limit_locations(image_dict) + + return image_dict + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v1/template.py b/code/daisy/daisy/registry/api/v1/template.py new file mode 100755 index 00000000..e094e234 --- /dev/null +++ b/code/daisy/daisy/registry/api/v1/template.py @@ -0,0 +1,547 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Reference implementation registry server WSGI controller +""" + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import strutils +from oslo_utils import timeutils +from webob import exc + +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE +_LI = i18n._LI +_LW = i18n._LW +CONF = cfg.CONF +DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'type', 'hosts', 'content'] +SUPPORTED_FILTERS = ['name', 'type', 'cluster_name', 'hosts', 'content'] +SUPPORTED_SORT_KEYS = ('name', 'type', 'hosts', 'content', 'id', 'created_at', 'updated_at') +SUPPORTED_SORT_DIRS = ('asc', 'desc') +SUPPORTED_PARAMS = ('limit', 'marker', 'sort_key', 'sort_dir', 'name', 'type', 'cluster_name') + +class Controller(object): + + def __init__(self): + self.db_api = daisy.db.get_api() + + def _get_query_params(self, req): + """Extract necessary query parameters from http request. + + :param req: the Request object coming from the wsgi layer + :retval dictionary of filters to apply to list of templates + """ + params = { + 'filters': self._get_filters(req), + 'limit': self._get_limit(req), + 'sort_key': [self._get_sort_key(req)], + 'sort_dir': [self._get_sort_dir(req)], + 'marker': self._get_marker(req), + } + + for key, value in params.items(): + if value is None: + del params[key] + + return params + + def _get_filters(self, req): + """Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + properties = {} + + for param in req.params: + if param in SUPPORTED_FILTERS: + filters[param] = req.params.get(param) + if param.startswith('property-'): + _param = param[9:] + properties[_param] = req.params.get(param) + + if 'changes-since' in filters: + isotime = filters['changes-since'] + try: + filters['changes-since'] = timeutils.parse_isotime(isotime) + except ValueError: + raise exc.HTTPBadRequest(_("Unrecognized changes-since value")) + + if 'protected' in filters: + value = self._get_bool(filters['protected']) + if value is None: + raise exc.HTTPBadRequest(_("protected must be True, or " + "False")) + + filters['protected'] = value + + # only allow admins to filter on 'deleted' + if req.context.is_admin: + deleted_filter = self._parse_deleted_filter(req) + if deleted_filter is not None: + filters['deleted'] = deleted_filter + elif 'changes-since' not in filters: + filters['deleted'] = False + elif 'changes-since' not in filters: + filters['deleted'] = False + + if properties: + filters['properties'] = properties + + return filters + + def _get_limit(self, req): + """Parse a limit query param into something usable.""" + try: + limit = int(req.params.get('limit', CONF.limit_param_default)) + except ValueError: + raise exc.HTTPBadRequest(_("limit param must be an integer")) + + if limit < 0: + raise exc.HTTPBadRequest(_("limit param must be positive")) + + return min(CONF.api_limit_max, limit) + + def _get_marker(self, req): + """Parse a marker query param into something usable.""" + marker = req.params.get('marker', None) + + if marker and not utils.is_uuid_like(marker): + msg = _('Invalid marker format') + raise exc.HTTPBadRequest(explanation=msg) + + return marker + + def _get_sort_key(self, req): + """Parse a sort 
key query param from the request object.""" + sort_key = req.params.get('sort_key', 'created_at') + if sort_key is not None and sort_key not in SUPPORTED_SORT_KEYS: + _keys = ', '.join(SUPPORTED_SORT_KEYS) + msg = _("Unsupported sort_key. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_key + + def _get_sort_dir(self, req): + """Parse a sort direction query param from the request object.""" + sort_dir = req.params.get('sort_dir', 'desc') + if sort_dir is not None and sort_dir not in SUPPORTED_SORT_DIRS: + _keys = ', '.join(SUPPORTED_SORT_DIRS) + msg = _("Unsupported sort_dir. Acceptable values: %s") % (_keys,) + raise exc.HTTPBadRequest(explanation=msg) + return sort_dir + + def _get_bool(self, value): + value = value.lower() + if value == 'true' or value == '1': + return True + elif value == 'false' or value == '0': + return False + + return None + + def _parse_deleted_filter(self, req): + """Parse deleted into something usable.""" + deleted = req.params.get('deleted') + if deleted is None: + return None + return strutils.bool_from_string(deleted) + + @utils.mutating + def template_add(self, req, body): + """Registers a new templatae with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the templatae + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + template_data = body["template"] + + id = template_data.get('id') + + # role = service_disk_data.get('role') + # add id and role + # if role + # self.db_api.get_role(req.context,role) + + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting template creation request for invalid template " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.template_add(req.context, template_data) + msg = (_LI("Successfully created template %s") % + template_data["id"]) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(template=template_data) + return template_data + except exception.Duplicate: + msg = _("template with identifier %s already exists!") % id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create template %s"), id) + raise + + @utils.mutating + def template_update(self, req, template_id, body): + """Registers a new template with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the template + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + template_data = body["template"] + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting cluster template creation request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.template_update(req.context, template_id, template_data) + msg = (_LI("Successfully updated template %s") % + template_data["id"]) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(template=template_data) + return template_data + except exception.Duplicate: + msg = _("template with identifier %s already exists!") % template_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to update template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to update template %s"), template_id) + raise + + @utils.mutating + def template_delete(self, req, template_id): + """Registers a new template with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the template + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting template delete request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.template_destroy(req.context, template_id) + #template_data = dict(template=make_image_dict(template_data)) + msg = (_LI("Successfully deleted template %s") % template_id) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(template=template_data) + return template_data + except exception.Invalid as e: + msg = (_("Failed to delete template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to delete template %s"), template_id) + raise + + @utils.mutating + def template_list(self, req): + params = self._get_query_params(req) + try: + filters=params.pop('filters') + marker=params.get('marker') + limit=params.get('limit') + sort_key=params.get('sort_key') + sort_dir=params.get('sort_dir') + return self.db_api.template_get_all(req.context, filters=filters,\ + marker=marker,limit=limit,sort_key=sort_key,sort_dir=sort_dir) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. template %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. template could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to template %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
template could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to list template")) + raise + + @utils.mutating + def template_detail(self, req, template_id): + """Registers a new template with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the template + + :retval Returns the newly-created template information as a mapping, + which will include the newly-created template's internal id + in the 'id' field + """ + + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting template delete request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.template_get(req.context, template_id) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully get template information:%s") % template_id) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(template=template_data) + return template_data + except exception.Invalid as e: + msg = (_("Failed to get template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to get template %s"), template_id) + raise + + @utils.mutating + def host_template_add(self, req, body): + """Registers a new service_disk with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the service_disk + + :retval Returns the newly-created service_disk information as a mapping, + which will include the newly-created service_disk's internal id + in the 'id' field + """ + template_data = body["template"] + + id = template_data.get('id') + + # role = service_disk_data.get('role') + # add id and role + # if role + # self.db_api.get_role(req.context,role) + + if id and not utils.is_uuid_like(id): + msg = _LI("Rejecting service_disk creation request for invalid service_disk " + "id '%(bad_id)s'") % {'bad_id': id} + LOG.info(msg) + msg = _("Invalid service_disk id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.host_template_add(req.context, template_data) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully created node %s") % + template_data["id"]) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(host_template=template_data) + return template_data + except exception.Duplicate: + msg = _("node with identifier %s already exists!") % id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to add node metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to create node %s"), id) + raise + + @utils.mutating + def host_template_update(self, req, template_id, body): + """Registers a new service_disk with the registry. 
+ + :param req: wsgi Request object + :param body: Dictionary of information about the service_disk + + :retval Returns the newly-created service_disk information as a mapping, + which will include the newly-created service_disk's internal id + in the 'id' field + """ + template_data = body["template"] + #template_id = template_data.get('template_id') + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting cluster template creation request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.host_template_update(req.context, template_id, template_data) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully updated template %s") % + template_data["id"]) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(host_template=template_data) + return template_data + except exception.Duplicate: + msg = _("template with identifier %s already exists!") % template_id + LOG.warn(msg) + return exc.HTTPConflict(msg) + except exception.Invalid as e: + msg = (_("Failed to update template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to update template %s"), template_id) + raise + + @utils.mutating + def host_template_delete(self, req, template_id): + """Registers a new service_disk with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the service_disk + + :retval Returns the newly-created service_disk information as a mapping, + which will include the newly-created service_disk's internal id + in the 'id' field + """ + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting template delete request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.host_template_destroy(req.context, template_id) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully deleted template %s") % template_id) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(host_template=template_data) + return template_data + except exception.Invalid as e: + msg = (_("Failed to delete template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to delete template %s"), template_id) + raise + + @utils.mutating + def host_template_list(self, req): + params = self._get_query_params(req) + try: + filters=params.pop('filters') + marker=params.get('marker') + limit=params.get('limit') + sort_key=params.get('sort_key') + sort_dir=params.get('sort_dir') + return self.db_api.host_template_get_all(req.context, filters=filters,\ + marker=marker,limit=limit,sort_key=sort_key,sort_dir=sort_dir) + except exception.NotFound: + LOG.warn(_LW("Invalid marker. template %(id)s could not be " + "found.") % {'id': params.get('marker')}) + msg = _("Invalid marker. template could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.Forbidden: + LOG.warn(_LW("Access denied to template %(id)s but returning " + "'not found'") % {'id': params.get('marker')}) + msg = _("Invalid marker. 
template could not be found.") + raise exc.HTTPBadRequest(explanation=msg) + except Exception: + LOG.exception(_LE("Unable to list template")) + raise + + @utils.mutating + def host_template_detail(self, req, template_id): + """Registers a new service_disk with the registry. + + :param req: wsgi Request object + :param body: Dictionary of information about the service_disk + + :retval Returns the newly-created service_disk information as a mapping, + which will include the newly-created service_disk's internal id + in the 'id' field + """ + + if template_id and not utils.is_uuid_like(template_id): + msg = _LI("Rejecting template delete request for invalid template " + "id '%(bad_id)s'") % {'bad_id': template_id} + LOG.info(msg) + msg = _("Invalid template id format") + return exc.HTTPBadRequest(explanation=msg) + + try: + template_data = self.db_api.host_template_get(req.context, template_id) + #service_disk_data = dict(service_disk=make_image_dict(service_disk_data)) + msg = (_LI("Successfully get template information:%s") % template_id) + LOG.info(msg) + if 'template' not in template_data: + template_data = dict(host_template=template_data) + return template_data + except exception.Invalid as e: + msg = (_("Failed to get template metadata. " + "Got error: %s") % utils.exception_to_str(e)) + LOG.error(msg) + return exc.HTTPBadRequest(msg) + except Exception: + LOG.exception(_LE("Unable to get template %s"), template_id) + raise + +def create_resource(): + """Images resource factory method.""" + deserializer = wsgi.JSONRequestDeserializer() + serializer = wsgi.JSONResponseSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/api/v2/__init__.py b/code/daisy/daisy/registry/api/v2/__init__.py new file mode 100755 index 00000000..5b20eff1 --- /dev/null +++ b/code/daisy/daisy/registry/api/v2/__init__.py @@ -0,0 +1,35 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisy.common import wsgi +from daisy.registry.api.v2 import rpc + + +def init(mapper): + rpc_resource = rpc.create_resource() + mapper.connect("/rpc", controller=rpc_resource, + conditions=dict(method=["POST"]), + action="__call__") + + +class API(wsgi.Router): + """WSGI entry point for all Registry requests.""" + + def __init__(self, mapper): + mapper = mapper or wsgi.APIMapper() + + init(mapper) + + super(API, self).__init__(mapper) diff --git a/code/daisy/daisy/registry/api/v2/rpc.py b/code/daisy/daisy/registry/api/v2/rpc.py new file mode 100755 index 00000000..0b0ba777 --- /dev/null +++ b/code/daisy/daisy/registry/api/v2/rpc.py @@ -0,0 +1,57 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +RPC Controller +""" + +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import rpc +from daisy.common import wsgi +import daisy.db +from daisy import i18n + + +LOG = logging.getLogger(__name__) +_ = i18n._ + +CONF = cfg.CONF + + +class Controller(rpc.Controller): + + def __init__(self, raise_exc=False): + super(Controller, self).__init__(raise_exc) + + # NOTE(flaper87): Avoid using registry's db + # driver for the registry service. It would + # end up in an infinite loop. + if CONF.data_api == "daisy.db.registry.api": + msg = _("Registry service can't use %s") % CONF.data_api + raise RuntimeError(msg) + + # NOTE(flaper87): Register the + # db_api as a resource to expose. + db_api = daisy.db.get_api() + self.register(daisy.db.unwrap(db_api)) + + +def create_resource(): + """Images resource factory method.""" + deserializer = rpc.RPCJSONDeserializer() + serializer = rpc.RPCJSONSerializer() + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/code/daisy/daisy/registry/client/__init__.py b/code/daisy/daisy/registry/client/__init__.py new file mode 100755 index 00000000..576c362b --- /dev/null +++ b/code/daisy/daisy/registry/client/__init__.py @@ -0,0 +1,88 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_config import cfg + +from daisy import i18n + +_ = i18n._ + +registry_client_opts = [ + cfg.StrOpt('registry_client_protocol', default='http', + help=_('The protocol to use for communication with the ' + 'registry server. Either http or https.')), + cfg.StrOpt('registry_client_key_file', + help=_('The path to the key file to use in SSL connections ' + 'to the registry server, if any. Alternately, you may ' + 'set the GLANCE_CLIENT_KEY_FILE environment variable to ' + 'a filepath of the key file')), + cfg.StrOpt('registry_client_cert_file', + help=_('The path to the cert file to use in SSL connections ' + 'to the registry server, if any. Alternately, you may ' + 'set the GLANCE_CLIENT_CERT_FILE environment variable ' + 'to a filepath of the CA cert file')), + cfg.StrOpt('registry_client_ca_file', + help=_('The path to the certifying authority cert file to use ' + 'in SSL connections to the registry server, if any. ' + 'Alternately, you may set the GLANCE_CLIENT_CA_FILE ' + 'environment variable to a filepath of the CA cert ' + 'file.')), + cfg.BoolOpt('registry_client_insecure', default=False, + help=_('When using SSL in connections to the registry server, ' + 'do not require validation via a certifying ' + 'authority. 
This is the registry\'s equivalent of ' + 'specifying --insecure on the command line using ' + 'glanceclient for the API.')), + cfg.IntOpt('registry_client_timeout', default=600, + help=_('The period of time, in seconds, that the API server ' + 'will wait for a registry request to complete. A ' + 'value of 0 implies no timeout.')), +] + +registry_client_ctx_opts = [ + cfg.BoolOpt('use_user_token', default=True, + help=_('Whether to pass through the user token when ' + 'making requests to the registry.')), + cfg.StrOpt('admin_user', secret=True, + help=_('The administrators user name. ' + 'If "use_user_token" is not in effect, then ' + 'admin credentials can be specified.')), + cfg.StrOpt('admin_password', secret=True, + help=_('The administrators password. ' + 'If "use_user_token" is not in effect, then ' + 'admin credentials can be specified.')), + cfg.StrOpt('admin_tenant_name', secret=True, + help=_('The tenant name of the administrative user. ' + 'If "use_user_token" is not in effect, then ' + 'admin tenant name can be specified.')), + cfg.StrOpt('auth_url', + help=_('The URL to the keystone service. ' + 'If "use_user_token" is not in effect and ' + 'using keystone auth, then URL of keystone ' + 'can be specified.')), + cfg.StrOpt('auth_strategy', default='noauth', + help=_('The strategy to use for authentication. ' + 'If "use_user_token" is not in effect, then ' + 'auth strategy can be specified.')), + cfg.StrOpt('auth_region', + help=_('The region for the authentication service. ' + 'If "use_user_token" is not in effect and ' + 'using keystone auth, then region name can ' + 'be specified.')), +] + +CONF = cfg.CONF +CONF.register_opts(registry_client_opts) +CONF.register_opts(registry_client_ctx_opts) diff --git a/code/daisy/daisy/registry/client/v1/__init__.py b/code/daisy/daisy/registry/client/v1/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/registry/client/v1/api.py b/code/daisy/daisy/registry/client/v1/api.py new file mode 100755 index 00000000..8ebe4054 --- /dev/null +++ b/code/daisy/daisy/registry/client/v1/api.py @@ -0,0 +1,582 @@ +# Copyright 2010-2011 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Registry's Client API +""" + +import os + +from oslo.serialization import jsonutils +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import exception +from daisy import i18n +from daisy.registry.client.v1 import client + +LOG = logging.getLogger(__name__) +_ = i18n._ + +registry_client_ctx_opts = [ + cfg.BoolOpt('send_identity_headers', default=False, + help=_("Whether to pass through headers containing user " + "and tenant information when making requests to " + "the registry. This allows the registry to use the " + "context middleware without keystonemiddleware's " + "auth_token middleware, removing calls to the keystone " + "auth service. 
It is recommended that when using this " + "option, secure communication between glance api and " + "glance registry is ensured by means other than " + "auth_token middleware.")), +] + +CONF = cfg.CONF +CONF.register_opts(registry_client_ctx_opts) +_registry_client = 'daisy.registry.client' +CONF.import_opt('registry_client_protocol', _registry_client) +CONF.import_opt('registry_client_key_file', _registry_client) +CONF.import_opt('registry_client_cert_file', _registry_client) +CONF.import_opt('registry_client_ca_file', _registry_client) +CONF.import_opt('registry_client_insecure', _registry_client) +CONF.import_opt('registry_client_timeout', _registry_client) +CONF.import_opt('use_user_token', _registry_client) +CONF.import_opt('admin_user', _registry_client) +CONF.import_opt('admin_password', _registry_client) +CONF.import_opt('admin_tenant_name', _registry_client) +CONF.import_opt('auth_url', _registry_client) +CONF.import_opt('auth_strategy', _registry_client) +CONF.import_opt('auth_region', _registry_client) +CONF.import_opt('metadata_encryption_key', 'daisy.common.config') + +_CLIENT_CREDS = None +_CLIENT_HOST = None +_CLIENT_PORT = None +_CLIENT_KWARGS = {} +# AES key used to encrypt 'location' metadata +_METADATA_ENCRYPTION_KEY = None + + +def configure_registry_client(): + """ + Sets up a registry client for use in registry lookups + """ + global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT, _METADATA_ENCRYPTION_KEY + try: + host, port = CONF.registry_host, CONF.registry_port + except cfg.ConfigFileValueError: + msg = _("Configuration option was not valid") + LOG.error(msg) + raise exception.BadRegistryConnectionConfiguration(reason=msg) + except IndexError: + msg = _("Could not find required configuration option") + LOG.error(msg) + raise exception.BadRegistryConnectionConfiguration(reason=msg) + + _CLIENT_HOST = host + _CLIENT_PORT = port + _METADATA_ENCRYPTION_KEY = CONF.metadata_encryption_key + _CLIENT_KWARGS = { + 'use_ssl': CONF.registry_client_protocol.lower() == 'https', + 'key_file': CONF.registry_client_key_file, + 'cert_file': CONF.registry_client_cert_file, + 'ca_file': CONF.registry_client_ca_file, + 'insecure': CONF.registry_client_insecure, + 'timeout': CONF.registry_client_timeout, + } + + if not CONF.use_user_token: + configure_registry_admin_creds() + + +def configure_registry_admin_creds(): + global _CLIENT_CREDS + + if CONF.auth_url or os.getenv('OS_AUTH_URL'): + strategy = 'keystone' + else: + strategy = CONF.auth_strategy + + _CLIENT_CREDS = { + 'user': CONF.admin_user, + 'password': CONF.admin_password, + 'username': CONF.admin_user, + 'tenant': CONF.admin_tenant_name, + 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, + 'strategy': strategy, + 'region': CONF.auth_region, + } + + +def get_registry_client(cxt): + global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT + global _METADATA_ENCRYPTION_KEY + kwargs = _CLIENT_KWARGS.copy() + if CONF.use_user_token: + kwargs['auth_token'] = cxt.auth_token + if _CLIENT_CREDS: + kwargs['creds'] = _CLIENT_CREDS + + if CONF.send_identity_headers: + identity_headers = { + 'X-User-Id': cxt.user, + 'X-Tenant-Id': cxt.tenant, + 'X-Roles': ','.join(cxt.roles), + 'X-Identity-Status': 'Confirmed', + 'X-Service-Catalog': jsonutils.dumps(cxt.service_catalog), + } + kwargs['identity_headers'] = identity_headers + return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, + _METADATA_ENCRYPTION_KEY, **kwargs) + + +def get_images_list(context, **kwargs): + c = get_registry_client(context) + return c.get_images(**kwargs) 
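+
+# Illustrative usage of the helpers in this module (a sketch; the call site
+# and the `limit` value are assumptions, not taken from this patch):
+#
+#     from daisy.registry.client.v1 import api as registry
+#     registry.configure_registry_client()
+#     images = registry.get_images_list(request.context, limit=10)
+#
+# Each helper obtains a RegistryClient for the caller's context through
+# get_registry_client() and forwards its arguments to the client unchanged.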
+ + +def get_images_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_images_detailed(**kwargs) + + +def get_image_metadata(context, image_id): + c = get_registry_client(context) + return c.get_image(image_id) + + +def add_image_metadata(context, image_meta): + LOG.debug("Adding image metadata...") + c = get_registry_client(context) + return c.add_image(image_meta) + + +def update_image_metadata(context, image_id, image_meta, + purge_props=False, from_state=None): + LOG.debug("Updating image metadata for image %s...", image_id) + c = get_registry_client(context) + return c.update_image(image_id, image_meta, purge_props=purge_props, + from_state=from_state) + + +def delete_image_metadata(context, image_id): + LOG.debug("Deleting image metadata for image %s...", image_id) + c = get_registry_client(context) + return c.delete_image(image_id) + + +def get_image_members(context, image_id): + c = get_registry_client(context) + return c.get_image_members(image_id) + + +def get_member_images(context, member_id): + c = get_registry_client(context) + return c.get_member_images(member_id) + + +def replace_members(context, image_id, member_data): + c = get_registry_client(context) + return c.replace_members(image_id, member_data) + + +def add_member(context, image_id, member_id, can_share=None): + c = get_registry_client(context) + return c.add_member(image_id, member_id, can_share=can_share) + + +def delete_member(context, image_id, member_id): + c = get_registry_client(context) + return c.delete_member(image_id, member_id) + +def add_host_metadata(context, host_meta): + LOG.debug("Adding host...") + c = get_registry_client(context) + return c.add_host(host_meta) + +def delete_host_metadata(context, host_id): + LOG.debug("Deleting host metadata for host %s...", host_id) + c = get_registry_client(context) + return c.delete_host(host_id) + +def update_host_metadata(context, host_id, host_meta): + LOG.debug("Updating host metadata for host %s...", host_id) + c = get_registry_client(context) + return c.update_host(host_id, host_meta) + +def get_host_metadata(context, host_id): + c = get_registry_client(context) + return c.get_host(host_id) + +def get_host_interface(context, host_meta): + c = get_registry_client(context) + return c.get_host_interface(host_meta) + +def get_all_host_interfaces(context, params): + c = get_registry_client(context) + return c.get_all_host_interfaces(params) + +def get_assigned_network(context, host_interface_id, network_id): + c = get_registry_client(context) + return c.get_assigned_network(host_interface_id, network_id) + +def add_discover_host_metadata(context, discover_host_meta): + LOG.debug("Adding discover host...") + c = get_registry_client(context) + return c.add_discover_host(discover_host_meta) + +def delete_discover_host_metadata(context, discover_host_id): + LOG.debug("Deleting host metadata for host %s...", discover_host_id) + c = get_registry_client(context) + return c.delete_discover_host(discover_host_id) + +def get_discover_hosts_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_discover_hosts_detailed(**kwargs) + +def update_discover_host_metadata(context, host_id, host_meta): + c = get_registry_client(context) + return c.update_discover_host(host_id, host_meta) + +def get_discover_host_metadata(context, host_id): + c = get_registry_client(context) + return c.get_discover_host_metadata(host_id) + +def add_cluster_metadata(context, cluster_meta): + LOG.debug("Adding cluster...") + c = 
get_registry_client(context) + return c.add_cluster(cluster_meta) + +def update_cluster_metadata(context, cluster_id, cluster_meta): + LOG.debug("Updating cluster metadata for cluster %s...", cluster_id) + c = get_registry_client(context) + return c.update_cluster(cluster_id, cluster_meta) + +def delete_cluster_metadata(context, cluster_id): + LOG.debug("Deleting cluster metadata for cluster %s...", cluster_id) + c = get_registry_client(context) + return c.delete_cluster(cluster_id) + +def get_cluster_metadata(context, cluster_id): + c = get_registry_client(context) + return c.get_cluster(cluster_id) + +def add_cluster_host(context, cluster_id, host_id): + c = get_registry_client(context) + return c.add_cluster_host(cluster_id, host_id) + +def delete_cluster_host(context, cluster_id, host_id): + c = get_registry_client(context) + return c.delete_cluster_host(cluster_id, host_id) + +def get_hosts_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_hosts_detailed(**kwargs) + +def get_clusters_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_clusters_detailed(**kwargs) + +def get_cluster_hosts(context, cluster_id, host_id=None): + c = get_registry_client(context) + return c.get_cluster_hosts(cluster_id, host_id) + + +def get_host_clusters(context, host_id): + c = get_registry_client(context) + return c.get_host_clusters(host_id) + +def add_component_metadata(context, component_meta): + LOG.debug("Adding component...") + c = get_registry_client(context) + return c.add_component(component_meta) + +def add_template_metadata(context, template): + c = get_registry_client(context) + return c.add_template(template) + +def update_template_metadata(context, template_id, template): + c = get_registry_client(context) + return c.update_template(template_id, template) + +def delete_template_metadata(context, template_id): + c = get_registry_client(context) + return c.delete_template(template_id) + + +def template_lists_metadata(context, **kwargs): + c = get_registry_client(context) + return c.list_template(**kwargs) + +def template_detail_metadata(context, template_id): + c = get_registry_client(context) + return c.get_template_detail(template_id) + +def add_host_template_metadata(context, template): + c = get_registry_client(context) + return c.add_host_template(template) + +def update_host_template_metadata(context, template_id, template): + c = get_registry_client(context) + return c.update_host_template(template_id, template) + +def delete_host_template_metadata(context, template_id): + c = get_registry_client(context) + return c.delete_host_template(template_id) + + +def host_template_lists_metadata(context, **kwargs): + c = get_registry_client(context) + return c.list_host_template(**kwargs) + +def host_template_detail_metadata(context, template_id): + c = get_registry_client(context) + return c.get_host_template_detail(template_id) + +def delete_component_metadata(context, component_id): + LOG.debug("Deleting component metadata for component %s...", component_id) + c = get_registry_client(context) + return c.delete_component(component_id) + +def get_components_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_components_detailed(**kwargs) + +def get_component_metadata(context, component_id): + c = get_registry_client(context) + return c.get_component(component_id) + +def update_component_metadata(context, component_id, component_meta): + LOG.debug("Updating component metadata for 
component %s...", component_id) + c = get_registry_client(context) + return c.update_component(component_id, component_meta) + +def add_service_metadata(context, service_meta): + LOG.debug("Adding service...") + c = get_registry_client(context) + return c.add_service(service_meta) + +def delete_service_metadata(context, service_id): + LOG.debug("Deleting service metadata for service %s...", service_id) + c = get_registry_client(context) + return c.delete_service(service_id) + +def get_services_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_services_detailed(**kwargs) + +def get_service_metadata(context, service_id): + c = get_registry_client(context) + return c.get_service(service_id) + +def update_service_metadata(context, service_id, service_meta): + LOG.debug("Updating service metadata for service %s...", service_id) + c = get_registry_client(context) + return c.update_service(service_id, service_meta) + +def add_role_metadata(context, role_meta): + LOG.debug("Adding role...") + c = get_registry_client(context) + return c.add_role(role_meta) + +def delete_role_metadata(context, role_id): + LOG.debug("Deleting role metadata for role %s...", role_id) + c = get_registry_client(context) + return c.delete_role(role_id) + +def get_roles_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_roles_detailed(**kwargs) + +def get_role_metadata(context, role_id): + c = get_registry_client(context) + return c.get_role(role_id) + +def update_role_metadata(context, role_id, role_meta): + LOG.debug("Updating role metadata for role %s...", role_id) + c = get_registry_client(context) + return c.update_role(role_id, role_meta) + +def get_role_services(context, role_id): + c = get_registry_client(context) + return c.get_role_services(role_id) + +def get_role_host_metadata(context, role_id): + LOG.debug("get role_host metadata for role %s...", role_id) + c = get_registry_client(context) + return c.get_role_host(role_id) + +def delete_role_host_metadata(context, role_id): + LOG.debug("delete role_host metadata for role %s...", role_id) + c = get_registry_client(context) + return c.delete_role_host(role_id) + +def update_role_host_metadata(context, role_host_id, role_meta): + LOG.debug("update role_host metadata for role %s...", role_host_id) + c = get_registry_client(context) + return c.update_role_host(role_host_id, role_meta) + +def add_config_file_metadata(context, config_file_meta): + LOG.debug("Adding config_file...") + c = get_registry_client(context) + return c.add_config_file(config_file_meta) + +def delete_config_file_metadata(context, config_file_id): + LOG.debug("Deleting config_file metadata for config_file %s...", config_file_id) + c = get_registry_client(context) + return c.delete_config_file(config_file_id) + +def update_config_file_metadata(context, config_file_id, config_file_meta): + LOG.debug("Updating config_file metadata for config_file %s...", config_file_id) + c = get_registry_client(context) + return c.update_config_file(config_file_id, config_file_meta) + +def get_config_file_metadata(context, config_file_id): + c = get_registry_client(context) + return c.get_config_file(config_file_id) + +def get_config_files_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_config_files_detailed(**kwargs) +def add_config_set_metadata(context, config_set_meta): + LOG.debug("Adding config_set...") + c = get_registry_client(context) + return c.add_config_set(config_set_meta) + +def delete_config_set_metadata(context, 
config_set_id): + LOG.debug("Deleting config_set metadata for config_set %s...", config_set_id) + c = get_registry_client(context) + return c.delete_config_set(config_set_id) + +def update_config_set_metadata(context, config_set_id, config_set_meta): + LOG.debug("Updating config_set metadata for config_file %s...", config_set_id) + c = get_registry_client(context) + return c.update_config_set(config_set_id, config_set_meta) + +def get_config_set_metadata(context, config_set_id): + c = get_registry_client(context) + return c.get_config_set(config_set_id) + +def get_config_sets_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_config_sets_detailed(**kwargs) + +def add_config_metadata(context, config_meta): + LOG.debug("Adding config...") + c = get_registry_client(context) + return c.add_config(config_meta) + +def delete_config_metadata(context, config_id): + LOG.debug("Deleting config metadata for config %s...", config_id) + c = get_registry_client(context) + return c.delete_config(config_id) + +def update_config_metadata(context, config_id, config_meta): + LOG.debug("Updating config metadata for config_file %s...", config_id) + c = get_registry_client(context) + return c.update_config(config_id, config_meta) + +def update_configs_metadata_by_role_hosts(context, config_metas): + c = get_registry_client(context) + return c.update_config_by_role_hosts(config_metas) + +def get_config_metadata(context, config_id): + c = get_registry_client(context) + return c.get_config(config_id) + +def get_configs_detail(context, **kwargs): + c = get_registry_client(context) + return c.get_configs_detailed(**kwargs) + +def add_network_metadata(context, network_meta): + LOG.debug("Adding network...") + c = get_registry_client(context) + return c.add_network(network_meta) + +def update_phyname_of_network(context, network_phyname_set): + c = get_registry_client(context) + return c.update_phyname_of_network(network_phyname_set) + +def update_network_metadata(context, network_id, network_meta): + LOG.debug("Updating cluster metadata for cluster %s...", network_id) + c = get_registry_client(context) + return c.update_network(network_id, network_meta) + +def delete_network_metadata(context, network_id): + LOG.debug("Deleting cluster metadata for cluster %s...", network_id) + c = get_registry_client(context) + return c.delete_network(network_id) + +def get_network_metadata(context, network_id): + c = get_registry_client(context) + return c.get_networks(network_id) + +def get_networks_detail(context, cluster_id, **kwargs): + c = get_registry_client(context) + return c.get_networks_detailed(cluster_id, **kwargs) + +def get_all_networks(context, **kwargs): + c = get_registry_client(context) + return c.get_all_networks(**kwargs) + +def config_interface_metadata(context, config_interface_meta): + c = get_registry_client(context) + return c.config_interface(config_interface_meta) + +def add_service_disk_metadata(context, service_disk_meta): + c = get_registry_client(context) + return c.add_service_disk(service_disk_meta) + +def delete_service_disk_metadata(context, service_disk_id): + LOG.debug("Deleting service_disk metadata %s...", service_disk_id) + c = get_registry_client(context) + return c.delete_service_disk(service_disk_id) + +def update_service_disk_metadata(context, service_disk_id, service_disk_meta): + LOG.debug("Updating config metadata for config_file %s...", service_disk_id) + c = get_registry_client(context) + return c.update_service_disk(service_disk_id, service_disk_meta) + +def 
get_service_disk_detail_metadata(context, service_disk_id): + c = get_registry_client(context) + return c.get_service_disk_detail(service_disk_id) + +def list_service_disk_metadata(context, **kwargs): + c = get_registry_client(context) + return c.list_service_disk(**kwargs) + +def add_cinder_volume_metadata(context, cinder_volume_meta): + c = get_registry_client(context) + return c.add_cinder_volume(cinder_volume_meta) + +def delete_cinder_volume_metadata(context, cinder_volume_id): + LOG.debug("Deleting cinder_volume metadata %s...", cinder_volume_id) + c = get_registry_client(context) + return c.delete_cinder_volume(cinder_volume_id) + +def update_cinder_volume_metadata(context, cinder_volume_id, cinder_volume_meta): + LOG.debug("Updating config metadata for cinder_volume %s...", cinder_volume_id) + c = get_registry_client(context) + return c.update_cinder_volume(cinder_volume_id, cinder_volume_meta) + +def get_cinder_volume_detail_metadata(context, cinder_volume_id): + c = get_registry_client(context) + return c.get_cinder_volume_detail(cinder_volume_id) + +def list_cinder_volume_metadata(context, **kwargs): + c = get_registry_client(context) + return c.list_cinder_volume(**kwargs) \ No newline at end of file diff --git a/code/daisy/daisy/registry/client/v1/client.py b/code/daisy/daisy/registry/client/v1/client.py new file mode 100755 index 00000000..16d1bf3f --- /dev/null +++ b/code/daisy/daisy/registry/client/v1/client.py @@ -0,0 +1,1293 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple client class to speak with any RESTful service that implements +the Glance Registry API +""" + +from oslo.serialization import jsonutils +from oslo_log import log as logging +from oslo_utils import excutils + +from daisy.common.client import BaseClient +from daisy.common import crypt +from daisy import i18n +from daisy.registry.api.v1 import images +from daisy.registry.api.v1 import hosts +from daisy.registry.api.v1 import config_files +from daisy.registry.api.v1 import config_sets +from daisy.registry.api.v1 import configs +from daisy.registry.api.v1 import networks +from daisy.registry.api.v1 import template + +LOG = logging.getLogger(__name__) +_LE = i18n._LE +_LI = i18n._LI + + +class RegistryClient(BaseClient): + + """A client for the Registry image metadata service.""" + + DEFAULT_PORT = 19191 + + def __init__(self, host=None, port=None, metadata_encryption_key=None, + identity_headers=None, **kwargs): + """ + :param metadata_encryption_key: Key used to encrypt 'location' metadata + """ + self.metadata_encryption_key = metadata_encryption_key + # NOTE (dprince): by default base client overwrites host and port + # settings when using keystone. 
configure_via_auth=False disables + # this behaviour to ensure we still send requests to the Registry API + self.identity_headers = identity_headers + BaseClient.__init__(self, host, port, configure_via_auth=False, + **kwargs) + + def decrypt_metadata(self, image_metadata): + if self.metadata_encryption_key: + if image_metadata.get('location'): + location = crypt.urlsafe_decrypt(self.metadata_encryption_key, + image_metadata['location']) + image_metadata['location'] = location + if image_metadata.get('location_data'): + ld = [] + for loc in image_metadata['location_data']: + url = crypt.urlsafe_decrypt(self.metadata_encryption_key, + loc['url']) + ld.append({'id': loc['id'], 'url': url, + 'metadata': loc['metadata'], + 'status': loc['status']}) + image_metadata['location_data'] = ld + return image_metadata + + def encrypt_metadata(self, image_metadata): + if self.metadata_encryption_key: + location_url = image_metadata.get('location') + if location_url: + location = crypt.urlsafe_encrypt(self.metadata_encryption_key, + location_url, + 64) + image_metadata['location'] = location + if image_metadata.get('location_data'): + ld = [] + for loc in image_metadata['location_data']: + if loc['url'] == location_url: + url = location + else: + url = crypt.urlsafe_encrypt( + self.metadata_encryption_key, loc['url'], 64) + ld.append({'url': url, 'metadata': loc['metadata'], + 'status': loc['status'], + # NOTE(zhiyan): New location has no ID field. + 'id': loc.get('id')}) + image_metadata['location_data'] = ld + return image_metadata + + def get_images(self, **kwargs): + """ + Returns a list of image id/name mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: image id after which to start page + :param limit: max number of images to return + :param sort_key: results will be ordered by this image attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) + res = self.do_request("GET", "/images", params=params) + image_list = jsonutils.loads(res.read())['images'] + for image in image_list: + image = self.decrypt_metadata(image) + return image_list + + def do_request(self, method, action, **kwargs): + try: + kwargs['headers'] = kwargs.get('headers', {}) + kwargs['headers'].update(self.identity_headers or {}) + res = super(RegistryClient, self).do_request(method, + action, + **kwargs) + status = res.status + request_id = res.getheader('x-openstack-request-id') + msg = ("Registry request %(method)s %(action)s HTTP %(status)s" + " request id %(request_id)s" % + {'method': method, 'action': action, + 'status': status, 'request_id': request_id}) + LOG.debug(msg) + + except Exception as exc: + with excutils.save_and_reraise_exception(): + exc_name = exc.__class__.__name__ + LOG.exception(_LE("Registry client request %(method)s " + "%(action)s raised %(exc_name)s"), + {'method': method, 'action': action, + 'exc_name': exc_name}) + return res + + def get_images_detailed(self, **kwargs): + """ + Returns a list of detailed image data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: image id after which to start page + :param limit: max number of images to return + :param sort_key: results will be ordered by this image attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, images.SUPPORTED_PARAMS) + res = self.do_request("GET", "/images/detail", 
params=params) + image_list = jsonutils.loads(res.read())['images'] + for image in image_list: + image = self.decrypt_metadata(image) + return image_list + + def get_image(self, image_id): + """Returns a mapping of image metadata from Registry.""" + res = self.do_request("GET", "/images/%s" % image_id) + data = jsonutils.loads(res.read())['image'] + return self.decrypt_metadata(data) + + def add_image(self, image_metadata): + """ + Tells registry about an image's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'image' not in image_metadata: + image_metadata = dict(image=image_metadata) + + encrypted_metadata = self.encrypt_metadata(image_metadata['image']) + image_metadata['image'] = encrypted_metadata + body = jsonutils.dumps(image_metadata) + + res = self.do_request("POST", "/images", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + image = data['image'] + return self.decrypt_metadata(image) + + def update_image(self, image_id, image_metadata, purge_props=False, + from_state=None): + """ + Updates Registry's information about an image + """ + if 'image' not in image_metadata: + image_metadata = dict(image=image_metadata) + + encrypted_metadata = self.encrypt_metadata(image_metadata['image']) + image_metadata['image'] = encrypted_metadata + image_metadata['from_state'] = from_state + body = jsonutils.dumps(image_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + if purge_props: + headers["X-Glance-Registry-Purge-Props"] = "true" + + res = self.do_request("PUT", "/images/%s" % image_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + image = data['image'] + return self.decrypt_metadata(image) + + def delete_image(self, image_id): + """ + Deletes Registry's information about an image + """ + res = self.do_request("DELETE", "/images/%s" % image_id) + data = jsonutils.loads(res.read()) + image = data['image'] + return image + + def get_image_members(self, image_id): + """Return a list of membership associations from Registry.""" + res = self.do_request("GET", "/images/%s/members" % image_id) + data = jsonutils.loads(res.read())['members'] + return data + + def get_member_images(self, member_id): + """Return a list of membership associations from Registry.""" + res = self.do_request("GET", "/shared-images/%s" % member_id) + data = jsonutils.loads(res.read())['shared_images'] + return data + + def replace_members(self, image_id, member_data): + """Replace registry's information about image membership.""" + if isinstance(member_data, (list, tuple)): + member_data = dict(memberships=list(member_data)) + elif (isinstance(member_data, dict) and + 'memberships' not in member_data): + member_data = dict(memberships=[member_data]) + + body = jsonutils.dumps(member_data) + + headers = {'Content-Type': 'application/json', } + + res = self.do_request("PUT", "/images/%s/members" % image_id, + body=body, headers=headers) + return self.get_status_code(res) == 204 + + def add_member(self, image_id, member_id, can_share=None): + """Add to registry's information about image membership.""" + body = None + headers = {} + # Build up a body if can_share is specified + if can_share is not None: + body = jsonutils.dumps(dict(member=dict(can_share=can_share))) + headers['Content-Type'] = 'application/json' + + url = "/images/%s/members/%s" % (image_id, member_id) + res = self.do_request("PUT", url, body=body, + headers=headers) + return self.get_status_code(res) == 204 + + 
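+    # Illustrative sketch of how the 'location' encryption handled by
+    # encrypt_metadata()/decrypt_metadata() above is expected to round-trip.
+    # Host, port and key are placeholder values; the key must be a valid
+    # AES key length (16/24/32 characters).
+    #
+    #     client = RegistryClient('127.0.0.1', 19191,
+    #                             metadata_encryption_key='0123456789abcdef')
+    #     meta = client.encrypt_metadata({'location': 'file:///var/lib/img'})
+    #     assert client.decrypt_metadata(meta)['location'] == \
+    #         'file:///var/lib/img'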
def delete_member(self, image_id, member_id): + """Delete registry's information about image membership.""" + res = self.do_request("DELETE", "/images/%s/members/%s" % + (image_id, member_id)) + return self.get_status_code(res) == 204 + + def add_host(self, host_metadata): + """ + Tells registry about a host's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'host' not in host_metadata: + host_metadata = dict(host=host_metadata) + + body = jsonutils.dumps(host_metadata) + + res = self.do_request("POST", "/nodes", body=body, headers=headers) + # Registry returns a JSONified dict(host=host_info) + data = jsonutils.loads(res.read()) + return data['host'] + + def delete_host(self, host_id): + """ + Deletes Registry's information about a host + """ + res = self.do_request("DELETE", "/nodes/%s" % host_id) + data = jsonutils.loads(res.read()) + return data['host'] + + def get_host(self, host_id): + """Returns a mapping of host metadata from Registry.""" + res = self.do_request("GET", "/nodes/%s" % host_id) + data = jsonutils.loads(res.read())['host'] + return data + + def get_host_interface(self, host_metadata): + """Returns a mapping of host_interface metadata from Registry.""" + + headers = { + 'Content-Type': 'application/json', + } + + # if 'host' not in host_metadata: + # host_metadata = dict(host=host_metadata) + + body = jsonutils.dumps(host_metadata) + res = self.do_request("GET", "/host-interface", body=body, headers=headers) + host_interface = jsonutils.loads(res.read()) + + return host_interface + + def get_all_host_interfaces(self, kwargs): + """Returns a mapping of host_interface metadata from Registry.""" + headers = { + 'Content-Type': 'application/json', + } + + if 'filters' not in kwargs: + filters = dict(filters=kwargs) + else: + filters = kwargs + + body = jsonutils.dumps(filters) + res = self.do_request("PUT", "/host-interfaces", body=body, headers=headers) + host_interface = jsonutils.loads(res.read()) + + return host_interface + + def get_assigned_network(self, host_interface_id, network_id): + """Returns a mapping of host_assigned_network metadata from Registry.""" + + + body = None + headers = {} + + headers['Content-Type'] = 'application/json' + url = "/interfaces/%s/network/%s" % (host_interface_id, network_id) + res = self.do_request("GET", url, body=body, headers=headers) + host_assigned_network = jsonutils.loads(res.read()) + + return host_assigned_network + + def update_host(self, host_id, host_metadata): + """ + Updates Registry's information about a host + """ + if 'host' not in host_metadata: + host_metadata = dict(host=host_metadata) + + body = jsonutils.dumps(host_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/nodes/%s" % host_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['host'] + + def add_discover_host(self, discover_host_meta): + """ + Tells registry about a discovered host's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'discover_host' not in discover_host_meta: + discover_host_meta = dict(discover_host=discover_host_meta) + + body = jsonutils.dumps(discover_host_meta) + + res = self.do_request("POST", "/discover/nodes", body=body, headers=headers) + # Registry returns a JSONified dict(discover_host=host_info) + data = jsonutils.loads(res.read()) + return data['discover_host'] + + def delete_discover_host(self, discover_host_id): + """ + Deletes Registry's information about a discovered host + """ + res = self.do_request("DELETE", "/discover/nodes/%s" 
% discover_host_id) + data = jsonutils.loads(res.read()) + return data + + def get_discover_hosts_detailed(self, **kwargs): + """ + Returns a list of detailed host data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: host id after which to start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this host attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/discover/nodes", params=params) + host_list = jsonutils.loads(res.read())['nodes'] + return host_list + + def update_discover_host(self, host_id, discover_host_meta): + ''' + ''' + headers = { + 'Content-Type': 'application/json', + } + + if 'discover_host' not in discover_host_meta: + discover_host_meta = dict(discover_host=discover_host_meta) + + body = jsonutils.dumps(discover_host_meta) + + res = self.do_request("PUT", "/discover/nodes/%s" % host_id, body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['discover_host'] + + def get_discover_host_metadata(self, host_id): + res = self.do_request("GET", "/discover/nodes/%s" % host_id) + data = jsonutils.loads(res.read())['discover_host'] + return data + + def add_cluster(self, cluster_metadata): + """ + Tells registry about an cluster's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'cluster' not in cluster_metadata: + cluster_metadata = dict(cluster=cluster_metadata) + + body = jsonutils.dumps(cluster_metadata) + + res = self.do_request("POST", "/clusters", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['cluster'] + + def update_cluster(self, cluster_id, cluster_metadata): + """ + Updates Registry's information about an cluster + """ + if 'cluster' not in cluster_metadata: + cluster_metadata = dict(cluster=cluster_metadata) + + body = jsonutils.dumps(cluster_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/clusters/%s" % cluster_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['cluster'] + + def delete_cluster(self, cluster_id): + """ + Deletes Registry's information about an cluster + """ + res = self.do_request("DELETE", "/clusters/%s" % cluster_id) + data = jsonutils.loads(res.read()) + return data['cluster'] + + def get_cluster(self, cluster_id): + """Returns a mapping of cluster metadata from Registry.""" + res = self.do_request("GET", "/clusters/%s" % cluster_id) + data = jsonutils.loads(res.read()) + return data + + def add_cluster_host(self, cluster_id, host_id): + """Add host to cluster.""" + body = None + headers = {} + + headers['Content-Type'] = 'application/json' + + url = "/clusters/%s/nodes/%s" % (cluster_id, host_id) + res = self.do_request("PUT", url, body=body, + headers=headers) + return self.get_status_code(res) == 204 + + def delete_cluster_host(self, cluster_id, host_id): + """Delete host from cluster.""" + res = self.do_request("DELETE", "/clusters/%s/nodes/%s" % + (cluster_id, host_id)) + return self.get_status_code(res) == 204 + + def get_hosts_detailed(self, **kwargs): + """ + Returns a list of detailed host data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: host id after which to 
start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this host attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/nodes", params=params) + host_list = jsonutils.loads(res.read())['nodes'] + return host_list + + def get_clusters_detailed(self, **kwargs): + """ + Returns a list of detailed cluster data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: host id after which to start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this host attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/clusters", params=params) + cluster_list = jsonutils.loads(res.read())['clusters'] + return cluster_list + + def get_cluster_hosts(self, cluster_id, host_id=None): + """Return a list of membership associations from Registry.""" + if host_id: + res = self.do_request("GET", "/clusters/%s/nodes/%s" % (cluster_id, host_id)) + else: + res = self.do_request("GET", "/clusters/%s/nodes" % cluster_id) + data = jsonutils.loads(res.read())['members'] + return data + + def get_host_clusters(self, host_id): + """Return a list of membership associations from Registry.""" + res = self.do_request("GET", "/multi_clusters/nodes/%s" % host_id) + data = jsonutils.loads(res.read())['multi_clusters'] + return data + + def add_template(self, template): + """ """ + headers = { + 'Content-Type': 'application/json', + } + + if 'template' not in template: + template = dict(template=template) + + body = jsonutils.dumps(template) + + res = self.do_request("POST", "/template", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['template'] + + def add_host_template(self, template): + """ """ + headers = { + 'Content-Type': 'application/json', + } + + if 'template' not in template: + template = dict(template=template) + + body = jsonutils.dumps(template) + + res = self.do_request("POST", "/host_template", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['host_template'] + + def update_template(self, template_id, template): + headers = { + 'Content-Type': 'application/json', + } + if 'template' not in template: + template = dict(template=template) + + body = jsonutils.dumps(template) + + res = self.do_request("PUT", "/template/%s" % template_id, body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['template'] + + def update_host_template(self, template_id, template): + headers = { + 'Content-Type': 'application/json', + } + if 'template' not in template: + template = dict(template=template) + + body = jsonutils.dumps(template) + + res = self.do_request("PUT", "/host_template/%s" % template_id, body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['host_template'] + + def delete_template(self, template_id): + res = self.do_request("DELETE", "/template/%s" % template_id) + data = jsonutils.loads(res.read()) + return data['template'] + + def delete_host_template(self, template_id): + res = 
self.do_request("DELETE", "/host_template/%s" % template_id) + data = jsonutils.loads(res.read()) + return data['host_template'] + + def list_template(self, **kwargs): + """ """ + params = self._extract_params(kwargs, template.SUPPORTED_PARAMS) + res = self.do_request("GET", "/template/list", params=params) + data = jsonutils.loads(res.read()) + return data + + def list_host_template(self, **kwargs): + """ """ + params = self._extract_params(kwargs, template.SUPPORTED_PARAMS) + res = self.do_request("GET", "/host_template/list", params=params) + data = jsonutils.loads(res.read()) + return data + + def get_template_detail(self, template_id): + res = self.do_request("GET", "/template/%s" % template_id) + data = jsonutils.loads(res.read()) + return data['template'] + + + def get_host_template_detail(self, template_id): + res = self.do_request("GET", "/host_template/%s" % template_id) + data = jsonutils.loads(res.read()) + return data['host_template'] + + + + def get_component(self, component_id): + """Returns a mapping of component metadata from Registry.""" + res = self.do_request("GET", "/components/%s" % component_id) + data = jsonutils.loads(res.read())['component'] + return data + + def add_component(self, component_metadata): + """ + Tells registry about an component's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'component' not in component_metadata: + component_metadata = dict(component=component_metadata) + + body = jsonutils.dumps(component_metadata) + + res = self.do_request("POST", "/components", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['component'] + + def delete_component(self, component_id): + """ + Deletes Registry's information about an component + """ + res = self.do_request("DELETE", "/components/%s" % component_id) + data = jsonutils.loads(res.read()) + return data['component'] + + def get_components_detailed(self, **kwargs): + """ + Returns a list of detailed component data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: host id after which to start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this host attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/components/detail", params=params) + component_list = jsonutils.loads(res.read())['components'] + return component_list + + def update_component(self, component_id, component_metadata): + """ + Updates Registry's information about an component + """ + if 'component' not in component_metadata: + component_metadata = dict(component=component_metadata) + + body = jsonutils.dumps(component_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/components/%s" % component_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['component'] + + def get_service(self, service_id): + """Returns a mapping of service metadata from Registry.""" + res = self.do_request("GET", "/services/%s" % service_id) + data = jsonutils.loads(res.read())['service'] + return data + + def add_service(self, service_metadata): + """ + Tells registry about an service's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'service' not in service_metadata: + service_metadata = 
dict(service=service_metadata) + + body = jsonutils.dumps(service_metadata) + + res = self.do_request("POST", "/services", body=body, headers=headers) + # Registry returns a JSONified dict(service=service_info) + data = jsonutils.loads(res.read()) + return data['service'] + + def delete_service(self, service_id): + """ + Deletes Registry's information about a service + """ + res = self.do_request("DELETE", "/services/%s" % service_id) + data = jsonutils.loads(res.read()) + return data['service'] + + def get_services_detailed(self, **kwargs): + """ + Returns a list of detailed service data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: service id after which to start page + :param limit: max number of services to return + :param sort_key: results will be ordered by this service attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/services/detail", params=params) + service_list = jsonutils.loads(res.read())['services'] + return service_list + + def update_service(self, service_id, service_metadata): + """ + Updates Registry's information about a service + """ + if 'service' not in service_metadata: + service_metadata = dict(service=service_metadata) + + body = jsonutils.dumps(service_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/services/%s" % service_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['service'] + + def get_role(self, role_id): + """Returns a mapping of role metadata from Registry.""" + res = self.do_request("GET", "/roles/%s" % role_id) + data = jsonutils.loads(res.read())['role'] + return data + + def add_role(self, role_metadata): + """ + Tells registry about a role's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'role' not in role_metadata: + role_metadata = dict(role=role_metadata) + + body = jsonutils.dumps(role_metadata) + + res = self.do_request("POST", "/roles", body=body, headers=headers) + # Registry returns a JSONified dict(role=role_info) + data = jsonutils.loads(res.read()) + return data['role'] + + def delete_role(self, role_id): + """ + Deletes Registry's information about a role + """ + res = self.do_request("DELETE", "/roles/%s" % role_id) + data = jsonutils.loads(res.read()) + return data['role'] + + def get_roles_detailed(self, **kwargs): + """ + Returns a list of detailed role data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: role id after which to start page + :param limit: max number of roles to return + :param sort_key: results will be ordered by this role attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/roles/detail", params=params) + role_list = jsonutils.loads(res.read())['roles'] + return role_list + + def update_role(self, role_id, role_metadata): + """ + Updates Registry's information about a role + """ + if 'role' not in role_metadata: + role_metadata = dict(role=role_metadata) + + body = jsonutils.dumps(role_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/roles/%s" % role_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['role'] + + def 
get_role_services(self, role_id): + """Returns the service list of a role.""" + res = self.do_request("GET", "/roles/%s/services" % role_id) + data = jsonutils.loads(res.read())['role'] + return data + + def get_role_host(self, role_id): + """Returns a mapping of role_host metadata from Registry.""" + res = self.do_request("GET", "/roles/%s/hosts" % role_id) + data = jsonutils.loads(res.read())['role'] + return data + + def delete_role_host(self, role_id): + """Deletes Registry's information about a role's hosts.""" + res = self.do_request("DELETE", "/roles/%s/hosts" % role_id) + data = jsonutils.loads(res.read())['role'] + return data + + def update_role_host(self, role_host_id, role_host): + """Updates Registry's information about a role's hosts.""" + if 'role' not in role_host: + role_metadata = dict(role=role_host) + else: + role_metadata = role_host + + body = jsonutils.dumps(role_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/roles/%s/hosts" % role_host_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data + + def add_config_file(self, config_file_metadata): + """ + Tells registry about a config_file's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'config_file' not in config_file_metadata: + config_file_metadata = dict(config_file=config_file_metadata) + + body = jsonutils.dumps(config_file_metadata) + + res = self.do_request("POST", "/config_files", body=body, headers=headers) + # Registry returns a JSONified dict(config_file=config_file_info) + data = jsonutils.loads(res.read()) + return data['config_file'] + + def delete_config_file(self, config_file_id): + """ + Deletes Registry's information about a config_file + """ + res = self.do_request("DELETE", "/config_files/%s" % config_file_id) + data = jsonutils.loads(res.read()) + return data['config_file'] + + def get_config_file(self, config_file_id): + """Returns a mapping of config_file metadata from Registry.""" + res = self.do_request("GET", "/config_files/%s" % config_file_id) + data = jsonutils.loads(res.read())['config_file'] + return data + + def update_config_file(self, config_file_id, config_file_metadata): + """ + Updates Registry's information about a config_file + """ + if 'config_file' not in config_file_metadata: + config_file_metadata = dict(config_file=config_file_metadata) + + body = jsonutils.dumps(config_file_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/config_files/%s" % config_file_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['config_file'] + + def get_config_files_detailed(self, **kwargs): + """ + Returns a list of detailed config_file data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: config_file id after which to start page + :param limit: max number of config_files to return + :param sort_key: results will be ordered by this config_file attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, config_files.SUPPORTED_PARAMS) + res = self.do_request("GET", "/config_files/detail", params=params) + config_file_list = jsonutils.loads(res.read())['config_files'] + return config_file_list + + def add_config_set(self, config_set_metadata): + """ + Tells registry about a config_set's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'config_set' not in config_set_metadata: + config_set_metadata = 
dict(config_set=config_set_metadata) + + body = jsonutils.dumps(config_set_metadata) + + res = self.do_request("POST", "/config_sets", body=body, headers=headers) + # Registry returns a JSONified dict(config_set=config_set_info) + data = jsonutils.loads(res.read()) + return data['config_set'] + + def delete_config_set(self, config_set_id): + """ + Deletes Registry's information about a config_set + """ + res = self.do_request("DELETE", "/config_sets/%s" % config_set_id) + data = jsonutils.loads(res.read()) + return data['config_set'] + + def get_config_set(self, config_set_id): + """Returns a mapping of config_set metadata from Registry.""" + res = self.do_request("GET", "/config_sets/%s" % config_set_id) + data = jsonutils.loads(res.read())['config_set'] + return data + + def update_config_set(self, config_set_id, config_set_metadata): + """ + Updates Registry's information about a config_set + """ + if 'config_set' not in config_set_metadata: + config_set_metadata = dict(config_set=config_set_metadata) + + body = jsonutils.dumps(config_set_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/config_sets/%s" % config_set_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['config_set'] + + def get_config_sets_detailed(self, **kwargs): + """ + Returns a list of detailed config_set data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: config_set id after which to start page + :param limit: max number of config_sets to return + :param sort_key: results will be ordered by this config_set attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, config_sets.SUPPORTED_PARAMS) + res = self.do_request("GET", "/config_sets/detail", params=params) + config_set_list = jsonutils.loads(res.read())['config_sets'] + return config_set_list + + def add_config(self, config_metadata): + """ + Tells registry about a config's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'config' not in config_metadata: + config_metadata = dict(config=config_metadata) + + body = jsonutils.dumps(config_metadata) + + res = self.do_request("POST", "/configs", body=body, headers=headers) + # Registry returns a JSONified dict(config=config_info) + data = jsonutils.loads(res.read()) + return data['config'] + + def delete_config(self, config_id): + """ + Deletes Registry's information about a config + """ + res = self.do_request("DELETE", "/configs/%s" % config_id) + data = jsonutils.loads(res.read()) + return data['config'] + + def get_config(self, config_id): + """Returns a mapping of config metadata from Registry.""" + res = self.do_request("GET", "/configs/%s" % config_id) + data = jsonutils.loads(res.read())['config'] + return data + + def update_config(self, config_id, config_metadata): + """ + Updates Registry's information about a config + """ + if 'config' not in config_metadata: + config_metadata = dict(config=config_metadata) + + body = jsonutils.dumps(config_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/configs/%s" % config_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['config'] + + def update_config_by_role_hosts(self, config_metadatas): + """ + Updates Registry's information about configs on role hosts + """ + if 'configs' not in config_metadatas: + config_metadata = dict(configs=config_metadatas) + else: + config_metadata = config_metadatas + + body = 
jsonutils.dumps(config_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("POST", "/configs/update_config_by_role_hosts", body=body, headers=headers) + data = jsonutils.loads(res.read()) + return data['configs'] + + def get_configs_detailed(self, **kwargs): + """ + Returns a list of detailed config data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: config id after which to start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this config attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, configs.SUPPORTED_PARAMS) + res = self.do_request("GET", "/configs/detail", params=params) + config_list = jsonutils.loads(res.read())['configs'] + return config_list + + def get_networks(self, network_id): + """Return a list of network associations from Registry.""" + res = self.do_request("GET", "/networks/%s" % network_id) + data = jsonutils.loads(res.read())['network'] + return data + + def add_network(self, network_metadata): + """ + Tells registry about an network's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'network' not in network_metadata: + network_metadata = dict(network=network_metadata) + + body = jsonutils.dumps(network_metadata) + + res = self.do_request("POST", "/networks", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['network'] + + def update_phyname_of_network(self, network_phyname_set): + """ + Updates Registry's information about an network phynet_name segment + """ + body = jsonutils.dumps(network_phyname_set) + + headers = { + 'Content-Type': 'application/json', + } + + self.do_request("POST", "/networks/update_phyname_of_network", body=body, headers=headers) + + def update_network(self, network_id, network_metadata): + """ + Updates Registry's information about an network + """ + if 'network' not in network_metadata: + network_metadata = dict(network=network_metadata) + + body = jsonutils.dumps(network_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/networks/%s" % network_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['network'] + + def delete_network(self, network_id): + """ + Deletes Registry's information about an network + """ + res = self.do_request("DELETE", "/networks/%s" % network_id) + data = jsonutils.loads(res.read()) + return data['network'] + + def get_networks_detailed(self, cluster_id, **kwargs): + """ + Returns a list of detailed host data mappings from Registry + + :param filters: dict of keys & expected values to filter results + :param marker: host id after which to start page + :param limit: max number of hosts to return + :param sort_key: results will be ordered by this host attribute + :param sort_dir: direction in which to order results (asc, desc) + """ + params = self._extract_params(kwargs, networks.SUPPORTED_PARAMS) + res = self.do_request("GET", "/clusters/%s/networks" % cluster_id, params=params) + network_list = jsonutils.loads(res.read())['networks'] + return network_list + + def get_all_networks(self, **kwargs): + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/networks", params=params) + data = jsonutils.loads(res.read()) + return data + + def config_interface(self, 
config_interface): + """ + Tells registry about an config_interface's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + body = jsonutils.dumps(config_interface) + res = self.do_request("POST", "/config_interface", body=body, headers=headers) + config_interface= jsonutils.loads(res.read())['config_interface_meta'] + return config_interface + + def add_service_disk(self, service_disk_metadata): + """ + Tells registry about an network's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'service_disk' not in service_disk_metadata: + service_disk_metadata = dict(service_disk=service_disk_metadata) + + body = jsonutils.dumps(service_disk_metadata) + + res = self.do_request("POST", "/service_disk", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['service_disk'] + + def delete_service_disk(self, service_disk_id): + """ + Deletes Registry's information about an network + """ + res = self.do_request("DELETE", "/service_disk/%s" % service_disk_id) + data = jsonutils.loads(res.read()) + return data['service_disk'] + + + def update_service_disk(self, service_disk_id, service_disk_metadata): + """ + Updates Registry's information about an service_disk + """ + if 'service_disk' not in service_disk_metadata: + service_disk_metadata = dict(service_disk=service_disk_metadata) + + body = jsonutils.dumps(service_disk_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/service_disk/%s" % service_disk_id, body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['service_disk'] + + def get_service_disk_detail(self, service_disk_id): + """Return a list of service_disk associations from Registry.""" + res = self.do_request("GET", "/service_disk/%s" % service_disk_id) + data = jsonutils.loads(res.read())['service_disk'] + return data + + def list_service_disk(self, **kwargs): + """ + Returns a list of service_disk data mappings from Registry + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/service_disk/list", params=params) + service_disk_list = jsonutils.loads(res.read())['service_disks'] + return service_disk_list + + def add_cinder_volume(self, cinder_volume_metadata): + """ + Tells registry about an network's metadata + """ + headers = { + 'Content-Type': 'application/json', + } + + if 'cinder_volume' not in cinder_volume_metadata: + cinder_volume_metadata = dict(cinder_volume=cinder_volume_metadata) + + body = jsonutils.dumps(cinder_volume_metadata) + + res = self.do_request("POST", "/cinder_volume", body=body, headers=headers) + # Registry returns a JSONified dict(image=image_info) + data = jsonutils.loads(res.read()) + return data['cinder_volume'] + + def delete_cinder_volume(self, cinder_volume_id): + """ + Deletes Registry's information about an network + """ + res = self.do_request("DELETE", "/cinder_volume/%s" % cinder_volume_id) + data = jsonutils.loads(res.read()) + return data['cinder_volume'] + + def update_cinder_volume(self, cinder_volume_id, cinder_volume_metadata): + """ + Updates Registry's information about an cinder_volume + """ + if 'cinder_volume' not in cinder_volume_metadata: + cinder_volume_metadata = dict(cinder_volume=cinder_volume_metadata) + + body = jsonutils.dumps(cinder_volume_metadata) + + headers = { + 'Content-Type': 'application/json', + } + + res = self.do_request("PUT", "/cinder_volume/%s" % cinder_volume_id, 
body=body, + headers=headers) + data = jsonutils.loads(res.read()) + return data['cinder_volume'] + + + def get_cinder_volume_detail(self, cinder_volume_id): + """Return a list of cinder_volume associations from Registry.""" + res = self.do_request("GET", "/cinder_volume/%s" % cinder_volume_id) + data = jsonutils.loads(res.read())['cinder_volume'] + return data + + def list_cinder_volume(self, **kwargs): + """ + Returns a list of cinder_volume data mappings from Registry + """ + params = self._extract_params(kwargs, hosts.SUPPORTED_PARAMS) + res = self.do_request("GET", "/cinder_volume/list", params=params) + cinder_volume_list = jsonutils.loads(res.read())['cinder_volumes'] + return cinder_volume_list \ No newline at end of file diff --git a/code/daisy/daisy/registry/client/v2/__init__.py b/code/daisy/daisy/registry/client/v2/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/registry/client/v2/api.py b/code/daisy/daisy/registry/client/v2/api.py new file mode 100755 index 00000000..3ae1a17a --- /dev/null +++ b/code/daisy/daisy/registry/client/v2/api.py @@ -0,0 +1,111 @@ +# Copyright 2013 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Registry's Client V2 +""" + +import os + +from oslo_config import cfg +from oslo_log import log as logging + +from daisy.common import exception +from daisy import i18n +from daisy.registry.client.v2 import client + +LOG = logging.getLogger(__name__) +_ = i18n._ + +CONF = cfg.CONF +_registry_client = 'daisy.registry.client' +CONF.import_opt('registry_client_protocol', _registry_client) +CONF.import_opt('registry_client_key_file', _registry_client) +CONF.import_opt('registry_client_cert_file', _registry_client) +CONF.import_opt('registry_client_ca_file', _registry_client) +CONF.import_opt('registry_client_insecure', _registry_client) +CONF.import_opt('registry_client_timeout', _registry_client) +CONF.import_opt('use_user_token', _registry_client) +CONF.import_opt('admin_user', _registry_client) +CONF.import_opt('admin_password', _registry_client) +CONF.import_opt('admin_tenant_name', _registry_client) +CONF.import_opt('auth_url', _registry_client) +CONF.import_opt('auth_strategy', _registry_client) +CONF.import_opt('auth_region', _registry_client) + +_CLIENT_CREDS = None +_CLIENT_HOST = None +_CLIENT_PORT = None +_CLIENT_KWARGS = {} + + +def configure_registry_client(): + """ + Sets up a registry client for use in registry lookups + """ + global _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT + try: + host, port = CONF.registry_host, CONF.registry_port + except cfg.ConfigFileValueError: + msg = _("Configuration option was not valid") + LOG.error(msg) + raise exception.BadRegistryConnectionConfiguration(msg) + except IndexError: + msg = _("Could not find required configuration option") + LOG.error(msg) + raise exception.BadRegistryConnectionConfiguration(msg) + + _CLIENT_HOST = host + _CLIENT_PORT = port + _CLIENT_KWARGS = { + 'use_ssl': CONF.registry_client_protocol.lower() == 'https', + 
'key_file': CONF.registry_client_key_file, + 'cert_file': CONF.registry_client_cert_file, + 'ca_file': CONF.registry_client_ca_file, + 'insecure': CONF.registry_client_insecure, + 'timeout': CONF.registry_client_timeout, + } + + if not CONF.use_user_token: + configure_registry_admin_creds() + + +def configure_registry_admin_creds(): + global _CLIENT_CREDS + + if CONF.auth_url or os.getenv('OS_AUTH_URL'): + strategy = 'keystone' + else: + strategy = CONF.auth_strategy + + _CLIENT_CREDS = { + 'user': CONF.admin_user, + 'password': CONF.admin_password, + 'username': CONF.admin_user, + 'tenant': CONF.admin_tenant_name, + 'auth_url': os.getenv('OS_AUTH_URL') or CONF.auth_url, + 'strategy': strategy, + 'region': CONF.auth_region, + } + + +def get_registry_client(cxt): + global _CLIENT_CREDS, _CLIENT_KWARGS, _CLIENT_HOST, _CLIENT_PORT + kwargs = _CLIENT_KWARGS.copy() + if CONF.use_user_token: + kwargs['auth_token'] = cxt.auth_token + if _CLIENT_CREDS: + kwargs['creds'] = _CLIENT_CREDS + return client.RegistryClient(_CLIENT_HOST, _CLIENT_PORT, **kwargs) diff --git a/code/daisy/daisy/registry/client/v2/client.py b/code/daisy/daisy/registry/client/v2/client.py new file mode 100755 index 00000000..aa30e33f --- /dev/null +++ b/code/daisy/daisy/registry/client/v2/client.py @@ -0,0 +1,31 @@ +# Copyright 2013 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Simple client class to speak with any RESTful service that implements +the Glance Registry API +""" + +from oslo_log import log as logging + +from daisy.common import rpc + +LOG = logging.getLogger(__name__) + + +class RegistryClient(rpc.RPCClient): + """Registry's V2 Client.""" + + DEFAULT_PORT = 9191 diff --git a/code/daisy/daisy/schema.py b/code/daisy/daisy/schema.py new file mode 100755 index 00000000..ed34217a --- /dev/null +++ b/code/daisy/daisy/schema.py @@ -0,0 +1,226 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
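The daisy/schema.py module introduced below provides the Schema, PermissiveSchema and CollectionSchema helpers used to validate and filter API payloads against JSON Schema definitions. A minimal usage sketch follows; the 'example' schema and its property definitions are illustrative only and not part of this change:

from daisy import schema

# Hypothetical schema for illustration; nothing in this change defines it.
props = {
    'name': {'type': 'string', 'maxLength': 255},
    'size': {'type': 'integer'},
}
example = schema.Schema('example', properties=props, required=['name'])

# validate() raises exception.InvalidObject when the body does not match.
example.validate({'name': 'img-1', 'size': 10})

# filter() drops keys that are not declared in 'properties'.
filtered = example.filter({'name': 'img-1', 'extra': 'dropped'})
# filtered == {'name': 'img-1'}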
+ +import jsonschema +import six + +from daisy.common import exception +from daisy.common import utils +from daisy import i18n + +_ = i18n._ + + +class Schema(object): + + def __init__(self, name, properties=None, links=None, required=None, + definitions=None): + self.name = name + if properties is None: + properties = {} + self.properties = properties + self.links = links + self.required = required + self.definitions = definitions + + def validate(self, obj): + try: + jsonschema.validate(obj, self.raw()) + except jsonschema.ValidationError as e: + raise exception.InvalidObject(schema=self.name, + reason=utils.exception_to_str(e)) + + def filter(self, obj): + filtered = {} + for key, value in six.iteritems(obj): + if self._filter_func(self.properties, key): + filtered[key] = value + return filtered + + @staticmethod + def _filter_func(properties, key): + return key in properties + + def merge_properties(self, properties): + # Ensure custom props aren't attempting to override base props + original_keys = set(self.properties.keys()) + new_keys = set(properties.keys()) + intersecting_keys = original_keys.intersection(new_keys) + conflicting_keys = [k for k in intersecting_keys + if self.properties[k] != properties[k]] + if conflicting_keys: + props = ', '.join(conflicting_keys) + reason = _("custom properties (%(props)s) conflict " + "with base properties") + raise exception.SchemaLoadError(reason=reason % {'props': props}) + + self.properties.update(properties) + + def raw(self): + raw = { + 'name': self.name, + 'properties': self.properties, + 'additionalProperties': False, + } + if self.definitions: + raw['definitions'] = self.definitions + if self.required: + raw['required'] = self.required + if self.links: + raw['links'] = self.links + return raw + + def minimal(self): + minimal = { + 'name': self.name, + 'properties': self.properties + } + if self.definitions: + minimal['definitions'] = self.definitions + if self.required: + minimal['required'] = self.required + return minimal + + +class PermissiveSchema(Schema): + @staticmethod + def _filter_func(properties, key): + return True + + def raw(self): + raw = super(PermissiveSchema, self).raw() + raw['additionalProperties'] = {'type': 'string'} + return raw + + def minimal(self): + minimal = super(PermissiveSchema, self).raw() + return minimal + + +class CollectionSchema(object): + + def __init__(self, name, item_schema): + self.name = name + self.item_schema = item_schema + + def raw(self): + definitions = None + if self.item_schema.definitions: + definitions = self.item_schema.definitions + self.item_schema.definitions = None + raw = { + 'name': self.name, + 'properties': { + self.name: { + 'type': 'array', + 'items': self.item_schema.raw(), + }, + 'first': {'type': 'string'}, + 'next': {'type': 'string'}, + 'schema': {'type': 'string'}, + }, + 'links': [ + {'rel': 'first', 'href': '{first}'}, + {'rel': 'next', 'href': '{next}'}, + {'rel': 'describedby', 'href': '{schema}'}, + ], + } + if definitions: + raw['definitions'] = definitions + self.item_schema.definitions = definitions + + return raw + + def minimal(self): + definitions = None + if self.item_schema.definitions: + definitions = self.item_schema.definitions + self.item_schema.definitions = None + minimal = { + 'name': self.name, + 'properties': { + self.name: { + 'type': 'array', + 'items': self.item_schema.minimal(), + }, + 'schema': {'type': 'string'}, + }, + 'links': [ + {'rel': 'describedby', 'href': '{schema}'}, + ], + } + if definitions: + minimal['definitions'] = 
definitions + self.item_schema.definitions = definitions + + return minimal + + +class DictCollectionSchema(Schema): + def __init__(self, name, item_schema): + self.name = name + self.item_schema = item_schema + + def raw(self): + definitions = None + if self.item_schema.definitions: + definitions = self.item_schema.definitions + self.item_schema.definitions = None + raw = { + 'name': self.name, + 'properties': { + self.name: { + 'type': 'object', + 'additionalProperties': self.item_schema.raw(), + }, + 'first': {'type': 'string'}, + 'next': {'type': 'string'}, + 'schema': {'type': 'string'}, + }, + 'links': [ + {'rel': 'first', 'href': '{first}'}, + {'rel': 'next', 'href': '{next}'}, + {'rel': 'describedby', 'href': '{schema}'}, + ], + } + if definitions: + raw['definitions'] = definitions + self.item_schema.definitions = definitions + + return raw + + def minimal(self): + definitions = None + if self.item_schema.definitions: + definitions = self.item_schema.definitions + self.item_schema.definitions = None + minimal = { + 'name': self.name, + 'properties': { + self.name: { + 'type': 'object', + 'additionalProperties': self.item_schema.minimal(), + }, + 'schema': {'type': 'string'}, + }, + 'links': [ + {'rel': 'describedby', 'href': '{schema}'}, + ], + } + if definitions: + minimal['definitions'] = definitions + self.item_schema.definitions = definitions + + return minimal diff --git a/code/daisy/daisy/scrubber.py b/code/daisy/daisy/scrubber.py new file mode 100755 index 00000000..91fe202a --- /dev/null +++ b/code/daisy/daisy/scrubber.py @@ -0,0 +1,650 @@ +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import calendar +import os +import time + +import eventlet +from oslo_concurrency import lockutils +from oslo_config import cfg +from oslo_log import log as logging +import six + +from daisy.common import crypt +from daisy.common import exception +from daisy.common import utils +from daisy import context +import daisy.db as db_api +from daisy import i18n +import daisy.registry.client.v1.api as registry + +LOG = logging.getLogger(__name__) + +_ = i18n._ +_LI = i18n._LI +_LW = i18n._LW +_LE = i18n._LE + +scrubber_opts = [ + cfg.StrOpt('scrubber_datadir', + default='/var/lib/daisy/scrubber', + help=_('Directory that the scrubber will use to track ' + 'information about what to delete. ' + 'Make sure this is set in daisy-api.conf and ' + 'daisy-scrubber.conf.')), + cfg.IntOpt('scrub_time', default=0, + help=_('The amount of time in seconds to delay before ' + 'performing a delete.')), + cfg.BoolOpt('cleanup_scrubber', default=False, + help=_('A boolean that determines if the scrubber should ' + 'clean up the files it uses for taking data. 
Only ' + 'one server in your deployment should be designated ' + 'the cleanup host.')), + cfg.BoolOpt('delayed_delete', default=False, + help=_('Turn on/off delayed delete.')), + cfg.IntOpt('cleanup_scrubber_time', default=86400, + help=_('Items must have a modified time that is older than ' + 'this value in order to be candidates for cleanup.')) +] + +scrubber_cmd_opts = [ + cfg.IntOpt('wakeup_time', default=300, + help=_('Loop time between checking for new ' + 'items to schedule for delete.')) +] + +scrubber_cmd_cli_opts = [ + cfg.BoolOpt('daemon', + short='D', + default=False, + help=_('Run as a long-running process. When not ' + 'specified (the default) run the scrub operation ' + 'once and then exits. When specified do not exit ' + 'and run scrub on wakeup_time interval as ' + 'specified in the config.')) +] + +CONF = cfg.CONF +CONF.register_opts(scrubber_opts) +CONF.import_opt('metadata_encryption_key', 'daisy.common.config') + + +class ScrubQueue(object): + """Image scrub queue base class. + + The queue contains image's location which need to delete from backend. + """ + def __init__(self): + self.scrub_time = CONF.scrub_time + self.metadata_encryption_key = CONF.metadata_encryption_key + registry.configure_registry_client() + registry.configure_registry_admin_creds() + self.registry = registry.get_registry_client(context.RequestContext()) + + @abc.abstractmethod + def add_location(self, image_id, location, user_context=None): + """Adding image location to scrub queue. + + :param image_id: The opaque image identifier + :param location: The opaque image location + :param user_context: The user's request context + + :retval A boolean value to indicate success or not + """ + pass + + @abc.abstractmethod + def get_all_locations(self): + """Returns a list of image id and location tuple from scrub queue. + + :retval a list of image id and location tuple from scrub queue + """ + pass + + @abc.abstractmethod + def pop_all_locations(self): + """Pop out a list of image id and location tuple from scrub queue. + + :retval a list of image id and location tuple from scrub queue + """ + pass + + @abc.abstractmethod + def has_image(self, image_id): + """Returns whether the queue contains an image or not. + + :param image_id: The opaque image identifier + + :retval a boolean value to inform including or not + """ + pass + + +class ScrubFileQueue(ScrubQueue): + """File-based image scrub queue class.""" + def __init__(self): + super(ScrubFileQueue, self).__init__() + self.scrubber_datadir = CONF.scrubber_datadir + utils.safe_mkdirs(self.scrubber_datadir) + + def _read_queue_file(self, file_path): + """Reading queue file to loading deleted location and timestamp out. + + :param file_path: Queue file full path + + :retval a list of image location id, uri and timestamp tuple + """ + loc_ids = [] + uris = [] + delete_times = [] + + try: + with open(file_path, 'r') as f: + while True: + loc_id = f.readline().strip() + if loc_id: + lid = six.text_type(loc_id) + loc_ids.append(int(lid) if lid.isdigit() else lid) + uris.append(unicode(f.readline().strip())) + delete_times.append(int(f.readline().strip())) + else: + break + return loc_ids, uris, delete_times + except Exception: + LOG.error(_LE("%s file can not be read.") % file_path) + + def _update_queue_file(self, file_path, remove_record_idxs): + """Updating queue file to remove such queue records. 
+ + :param file_path: Queue file full path + :param remove_record_idxs: A list of record index those want to remove + """ + try: + with open(file_path, 'r') as f: + lines = f.readlines() + # NOTE(zhiyan) we need bottom up removing to + # keep record index be valid. + remove_record_idxs.sort(reverse=True) + for record_idx in remove_record_idxs: + # Each record has three lines: + # location id, uri and delete time. + line_no = (record_idx + 1) * 3 - 1 + del lines[line_no:line_no + 3] + with open(file_path, 'w') as f: + f.write(''.join(lines)) + os.chmod(file_path, 0o600) + except Exception: + LOG.error(_LE("%s file can not be wrote.") % file_path) + + def add_location(self, image_id, location, user_context=None): + """Adding image location to scrub queue. + + :param image_id: The opaque image identifier + :param location: The opaque image location + :param user_context: The user's request context + + :retval A boolean value to indicate success or not + """ + if user_context is not None: + registry_client = registry.get_registry_client(user_context) + else: + registry_client = self.registry + + with lockutils.lock("scrubber-%s" % image_id, + lock_file_prefix='daisy-', external=True): + + # NOTE(zhiyan): make sure scrubber does not cleanup + # 'pending_delete' images concurrently before the code + # get lock and reach here. + try: + image = registry_client.get_image(image_id) + if image['status'] == 'deleted': + return True + except exception.NotFound as e: + LOG.warn(_LW("Failed to find image to delete: %s"), + utils.exception_to_str(e)) + return False + + loc_id = location.get('id', '-') + if self.metadata_encryption_key: + uri = crypt.urlsafe_encrypt(self.metadata_encryption_key, + location['url'], 64) + else: + uri = location['url'] + delete_time = time.time() + self.scrub_time + file_path = os.path.join(self.scrubber_datadir, str(image_id)) + + if os.path.exists(file_path): + # Append the uri of location to the queue file + with open(file_path, 'a') as f: + f.write('\n') + f.write('\n'.join([str(loc_id), + uri, + str(int(delete_time))])) + else: + # NOTE(zhiyan): Protect the file before we write any data. + open(file_path, 'w').close() + os.chmod(file_path, 0o600) + with open(file_path, 'w') as f: + f.write('\n'.join([str(loc_id), + uri, + str(int(delete_time))])) + os.utime(file_path, (delete_time, delete_time)) + + return True + + def _walk_all_locations(self, remove=False): + """Returns a list of image id and location tuple from scrub queue. + + :param remove: Whether remove location from queue or not after walk + + :retval a list of image id, location id and uri tuple from scrub queue + """ + if not os.path.exists(self.scrubber_datadir): + LOG.warn(_LW("%s directory does not exist.") % + self.scrubber_datadir) + return [] + + ret = [] + for root, dirs, files in os.walk(self.scrubber_datadir): + for image_id in files: + if not utils.is_uuid_like(image_id): + continue + with lockutils.lock("scrubber-%s" % image_id, + lock_file_prefix='daisy-', external=True): + file_path = os.path.join(self.scrubber_datadir, image_id) + records = self._read_queue_file(file_path) + loc_ids, uris, delete_times = records + + remove_record_idxs = [] + skipped = False + for (record_idx, delete_time) in enumerate(delete_times): + if delete_time > time.time(): + skipped = True + continue + else: + ret.append((image_id, + loc_ids[record_idx], + uris[record_idx])) + remove_record_idxs.append(record_idx) + + if remove: + if skipped: + # NOTE(zhiyan): remove location records from + # the queue file. 
+ self._update_queue_file(file_path, + remove_record_idxs) + else: + utils.safe_remove(file_path) + return ret + + def get_all_locations(self): + """Returns a list of image id and location tuple from scrub queue. + + :retval a list of image id and location tuple from scrub queue + """ + return self._walk_all_locations() + + def pop_all_locations(self): + """Pop out a list of image id and location tuple from scrub queue. + + :retval a list of image id and location tuple from scrub queue + """ + return self._walk_all_locations(remove=True) + + def has_image(self, image_id): + """Returns whether the queue contains an image or not. + + :param image_id: The opaque image identifier + + :retval a boolean value to inform including or not + """ + return os.path.exists(os.path.join(self.scrubber_datadir, + str(image_id))) + + +class ScrubDBQueue(ScrubQueue): + """Database-based image scrub queue class.""" + def __init__(self): + super(ScrubDBQueue, self).__init__() + admin_tenant_name = CONF.admin_tenant_name + admin_token = self.registry.auth_token + self.admin_context = context.RequestContext(user=CONF.admin_user, + tenant=admin_tenant_name, + auth_token=admin_token) + + def add_location(self, image_id, location, user_context=None): + """Adding image location to scrub queue. + + :param image_id: The opaque image identifier + :param location: The opaque image location + :param user_context: The user's request context + + :retval A boolean value to indicate success or not + """ + loc_id = location.get('id') + if loc_id: + db_api.get_api().image_location_delete(self.admin_context, + image_id, loc_id, + 'pending_delete') + return True + else: + return False + + def _get_images_page(self, marker): + filters = {'deleted': True, + 'is_public': 'none', + 'status': 'pending_delete'} + + if marker: + return self.registry.get_images_detailed(filters=filters, + marker=marker) + else: + return self.registry.get_images_detailed(filters=filters) + + def _get_all_images(self): + """Generator to fetch all appropriate images, paging as needed.""" + + marker = None + while True: + images = self._get_images_page(marker) + if len(images) == 0: + break + marker = images[-1]['id'] + + for image in images: + yield image + + def _walk_all_locations(self, remove=False): + """Returns a list of image id and location tuple from scrub queue. + + :param remove: Whether remove location from queue or not after walk + + :retval a list of image id, location id and uri tuple from scrub queue + """ + ret = [] + + for image in self._get_all_images(): + deleted_at = image.get('deleted_at') + if not deleted_at: + continue + + # NOTE: Strip off microseconds which may occur after the last '.,' + # Example: 2012-07-07T19:14:34.974216 + date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0] + delete_time = calendar.timegm(time.strptime(date_str, + "%Y-%m-%dT%H:%M:%S")) + + if delete_time + self.scrub_time > time.time(): + continue + + for loc in image['location_data']: + if loc['status'] != 'pending_delete': + continue + + if self.metadata_encryption_key: + uri = crypt.urlsafe_encrypt(self.metadata_encryption_key, + loc['url'], 64) + else: + uri = loc['url'] + + ret.append((image['id'], loc['id'], uri)) + + if remove: + db_api.get_api().image_location_delete(self.admin_context, + image['id'], + loc['id'], + 'deleted') + self.registry.update_image(image['id'], + {'status': 'deleted'}) + return ret + + def get_all_locations(self): + """Returns a list of image id and location tuple from scrub queue. 
+ + :retval a list of image id and location tuple from scrub queue + """ + return self._walk_all_locations() + + def pop_all_locations(self): + """Pop out a list of image id and location tuple from scrub queue. + + :retval a list of image id and location tuple from scrub queue + """ + return self._walk_all_locations(remove=True) + + def has_image(self, image_id): + """Returns whether the queue contains an image or not. + + :param image_id: The opaque image identifier + + :retval a boolean value to inform including or not + """ + try: + image = self.registry.get_image(image_id) + return image['status'] == 'pending_delete' + except exception.NotFound: + return False + + +_file_queue = None +_db_queue = None + + +def get_scrub_queues(): + global _file_queue, _db_queue + if not _file_queue: + _file_queue = ScrubFileQueue() + if not _db_queue: + _db_queue = ScrubDBQueue() + return (_file_queue, _db_queue) + + +class Daemon(object): + def __init__(self, wakeup_time=300, threads=1000): + LOG.info(_LI("Starting Daemon: wakeup_time=%(wakeup_time)s " + "threads=%(threads)s"), + {'wakeup_time': wakeup_time, 'threads': threads}) + self.wakeup_time = wakeup_time + self.event = eventlet.event.Event() + self.pool = eventlet.greenpool.GreenPool(threads) + + def start(self, application): + self._run(application) + + def wait(self): + try: + self.event.wait() + except KeyboardInterrupt: + msg = _LI("Daemon Shutdown on KeyboardInterrupt") + LOG.info(msg) + + def _run(self, application): + LOG.debug("Running application") + self.pool.spawn_n(application.run, self.pool, self.event) + eventlet.spawn_after(self.wakeup_time, self._run, application) + LOG.debug("Next run scheduled in %s seconds" % self.wakeup_time) + + +class Scrubber(object): + def __init__(self, store_api): + LOG.info(_LI("Initializing scrubber with configuration: %s") % + six.text_type({'scrubber_datadir': CONF.scrubber_datadir, + 'cleanup': CONF.cleanup_scrubber, + 'cleanup_time': CONF.cleanup_scrubber_time, + 'registry_host': CONF.registry_host, + 'registry_port': CONF.registry_port})) + + utils.safe_mkdirs(CONF.scrubber_datadir) + + self.store_api = store_api + + registry.configure_registry_client() + registry.configure_registry_admin_creds() + self.registry = registry.get_registry_client(context.RequestContext()) + + # Here we create a request context with credentials to support + # delayed delete when using multi-tenant backend storage + admin_tenant = CONF.admin_tenant_name + auth_token = self.registry.auth_token + self.admin_context = context.RequestContext(user=CONF.admin_user, + tenant=admin_tenant, + auth_token=auth_token) + + (self.file_queue, self.db_queue) = get_scrub_queues() + + def _get_delete_jobs(self, queue, pop): + try: + if pop: + records = queue.pop_all_locations() + else: + records = queue.get_all_locations() + except Exception as err: + LOG.error(_LE("Can not %(op)s scrub jobs from queue: %(err)s") % + {'op': 'pop' if pop else 'get', + 'err': utils.exception_to_str(err)}) + return {} + + delete_jobs = {} + for image_id, loc_id, loc_uri in records: + if image_id not in delete_jobs: + delete_jobs[image_id] = [] + delete_jobs[image_id].append((image_id, loc_id, loc_uri)) + return delete_jobs + + def _merge_delete_jobs(self, file_jobs, db_jobs): + ret = {} + for image_id, file_job_items in file_jobs.iteritems(): + ret[image_id] = file_job_items + db_job_items = db_jobs.get(image_id, []) + for db_item in db_job_items: + if db_item not in file_job_items: + ret[image_id].append(db_item) + for image_id, db_job_items in 
db_jobs.iteritems(): + if image_id not in ret: + ret[image_id] = db_job_items + return ret + + def run(self, pool, event=None): + file_jobs = self._get_delete_jobs(self.file_queue, True) + db_jobs = self._get_delete_jobs(self.db_queue, False) + delete_jobs = self._merge_delete_jobs(file_jobs, db_jobs) + + if delete_jobs: + for image_id, jobs in six.iteritems(delete_jobs): + self._scrub_image(pool, image_id, jobs) + + if CONF.cleanup_scrubber: + self._cleanup(pool) + + def _scrub_image(self, pool, image_id, delete_jobs): + if len(delete_jobs) == 0: + return + + LOG.info(_LI("Scrubbing image %(id)s from %(count)d locations.") % + {'id': image_id, 'count': len(delete_jobs)}) + # NOTE(bourke): The starmap must be iterated to do work + list(pool.starmap(self._delete_image_location_from_backend, + delete_jobs)) + + image = self.registry.get_image(image_id) + if (image['status'] == 'pending_delete' and + not self.file_queue.has_image(image_id)): + self.registry.update_image(image_id, {'status': 'deleted'}) + + def _delete_image_location_from_backend(self, image_id, loc_id, uri): + if CONF.metadata_encryption_key: + uri = crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) + + try: + LOG.debug("Deleting URI from image %s." % image_id) + self.store_api.delete_from_backend(uri, self.admin_context) + if loc_id != '-': + db_api.get_api().image_location_delete(self.admin_context, + image_id, + int(loc_id), + 'deleted') + LOG.info(_LI("Image %s has been deleted.") % image_id) + except Exception: + LOG.warn(_LW("Unable to delete URI from image %s.") % image_id) + + def _read_cleanup_file(self, file_path): + """Reading cleanup to get latest cleanup timestamp. + + :param file_path: Cleanup status file full path + + :retval latest cleanup timestamp + """ + try: + if not os.path.exists(file_path): + msg = _("%s file is not exists.") % six.text_type(file_path) + raise Exception(msg) + atime = int(os.path.getatime(file_path)) + mtime = int(os.path.getmtime(file_path)) + if atime != mtime: + msg = _("%s file contains conflicting cleanup " + "timestamp.") % six.text_type(file_path) + raise Exception(msg) + return atime + except Exception as e: + LOG.error(utils.exception_to_str(e)) + return None + + def _update_cleanup_file(self, file_path, cleanup_time): + """Update latest cleanup timestamp to cleanup file. 
+ + :param file_path: Cleanup status file full path + :param cleanup_time: The Latest cleanup timestamp + """ + try: + open(file_path, 'w').close() + os.chmod(file_path, 0o600) + os.utime(file_path, (cleanup_time, cleanup_time)) + except Exception: + LOG.error(_LE("%s file can not be created.") % + six.text_type(file_path)) + + def _cleanup(self, pool): + now = time.time() + cleanup_file = os.path.join(CONF.scrubber_datadir, ".cleanup") + if not os.path.exists(cleanup_file): + self._update_cleanup_file(cleanup_file, now) + return + + last_cleanup_time = self._read_cleanup_file(cleanup_file) + cleanup_time = last_cleanup_time + CONF.cleanup_scrubber_time + if cleanup_time > now: + return + + LOG.info(_LI("Getting images deleted before %s") % + CONF.cleanup_scrubber_time) + self._update_cleanup_file(cleanup_file, now) + + delete_jobs = self._get_delete_jobs(self.db_queue, False) + if not delete_jobs: + return + + for image_id, jobs in six.iteritems(delete_jobs): + with lockutils.lock("scrubber-%s" % image_id, + lock_file_prefix='daisy-', external=True): + if not self.file_queue.has_image(image_id): + # NOTE(zhiyan): scrubber should not cleanup this image + # since a queue file be created for this 'pending_delete' + # image concurrently before the code get lock and + # reach here. The checking only be worth if daisy-api and + # daisy-scrubber service be deployed on a same host. + self._scrub_image(pool, image_id, jobs) diff --git a/code/daisy/daisy/search/__init__.py b/code/daisy/daisy/search/__init__.py new file mode 100755 index 00000000..c9762da6 --- /dev/null +++ b/code/daisy/daisy/search/__init__.py @@ -0,0 +1,77 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import elasticsearch +from elasticsearch import helpers +from oslo_config import cfg + +from daisy.common import utils + + +search_opts = [ + cfg.ListOpt('hosts', default=['127.0.0.1:9200'], + help='List of nodes where Elasticsearch instances are ' + 'running. 
A single node should be defined as an IP ' + 'address and port number.'), +] + +CONF = cfg.CONF +CONF.register_opts(search_opts, group='elasticsearch') + + +def get_api(): + es_hosts = CONF.elasticsearch.hosts + es_api = elasticsearch.Elasticsearch(hosts=es_hosts) + return es_api + + +class CatalogSearchRepo(object): + + def __init__(self, context, es_api): + self.context = context + self.es_api = es_api + self.plugins = utils.get_search_plugins() or [] + self.plugins_info_dict = self._get_plugin_info() + + def search(self, index, doc_type, query, fields, offset, limit, + ignore_unavailable=True): + return self.es_api.search( + index=index, + doc_type=doc_type, + body=query, + _source_include=fields, + from_=offset, + size=limit, + ignore_unavailable=ignore_unavailable) + + def index(self, default_index, default_type, actions): + return helpers.bulk( + client=self.es_api, + index=default_index, + doc_type=default_type, + actions=actions) + + def plugins_info(self): + return self.plugins_info_dict + + def _get_plugin_info(self): + plugin_info = dict() + plugin_info['plugins'] = [] + for plugin in self.plugins: + info = dict() + info['type'] = plugin.obj.get_document_type() + info['index'] = plugin.obj.get_index_name() + plugin_info['plugins'].append(info) + return plugin_info diff --git a/code/daisy/daisy/search/api/__init__.py b/code/daisy/daisy/search/api/__init__.py new file mode 100755 index 00000000..03d2e36b --- /dev/null +++ b/code/daisy/daisy/search/api/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import paste.urlmap + + +def root_app_factory(loader, global_conf, **local_conf): + return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) diff --git a/code/daisy/daisy/search/api/v0_1/__init__.py b/code/daisy/daisy/search/api/v0_1/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/search/api/v0_1/router.py b/code/daisy/daisy/search/api/v0_1/router.py new file mode 100755 index 00000000..89406800 --- /dev/null +++ b/code/daisy/daisy/search/api/v0_1/router.py @@ -0,0 +1,66 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
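The CatalogSearchRepo defined in daisy/search/__init__.py above is a thin wrapper over an Elasticsearch client. A rough sketch of driving it directly, assuming an Elasticsearch node is reachable at the configured [elasticsearch]/hosts address, that search plugin loading succeeds, and that the 'glance' index with an 'image' document type already exists (all of these values are illustrative):

from daisy import search

es_api = search.get_api()
# A request context would normally come from the WSGI pipeline; None works
# here only because the repo merely stores it.
repo = search.CatalogSearchRepo(context=None, es_api=es_api)

result = repo.search(index='glance',
                     doc_type='image',
                     query={'query': {'match_all': {}}},
                     fields=['id', 'name', 'status'],
                     offset=0,
                     limit=10)
# result is the raw Elasticsearch response, e.g. result['hits']['hits']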
+ +from daisy.common import wsgi +from daisy.search.api.v0_1 import search + + +class API(wsgi.Router): + + """WSGI router for Glance Catalog Search v0_1 API requests.""" + + def __init__(self, mapper): + + reject_method_resource = wsgi.Resource(wsgi.RejectMethodController()) + + search_catalog_resource = search.create_resource() + mapper.connect('/search', + controller=search_catalog_resource, + action='search', + conditions={'method': ['GET']}) + mapper.connect('/search', + controller=search_catalog_resource, + action='search', + conditions={'method': ['POST']}) + mapper.connect('/search', + controller=reject_method_resource, + action='reject', + allowed_methods='GET, POST', + conditions={'method': ['PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/search/plugins', + controller=search_catalog_resource, + action='plugins_info', + conditions={'method': ['GET']}) + mapper.connect('/search/plugins', + controller=reject_method_resource, + action='reject', + allowed_methods='GET', + conditions={'method': ['POST', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + mapper.connect('/index', + controller=search_catalog_resource, + action='index', + conditions={'method': ['POST']}) + mapper.connect('/index', + controller=reject_method_resource, + action='reject', + allowed_methods='POST', + conditions={'method': ['GET', 'PUT', 'DELETE', + 'PATCH', 'HEAD']}) + + super(API, self).__init__(mapper) diff --git a/code/daisy/daisy/search/api/v0_1/search.py b/code/daisy/daisy/search/api/v0_1/search.py new file mode 100755 index 00000000..78ff42f8 --- /dev/null +++ b/code/daisy/daisy/search/api/v0_1/search.py @@ -0,0 +1,382 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
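daisy/search/api/v0_1/search.py below implements the controller plus the request/response (de)serializers for the catalog search API. RequestDeserializer.search() expects a JSON body containing the keys it pops ('query', 'index', 'type', 'fields', 'offset', 'limit', 'highlight'); a sketch of such a body, with made-up field values:

# Illustrative POST /search body; 'query' carries an Elasticsearch query
# DSL fragment, the remaining keys are optional.
search_request = {
    'query': {'match': {'name': 'cirros'}},
    'index': 'glance',                  # must match a plugin index name
    'type': 'image',                    # must match a plugin document type
    'fields': ['id', 'name', 'status'],
    'offset': 0,
    'limit': 20,
}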
+ +import json + +from oslo.config import cfg +from oslo_log import log as logging +import six +import webob.exc + +from daisy.api import policy +from daisy.common import exception +from daisy.common import utils +from daisy.common import wsgi +import daisy.db +import daisy.gateway +from daisy import i18n +import daisy.notifier +import daisy.schema + +LOG = logging.getLogger(__name__) +_ = i18n._ +_LE = i18n._LE + +CONF = cfg.CONF + + +class SearchController(object): + def __init__(self, plugins=None, es_api=None, policy_enforcer=None): + self.es_api = es_api or daisy.search.get_api() + self.policy = policy_enforcer or policy.Enforcer() + self.gateway = daisy.gateway.Gateway( + es_api=self.es_api, + policy_enforcer=self.policy) + self.plugins = plugins or [] + + def search(self, req, query, index, doc_type=None, fields=None, offset=0, + limit=10): + if fields is None: + fields = [] + + try: + search_repo = self.gateway.get_catalog_search_repo(req.context) + result = search_repo.search(index, + doc_type, + query, + fields, + offset, + limit, + True) + + for plugin in self.plugins: + result = plugin.obj.filter_result(result, req.context) + + return result + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def plugins_info(self, req): + try: + search_repo = self.gateway.get_catalog_search_repo(req.context) + return search_repo.plugins_info() + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + def index(self, req, actions, default_index=None, default_type=None): + try: + search_repo = self.gateway.get_catalog_search_repo(req.context) + success, errors = search_repo.index( + default_index, + default_type, + actions) + return { + 'success': success, + 'failed': len(errors), + 'errors': errors, + } + + except exception.Forbidden as e: + raise webob.exc.HTTPForbidden(explanation=e.msg) + except exception.NotFound as e: + raise webob.exc.HTTPNotFound(explanation=e.msg) + except exception.Duplicate as e: + raise webob.exc.HTTPConflict(explanation=e.msg) + except Exception as e: + LOG.error(utils.exception_to_str(e)) + raise webob.exc.HTTPInternalServerError() + + +class RequestDeserializer(wsgi.JSONRequestDeserializer): + _disallowed_properties = ['self', 'schema'] + + def __init__(self, plugins, schema=None): + super(RequestDeserializer, self).__init__() + self.plugins = plugins + + def _get_request_body(self, request): + output = super(RequestDeserializer, self).default(request) + if 'body' not in output: + msg = _('Body expected in request.') + raise webob.exc.HTTPBadRequest(explanation=msg) + return output['body'] + + @classmethod + def _check_allowed(cls, query): + for key in cls._disallowed_properties: + if key in query: + msg = _("Attribute '%s' is read-only.") % key + raise webob.exc.HTTPForbidden(explanation=msg) + + def _get_available_indices(self): + return list(set([p.obj.get_index_name() for p in self.plugins])) + + def _get_available_types(self): + return list(set([p.obj.get_document_type() for p in self.plugins])) + + def _validate_index(self, 
index): + available_indices = self._get_available_indices() + + if index not in available_indices: + msg = _("Index '%s' is not supported.") % index + raise webob.exc.HTTPBadRequest(explanation=msg) + + return index + + def _validate_doc_type(self, doc_type): + available_types = self._get_available_types() + + if doc_type not in available_types: + msg = _("Document type '%s' is not supported.") % doc_type + raise webob.exc.HTTPBadRequest(explanation=msg) + + return doc_type + + def _validate_offset(self, offset): + try: + offset = int(offset) + except ValueError: + msg = _("offset param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if offset < 0: + msg = _("offset param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return offset + + def _validate_limit(self, limit): + try: + limit = int(limit) + except ValueError: + msg = _("limit param must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if limit < 1: + msg = _("limit param must be positive") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return limit + + def _validate_actions(self, actions): + if not actions: + msg = _("actions param cannot be empty") + raise webob.exc.HTTPBadRequest(explanation=msg) + + output = [] + allowed_action_types = ['create', 'update', 'delete', 'index'] + for action in actions: + action_type = action.get('action', 'index') + document_id = action.get('id') + document_type = action.get('type') + index_name = action.get('index') + data = action.get('data', {}) + script = action.get('script') + + if index_name is not None: + index_name = self._validate_index(index_name) + + if document_type is not None: + document_type = self._validate_doc_type(document_type) + + if action_type not in allowed_action_types: + msg = _("Invalid action type: '%s'") % action_type + raise webob.exc.HTTPBadRequest(explanation=msg) + elif (action_type in ['create', 'update', 'index'] and + not any([data, script])): + msg = (_("Action type '%s' requires data or script param.") % + action_type) + raise webob.exc.HTTPBadRequest(explanation=msg) + elif action_type in ['update', 'delete'] and not document_id: + msg = (_("Action type '%s' requires ID of the document.") % + action_type) + raise webob.exc.HTTPBadRequest(explanation=msg) + + bulk_action = { + '_op_type': action_type, + '_id': document_id, + '_index': index_name, + '_type': document_type, + } + + if script: + data_field = 'params' + bulk_action['script'] = script + elif action_type == 'update': + data_field = 'doc' + else: + data_field = '_source' + + bulk_action[data_field] = data + + output.append(bulk_action) + return output + + def _get_query(self, context, query, doc_types): + is_admin = context.is_admin + if is_admin: + query_params = { + 'query': { + 'query': query + } + } + else: + filtered_query_list = [] + for plugin in self.plugins: + try: + doc_type = plugin.obj.get_document_type() + rbac_filter = plugin.obj.get_rbac_filter(context) + except Exception as e: + LOG.error(_LE("Failed to retrieve RBAC filters " + "from search plugin " + "%(ext)s: %(e)s") % + {'ext': plugin.name, 'e': e}) + + if doc_type in doc_types: + filter_query = { + "query": query, + "filter": rbac_filter + } + filtered_query = { + 'filtered': filter_query + } + filtered_query_list.append(filtered_query) + + query_params = { + 'query': { + 'query': { + "bool": { + "should": filtered_query_list + }, + } + } + } + + return query_params + + def search(self, request): + body = self._get_request_body(request) + 
self._check_allowed(body) + query = body.pop('query', None) + indices = body.pop('index', None) + doc_types = body.pop('type', None) + fields = body.pop('fields', None) + offset = body.pop('offset', None) + limit = body.pop('limit', None) + highlight = body.pop('highlight', None) + + if not indices: + indices = self._get_available_indices() + elif not isinstance(indices, (list, tuple)): + indices = [indices] + + if not doc_types: + doc_types = self._get_available_types() + elif not isinstance(doc_types, (list, tuple)): + doc_types = [doc_types] + + query_params = self._get_query(request.context, query, doc_types) + query_params['index'] = [self._validate_index(index) + for index in indices] + query_params['doc_type'] = [self._validate_doc_type(doc_type) + for doc_type in doc_types] + + if fields is not None: + query_params['fields'] = fields + + if offset is not None: + query_params['offset'] = self._validate_offset(offset) + + if limit is not None: + query_params['limit'] = self._validate_limit(limit) + + if highlight is not None: + query_params['query']['highlight'] = highlight + + return query_params + + def index(self, request): + body = self._get_request_body(request) + self._check_allowed(body) + + default_index = body.pop('default_index', None) + if default_index is not None: + default_index = self._validate_index(default_index) + + default_type = body.pop('default_type', None) + if default_type is not None: + default_type = self._validate_doc_type(default_type) + + actions = self._validate_actions(body.pop('actions', None)) + if not all([default_index, default_type]): + for action in actions: + if not any([action['_index'], default_index]): + msg = (_("Action index is missing and no default " + "index has been set.")) + raise webob.exc.HTTPBadRequest(explanation=msg) + + if not any([action['_type'], default_type]): + msg = (_("Action document type is missing and no default " + "type has been set.")) + raise webob.exc.HTTPBadRequest(explanation=msg) + + query_params = { + 'default_index': default_index, + 'default_type': default_type, + 'actions': actions, + } + return query_params + + +class ResponseSerializer(wsgi.JSONResponseSerializer): + def __init__(self, schema=None): + super(ResponseSerializer, self).__init__() + self.schema = schema + + def search(self, response, query_result): + body = json.dumps(query_result, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def plugins_info(self, response, query_result): + body = json.dumps(query_result, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + def index(self, response, query_result): + body = json.dumps(query_result, ensure_ascii=False) + response.unicode_body = six.text_type(body) + response.content_type = 'application/json' + + +def create_resource(): + """Search resource factory method""" + plugins = utils.get_search_plugins() + deserializer = RequestDeserializer(plugins) + serializer = ResponseSerializer() + controller = SearchController(plugins) + return wsgi.Resource(controller, deserializer, serializer) diff --git a/code/daisy/daisy/search/plugins/__init__.py b/code/daisy/daisy/search/plugins/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/daisy/search/plugins/base.py b/code/daisy/daisy/search/plugins/base.py new file mode 100755 index 00000000..8d4b79e0 --- /dev/null +++ b/code/daisy/daisy/search/plugins/base.py @@ -0,0 +1,140 @@ +# Copyright 2015 Intel 
Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc + +from elasticsearch import helpers +import six + +import daisy.search + + +@six.add_metaclass(abc.ABCMeta) +class IndexBase(object): + chunk_size = 200 + + def __init__(self): + self.engine = daisy.search.get_api() + self.index_name = self.get_index_name() + self.document_type = self.get_document_type() + + def setup(self): + """Comprehensively install search engine index and put data into it.""" + self.setup_index() + self.setup_mapping() + self.setup_data() + + def setup_index(self): + """Create the index if it doesn't exist and update its settings.""" + index_exists = self.engine.indices.exists(self.index_name) + if not index_exists: + self.engine.indices.create(index=self.index_name) + + index_settings = self.get_settings() + if index_settings: + self.engine.indices.put_settings(index=self.index_name, + body=index_settings) + + return index_exists + + def setup_mapping(self): + """Update index document mapping.""" + index_mapping = self.get_mapping() + + if index_mapping: + self.engine.indices.put_mapping(index=self.index_name, + doc_type=self.document_type, + body=index_mapping) + + def setup_data(self): + """Insert all objects from database into search engine.""" + object_list = self.get_objects() + documents = [] + for obj in object_list: + document = self.serialize(obj) + documents.append(document) + + self.save_documents(documents) + + def save_documents(self, documents, id_field='id'): + """Send list of serialized documents into search engine.""" + actions = [] + for document in documents: + action = { + '_id': document.get(id_field), + '_source': document, + } + + actions.append(action) + + helpers.bulk( + client=self.engine, + index=self.index_name, + doc_type=self.document_type, + chunk_size=self.chunk_size, + actions=actions) + + @abc.abstractmethod + def get_objects(self): + """Get list of all objects which will be indexed into search engine.""" + + @abc.abstractmethod + def serialize(self, obj): + """Serialize database object into valid search engine document.""" + + @abc.abstractmethod + def get_index_name(self): + """Get name of the index.""" + + @abc.abstractmethod + def get_document_type(self): + """Get name of the document type.""" + + @abc.abstractmethod + def get_rbac_filter(self, request_context): + """Get rbac filter as es json filter dsl.""" + + def filter_result(self, result, request_context): + """Filter the outgoing search result.""" + return result + + def get_settings(self): + """Get an index settings.""" + return {} + + def get_mapping(self): + """Get an index mapping.""" + return {} + + def get_notification_handler(self): + """Get the notification handler which implements NotificationBase.""" + return None + + def get_notification_supported_events(self): + """Get the list of suppported event types.""" + return [] + + +@six.add_metaclass(abc.ABCMeta) +class NotificationBase(object): + + def __init__(self, engine, index_name, document_type): + self.engine = engine 
+ self.index_name = index_name + self.document_type = document_type + + @abc.abstractmethod + def process(self, ctxt, publisher_id, event_type, payload, metadata): + """Process the incoming notification message.""" diff --git a/code/daisy/daisy/search/plugins/images.py b/code/daisy/daisy/search/plugins/images.py new file mode 100755 index 00000000..aab7cdd6 --- /dev/null +++ b/code/daisy/daisy/search/plugins/images.py @@ -0,0 +1,163 @@ +# Copyright 2015 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy.orm import joinedload + +from oslo_utils import timeutils + +from daisy.api import policy +from daisy.common import property_utils +import daisy.db +from daisy.db.sqlalchemy import models +from daisy.search.plugins import base +from daisy.search.plugins import images_notification_handler + + +class ImageIndex(base.IndexBase): + def __init__(self, db_api=None, policy_enforcer=None): + super(ImageIndex, self).__init__() + self.db_api = db_api or daisy.db.get_api() + self.policy = policy_enforcer or policy.Enforcer() + if property_utils.is_property_protection_enabled(): + self.property_rules = property_utils.PropertyRules(self.policy) + self._image_base_properties = [ + 'checksum', 'created_at', 'container_format', 'disk_format', 'id', + 'min_disk', 'min_ram', 'name', 'size', 'virtual_size', 'status', + 'tags', 'updated_at', 'visibility', 'protected', 'owner', + 'members'] + + def get_index_name(self): + return 'glance' + + def get_document_type(self): + return 'image' + + def get_mapping(self): + return { + 'dynamic': True, + 'properties': { + 'id': {'type': 'string', 'index': 'not_analyzed'}, + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'tags': {'type': 'string'}, + 'disk_format': {'type': 'string'}, + 'container_format': {'type': 'string'}, + 'size': {'type': 'long'}, + 'virtual_size': {'type': 'long'}, + 'status': {'type': 'string'}, + 'visibility': {'type': 'string'}, + 'checksum': {'type': 'string'}, + 'min_disk': {'type': 'long'}, + 'min_ram': {'type': 'long'}, + 'owner': {'type': 'string', 'index': 'not_analyzed'}, + 'protected': {'type': 'boolean'}, + 'members': {'type': 'string', 'index': 'not_analyzed'}, + "created_at": {'type': 'date'}, + "updated_at": {'type': 'date'} + }, + } + + def get_rbac_filter(self, request_context): + return [ + { + "and": [ + { + 'or': [ + { + 'term': { + 'owner': request_context.owner + } + }, + { + 'term': { + 'visibility': 'public' + } + }, + { + 'term': { + 'members': request_context.tenant + } + } + ] + }, + { + 'type': { + 'value': self.get_document_type() + } + } + ] + } + ] + + def filter_result(self, result, request_context): + if property_utils.is_property_protection_enabled(): + hits = result['hits']['hits'] + for hit in hits: + if hit['_type'] == self.get_document_type(): + source = hit['_source'] + for key in source.keys(): + if key not in self._image_base_properties: + if not self.property_rules.check_property_rules( + key, 'read', request_context): + del 
hit['_source'][key] + return result + + def get_objects(self): + session = self.db_api.get_session() + images = session.query(models.Image).options( + joinedload('properties'), joinedload('members'), joinedload('tags') + ).filter_by(deleted=False) + return images + + def serialize(self, obj): + visibility = 'public' if obj.is_public else 'private' + members = [] + for member in obj.members: + if member.status == 'accepted' and member.deleted == 0: + members.append(member.member) + + document = { + 'id': obj.id, + 'name': obj.name, + 'tags': obj.tags, + 'disk_format': obj.disk_format, + 'container_format': obj.container_format, + 'size': obj.size, + 'virtual_size': obj.virtual_size, + 'status': obj.status, + 'visibility': visibility, + 'checksum': obj.checksum, + 'min_disk': obj.min_disk, + 'min_ram': obj.min_ram, + 'owner': obj.owner, + 'protected': obj.protected, + 'members': members, + 'created_at': timeutils.isotime(obj.created_at), + 'updated_at': timeutils.isotime(obj.updated_at) + } + for image_property in obj.properties: + document[image_property.name] = image_property.value + + return document + + def get_notification_handler(self): + return images_notification_handler.ImageHandler( + self.engine, + self.get_index_name(), + self.get_document_type() + ) + + def get_notification_supported_events(self): + return ['image.create', 'image.update', 'image.delete'] diff --git a/code/daisy/daisy/search/plugins/images_notification_handler.py b/code/daisy/daisy/search/plugins/images_notification_handler.py new file mode 100755 index 00000000..64cedeb4 --- /dev/null +++ b/code/daisy/daisy/search/plugins/images_notification_handler.py @@ -0,0 +1,83 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
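The ImageIndex plugin above follows the IndexBase contract from daisy/search/plugins/base.py: setup() creates the index, pushes the mapping and bulk-loads serialized images, while get_notification_handler() returns the handler defined in the module that follows. A sketch of driving the plugin directly, assuming database access, policy configuration and Elasticsearch connectivity are already in place (that wiring is not part of this change):

from daisy.search.plugins import images

plugin = images.ImageIndex()
plugin.setup()          # create index, put mapping, bulk-index all images

handler = plugin.get_notification_handler()
plugin.get_notification_supported_events()
# ['image.create', 'image.update', 'image.delete']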
+ +from oslo_log import log as logging +import oslo_messaging + +from daisy.common import utils +from daisy.search.plugins import base + +LOG = logging.getLogger(__name__) + + +class ImageHandler(base.NotificationBase): + + def __init__(self, *args, **kwargs): + super(ImageHandler, self).__init__(*args, **kwargs) + self.image_delete_keys = ['deleted_at', 'deleted', + 'is_public', 'properties'] + + def process(self, ctxt, publisher_id, event_type, payload, metadata): + try: + actions = { + "image.create": self.create, + "image.update": self.update, + "image.delete": self.delete + } + actions[event_type](payload) + return oslo_messaging.NotificationResult.HANDLED + except Exception as e: + LOG.error(utils.exception_to_str(e)) + + def create(self, payload): + id = payload['id'] + payload = self.format_image(payload) + self.engine.create( + index=self.index_name, + doc_type=self.document_type, + body=payload, + id=id + ) + + def update(self, payload): + id = payload['id'] + payload = self.format_image(payload) + doc = {"doc": payload} + self.engine.update( + index=self.index_name, + doc_type=self.document_type, + body=doc, + id=id + ) + + def delete(self, payload): + id = payload['id'] + self.engine.delete( + index=self.index_name, + doc_type=self.document_type, + id=id + ) + + def format_image(self, payload): + visibility = 'public' if payload['is_public'] else 'private' + payload['visibility'] = visibility + + payload.update(payload.get('properties', '{}')) + + for key in payload.keys(): + if key in self.image_delete_keys: + del payload[key] + + return payload diff --git a/code/daisy/daisy/search/plugins/metadefs.py b/code/daisy/daisy/search/plugins/metadefs.py new file mode 100755 index 00000000..8654a019 --- /dev/null +++ b/code/daisy/daisy/search/plugins/metadefs.py @@ -0,0 +1,259 @@ +# Copyright 2015 Intel Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
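ImageHandler.process() above dispatches incoming notifications to create/update/delete helpers keyed by event type and returns oslo_messaging.NotificationResult.HANDLED on success. A minimal sketch of feeding it one event, where 'es_api' stands for an already-constructed Elasticsearch client and the payload is made-up sample data:

from daisy.search.plugins import images_notification_handler

handler = images_notification_handler.ImageHandler(es_api, 'glance', 'image')

payload = {'id': 'f6d4f0e8-0000-0000-0000-000000000000',
           'name': 'cirros',
           'is_public': True,
           'properties': {}}
# format_image() converts 'is_public' into 'visibility' and strips the
# keys listed in image_delete_keys before the document is written.
handler.process(ctxt=None,
                publisher_id='image.localhost',
                event_type='image.create',
                payload=payload,
                metadata={})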
+ +import copy + +import six + +import daisy.db +from daisy.db.sqlalchemy import models_metadef as models +from daisy.search.plugins import base +from daisy.search.plugins import metadefs_notification_handler + + +class MetadefIndex(base.IndexBase): + def __init__(self): + super(MetadefIndex, self).__init__() + + self.db_api = daisy.db.get_api() + + def get_index_name(self): + return 'glance' + + def get_document_type(self): + return 'metadef' + + def get_mapping(self): + property_mapping = { + 'dynamic': True, + 'type': 'nested', + 'properties': { + 'property': {'type': 'string', 'index': 'not_analyzed'}, + 'type': {'type': 'string'}, + 'title': {'type': 'string'}, + 'description': {'type': 'string'}, + } + } + mapping = { + '_id': { + 'path': 'namespace', + }, + 'properties': { + 'display_name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'namespace': {'type': 'string', 'index': 'not_analyzed'}, + 'owner': {'type': 'string', 'index': 'not_analyzed'}, + 'visibility': {'type': 'string', 'index': 'not_analyzed'}, + 'resource_types': { + 'type': 'nested', + 'properties': { + 'name': {'type': 'string'}, + 'prefix': {'type': 'string'}, + 'properties_target': {'type': 'string'}, + }, + }, + 'objects': { + 'type': 'nested', + 'properties': { + 'id': {'type': 'string', 'index': 'not_analyzed'}, + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'properties': property_mapping, + } + }, + 'properties': property_mapping, + 'tags': { + 'type': 'nested', + 'properties': { + 'name': {'type': 'string'}, + } + } + }, + } + return mapping + + def get_rbac_filter(self, request_context): + # TODO(krykowski): Define base get_rbac_filter in IndexBase class + # which will provide some common subset of query pieces. + # Something like: + # def get_common_context_pieces(self, request_context): + # return [{'term': {'owner': request_context.owner, + # 'type': {'value': self.get_document_type()}}] + return [ + { + "and": [ + { + 'or': [ + { + 'term': { + 'owner': request_context.owner + } + }, + { + 'term': { + 'visibility': 'public' + } + } + ] + }, + { + 'type': { + 'value': self.get_document_type() + } + } + ] + } + ] + + def get_objects(self): + session = self.db_api.get_session() + namespaces = session.query(models.MetadefNamespace).all() + + resource_types = session.query(models.MetadefResourceType).all() + resource_types_map = {r.id: r.name for r in resource_types} + + for namespace in namespaces: + namespace.resource_types = self.get_namespace_resource_types( + namespace.id, resource_types_map) + namespace.objects = self.get_namespace_objects(namespace.id) + namespace.properties = self.get_namespace_properties(namespace.id) + namespace.tags = self.get_namespace_tags(namespace.id) + + return namespaces + + def get_namespace_resource_types(self, namespace_id, resource_types): + session = self.db_api.get_session() + namespace_resource_types = session.query( + models.MetadefNamespaceResourceType + ).filter_by(namespace_id=namespace_id) + + resource_associations = [{ + 'prefix': r.prefix, + 'properties_target': r.properties_target, + 'name': resource_types[r.resource_type_id], + } for r in namespace_resource_types] + return resource_associations + + def get_namespace_properties(self, namespace_id): + session = self.db_api.get_session() + properties = session.query( + models.MetadefProperty + ).filter_by(namespace_id=namespace_id) + return list(properties) + + def get_namespace_objects(self, namespace_id): + session = self.db_api.get_session() + namespace_objects = session.query( 
+ models.MetadefObject + ).filter_by(namespace_id=namespace_id) + return list(namespace_objects) + + def get_namespace_tags(self, namespace_id): + session = self.db_api.get_session() + namespace_tags = session.query( + models.MetadefTag + ).filter_by(namespace_id=namespace_id) + return list(namespace_tags) + + def serialize(self, obj): + object_docs = [self.serialize_object(ns_obj) for ns_obj in obj.objects] + property_docs = [self.serialize_property(prop.name, prop.json_schema) + for prop in obj.properties] + resource_type_docs = [self.serialize_namespace_resource_type(rt) + for rt in obj.resource_types] + tag_docs = [self.serialize_tag(tag) for tag in obj.tags] + namespace_doc = self.serialize_namespace(obj) + namespace_doc.update({ + 'objects': object_docs, + 'properties': property_docs, + 'resource_types': resource_type_docs, + 'tags': tag_docs, + }) + return namespace_doc + + def serialize_namespace(self, namespace): + return { + 'namespace': namespace.namespace, + 'display_name': namespace.display_name, + 'description': namespace.description, + 'visibility': namespace.visibility, + 'protected': namespace.protected, + 'owner': namespace.owner, + } + + def serialize_object(self, obj): + obj_properties = obj.json_schema + property_docs = [] + for name, schema in six.iteritems(obj_properties): + property_doc = self.serialize_property(name, schema) + property_docs.append(property_doc) + + document = { + 'name': obj.name, + 'description': obj.description, + 'properties': property_docs, + } + return document + + def serialize_property(self, name, schema): + document = copy.deepcopy(schema) + document['property'] = name + + if 'default' in document: + document['default'] = str(document['default']) + if 'enum' in document: + document['enum'] = map(str, document['enum']) + + return document + + def serialize_namespace_resource_type(self, ns_resource_type): + return { + 'name': ns_resource_type['name'], + 'prefix': ns_resource_type['prefix'], + 'properties_target': ns_resource_type['properties_target'] + } + + def serialize_tag(self, tag): + return { + 'name': tag.name + } + + def get_notification_handler(self): + return metadefs_notification_handler.MetadefHandler( + self.engine, + self.get_index_name(), + self.get_document_type() + ) + + def get_notification_supported_events(self): + return [ + "metadef_namespace.create", + "metadef_namespace.update", + "metadef_namespace.delete", + "metadef_object.create", + "metadef_object.update", + "metadef_object.delete", + "metadef_property.create", + "metadef_property.update", + "metadef_property.delete", + "metadef_tag.create", + "metadef_tag.update", + "metadef_tag.delete", + "metadef_resource_type.create", + "metadef_resource_type.delete", + "metadef_namespace.delete_properties", + "metadef_namespace.delete_objects", + "metadef_namespace.delete_tags" + ] diff --git a/code/daisy/daisy/search/plugins/metadefs_notification_handler.py b/code/daisy/daisy/search/plugins/metadefs_notification_handler.py new file mode 100755 index 00000000..056cb6f0 --- /dev/null +++ b/code/daisy/daisy/search/plugins/metadefs_notification_handler.py @@ -0,0 +1,251 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from oslo_log import log as logging +import oslo_messaging + +from daisy.common import utils +from daisy.search.plugins import base + +LOG = logging.getLogger(__name__) + + +class MetadefHandler(base.NotificationBase): + + def __init__(self, *args, **kwargs): + super(MetadefHandler, self).__init__(*args, **kwargs) + self.namespace_delete_keys = ['deleted_at', 'deleted', 'created_at', + 'updated_at', 'namespace_old'] + self.property_delete_keys = ['deleted', 'deleted_at', + 'name_old', 'namespace', 'name'] + + def process(self, ctxt, publisher_id, event_type, payload, metadata): + try: + actions = { + "metadef_namespace.create": self.create_ns, + "metadef_namespace.update": self.update_ns, + "metadef_namespace.delete": self.delete_ns, + "metadef_object.create": self.create_obj, + "metadef_object.update": self.update_obj, + "metadef_object.delete": self.delete_obj, + "metadef_property.create": self.create_prop, + "metadef_property.update": self.update_prop, + "metadef_property.delete": self.delete_prop, + "metadef_resource_type.create": self.create_rs, + "metadef_resource_type.delete": self.delete_rs, + "metadef_tag.create": self.create_tag, + "metadef_tag.update": self.update_tag, + "metadef_tag.delete": self.delete_tag, + "metadef_namespace.delete_properties": self.delete_props, + "metadef_namespace.delete_objects": self.delete_objects, + "metadef_namespace.delete_tags": self.delete_tags + } + actions[event_type](payload) + return oslo_messaging.NotificationResult.HANDLED + except Exception as e: + LOG.error(utils.exception_to_str(e)) + + def run_create(self, id, payload): + self.engine.create( + index=self.index_name, + doc_type=self.document_type, + body=payload, + id=id + ) + + def run_update(self, id, payload, script=False): + if script: + self.engine.update( + index=self.index_name, + doc_type=self.document_type, + body=payload, + id=id) + else: + doc = {"doc": payload} + self.engine.update( + index=self.index_name, + doc_type=self.document_type, + body=doc, + id=id) + + def run_delete(self, id): + self.engine.delete( + index=self.index_name, + doc_type=self.document_type, + id=id + ) + + def create_ns(self, payload): + id = payload['namespace'] + self.run_create(id, self.format_namespace(payload)) + + def update_ns(self, payload): + id = payload['namespace_old'] + self.run_update(id, self.format_namespace(payload)) + + def delete_ns(self, payload): + id = payload['namespace'] + self.run_delete(id) + + def create_obj(self, payload): + id = payload['namespace'] + object = self.format_object(payload) + self.create_entity(id, "objects", object) + + def update_obj(self, payload): + id = payload['namespace'] + object = self.format_object(payload) + self.update_entity(id, "objects", object, + payload['name_old'], "name") + + def delete_obj(self, payload): + id = payload['namespace'] + self.delete_entity(id, "objects", payload['name'], "name") + + def create_prop(self, payload): + id = payload['namespace'] + property = self.format_property(payload) + self.create_entity(id, "properties", property) + + def update_prop(self, payload): + id = payload['namespace'] + 
property = self.format_property(payload) + self.update_entity(id, "properties", property, + payload['name_old'], "property") + + def delete_prop(self, payload): + id = payload['namespace'] + self.delete_entity(id, "properties", payload['name'], "property") + + def create_rs(self, payload): + id = payload['namespace'] + resource_type = dict() + resource_type['name'] = payload['name'] + if payload['prefix']: + resource_type['prefix'] = payload['prefix'] + if payload['properties_target']: + resource_type['properties_target'] = payload['properties_target'] + + self.create_entity(id, "resource_types", resource_type) + + def delete_rs(self, payload): + id = payload['namespace'] + self.delete_entity(id, "resource_types", payload['name'], "name") + + def create_tag(self, payload): + id = payload['namespace'] + tag = dict() + tag['name'] = payload['name'] + + self.create_entity(id, "tags", tag) + + def update_tag(self, payload): + id = payload['namespace'] + tag = dict() + tag['name'] = payload['name'] + + self.update_entity(id, "tags", tag, payload['name_old'], "name") + + def delete_tag(self, payload): + id = payload['namespace'] + self.delete_entity(id, "tags", payload['name'], "name") + + def delete_props(self, payload): + self.delete_field(payload, "properties") + + def delete_objects(self, payload): + self.delete_field(payload, "objects") + + def delete_tags(self, payload): + self.delete_field(payload, "tags") + + def create_entity(self, id, entity, entity_data): + script = ("if (ctx._source.containsKey('%(entity)s'))" + "{ctx._source.%(entity)s += entity_item }" + "else {ctx._source.%(entity)s=entity_list};" % + {"entity": entity}) + + params = { + "entity_item": entity_data, + "entity_list": [entity_data] + } + payload = {"script": script, "params": params} + self.run_update(id, payload=payload, script=True) + + def update_entity(self, id, entity, entity_data, entity_id, field_name): + entity_id = entity_id.lower() + script = ("obj=null; for(entity_item :ctx._source.%(entity)s)" + "{if(entity_item['%(field_name)s'].toLowerCase() " + " == entity_id ) obj=entity_item;};" + "if(obj!=null)ctx._source.%(entity)s.remove(obj);" + "if (ctx._source.containsKey('%(entity)s'))" + "{ctx._source.%(entity)s += entity_item; }" + "else {ctx._source.%(entity)s=entity_list;}" % + {"entity": entity, "field_name": field_name}) + params = { + "entity_item": entity_data, + "entity_list": [entity_data], + "entity_id": entity_id + } + payload = {"script": script, "params": params} + self.run_update(id, payload=payload, script=True) + + def delete_entity(self, id, entity, entity_id, field_name): + entity_id = entity_id.lower() + script = ("obj=null; for(entity_item :ctx._source.%(entity)s)" + "{if(entity_item['%(field_name)s'].toLowerCase() " + " == entity_id ) obj=entity_item;};" + "if(obj!=null)ctx._source.%(entity)s.remove(obj);" % + {"entity": entity, "field_name": field_name}) + params = { + "entity_id": entity_id + } + payload = {"script": script, "params": params} + self.run_update(id, payload=payload, script=True) + + def delete_field(self, payload, field): + id = payload['namespace'] + script = ("if (ctx._source.containsKey('%(field)s'))" + "{ctx._source.remove('%(field)s')}") % {"field": field} + payload = {"script": script} + self.run_update(id, payload=payload, script=True) + + def format_namespace(self, payload): + for key in self.namespace_delete_keys: + if key in payload.keys(): + del payload[key] + return payload + + def format_object(self, payload): + formatted_object = dict() + 
formatted_object['name'] = payload['name'] + formatted_object['description'] = payload['description'] + if payload['required']: + formatted_object['required'] = payload['required'] + formatted_object['properties'] = [] + for property in payload['properties']: + formatted_property = self.format_property(property) + formatted_object['properties'].append(formatted_property) + return formatted_object + + def format_property(self, payload): + prop_data = dict() + prop_data['property'] = payload['name'] + for key, value in six.iteritems(payload): + if key not in self.property_delete_keys and value: + prop_data[key] = value + return prop_data diff --git a/code/daisy/daisy/service.py b/code/daisy/daisy/service.py new file mode 100755 index 00000000..2ca9f372 --- /dev/null +++ b/code/daisy/daisy/service.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# +# Copyright 2012-2014 eNovance +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import socket +import sys + +from oslo.config import cfg +from oslo import i18n +import oslo.messaging +from oslo_log import log + +CONF = cfg.CONF + +OPTS = [ + cfg.StrOpt('host', + default=socket.gethostname(), + help='Name of this node, which must be valid in an AMQP ' + 'key. Can be an opaque identifier. For ZeroMQ only, must ' + 'be a valid host name, FQDN, or IP address.'), + cfg.IntOpt('listener_workers', + default=1, + help='Number of workers for notification service. A single ' + 'notification agent is enabled by default.'), + cfg.IntOpt('http_timeout', + default=600, + help='Timeout seconds for HTTP requests. 
Set it to None to ' + 'disable timeout.'), +] +CONF.register_opts(OPTS) + +CLI_OPTS = [ + cfg.StrOpt('os-username', + deprecated_group="DEFAULT", + default=os.environ.get('OS_USERNAME', 'daisy'), + help='User name to use for OpenStack service access.'), + cfg.StrOpt('os-password', + deprecated_group="DEFAULT", + secret=True, + default=os.environ.get('OS_PASSWORD', 'admin'), + help='Password to use for OpenStack service access.'), + cfg.StrOpt('os-tenant-id', + deprecated_group="DEFAULT", + default=os.environ.get('OS_TENANT_ID', ''), + help='Tenant ID to use for OpenStack service access.'), + cfg.StrOpt('os-tenant-name', + deprecated_group="DEFAULT", + default=os.environ.get('OS_TENANT_NAME', 'admin'), + help='Tenant name to use for OpenStack service access.'), + cfg.StrOpt('os-cacert', + default=os.environ.get('OS_CACERT'), + help='Certificate chain for SSL validation.'), + cfg.StrOpt('os-auth-url', + deprecated_group="DEFAULT", + default=os.environ.get('OS_AUTH_URL', + 'http://localhost:5000/v2.0'), + help='Auth URL to use for OpenStack service access.'), + cfg.StrOpt('os-region-name', + deprecated_group="DEFAULT", + default=os.environ.get('OS_REGION_NAME'), + help='Region name to use for OpenStack service endpoints.'), + cfg.StrOpt('os-endpoint-type', + default=os.environ.get('OS_ENDPOINT_TYPE', 'publicURL'), + help='Type of endpoint in Identity service catalog to use for ' + 'communication with OpenStack services.'), + cfg.BoolOpt('insecure', + default=False, + help='Disables X.509 certificate validation when an ' + 'SSL connection to Identity Service is established.'), +] +CONF.register_cli_opts(CLI_OPTS, group="service_credentials") + +LOG = log.getLogger(__name__) +_DEFAULT_LOG_LEVELS = ['keystonemiddleware=WARN', 'stevedore=WARN'] + + +class WorkerException(Exception): + """Exception for errors relating to service workers.""" + + +def get_workers(name): + return 1 + + +def prepare_service(argv=None): + i18n.enable_lazy() + log.set_defaults(_DEFAULT_LOG_LEVELS) + log.register_options(CONF) + if argv is None: + argv = sys.argv + CONF(argv[1:], project='daisy-search') + log.setup(cfg.CONF, 'daisy-search') + oslo.messaging.set_transport_defaults('daisy') diff --git a/code/daisy/daisy/tests/__init__.py b/code/daisy/daisy/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/code/daisy/daisy/tests/api/__init__.py b/code/daisy/daisy/tests/api/__init__.py new file mode 100644 index 00000000..3ed9fd0f --- /dev/null +++ b/code/daisy/daisy/tests/api/__init__.py @@ -0,0 +1 @@ +__author__ = 'root' diff --git a/code/daisy/daisy/tests/api/tecs.conf b/code/daisy/daisy/tests/api/tecs.conf new file mode 100644 index 00000000..e9eafcfc --- /dev/null +++ b/code/daisy/daisy/tests/api/tecs.conf @@ -0,0 +1,16 @@ +[general] +CLUSTER_ID = + +CONFIG_SERVER1_HOSTS = +CONFIG_SERVER1_HOST = + +CONFIG_SERVER2_HOSTS = +CONFIG_SERVER2_HOST = + +CONFIG_COMPONENT1_INSTALL = + +CONFIG_COMPONENT2_INSTALL = + +CONFIG_SERVER1_INSTALL_MODE = + +CONFIG_SERVER2_INSTALL_MODE = \ No newline at end of file diff --git a/code/daisy/daisy/tests/api/test_config.py b/code/daisy/daisy/tests/api/test_config.py new file mode 100644 index 00000000..8f30f8c2 --- /dev/null +++ b/code/daisy/daisy/tests/api/test_config.py @@ -0,0 +1,80 @@ + +import daisy.api.backends.tecs.config as tecs_config + +import unittest +import os + +compute_role = {"Compute": + {'services': + {'server1': 'component1', 'server2': 'component2'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.1'}, + 'deployment': {'ip': 
'192.168.0.1'}, }, + {'management': {'ip': '192.168.1.2'}, + 'deployment': {'ip': '192.168.0.2'}, }, ], + 'vip': '192.168.4.2', }, } + +ha_role = {"CONTROLLER_HA": + {'services': + {'nova-api': 'component3', 'mariadb': 'component4'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.3', 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.3'}, + 'storage': {'ip': '192.168.5.3'}, }, + {'management': {'ip': '192.168.1.4', 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.4'}, + 'storage': {'ip': '192.168.5.3'}, }, ], + 'vip': '192.168.4.4', }, } + +lb_role = {"CONTROLLER_LB": + {'services': + {'nova-api': 'component5', 'mariadb': 'component6'}, + 'host_interfaces': + [{'management': {'ip': '192.168.1.5', 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.5'}, + 'storage': {'ip': '192.168.5.5'}, }, + {'management': {'ip': '192.168.1.6', 'netmask': "255.255.255.0", 'name': 'eth0', }, + 'deployment': {'ip': '192.168.0.6'}, + 'storage': {'ip': '192.168.5.6'}, }, ], + 'vip': '192.168.4.6', }, } + +def merge_dict(*args): + result = dict() + for a in args: + if isinstance(a, dict): + result.update(a) + return result +mix_roles = merge_dict(compute_role, ha_role, lb_role) + + +class TestTecsConfig(unittest.TestCase): + def setUp(self): + tecs_config.tecs_conf_template_path = os.path.dirname(os.path.realpath(__file__)) + print tecs_config.tecs_conf_template_path + + def tearDown(self): + tecs_config.tecs_conf_template_path = tecs_config.default_tecs_conf_template_path + + def test_config_with_nothing(self): + tecs, ha = tecs_config.update_tecs_conf("ab-11", {}) + self.assertTrue(True) + + def test_config_with_compute_role(self): + tecs,ha = tecs_config.update_tecs_conf("ab-11", compute_role ) + self.assertTrue(True) + print tecs,ha + + def test_config_with_ha_role(self): + tecs, ha = tecs_config.update_tecs_conf("ab-11", ha_role ) + self.assertTrue(True) + + def test_config_with_lb_role(self): + tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role ) + self.assertTrue(True) + + def test_config_with_all_role(self): + tecs, ha = tecs_config.update_tecs_conf("ab-11", lb_role ) + self.assertTrue(True) + +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/code/daisy/daisy/version.py b/code/daisy/daisy/version.py new file mode 100755 index 00000000..b6474c36 --- /dev/null +++ b/code/daisy/daisy/version.py @@ -0,0 +1,18 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import pbr.version + +version_info = pbr.version.VersionInfo('daisy') diff --git a/code/daisy/doc/source/architecture.rst b/code/daisy/doc/source/architecture.rst new file mode 100755 index 00000000..5d7af462 --- /dev/null +++ b/code/daisy/doc/source/architecture.rst @@ -0,0 +1,58 @@ +.. + Copyright 2015 OpenStack Foundation + All Rights Reserved. 
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+==================
+Basic architecture
+==================
+
+OpenStack Glance has a client-server architecture and provides a user
+REST API through which requests to the server are performed.
+
+Internal server operations are managed by a Glance Domain Controller
+divided into layers. Each layer implements its own task.
+
+All file operations are performed through the glance_store library,
+which is responsible for interaction with external storage back ends or
+the local filesystem and provides a uniform interface for access.
+
+Glance uses an SQL-based central database (Glance DB) that is shared
+with all the components in the system.
+
+.. figure:: /images/architecture.png
+   :figwidth: 100%
+   :align: center
+   :alt: OpenStack Glance Architecture
+
+.. centered:: Image 1. OpenStack Glance Architecture
+
+The Glance architecture consists of several components:
+
+* **A client** -- any application that uses the Glance server.
+
+* **REST API** -- exposes Glance functionality via REST.
+
+* **Database Abstraction Layer (DAL)** -- an application programming interface
+  which unifies the communication between Glance and databases.
+
+* **Glance Domain Controller** -- middleware that implements the main
+  Glance functionalities: authorization, notifications, policies,
+  database connections.
+
+* **Glance Store** -- organizes interactions between Glance and various
+  data stores.
+
+* **Registry Layer** -- optional layer organizing secure communication between
+  the domain and the DAL by using a separate service.
diff --git a/code/daisy/doc/source/authentication.rst b/code/daisy/doc/source/authentication.rst
new file mode 100755
index 00000000..f7082dd8
--- /dev/null
+++ b/code/daisy/doc/source/authentication.rst
@@ -0,0 +1,107 @@
+..
+      Copyright 2010 OpenStack Foundation
+      All Rights Reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+Authentication With Keystone
+============================
+
+Glance may optionally be integrated with Keystone. Setting this up is
+relatively straightforward, as the Keystone distribution includes the
+necessary middleware. Once you have installed Keystone
+and edited your configuration files, newly created images will have
+their `owner` attribute set to the tenant of the authenticated user,
+and the `is_public` attribute will cause access to those images for
+which it is `false` to be restricted to only the owner, users with
+admin context, or tenants/users with whom the image has been shared.
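As a rough illustration of the behaviour described above, once Keystone authentication
is enabled an image uploaded without ``--is-public True`` is visible only to the tenant
that owns it. The sketch below assumes the legacy v1 ``glance`` command line client;
the image name and file are placeholders::

    # as a user of the tenant that owns the image
    $ glance image-create --name my-private-image --is-public False \
          --disk-format qcow2 --container-format bare --file my-image.qcow2
    $ glance image-list          # the new image is listed

    # as a non-admin user of a different tenant
    $ glance image-list          # the image does not appear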
+ + +Configuring the Glance servers to use Keystone +---------------------------------------------- + +Keystone is integrated with Glance through the use of middleware. The +default configuration files for both the Glance API and the Glance +Registry use a single piece of middleware called ``unauthenticated-context``, +which generates a request context containing blank authentication +information. In order to configure Glance to use Keystone, the +``authtoken`` and ``context`` middlewares must be deployed in place of the +``unauthenticated-context`` middleware. The ``authtoken`` middleware performs +the authentication token validation and retrieves actual user authentication +information. It can be found in the Keystone distribution. + +Configuring Glance API to use Keystone +-------------------------------------- + +Configuring Glance API to use Keystone is relatively straight +forward. The first step is to ensure that declarations for the two +pieces of middleware exist in the ``glance-api-paste.ini``. Here is +an example for ``authtoken``:: + + [filter:authtoken] + paste.filter_factory = keystonemiddleware.auth_token:filter_factory + identity_uri = http://127.0.0.1:35357 + admin_user = glance_admin + admin_tenant_name = service_admins + admin_password = password1234 + +The actual values for these variables will need to be set depending on +your situation. For more information, please refer to the Keystone +documentation on the ``auth_token`` middleware, but in short: + +* The ``identity_uri`` variable points to the Keystone Admin service. + This information is used by the middleware to actually query Keystone about + the validity of the authentication tokens. +* The admin auth credentials (``admin_user``, ``admin_tenant_name``, + ``admin_password``) will be used to retrieve an admin token. That + token will be used to authorize user tokens behind the scenes. + +Finally, to actually enable using Keystone authentication, the +application pipeline must be modified. By default, it looks like:: + + [pipeline:glance-api] + pipeline = versionnegotiation unauthenticated-context apiv1app + +Your particular pipeline may vary depending on other options, such as +the image cache. This must be changed by replacing ``unauthenticated-context`` +with ``authtoken`` and ``context``:: + + [pipeline:glance-api] + pipeline = versionnegotiation authtoken context apiv1app + +Configuring Glance Registry to use Keystone +------------------------------------------- + +Configuring Glance Registry to use Keystone is also relatively +straight forward. The same middleware needs to be added +to ``glance-registry-paste.ini`` as was needed by Glance API; +see above for an example of the ``authtoken`` configuration. + +Again, to enable using Keystone authentication, the appropriate +application pipeline must be selected. By default, it looks like:: + + [pipeline:glance-registry-keystone] + pipeline = authtoken context registryapp + +To enable the above application pipeline, in your main ``glance-registry.conf`` +configuration file, select the appropriate deployment flavor by adding a +``flavor`` attribute in the ``paste_deploy`` group:: + + [paste_deploy] + flavor = keystone + +.. note:: + If your authentication service uses a role other than ``admin`` to identify + which users should be granted admin-level privileges, you must define it + in the ``admin_role`` config attribute in both ``glance-registry.conf`` and + ``glance-api.conf``. 
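If your identity service grants administrative rights through a custom role, the
``admin_role`` change described in the note above is a one-line setting. A minimal
sketch, assuming the role is named ``cloud_admin`` (the name is purely illustrative)
and that the option lives in the ``[DEFAULT]`` section, placed in both
``glance-api.conf`` and ``glance-registry.conf``::

    [DEFAULT]
    admin_role = cloud_admin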
diff --git a/code/daisy/doc/source/cache.rst b/code/daisy/doc/source/cache.rst new file mode 100755 index 00000000..8a12fb54 --- /dev/null +++ b/code/daisy/doc/source/cache.rst @@ -0,0 +1,130 @@ +.. + Copyright 2011 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +The Glance Image Cache +====================== + +The Glance API server may be configured to have an optional local image cache. +A local image cache stores a copy of image files, essentially enabling multiple +API servers to serve the same image file, resulting in an increase in +scalability due to an increased number of endpoints serving an image file. + +This local image cache is transparent to the end user -- in other words, the +end user doesn't know that the Glance API is streaming an image file from +its local cache or from the actual backend storage system. + +Managing the Glance Image Cache +------------------------------- + +While image files are automatically placed in the image cache on successful +requests to ``GET /images/``, the image cache is not automatically +managed. Here, we describe the basics of how to manage the local image cache +on Glance API servers and how to automate this cache management. + +Controlling the Growth of the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The image cache has a configurable maximum size (the ``image_cache_max_size`` +configuration file option). The ``image_cache_max_size`` is an upper limit +beyond which pruner, if running, starts cleaning the images cache. +However, when images are successfully returned from a call to +``GET /images/``, the image cache automatically writes the image +file to its cache, regardless of whether the resulting write would make the +image cache's size exceed the value of ``image_cache_max_size``. +In order to keep the image cache at or below this maximum cache size, +you need to run the ``glance-cache-pruner`` executable. + +The recommended practice is to use ``cron`` to fire ``glance-cache-pruner`` +at a regular interval. + +Cleaning the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~ + +Over time, the image cache can accumulate image files that are either in +a stalled or invalid state. Stalled image files are the result of an image +cache write failing to complete. Invalid image files are the result of an +image file not being written properly to disk. + +To remove these types of files, you run the ``glance-cache-cleaner`` +executable. + +The recommended practice is to use ``cron`` to fire ``glance-cache-cleaner`` +at a semi-regular interval. + +Prefetching Images into the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Some installations have base (sometimes called "golden") images that are +very commonly used to boot virtual machines. When spinning up a new API +server, administrators may wish to prefetch these image files into the +local image cache to ensure that reads of those popular image files come +from a local cache. 
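The pruner and cleaner jobs recommended in the two preceding subsections are usually
driven from ``cron``. One possible crontab sketch (the intervals and binary paths here
are assumptions, not values taken from this guide) is::

    # prune the cache back under image_cache_max_size every 30 minutes
    */30 * * * * /usr/bin/glance-cache-pruner
    # sweep out stalled and invalid cache entries once a day
    0 3 * * * /usr/bin/glance-cache-cleaner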
+ +To queue an image for prefetching, you can use one of the following methods: + + * If the ``cache_manage`` middleware is enabled in the application pipeline, + you may call ``PUT /queued-images/`` to queue the image with + identifier ```` + + Alternately, you can use the ``glance-cache-manage`` program to queue the + image. This program may be run from a different host than the host + containing the image cache. Example usage:: + + $> glance-cache-manage --host= queue-image + + This will queue the image with identifier ```` for prefetching + +Once you have queued the images you wish to prefetch, call the +``glance-cache-prefetcher`` executable, which will prefetch all queued images +concurrently, logging the results of the fetch for each image. + +Finding Which Images are in the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can find out which images are in the image cache using one of the +following methods: + + * If the ``cachemanage`` middleware is enabled in the application pipeline, + you may call ``GET /cached-images`` to see a JSON-serialized list of + mappings that show cached images, the number of cache hits on each image, + the size of the image, and the times they were last accessed. + + Alternately, you can use the ``glance-cache-manage`` program. This program + may be run from a different host than the host containing the image cache. + Example usage:: + + $> glance-cache-manage --host= list-cached + + * You can issue the following call on \*nix systems (on the host that contains + the image cache):: + + $> ls -lhR $IMAGE_CACHE_DIR + + where ``$IMAGE_CACHE_DIR`` is the value of the ``image_cache_dir`` + configuration variable. + + Note that the image's cache hit is not shown using this method. + +Manually Removing Images from the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the ``cachemanage`` middleware is enabled, you may call +``DELETE /cached-images/`` to remove the image file for image +with identifier ```` from the cache. + +Alternately, you can use the ``glance-cache-manage`` program. Example usage:: + + $> glance-cache-manage --host= delete-cached-image diff --git a/code/daisy/doc/source/common-image-properties.rst b/code/daisy/doc/source/common-image-properties.rst new file mode 100755 index 00000000..ec7c8ba6 --- /dev/null +++ b/code/daisy/doc/source/common-image-properties.rst @@ -0,0 +1,60 @@ +.. + Copyright 2013 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Common Image Properties +======================= + +When adding an image to Glance, you may specify some common image properties +that may prove useful to consumers of your image. + +This document explains the names of these properties and the expected values. + +The common image properties are also described in a JSON schema, found in +etc/schema-image.json in the Glance source code. 
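As a sketch of how the properties documented below are typically attached to an image
(the image name, file, and property values are illustrative only, and the legacy v1
``glance`` command line client is assumed)::

    $ glance image-create --name ubuntu-14.04-server \
          --disk-format qcow2 --container-format bare \
          --file ubuntu-14.04-server.qcow2 \
          --property architecture=x86_64 \
          --property os_distro=ubuntu \
          --property os_version=14.04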
+ +**architecture** +---------------- + +Operating system architecture as specified in +http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html + +**instance_uuid** +----------------- + +The ID of the instance used to create this image. + +**kernel_id** +------------- + +The ID of image stored in Glance that should be used as the kernel when booting +an AMI-style image. + +**ramdisk_id** +-------------- + +The ID of image stored in Glance that should be used as the ramdisk when +booting an AMI-style image. + +**os_distro** +------------- + +The common name of the operating system distribution as specified in +http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html + +**os_version** +-------------- + +The operating system version as specified by the distributor. diff --git a/code/daisy/doc/source/conf.py b/code/daisy/doc/source/conf.py new file mode 100755 index 00000000..b42fb729 --- /dev/null +++ b/code/daisy/doc/source/conf.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2010 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Glance documentation build configuration file, created by +# sphinx-quickstart on Tue May 18 13:50:15 2010. +# +# This file is execfile()'d with the current directory set to its containing +# dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import os +import sys + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path = [ + os.path.abspath('../..'), + os.path.abspath('../../bin') + ] + sys.path + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['sphinx.ext.coverage', + 'sphinx.ext.ifconfig', + 'sphinx.ext.pngmath', + 'sphinx.ext.graphviz', + 'oslosphinx', + ] + +# Add any paths that contain templates here, relative to this directory. +# templates_path = [] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Glance' +copyright = u'2010-2014, OpenStack Foundation.' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +from daisy.version import version_info as daisy_version +# The full version, including alpha/beta/rc tags. 
+release = daisy_version.version_string_with_vcs() +# The short X.Y version. +version = daisy_version.canonical_version_string() + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['api'] + +# The reST default role (for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = True + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +modindex_common_prefix = ['glance.'] + +# -- Options for man page output -------------------------------------------- + +# Grouping the document tree for man pages. +# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' + +man_pages = [ + ('man/glanceapi', 'glance-api', u'Glance API Server', + [u'OpenStack'], 1), + ('man/glancecachecleaner', 'glance-cache-cleaner', u'Glance Cache Cleaner', + [u'OpenStack'], 1), + ('man/glancecachemanage', 'glance-cache-manage', u'Glance Cache Manager', + [u'OpenStack'], 1), + ('man/glancecacheprefetcher', 'glance-cache-prefetcher', + u'Glance Cache Pre-fetcher', [u'OpenStack'], 1), + ('man/glancecachepruner', 'glance-cache-pruner', u'Glance Cache Pruner', + [u'OpenStack'], 1), + ('man/glancecontrol', 'glance-control', u'Glance Daemon Control Helper ', + [u'OpenStack'], 1), + ('man/glancemanage', 'glance-manage', u'Glance Management Utility', + [u'OpenStack'], 1), + ('man/glanceregistry', 'glance-registry', u'Glance Registry Server', + [u'OpenStack'], 1), + ('man/glancereplicator', 'glance-replicator', u'Glance Replicator', + [u'OpenStack'], 1), + ('man/glancescrubber', 'glance-scrubber', u'Glance Scrubber Service', + [u'OpenStack'], 1) +] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +# html_theme_path = ["."] +# html_theme = '_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = ['_theme'] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' +git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1" +html_last_updated_fmt = os.popen(git_cmd).read() + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +html_use_modindex = False + +# If false, no index is generated. +html_use_index = False + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'glancedoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, +# documentclass [howto/manual]). +latex_documents = [ + ('index', 'Glance.tex', u'Glance Documentation', + u'Glance Team', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/code/daisy/doc/source/configuring.rst b/code/daisy/doc/source/configuring.rst new file mode 100755 index 00000000..5984929f --- /dev/null +++ b/code/daisy/doc/source/configuring.rst @@ -0,0 +1,1501 @@ +.. + Copyright 2011 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +Basic Configuration +=================== + +Glance has a number of options that you can use to configure the Glance API +server, the Glance Registry server, and the various storage backends that +Glance can use to store images. + +Most configuration is done via configuration files, with the Glance API +server and Glance Registry server using separate configuration files. + +When starting up a Glance server, you can specify the configuration file to +use (see :doc:`the documentation on controller Glance servers `). +If you do **not** specify a configuration file, Glance will look in the following +directories for a configuration file, in order: + +* ``~/.glance`` +* ``~/`` +* ``/etc/glance`` +* ``/etc`` + +The Glance API server configuration file should be named ``glance-api.conf``. +Similarly, the Glance Registry server configuration file should be named +``glance-registry.conf``. If you installed Glance via your operating system's +package management system, it is likely that you will have sample +configuration files installed in ``/etc/glance``. + +In addition to this documentation page, you can check the +``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample configuration +files distributed with Glance for example configuration files for each server +application with detailed comments on what each options does. + +The PasteDeploy configuration (controlling the deployment of the WSGI +application for each component) may be found by default in +-paste.ini alongside the main configuration file, .conf. +For example, ``glance-api-paste.ini`` corresponds to ``glance-api.conf``. +This pathname for the paste config is configurable, as follows:: + + [paste_deploy] + config_file = /path/to/paste/config + + +Common Configuration Options in Glance +-------------------------------------- + +Glance has a few command-line options that are common to all Glance programs: + +* ``--verbose`` + +Optional. Default: ``False`` + +Can be specified on the command line and in configuration files. + +Turns on the INFO level in logging and prints more verbose command-line +interface printouts. + +* ``--debug`` + +Optional. Default: ``False`` + +Can be specified on the command line and in configuration files. + +Turns on the DEBUG level in logging. + +* ``--config-file=PATH`` + +Optional. Default: See below for default search order. + +Specified on the command line only. + +Takes a path to a configuration file to use when running the program. If this +CLI option is not specified, then we check to see if the first argument is a +file. If it is, then we try to use that as the configuration file. If there is +no file or there were no arguments, we search for a configuration file in the +following order: + +* ``~/.glance`` +* ``~/`` +* ``/etc/glance`` +* ``/etc`` + +The filename that is searched for depends on the server application name. So, +if you are starting up the API server, ``glance-api.conf`` is searched for, +otherwise ``glance-registry.conf``. + +* ``--config-dir=DIR`` + +Optional. Default: ``None`` + +Specified on the command line only. + +Takes a path to a configuration directory from which all \*.conf fragments +are loaded. This provides an alternative to multiple --config-file options +when it is inconvenient to explicitly enumerate all the config files, for +example when an unknown number of config fragments are being generated +by a deployment framework. + +If --config-dir is set, then --config-file is ignored. 
+ +An example usage would be: + + $ glance-api --config-dir=/etc/glance/glance-api.d + + $ ls /etc/glance/glance-api.d + 00-core.conf + 01-s3.conf + 02-swift.conf + 03-ssl.conf + ... etc. + +The numeric prefixes in the example above are only necessary if a specific +parse ordering is required (i.e. if an individual config option set in an +earlier fragment is overridden in a later fragment). + +Note that ``glance-manage`` currently loads configuration from three files: + +* ``glance-registry.conf`` +* ``glance-api.conf`` +* and the newly created ``glance-manage.conf`` + +By default ``glance-manage.conf`` only specifies a custom logging file but +other configuration options for ``glance-manage`` should be migrated in there. +**Warning**: Options set in ``glance-manage.conf`` will override options of +the same section and name set in the other two. Similarly, options in +``glance-api.conf`` will override options set in ``glance-registry.conf``. +This tool is planning to stop loading ``glance-registry.conf`` and +``glance-api.conf`` in a future cycle. + +Configuring Server Startup Options +---------------------------------- + +You can put the following options in the ``glance-api.conf`` and +``glance-registry.conf`` files, under the ``[DEFAULT]`` section. They enable +startup and binding behaviour for the API and registry servers, respectively. + +* ``bind_host=ADDRESS`` + +The address of the host to bind to. + +Optional. Default: ``0.0.0.0`` + +* ``bind_port=PORT`` + +The port the server should bind to. + +Optional. Default: ``9191`` for the registry server, ``9292`` for the API server + +* ``backlog=REQUESTS`` + +Number of backlog requests to configure the socket with. + +Optional. Default: ``4096`` + +* ``tcp_keepidle=SECONDS`` + +Sets the value of TCP_KEEPIDLE in seconds for each server socket. +Not supported on OS X. + +Optional. Default: ``600`` + +* ``workers=PROCESSES`` + +Number of Glance API or Registry worker processes to start. Each worker +process will listen on the same port. Increasing this value may increase +performance (especially if using SSL with compression enabled). Typically +it is recommended to have one worker process per CPU. The value `0` +will prevent any new processes from being created. + +Optional. Default: The number of CPUs available will be used by default. + +Configuring SSL Support +~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``cert_file=PATH`` + +Path to the certificate file the server should use when binding to an +SSL-wrapped socket. + +Optional. Default: not enabled. + +* ``key_file=PATH`` + +Path to the private key file the server should use when binding to an +SSL-wrapped socket. + +Optional. Default: not enabled. + +* ``ca_file=PATH`` + +Path to the CA certificate file the server should use to validate client +certificates provided during an SSL handshake. This is ignored if +``cert_file`` and ''key_file`` are not set. + +Optional. Default: not enabled. + +Configuring Registry Access +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are a number of configuration options in Glance that control how +the API server accesses the registry server. + +* ``registry_client_protocol=PROTOCOL`` + +If you run a secure Registry server, you need to set this value to ``https`` +and also set ``registry_client_key_file`` and optionally +``registry_client_cert_file``. + +Optional. Default: http + +* ``registry_client_key_file=PATH`` + +The path to the key file to use in SSL connections to the +registry server, if any. 
Alternately, you may set the +``GLANCE_CLIENT_KEY_FILE`` environ variable to a filepath of the key file + +Optional. Default: Not set. + +* ``registry_client_cert_file=PATH`` + +Optional. Default: Not set. + +The path to the cert file to use in SSL connections to the +registry server, if any. Alternately, you may set the +``GLANCE_CLIENT_CERT_FILE`` environ variable to a filepath of the cert file + +* ``registry_client_ca_file=PATH`` + +Optional. Default: Not set. + +The path to a Certifying Authority's cert file to use in SSL connections to the +registry server, if any. Alternately, you may set the +``GLANCE_CLIENT_CA_FILE`` environ variable to a filepath of the CA cert file + +* ``registry_client_insecure=False`` + +Optional. Default: False. + +When using SSL in connections to the registry server, do not require +validation via a certifying authority. This is the registry's equivalent of +specifying --insecure on the command line using glanceclient for the API + +* ``registry_client_timeout=SECONDS`` + +Optional. Default: ``600``. + +The period of time, in seconds, that the API server will wait for a registry +request to complete. A value of '0' implies no timeout. + +* ``use_user_token=True`` + +Optional. Default: True + +Pass the user token through for API requests to the registry. + +If 'use_user_token' is not in effect then admin credentials can be +specified (see below). If admin credentials are specified then they are +used to generate a token; this token rather than the original user's +token is used for requests to the registry. + +* ``admin_user=USER`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the username. + +Optional. Default: None + +* ``admin_password=PASSWORD`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the password. + +Optional. Default: None + +* ``admin_tenant_name=TENANTNAME`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the tenant name. + +Optional. Default: None + +* ``auth_url=URL`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the Keystone endpoint. + +Optional. Default: None + +* ``auth_strategy=STRATEGY`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the auth strategy. + +Optional. Default: keystone + +* ``auth_region=REGION`` +If 'use_user_token' is not in effect then admin credentials can be +specified. Use this parameter to specify the region. + +Optional. Default: None + + +Configuring Logging in Glance +----------------------------- + +There are a number of configuration options in Glance that control how Glance +servers log messages. + +* ``--log-config=PATH`` + +Optional. Default: ``None`` + +Specified on the command line only. + +Takes a path to a configuration file to use for configuring logging. + +Logging Options Available Only in Configuration Files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You will want to place the different logging options in the **[DEFAULT]** section +in your application configuration file. As an example, you might do the following +for the API server, in a configuration file called ``etc/glance-api.conf``:: + + [DEFAULT] + log_file = /var/log/glance/api.log + +* ``log_file`` + +The filepath of the file to use for logging messages from Glance's servers. 
If +missing, the default is to output messages to ``stdout``, so if you are running +Glance servers in a daemon mode (using ``glance-control``) you should make +sure that the ``log_file`` option is set appropriately. + +* ``log_dir`` + +The filepath of the directory to use for log files. If not specified (the default) +the ``log_file`` is used as an absolute filepath. + +* ``log_date_format`` + +The format string for timestamps in the log output. + +Defaults to ``%Y-%m-%d %H:%M:%S``. See the +`logging module `_ documentation for +more information on setting this format string. + +* ``log_use_syslog`` + +Use syslog logging functionality. + +Defaults to False. + +Configuring Glance Storage Backends +----------------------------------- + +There are a number of configuration options in Glance that control how Glance +stores disk images. These configuration options are specified in the +``glance-api.conf`` config file in the section ``[glance_store]``. + +* ``default_store=STORE`` + +Optional. Default: ``file`` + +Can only be specified in configuration files. + +Sets the storage backend to use by default when storing images in Glance. +Available options for this option are (``file``, ``swift``, ``s3``, ``rbd``, ``sheepdog``, +``cinder`` or ``vsphere``). In order to select a default store it must also +be listed in the ``stores`` list described below. + +* ``stores=STORES`` + +Optional. Default: ``glance.store.filesystem.Store, glance.store.http.Store`` + +A comma separated list of enabled glance stores. Options are specified +in the format of glance.store.OPTION.Store. Some available options for this +option are (``filesystem``, ``http``, ``rbd``, ``s3``, ``swift``, ``sheepdog``, +``cinder``, ``gridfs``, ``vmware_datastore``) + +Configuring Glance Image Size Limit +----------------------------------- + +The following configuration option is specified in the +``glance-api.conf`` config file in the section ``[DEFAULT]``. + +* ``image_size_cap=SIZE`` + +Optional. Default: ``1099511627776`` (1 TB) + +Maximum image size, in bytes, which can be uploaded through the Glance API server. + +**IMPORTANT NOTE**: this value should only be increased after careful consideration +and must be set to a value under 8 EB (9223372036854775808). + +Configuring Glance User Storage Quota +------------------------------------- + +The following configuration option is specified in the +``glance-api.conf`` config file in the section ``[DEFAULT]``. + +* ``user_storage_quota`` + +Optional. Default: 0 (Unlimited). + +This value specifies the maximum amount of storage that each user can use +across all storage systems. Optionally unit can be specified for the value. +Values are accepted in B, KB, MB, GB or TB which are for Bytes, KiloBytes, +MegaBytes, GigaBytes and TeraBytes respectively. Default unit is Bytes. + +Example values would be, + user_storage_quota=20GB + +Configuring the Filesystem Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``filesystem_store_datadir=PATH`` + +Optional. Default: ``/var/lib/glance/images/`` + +Can only be specified in configuration files. + +`This option is specific to the filesystem storage backend.` + +Sets the path where the filesystem storage backend write disk images. Note that +the filesystem storage backend will attempt to create this directory if it does +not exist. Ensure that the user that ``glance-api`` runs under has write +permissions to this directory. + +* ``filesystem_store_file_perm=PERM_MODE`` + +Optional. 
Default: ``0`` + +Can only be specified in configuration files. + +`This option is specific to the filesystem storage backend.` + +The required permission value, in octal representation, for the created image file. +You can use this value to specify the user of the consuming service (such as Nova) as +the only member of the group that owns the created files. To keep the default value, +assign a permission value that is less than or equal to 0. Note that the file owner +must maintain read permission; if this value removes that permission an error message +will be logged and the BadStoreConfiguration exception will be raised. If the Glance +service has insufficient privileges to change file access permissions, a file will still +be saved, but a warning message will appear in the Glance log. + +Configuring the Filesystem Storage Backend with multiple stores +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``filesystem_store_datadirs=PATH:PRIORITY`` + +Optional. Default: ``/var/lib/glance/images/:1`` + +Example:: + + filesystem_store_datadirs = /var/glance/store + filesystem_store_datadirs = /var/glance/store1:100 + filesystem_store_datadirs = /var/glance/store2:200 + +This option can only be specified in configuration file and is specific +to the filesystem storage backend only. + +filesystem_store_datadirs option allows administrators to configure +multiple store directories to save glance image in filesystem storage backend. +Each directory can be coupled with its priority. + +**NOTE**: + +* This option can be specified multiple times to specify multiple stores. +* Either filesystem_store_datadir or filesystem_store_datadirs option must be + specified in glance-api.conf +* Store with priority 200 has precedence over store with priority 100. +* If no priority is specified, default priority '0' is associated with it. +* If two filesystem stores have same priority store with maximum free space + will be chosen to store the image. +* If same store is specified multiple times then BadStoreConfiguration + exception will be raised. + +Configuring the Swift Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``swift_store_auth_address=URL`` + +Required when using the Swift storage backend. + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +Sets the authentication URL supplied to Swift when making calls to its storage +system. For more information about the Swift authentication system, please +see the `Swift auth `_ +documentation and the +`overview of Swift authentication `_. + +**IMPORTANT NOTE**: Swift authentication addresses use HTTPS by default. This +means that if you are running Swift with authentication over HTTP, you need +to set your ``swift_store_auth_address`` to the full URL, including the ``http://``. + +* ``swift_store_user=USER`` + +Required when using the Swift storage backend. + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +Sets the user to authenticate against the ``swift_store_auth_address`` with. + +* ``swift_store_key=KEY`` + +Required when using the Swift storage backend. + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +Sets the authentication key to authenticate against the +``swift_store_auth_address`` with for the user ``swift_store_user``. + +* ``swift_store_container=CONTAINER`` + +Optional. Default: ``glance`` + +Can only be specified in configuration files. 
+ +`This option is specific to the Swift storage backend.` + +Sets the name of the container to use for Glance images in Swift. + +* ``swift_store_create_container_on_put`` + +Optional. Default: ``False`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +If true, Glance will attempt to create the container ``swift_store_container`` +if it does not exist. + +* ``swift_store_large_object_size=SIZE_IN_MB`` + +Optional. Default: ``5120`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +What size, in MB, should Glance start chunking image files +and do a large object manifest in Swift? By default, this is +the maximum object size in Swift, which is 5GB + +* ``swift_store_large_object_chunk_size=SIZE_IN_MB`` + +Optional. Default: ``200`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +When doing a large object manifest, what size, in MB, should +Glance write chunks to Swift? The default is 200MB. + +* ``swift_store_multi_tenant=False`` + +Optional. Default: ``False`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +If set to True enables multi-tenant storage mode which causes Glance images +to be stored in tenant specific Swift accounts. When set to False Glance +stores all images in a single Swift account. + +* ``swift_store_multiple_containers_seed`` + +Optional. Default: ``0`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +When set to 0, a single-tenant store will only use one container to store all +images. When set to an integer value between 1 and 32, a single-tenant store +will use multiple containers to store images, and this value will determine +how many characters from an image UUID are checked when determining what +container to place the image in. The maximum number of containers that will be +created is approximately equal to 16^N. This setting is used only when +swift_store_multi_tentant is disabled. + +Example: if this config option is set to 3 and +swift_store_container = 'glance', then an image with UUID +'fdae39a1-bac5-4238-aba4-69bcc726e848' would be placed in the container +'glance_fda'. All dashes in the UUID are included when creating the container +name but do not count toward the character limit, so in this example with N=10 +the container name would be 'glance_fdae39a1-ba'. + +When choosing the value for swift_store_multiple_containers_seed, deployers +should discuss a suitable value with their swift operations team. The authors +of this option recommend that large scale deployments use a value of '2', +which will create a maximum of ~256 containers. Choosing a higher number than +this, even in extremely large scale deployments, may not have any positive +impact on performance and could lead to a large number of empty, unused +containers. The largest of deployments could notice an increase in performance +if swift rate limits are throttling on single container. Note: If dynamic +container creation is turned off, any value for this configuration option +higher than '1' may be unreasonable as the deployer would have to manually +create each container. + +* ``swift_store_admin_tenants`` + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +Optional. Default: Not set. 
+
+A list of swift ACL strings that will be applied as both read and
+write ACLs to the containers created by Glance in multi-tenant
+mode. This grants the specified tenants/users read and write access
+to all newly created image objects. The standard swift ACL string
+formats are allowed, including:
+
+<tenant_id>:<username>
+<tenant_name>:<username>
+\*:<username>
+
+Multiple ACLs can be combined using a comma separated list, for
+example: swift_store_admin_tenants = service:glance,*:admin
+
+* ``swift_store_auth_version``
+
+Can only be specified in configuration files.
+
+`This option is specific to the Swift storage backend.`
+
+Optional. Default: ``2``
+
+A string indicating which version of Swift OpenStack authentication
+to use. See the project
+`python-swiftclient `_
+for more details.
+
+* ``swift_store_service_type``
+
+Can only be specified in configuration files.
+
+`This option is specific to the Swift storage backend.`
+
+Optional. Default: ``object-store``
+
+A string giving the service type of the swift service to use. This
+setting is only used if swift_store_auth_version is ``2``.
+
+* ``swift_store_region``
+
+Can only be specified in configuration files.
+
+`This option is specific to the Swift storage backend.`
+
+Optional. Default: Not set.
+
+A string giving the region of the swift service endpoint to use. This
+setting is only used if swift_store_auth_version is ``2``. This
+setting is especially useful for disambiguation if multiple swift
+services might appear in a service catalog during authentication.
+
+* ``swift_store_endpoint_type``
+
+Can only be specified in configuration files.
+
+`This option is specific to the Swift storage backend.`
+
+Optional. Default: ``publicURL``
+
+A string giving the endpoint type of the swift service endpoint to
+use. This setting is only used if swift_store_auth_version is ``2``.
+
+* ``swift_store_ssl_compression``
+
+Can only be specified in configuration files.
+
+`This option is specific to the Swift storage backend.`
+
+Optional. Default: True.
+
+If set to False, disables SSL layer compression of https swift
+requests. Setting to 'False' may improve performance for images which
+are already in a compressed format, e.g. qcow2. If set to True then
+compression will be enabled (provided it is supported by the swift
+proxy).
+
+* ``swift_store_cacert``
+
+Can only be specified in configuration files.
+
+Optional. Default: ``None``
+
+A string giving the path to a CA certificate bundle that will allow Glance's
+services to perform SSL verification when communicating with Swift.
+
+* ``swift_store_retry_get_count``
+
+The number of times a Swift download will be retried before the request
+fails.
+
+Optional. Default: ``0``
+
+Configuring Multiple Swift Accounts/Stores
+------------------------------------------
+
+In order to not store Swift account credentials in the database, and to
+support multiple accounts (or multiple Swift backing stores), a
+reference is stored in the database and the corresponding configuration
+(credentials/parameters) details are stored in the configuration file.
+Optional. Default: not enabled.
+
+The location of this file is specified using the ``swift_store_config_file`` option
+in the section ``[DEFAULT]``. **If an incorrect value is specified, the Glance API Swift
+store service will not be configured.**
+
+* ``swift_store_config_file=PATH``
+
+`This option is specific to the Swift storage backend.`
+
+* ``default_swift_reference=DEFAULT_REFERENCE``
+
+Required when multiple Swift accounts/backing stores are configured.
+ +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +It is the default swift reference that is used to add any new images. +* ``swift_store_auth_insecure`` + +If True, bypass SSL certificate verification for Swift. + +Can only be specified in configuration files. + +`This option is specific to the Swift storage backend.` + +Optional. Default: ``False`` + +Configuring the S3 Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``s3_store_host=URL`` + +Required when using the S3 storage backend. + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Default: s3.amazonaws.com + +Sets the main service URL supplied to S3 when making calls to its storage +system. For more information about the S3 authentication system, please +see the `S3 documentation `_ + +* ``s3_store_access_key=ACCESS_KEY`` + +Required when using the S3 storage backend. + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Sets the access key to authenticate against the ``s3_store_host`` with. + +You should set this to your 20-character Amazon AWS access key. + +* ``s3_store_secret_key=SECRET_KEY`` + +Required when using the S3 storage backend. + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Sets the secret key to authenticate against the +``s3_store_host`` with for the access key ``s3_store_access_key``. + +You should set this to your 40-character Amazon AWS secret key. + +* ``s3_store_bucket=BUCKET`` + +Required when using the S3 storage backend. + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Sets the name of the bucket to use for Glance images in S3. + +Note that the namespace for S3 buckets is **global**, +therefore you must use a name for the bucket that is unique. It +is recommended that you use a combination of your AWS access key, +**lowercased** with "glance". + +For instance if your Amazon AWS access key is: + +``ABCDEFGHIJKLMNOPQRST`` + +then make your bucket value be: + +``abcdefghijklmnopqrstglance`` + +* ``s3_store_create_bucket_on_put`` + +Optional. Default: ``False`` + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +If true, Glance will attempt to create the bucket ``s3_store_bucket`` +if it does not exist. + +* ``s3_store_object_buffer_dir=PATH`` + +Optional. Default: ``the platform's default temporary directory`` + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +When sending images to S3, what directory should be +used to buffer the chunks? By default the platform's +temporary directory will be used. + +* ``s3_store_large_object_size=SIZE_IN_MB`` + +Optional. Default: ``100`` + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Size, in ``MB``, should S3 start chunking image files +and do a multipart upload in S3. + +* ``s3_store_large_object_chunk_size=SIZE_IN_MB`` + +Optional. Default: ``10`` + +Can only be specified in configuration files. + +`This option is specific to the S3 storage backend.` + +Multipart upload part size, in ``MB``, should S3 use +when uploading parts. The size must be greater than or +equal to 5MB. The default is 10MB. + +* ``s3_store_thread_pools=NUM`` + +Optional. Default: ``10`` + +Can only be specified in configuration files. 
+ +`This option is specific to the S3 storage backend.` + +The number of thread pools to perform a multipart upload +in S3. The default is 10. + +Configuring the RBD Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Note**: the RBD storage backend requires the python bindings for +librados and librbd. These are in the python-ceph package on +Debian-based distributions. + +* ``rbd_store_pool=POOL`` + +Optional. Default: ``rbd`` + +Can only be specified in configuration files. + +`This option is specific to the RBD storage backend.` + +Sets the RADOS pool in which images are stored. + +* ``rbd_store_chunk_size=CHUNK_SIZE_MB`` + +Optional. Default: ``4`` + +Can only be specified in configuration files. + +`This option is specific to the RBD storage backend.` + +Images will be chunked into objects of this size (in megabytes). +For best performance, this should be a power of two. + +* ``rbd_store_ceph_conf=PATH`` + +Optional. Default: ``/etc/ceph/ceph.conf``, ``~/.ceph/config``, and ``./ceph.conf`` + +Can only be specified in configuration files. + +`This option is specific to the RBD storage backend.` + +Sets the Ceph configuration file to use. + +* ``rbd_store_user=NAME`` + +Optional. Default: ``admin`` + +Can only be specified in configuration files. + +`This option is specific to the RBD storage backend.` + +Sets the RADOS user to authenticate as. This is only needed +when `RADOS authentication `_ +is `enabled. `_ + +A keyring must be set for this user in the Ceph +configuration file, e.g. with a user ``glance``:: + + [client.glance] + keyring=/etc/glance/rbd.keyring + +To set up a user named ``glance`` with minimal permissions, using a pool called +``images``, run:: + + rados mkpool images + ceph-authtool --create-keyring /etc/glance/rbd.keyring + ceph-authtool --gen-key --name client.glance --cap mon 'allow r' --cap osd 'allow rwx pool=images' /etc/glance/rbd.keyring + ceph auth add client.glance -i /etc/glance/rbd.keyring + +Configuring the Sheepdog Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``sheepdog_store_address=ADDR`` + +Optional. Default: ``localhost`` + +Can only be specified in configuration files. + +`This option is specific to the Sheepdog storage backend.` + +Sets the IP address of the sheep daemon + +* ``sheepdog_store_port=PORT`` + +Optional. Default: ``7000`` + +Can only be specified in configuration files. + +`This option is specific to the Sheepdog storage backend.` + +Sets the IP port of the sheep daemon + +* ``sheepdog_store_chunk_size=SIZE_IN_MB`` + +Optional. Default: ``64`` + +Can only be specified in configuration files. + +`This option is specific to the Sheepdog storage backend.` + +Images will be chunked into objects of this size (in megabytes). +For best performance, this should be a power of two. + +Configuring the Cinder Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Note**: Currently Cinder store is a partial implementation. +After Cinder expose 'brick' library, and 'Readonly-volume-attaching', +'volume-multiple-attaching' enhancement ready, the store will support +'Upload' and 'Download' interface finally. + +* ``cinder_catalog_info=::`` + +Optional. Default: ``volume:cinder:publicURL`` + +Can only be specified in configuration files. + +`This option is specific to the Cinder storage backend.` + +Sets the info to match when looking for cinder in the service catalog. +Format is : separated values of the form: :: + +* ``cinder_endpoint_template=http://ADDR:PORT/VERSION/%(project_id)s`` + +Optional. 
Default: ``None`` + +Can only be specified in configuration files. + +Override service catalog lookup with template for cinder endpoint. +e.g. http://localhost:8776/v1/%(project_id)s + +* ``os_region_name=REGION_NAME`` + +Optional. Default: ``None`` + +Can only be specified in configuration files. + +Region name of this node. + +* ``cinder_ca_certificates_file=CA_FILE_PATH`` + +Optional. Default: ``None`` + +Can only be specified in configuration files. + +Location of ca certicates file to use for cinder client requests. + +* ``cinder_http_retries=TIMES`` + +Optional. Default: ``3`` + +Can only be specified in configuration files. + +Number of cinderclient retries on failed http calls. + +* ``cinder_api_insecure=ON_OFF`` + +Optional. Default: ``False`` + +Can only be specified in configuration files. + +Allow to perform insecure SSL requests to cinder. + +Configuring the VMware Storage Backend +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``vmware_server_host=ADDRESS`` + +Required when using the VMware storage backend. + +Can only be specified in configuration files. + +Sets the address of the ESX/ESXi or vCenter Server target system. +The address can contain an IP (``127.0.0.1``), an IP and port +(``127.0.0.1:443``), a DNS name (``www.my-domain.com``) or DNS and port. + +`This option is specific to the VMware storage backend.` + +* ``vmware_server_username=USERNAME`` + +Required when using the VMware storage backend. + +Can only be specified in configuration files. + +Username for authenticating with VMware ESX/ESXi or vCenter Server. + +* ``vmware_server_password=PASSWORD`` + +Required when using the VMware storage backend. + +Can only be specified in configuration files. + +Password for authenticating with VMware ESX/ESXi or vCenter Server. + +* ``vmware_datacenter_path=DC_PATH`` + +Optional. Default: ``ha-datacenter`` + +Can only be specified in configuration files. + +Inventory path to a datacenter. If the ``vmware_server_host`` specified +is an ESX/ESXi, the ``vmware_datacenter_path`` is optional. If specified, +it should be ``ha-datacenter``. + +* ``vmware_datastore_name=DS_NAME`` + +Required when using the VMware storage backend. + +Can only be specified in configuration files. + +Datastore name associated with the ``vmware_datacenter_path`` + +* ``vmware_datastores`` + +Optional. Default: Not set. + +This option can only be specified in configuration file and is specific +to the VMware storage backend. + +vmware_datastores allows administrators to configure multiple datastores to +save glance image in the VMware store backend. The required format for the +option is: ::. + +where datacenter_path is the inventory path to the datacenter where the +datastore is located. An optional weight can be given to specify the priority. + +Example:: + + vmware_datastores = datacenter1:datastore1 + vmware_datastores = dc_folder/datacenter2:datastore2:100 + vmware_datastores = datacenter1:datastore3:200 + +**NOTE**: + + - This option can be specified multiple times to specify multiple datastores. + - Either vmware_datastore_name or vmware_datastores option must be specified + in glance-api.conf + - Datastore with weight 200 has precedence over datastore with weight 100. + - If no weight is specified, default weight '0' is associated with it. + - If two datastores have same weight, the datastore with maximum free space + will be chosen to store the image. + - If the datacenter path or datastore name contains a colon (:) symbol, it + must be escaped with a backslash. 
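+
+As an illustrative sketch of the escaping rule above (the datacenter and
+datastore names are hypothetical), a datastore named ``my:datastore`` in
+``datacenter1`` would have its colon escaped with a backslash::
+
+    vmware_datastores = datacenter1:my\:datastore:100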
+ +* ``vmware_api_retry_count=TIMES`` + +Optional. Default: ``10`` + +Can only be specified in configuration files. + +The number of times VMware ESX/VC server API must be +retried upon connection related issues. + +* ``vmware_task_poll_interval=SECONDS`` + +Optional. Default: ``5`` + +Can only be specified in configuration files. + +The interval used for polling remote tasks invoked on VMware ESX/VC server. + +* ``vmware_store_image_dir`` + +Optional. Default: ``/openstack_glance`` + +Can only be specified in configuration files. + +The path to access the folder where the images will be stored in the datastore. + +* ``vmware_api_insecure=ON_OFF`` + +Optional. Default: ``False`` + +Can only be specified in configuration files. + +Allow to perform insecure SSL requests to ESX/VC server. + +Configuring the Storage Endpoint +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``swift_store_endpoint=URL`` + +Optional. Default: ``None`` + +Can only be specified in configuration files. + +Overrides the storage URL returned by auth. The URL should include the +path up to and excluding the container. The location of an object is +obtained by appending the container and object to the configured URL. +e.g. ``https://www.my-domain.com/v1/path_up_to_container`` + +Configuring the Image Cache +--------------------------- + +Glance API servers can be configured to have a local image cache. Caching of +image files is transparent and happens using a piece of middleware that can +optionally be placed in the server application pipeline. + +This pipeline is configured in the PasteDeploy configuration file, +-paste.ini. You should not generally have to edit this file +directly, as it ships with ready-made pipelines for all common deployment +flavors. + +Enabling the Image Cache Middleware +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To enable the image cache middleware, the cache middleware must occur in +the application pipeline **after** the appropriate context middleware. + +The cache middleware should be in your ``glance-api-paste.ini`` in a section +titled ``[filter:cache]``. It should look like this:: + + [filter:cache] + paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory + +A ready-made application pipeline including this filter is defined in +the ``glance-api-paste.ini`` file, looking like so:: + + [pipeline:glance-api-caching] + pipeline = versionnegotiation context cache apiv1app + +To enable the above application pipeline, in your main ``glance-api.conf`` +configuration file, select the appropriate deployment flavor like so:: + + [paste_deploy] + flavor = caching + +Enabling the Image Cache Management Middleware +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There is an optional ``cachemanage`` middleware that allows you to +directly interact with cache images. Use this flavor in place of the +``cache`` flavor in your api config file. + + [paste_deploy] + flavor = cachemanage + +Configuration Options Affecting the Image Cache +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + + These configuration options must be set in both the glance-cache + and glance-api configuration files. + + +One main configuration file option affects the image cache. + + * ``image_cache_dir=PATH`` + +Required when image cache middleware is enabled. + +Default: ``/var/lib/glance/image-cache`` + +This is the base directory the image cache can write files to. +Make sure the directory is writeable by the user running the +``glance-api`` server + + * ``image_cache_driver=DRIVER`` + +Optional. 
Choice of ``sqlite`` or ``xattr`` + +Default: ``sqlite`` + +The default ``sqlite`` cache driver has no special dependencies, other +than the ``python-sqlite3`` library, which is installed on virtually +all operating systems with modern versions of Python. It stores +information about the cached files in a SQLite database. + +The ``xattr`` cache driver required the ``python-xattr>=0.6.0`` library +and requires that the filesystem containing ``image_cache_dir`` have +access times tracked for all files (in other words, the noatime option +CANNOT be set for that filesystem). In addition, ``user_xattr`` must be +set on the filesystem's description line in fstab. Because of these +requirements, the ``xattr`` cache driver is not available on Windows. + + * ``image_cache_sqlite_db=DB_FILE`` + +Optional. + +Default: ``cache.db`` + +When using the ``sqlite`` cache driver, you can set the name of the database +that will be used to store the cached images information. The database +is always contained in the ``image_cache_dir``. + + * ``image_cache_max_size=SIZE`` + +Optional. + +Default: ``10737418240`` (10 GB) + +Size, in bytes, that the image cache should be constrained to. Images files +are cached automatically in the local image cache, even if the writing of that +image file would put the total cache size over this size. The +``glance-cache-pruner`` executable is what prunes the image cache to be equal +to or less than this value. The ``glance-cache-pruner`` executable is designed +to be run via cron on a regular basis. See more about this executable in +:doc:`Controlling the Growth of the Image Cache ` + + +Configuring the Glance Registry +------------------------------- + +There are a number of configuration options in Glance that control how +this registry server operates. These configuration options are specified in the +``glance-registry.conf`` config file in the section ``[DEFAULT]``. + +**IMPORTANT NOTE**: The glance-registry service is only used in conjunction +with the glance-api service when clients are using the v1 REST API. See +`Configuring Glance APIs`_ for more info. + +* ``sql_connection=CONNECTION_STRING`` (``--sql-connection`` when specified + on command line) + +Optional. Default: ``None`` + +Can be specified in configuration files. Can also be specified on the +command-line for the ``glance-manage`` program. + +Sets the SQLAlchemy connection string to use when connecting to the registry +database. Please see the documentation for +`SQLAlchemy connection strings `_ +online. You must urlencode any special characters in CONNECTION_STRING. + +* ``sql_timeout=SECONDS`` + on command line) + +Optional. Default: ``3600`` + +Can only be specified in configuration files. + +Sets the number of seconds after which SQLAlchemy should reconnect to the +datastore if no activity has been made on the connection. + +* ``enable_v1_registry=`` + +Optional. Default: ``True`` + +* ``enable_v2_registry=`` + +Optional. Default: ``True`` + +Defines which version(s) of the Registry API will be enabled. +If the Glance API server parameter ``enable_v1_api`` has been set to ``True`` the +``enable_v1_registry`` has to be ``True`` as well. +If the Glance API server parameter ``enable_v2_api`` has been set to ``True`` and +the parameter ``data_api`` has been set to ``glance.db.registry.api`` the +``enable_v2_registry`` has to be set to ``True`` + + +Configuring Notifications +------------------------- + +Glance can optionally generate notifications to be logged or sent to +a message queue. 
The configuration options are specified in the +``glance-api.conf`` config file in the section ``[DEFAULT]``. + +* ``notification_driver`` + +Optional. Default: ``noop`` + +Sets the notification driver used by oslo.messaging. Options include +``messaging``, ``messagingv2``, ``log`` and ``routing``. + +For more information see :doc:`Glance notifications ` and +`oslo.messaging `_. + +* ``disabled_notifications`` + +Optional. Default: ``[]`` + +List of disabled notifications. A notification can be given either as a +notification type to disable a single event, or as a notification group prefix +to disable all events within a group. + +Example: if this config option is set to ["image.create", "metadef_namespace"], +then "image.create" notification will not be sent after image is created and +none of the notifications for metadefinition namespaces will be sent. + +Configuring Glance Property Protections +--------------------------------------- + +Access to image meta properties may be configured using a +:doc:`Property Protections Configuration file `. The +location for this file can be specified in the ``glance-api.conf`` config file +in the section ``[DEFAULT]``. **If an incorrect value is specified, glance api +service will not start.** + +* ``property_protection_file=PATH`` + +Optional. Default: not enabled. + +If property_protection_file is set, the file may use either roles or policies +to specify property protections. + +* ``property_protection_rule_format=`` + +Optional. Default: ``roles``. + +Configuring Glance APIs +----------------------- + +The glance-api service implents versions 1 and 2 of the OpenStack +Images API. Disable either version of the Images API using the +following options: + +* ``enable_v1_api=`` + +Optional. Default: ``True`` + +* ``enable_v2_api=`` + +Optional. Default: ``True`` + +**IMPORTANT NOTE**: The v1 API is implemented on top of the +glance-registry service while the v2 API is not. This means that +in order to use the v2 API, you must copy the necessary sql +configuration from your glance-registry service to your +glance-api configuration file. + +Configuring Glance Tasks +------------------------ + +Glance Tasks are implemented only for version 2 of the OpenStack Images API. + +The config value ``task_time_to_live`` is used to determine how long a task +would be visible to the user after transitioning to either the ``success`` or +the ``failure`` state. + +* ``task_time_to_live=`` + +Optional. Default: ``48`` + +The config value ``task_executor`` is used to determine which executor +should be used by the Glance service to process the task. The currently +available implementation is: ``taskflow``. + +* ``task_executor=`` + +Optional. Default: ``taskflow`` + +The ``taskflow`` engine has its own set of configuration options, +under the ``taskflow_executor`` section, that can be tuned to improve +the task execution process. Among the available options, you may find +``engine_mode`` and ``max_workers``. The former allows for selecting +an execution model and the available options are ``serial``, +``parallel`` and ``worker-based``. The ``max_workers`` option, +instead, allows for controlling the number of workers that will be +instantiated per executor instance. + +The default value for the ``engine_mode`` is ``parallel``, whereas +the default number of ``max_workers`` is ``10``. 
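+
+As a minimal sketch (the values shown are simply the documented defaults),
+these options would be set in the glance-api configuration file like so::
+
+    [taskflow_executor]
+    engine_mode = parallel
+    max_workers = 10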
+
+Configuring Glance performance profiling
+----------------------------------------
+
+Glance supports using osprofiler to trace the performance of key internal
+operations, including RESTful API calls, database operations, and so on.
+
+``Please be aware that Glance performance profiling is currently a work in
+progress feature.`` Although some trace points are available, e.g. API
+execution profiling at the wsgi main entry and SQL execution profiling in the
+DB module, more fine-grained trace points are still being worked on.
+
+The config value ``enabled`` is used to determine whether to fully enable the
+profiling feature for the glance-api and glance-registry services.
+
+* ``enabled=``
+
+Optional. Default: ``True``
+
+The config value ``trace_sqlalchemy`` is used to determine whether to fully
+enable SQLAlchemy engine based SQL execution profiling for the glance-api and
+glance-registry services.
+
+* ``trace_sqlalchemy=``
+
+Optional. Default: ``True``
+
+**IMPORTANT NOTE**: The HMAC key used for encrypting context data for
+performance profiling is configured in the paste config files of the
+glance-api and glance-registry services separately; by default these are
+located at /etc/glance/api-paste.ini and /etc/glance/registry-paste.ini.
+In order to make profiling work as designed, the operator needs to make the
+HMAC key values consistent for all services in the deployment. Without an
+HMAC key, profiling will not be triggered even if the profiling feature is
+enabled.
+
+Configuring Glance public endpoint
+----------------------------------
+
+This setting allows an operator to configure the endpoint URL that will
+appear in the Glance "versions" response (that is, the response to
+``GET /``\ ). This can be necessary when the Glance API service is run
+behind a proxy because the default endpoint displayed in the versions
+response is that of the host actually running the API service. If
+Glance is being run behind a load balancer, for example, direct access
+to individual hosts running the Glance API may not be allowed, hence the
+load balancer URL would be used for this value.
+
+* ``public_endpoint=``
+
+Optional. Default: ``None``
+
+Configuring Glance digest algorithm
+-----------------------------------
+
+Digest algorithm which will be used for digital signature; the default is
+sha1 for a smooth upgrade process but the recommended value is sha256. Use the
+command::
+
+  openssl list-message-digest-algorithms
+
+to get the available algorithms supported by the version of OpenSSL on the
+platform. Examples are "sha1", "sha256", "sha512", etc. If an invalid
+digest algorithm is configured, all digital signature operations will fail and
+return a ValueError exception with a "No such digest method" error.
+
+* ``digest_algorithm=``
+
+Optional. Default: ``sha1``
+
+Configuring http_keepalive option
+----------------------------------
+
+* ``http_keepalive=``
+
+If False, the server will return the header "Connection: close"; if True, the
+server will return "Connection: Keep-Alive" in its responses. In order to close
+the client socket connection explicitly after the response is sent and read
+successfully by the client, you simply have to set this option to False when
+you create a wsgi server.
diff --git a/code/daisy/doc/source/controllingservers.rst b/code/daisy/doc/source/controllingservers.rst
new file mode 100755
index 00000000..1bc3b885
--- /dev/null
+++ b/code/daisy/doc/source/controllingservers.rst
@@ -0,0 +1,237 @@
+..
+  Copyright 2011 OpenStack Foundation
+  All Rights Reserved.
+ + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Controlling Glance Servers +========================== + +This section describes the ways to start, stop, and reload Glance's server +programs. + +Starting a server +----------------- + +There are two ways to start a Glance server (either the API server or the +registry server): + +* Manually calling the server program + +* Using the ``glance-control`` server daemon wrapper program + +We recommend using the second method. + +Manually starting the server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The first is by directly calling the server program, passing in command-line +options and a single argument for a ``paste.deploy`` configuration file to +use when configuring the server application. + +.. note:: + + Glance ships with an ``etc/`` directory that contains sample ``paste.deploy`` + configuration files that you can copy to a standard configuation directory and + adapt for your own uses. Specifically, bind_host must be set properly. + +If you do `not` specify a configuration file on the command line, Glance will +do its best to locate a configuration file in one of the +following directories, stopping at the first config file it finds: + +* ``$CWD`` +* ``~/.glance`` +* ``~/`` +* ``/etc/glance`` +* ``/etc`` + +The filename that is searched for depends on the server application name. So, +if you are starting up the API server, ``glance-api.conf`` is searched for, +otherwise ``glance-registry.conf``. + +If no configuration file is found, you will see an error, like:: + + $> glance-api + ERROR: Unable to locate any configuration file. 
Cannot load application glance-api + +Here is an example showing how you can manually start the ``glance-api`` server and ``glance-registry`` in a shell.:: + + $ sudo glance-api --config-file glance-api.conf --debug & + jsuh@mc-ats1:~$ 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** + 2011-04-13 14:50:12 DEBUG [glance-api] Configuration options gathered from config file: + 2011-04-13 14:50:12 DEBUG [glance-api] /home/jsuh/glance-api.conf + 2011-04-13 14:50:12 DEBUG [glance-api] ================================================ + 2011-04-13 14:50:12 DEBUG [glance-api] bind_host 65.114.169.29 + 2011-04-13 14:50:12 DEBUG [glance-api] bind_port 9292 + 2011-04-13 14:50:12 DEBUG [glance-api] debug True + 2011-04-13 14:50:12 DEBUG [glance-api] default_store file + 2011-04-13 14:50:12 DEBUG [glance-api] filesystem_store_datadir /home/jsuh/images/ + 2011-04-13 14:50:12 DEBUG [glance-api] registry_host 65.114.169.29 + 2011-04-13 14:50:12 DEBUG [glance-api] registry_port 9191 + 2011-04-13 14:50:12 DEBUG [glance-api] verbose False + 2011-04-13 14:50:12 DEBUG [glance-api] ******************************************************************************** + 2011-04-13 14:50:12 DEBUG [routes.middleware] Initialized with method overriding = True, and path info altering = True + 2011-04-13 14:50:12 DEBUG [eventlet.wsgi.server] (21354) wsgi starting up on http://65.114.169.29:9292/ + + $ sudo glance-registry --config-file glance-registry.conf & + jsuh@mc-ats1:~$ 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("images") + 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'name', u'VARCHAR(255)', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'disk_format', u'VARCHAR(20)', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'container_format', u'VARCHAR(20)', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (8, u'size', u'INTEGER', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (9, u'status', u'VARCHAR(30)', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (10, u'is_public', u'BOOLEAN', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (11, u'location', u'TEXT', 0, None, 0) + 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] PRAGMA table_info("image_properties") + 2011-04-13 14:51:16 INFO [sqlalchemy.engine.base.Engine.0x...feac] () + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Col ('cid', 'name', 'type', 'notnull', 'dflt_value', 'pk') + 2011-04-13 
14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (0, u'created_at', u'DATETIME', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (1, u'updated_at', u'DATETIME', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (2, u'deleted_at', u'DATETIME', 0, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (3, u'deleted', u'BOOLEAN', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (4, u'id', u'INTEGER', 1, None, 1) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (5, u'image_id', u'INTEGER', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (6, u'key', u'VARCHAR(255)', 1, None, 0) + 2011-04-13 14:51:16 DEBUG [sqlalchemy.engine.base.Engine.0x...feac] Row (7, u'value', u'TEXT', 0, None, 0) + + $ ps aux | grep glance + root 20009 0.7 0.1 12744 9148 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-api glance-api.conf --debug + root 20012 2.0 0.1 25188 13356 pts/1 S 12:47 0:00 /usr/bin/python /usr/bin/glance-registry glance-registry.conf + jsuh 20017 0.0 0.0 3368 744 pts/1 S+ 12:47 0:00 grep glance + +Simply supply the configuration file as the parameter to the ``--config-file`` option +(the ``etc/glance-api.conf`` and ``etc/glance-registry.conf`` sample configuration +files were used in the above example) and then any other options +you want to use. (``--debug`` was used above to show some of the debugging +output that the server shows when starting up. Call the server program +with ``--help`` to see all available options you can specify on the +command line.) + +For more information on configuring the server via the ``paste.deploy`` +configuration files, see the section entitled +:doc:`Configuring Glance servers ` + +Note that the server `daemonizes` itself by using the standard +shell backgrounding indicator, ``&``, in the previous example. For most use cases, we recommend +using the ``glance-control`` server daemon wrapper for daemonizing. See below +for more details on daemonization with ``glance-control``. + +Using the ``glance-control`` program to start the server +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The second way to start up a Glance server is to use the ``glance-control`` +program. ``glance-control`` is a wrapper script that allows the user to +start, stop, restart, and reload the other Glance server programs in +a fashion that is more conducive to automation and scripting. + +Servers started via the ``glance-control`` program are always `daemonized`, +meaning that the server program process runs in the background. + +To start a Glance server with ``glance-control``, simply call +``glance-control`` with a server and the word "start", followed by +any command-line options you wish to provide. Start the server with ``glance-control`` +in the following way:: + + $> sudo glance-control [OPTIONS] start [CONFPATH] + +.. note:: + + You must use the ``sudo`` program to run ``glance-control`` currently, as the + pid files for the server programs are written to /var/run/glance/ + +Here is an example that shows how to start the ``glance-registry`` server +with the ``glance-control`` wrapper script. 
:: + + + $ sudo glance-control api start glance-api.conf + Starting glance-api with /home/jsuh/glance.conf + + $ sudo glance-control registry start glance-registry.conf + Starting glance-registry with /home/jsuh/glance.conf + + $ ps aux | grep glance + root 20038 4.0 0.1 12728 9116 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-api /home/jsuh/glance-api.conf + root 20039 6.0 0.1 25188 13356 ? Ss 12:51 0:00 /usr/bin/python /usr/bin/glance-registry /home/jsuh/glance-registry.conf + jsuh 20042 0.0 0.0 3368 744 pts/1 S+ 12:51 0:00 grep glance + + +The same configuration files are used by ``glance-control`` to start the +Glance server programs, and you can specify (as the example above shows) +a configuration file when starting the server. + + +In order for your launched glance service to be monitored for unexpected death +and respawned if necessary, use the following option: + + + $ sudo glance-control [service] start --respawn ... + + +Note that this will cause ``glance-control`` itself to remain running. Also note +that deliberately stopped services are not respawned, neither are rapidly bouncing +services (where process death occurred within one second of the last launch). + + +By default, output from glance services is discarded when launched with ``glance-control``. +In order to capture such output via syslog, use the following option: + + + $ sudo glance-control --capture-output ... + + +Stopping a server +----------------- + +If you started a Glance server manually and did not use the ``&`` backgrounding +function, simply send a terminate signal to the server process by typing +``Ctrl-C`` + +If you started the Glance server using the ``glance-control`` program, you can +use the ``glance-control`` program to stop it. Simply do the following:: + + $> sudo glance-control stop + +as this example shows:: + + $> sudo glance-control registry stop + Stopping glance-registry pid: 17602 signal: 15 + +Restarting a server +------------------- + +You can restart a server with the ``glance-control`` program, as demonstrated +here:: + + $> sudo glance-control registry restart etc/glance-registry.conf + Stopping glance-registry pid: 17611 signal: 15 + Starting glance-registry with /home/jpipes/repos/glance/trunk/etc/glance-registry.conf + +Reloading a server +------------------- + +You can reload a server with the ``glance-control`` program, as demonstrated +here:: + + $> sudo glance-control api reload + Reloading glance-api (pid 18506) with signal(1) + +A reload sends a SIGHUP signal to the master process and causes new configuration +settings to be picked up without any interruption to the running service (provided +neither bind_host or bind_port has changed). diff --git a/code/daisy/doc/source/daisyapi.rst b/code/daisy/doc/source/daisyapi.rst new file mode 100755 index 00000000..5ad1e147 --- /dev/null +++ b/code/daisy/doc/source/daisyapi.rst @@ -0,0 +1,711 @@ +.. + Copyright 2010 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ +Using Glance's Image Public APIs +================================ + +Glance fully implements versions 1.0, 1.1 and 2.0 of the OpenStack Images API. +The Images API specification is developed alongside Glance, but is not +considered part of the Glance project. + +Authentication +-------------- + +Glance depends on Keystone and the OpenStack Identity API to handle +authentication of clients. You must obtain an authentication token from +Keystone using and send it along with all API requests to Glance through +the ``X-Auth-Token`` header. Glance will communicate back to Keystone to +verify the token validity and obtain your identity credentials. + +See :doc:`authentication` for more information on integrating with Keystone. + +Using v1.X +---------- + +For the purpose of examples, assume there is a Glance API server running +at the URL ``http://glance.example.com`` on the default port 80. + +List Available Images +********************* + +We want to see a list of available images that the authenticated user has +access to. This includes images owned by the user, images shared with the user +and public images. + +We issue a ``GET`` request to ``http://glance.example.com/v1/images`` to +retrieve this list of available images. The data is returned as a JSON-encoded +mapping in the following format:: + + {'images': [ + {'uri': 'http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', + 'name': 'Ubuntu 10.04 Plain', + 'disk_format': 'vhd', + 'container_format': 'ovf', + 'size': '5368709120'} + ...]} + + +List Available Images in More Detail +************************************ + +We want to see a more detailed list of available images that the authenticated +user has access to. This includes images owned by the user, images shared with +the user and public images. + +We issue a ``GET`` request to ``http://glance.example.com/v1/images/detail`` to +retrieve this list of available images. The data is returned as a +JSON-encoded mapping in the following format:: + + {'images': [ + {'uri': 'http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9', + 'name': 'Ubuntu 10.04 Plain 5GB', + 'disk_format': 'vhd', + 'container_format': 'ovf', + 'size': '5368709120', + 'checksum': 'c2e5db72bd7fd153f53ede5da5a06de3', + 'created_at': '2010-02-03 09:34:01', + 'updated_at': '2010-02-03 09:34:01', + 'deleted_at': '', + 'status': 'active', + 'is_public': true, + 'min_ram': 256, + 'min_disk': 5, + 'owner': null, + 'properties': {'distro': 'Ubuntu 10.04 LTS'}}, + ...]} + +.. 
note:: + + All timestamps returned are in UTC + + The `updated_at` timestamp is the timestamp when an image's metadata + was last updated, not its image data, as all image data is immutable + once stored in Glance + + The `properties` field is a mapping of free-form key/value pairs that + have been saved with the image metadata + + The `checksum` field is an MD5 checksum of the image file data + + The `is_public` field is a boolean indicating whether the image is + publicly available + + The `min_ram` field is an integer specifying the minimum amount of + ram needed to run this image on an instance, in megabytes + + The `min_disk` field is an integer specifying the minimum amount of + disk space needed to run this image on an instance, in gigabytes + + The `owner` field is a string which may either be null or which will + indicate the owner of the image + +Filtering Images Lists +********************** + +Both the ``GET /v1/images`` and ``GET /v1/images/detail`` requests take query +parameters that serve to filter the returned list of images. The following +list details these query parameters. + +* ``name=NAME`` + + Filters images having a ``name`` attribute matching ``NAME``. + +* ``container_format=FORMAT`` + + Filters images having a ``container_format`` attribute matching ``FORMAT`` + + For more information, see :doc:`About Disk and Container Formats ` + +* ``disk_format=FORMAT`` + + Filters images having a ``disk_format`` attribute matching ``FORMAT`` + + For more information, see :doc:`About Disk and Container Formats ` + +* ``status=STATUS`` + + Filters images having a ``status`` attribute matching ``STATUS`` + + For more information, see :doc:`About Image Statuses ` + +* ``size_min=BYTES`` + + Filters images having a ``size`` attribute greater than or equal to ``BYTES`` + +* ``size_max=BYTES`` + + Filters images having a ``size`` attribute less than or equal to ``BYTES`` + +These two resources also accept additional query parameters: + +* ``sort_key=KEY`` + + Results will be ordered by the specified image attribute ``KEY``. Accepted + values include ``id``, ``name``, ``status``, ``disk_format``, + ``container_format``, ``size``, ``created_at`` (default) and ``updated_at``. + +* ``sort_dir=DIR`` + + Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` + for ascending or ``desc`` (default) for descending. + +* ``marker=ID`` + + An image identifier marker may be specified. When present only images which + occur after the identifier ``ID`` will be listed, i.e. the images which have + a `sort_key` later than that of the marker ``ID`` in the `sort_dir` direction. + +* ``limit=LIMIT`` + + When present the maximum number of results returned will not exceed ``LIMIT``. + +.. note:: + + If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) + then the number of results returned may be less than ``LIMIT``. + +* ``is_public=PUBLIC`` + + An admin user may use the `is_public` parameter to control which results are + returned. + + When the `is_public` parameter is absent or set to `True` the following images + will be listed: Images whose `is_public` field is `True`, owned images and + shared images. + + When the `is_public` parameter is set to `False` the following images will be + listed: Images (owned, shared, or non-owned) whose `is_public` field is `False`. + + When the `is_public` parameter is set to `None` all images will be listed + irrespective of owner, shared status or the `is_public` field. + +.. 
note:: + + Use of the `is_public` parameter is restricted to admin users. For all other + users it will be ignored. + +Retrieve Image Metadata +*********************** + +We want to see detailed information for a specific virtual machine image +that the Glance server knows about. + +We have queried the Glance server for a list of images and the +data returned includes the `uri` field for each available image. This +`uri` field value contains the exact location needed to get the metadata +for a specific image. + +Continuing the example from above, in order to get metadata about the +first image returned, we can issue a ``HEAD`` request to the Glance +server for the image's URI. + +We issue a ``HEAD`` request to +``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to +retrieve complete metadata for that image. The metadata is returned as a +set of HTTP headers that begin with the prefix ``x-image-meta-``. The +following shows an example of the HTTP headers returned from the above +``HEAD`` request:: + + x-image-meta-uri http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 + x-image-meta-name Ubuntu 10.04 Plain 5GB + x-image-meta-disk_format vhd + x-image-meta-container_format ovf + x-image-meta-size 5368709120 + x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 + x-image-meta-created_at 2010-02-03 09:34:01 + x-image-meta-updated_at 2010-02-03 09:34:01 + x-image-meta-deleted_at + x-image-meta-status available + x-image-meta-is_public true + x-image-meta-min_ram 256 + x-image-meta-min_disk 0 + x-image-meta-owner null + x-image-meta-property-distro Ubuntu 10.04 LTS + +.. note:: + + All timestamps returned are in UTC + + The `x-image-meta-updated_at` timestamp is the timestamp when an + image's metadata was last updated, not its image data, as all + image data is immutable once stored in Glance + + There may be multiple headers that begin with the prefix + `x-image-meta-property-`. These headers are free-form key/value pairs + that have been saved with the image metadata. The key is the string + after `x-image-meta-property-` and the value is the value of the header + + The response's `ETag` header will always be equal to the + `x-image-meta-checksum` value + + The response's `x-image-meta-is_public` value is a boolean indicating + whether the image is publicly available + + The response's `x-image-meta-owner` value is a string which may either + be null or which will indicate the owner of the image + + +Retrieve Raw Image Data +*********************** + +We want to retrieve that actual raw data for a specific virtual machine image +that the Glance server knows about. + +We have queried the Glance server for a list of images and the +data returned includes the `uri` field for each available image. This +`uri` field value contains the exact location needed to get the metadata +for a specific image. + +Continuing the example from above, in order to get metadata about the +first image returned, we can issue a ``HEAD`` request to the Glance +server for the image's URI. + +We issue a ``GET`` request to +``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9`` to +retrieve metadata for that image as well as the image itself encoded +into the response body. + +The metadata is returned as a set of HTTP headers that begin with the +prefix ``x-image-meta-``. 
The following shows an example of the HTTP headers +returned from the above ``GET`` request:: + + x-image-meta-uri http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9 + x-image-meta-name Ubuntu 10.04 Plain 5GB + x-image-meta-disk_format vhd + x-image-meta-container_format ovf + x-image-meta-size 5368709120 + x-image-meta-checksum c2e5db72bd7fd153f53ede5da5a06de3 + x-image-meta-created_at 2010-02-03 09:34:01 + x-image-meta-updated_at 2010-02-03 09:34:01 + x-image-meta-deleted_at + x-image-meta-status available + x-image-meta-is_public true + x-image-meta-min_ram 256 + x-image-meta-min_disk 5 + x-image-meta-owner null + x-image-meta-property-distro Ubuntu 10.04 LTS + +.. note:: + + All timestamps returned are in UTC + + The `x-image-meta-updated_at` timestamp is the timestamp when an + image's metadata was last updated, not its image data, as all + image data is immutable once stored in Glance + + There may be multiple headers that begin with the prefix + `x-image-meta-property-`. These headers are free-form key/value pairs + that have been saved with the image metadata. The key is the string + after `x-image-meta-property-` and the value is the value of the header + + The response's `Content-Length` header shall be equal to the value of + the `x-image-meta-size` header + + The response's `ETag` header will always be equal to the + `x-image-meta-checksum` value + + The response's `x-image-meta-is_public` value is a boolean indicating + whether the image is publicly available + + The response's `x-image-meta-owner` value is a string which may either + be null or which will indicate the owner of the image + + The image data itself will be the body of the HTTP response returned + from the request, which will have content-type of + `application/octet-stream`. + + +Add a New Image +*************** + +We have created a new virtual machine image in some way (created a +"golden image" or snapshotted/backed up an existing image) and we +wish to do two things: + + * Store the disk image data in Glance + * Store metadata about this image in Glance + +We can do the above two activities in a single call to the Glance API. +Assuming, like in the examples above, that a Glance API server is running +at ``glance.example.com``, we issue a ``POST`` request to add an image to +Glance:: + + POST http://glance.example.com/v1/images + +The metadata about the image is sent to Glance in HTTP headers. The body +of the HTTP request to the Glance API will be the MIME-encoded disk +image data. + + +Reserve a New Image +******************* + +We can also perform the activities described in `Add a New Image`_ using two +separate calls to the Image API; the first to register the image metadata, and +the second to add the image disk data. This is known as "reserving" an image. + +The first call should be a ``POST`` to ``http://glance.example.com/v1/images``, +which will result in a new image id being registered with a status of +``queued``:: + + {"image": + {"status": "queued", + "id": "71c675ab-d94f-49cd-a114-e12490b328d9", + ...} + ...} + +The image data can then be added using a ``PUT`` to +``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9``. +The image status will then be set to ``active`` by Glance. + + +**Image Metadata in HTTP Headers** + +Glance will view as image metadata any HTTP header that it receives in a +``POST`` request where the header key is prefixed with the strings +``x-image-meta-`` and ``x-image-meta-property-``. 
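+
+For illustration only, such a request might be composed with ``curl`` roughly
+as follows; the token variable, file name, and header values here are
+assumptions made for this sketch rather than part of the API contract::
+
+    curl -i -X POST \
+         -H "X-Auth-Token: $TOKEN" \
+         -H "Content-Type: application/octet-stream" \
+         -H "x-image-meta-name: Ubuntu 10.04 Plain 5GB" \
+         -H "x-image-meta-disk_format: qcow2" \
+         -H "x-image-meta-container_format: bare" \
+         -H "x-image-meta-is_public: true" \
+         --data-binary @ubuntu-10.04.qcow2 \
+         http://glance.example.com/v1/images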
+ +The list of metadata headers that Glance accepts are listed below. + +* ``x-image-meta-name`` + + This header is required, unless reserving an image. Its value should be the + name of the image. + + Note that the name of an image *is not unique to a Glance node*. It + would be an unrealistic expectation of users to know all the unique + names of all other user's images. + +* ``x-image-meta-id`` + + This header is optional. + + When present, Glance will use the supplied identifier for the image. + If the identifier already exists in that Glance node, then a + **409 Conflict** will be returned by Glance. The value of the header + must be an uuid in hexadecimal string notation + (i.e. 71c675ab-d94f-49cd-a114-e12490b328d9). + + When this header is *not* present, Glance will generate an identifier + for the image and return this identifier in the response (see below) + +* ``x-image-meta-store`` + + This header is optional. Valid values are one of ``file``, ``s3``, ``rbd``, + ``swift``, ``cinder``, ``gridfs``, ``sheepdog`` or ``vsphere`` + + When present, Glance will attempt to store the disk image data in the + backing store indicated by the value of the header. If the Glance node + does not support the backing store, Glance will return a **400 Bad Request**. + + When not present, Glance will store the disk image data in the backing + store that is marked default. See the configuration option ``default_store`` + for more information. + +* ``x-image-meta-disk_format`` + + This header is required, unless reserving an image. Valid values are one of + ``aki``, ``ari``, ``ami``, ``raw``, ``iso``, ``vhd``, ``vdi``, ``qcow2``, or + ``vmdk``. + + For more information, see :doc:`About Disk and Container Formats ` + +* ``x-image-meta-container_format`` + + This header is required, unless reserving an image. Valid values are one of + ``aki``, ``ari``, ``ami``, ``bare``, or ``ovf``. + + For more information, see :doc:`About Disk and Container Formats ` + +* ``x-image-meta-size`` + + This header is optional. + + When present, Glance assumes that the expected size of the request body + will be the value of this header. If the length in bytes of the request + body *does not match* the value of this header, Glance will return a + **400 Bad Request**. + + When not present, Glance will calculate the image's size based on the size + of the request body. + +* ``x-image-meta-checksum`` + + This header is optional. When present it shall be the expected **MD5** + checksum of the image file data. + + When present, Glance will verify the checksum generated from the backend + store when storing your image against this value and return a + **400 Bad Request** if the values do not match. + +* ``x-image-meta-is_public`` + + This header is optional. + + When Glance finds the string "true" (case-insensitive), the image is marked as + a public image, meaning that any user may view its metadata and may read + the disk image from Glance. + + When not present, the image is assumed to be *not public* and owned by + a user. + +* ``x-image-meta-min_ram`` + + This header is optional. When present it shall be the expected minimum ram + required in megabytes to run this image on a server. + + When not present, the image is assumed to have a minimum ram requirement of 0. + +* ``x-image-meta-min_disk`` + + This header is optional. When present it shall be the expected minimum disk + space required in gigabytes to run this image on a server. + + When not present, the image is assumed to have a minimum disk space requirement of 0. 
+
+* ``x-image-meta-owner``
+
+  This header is optional and only meaningful for admins.
+
+  Glance normally sets the owner of an image to be the tenant or user
+  (depending on the "owner_is_tenant" configuration option) of the
+  authenticated user issuing the request. However, if the authenticated user
+  has the Admin role, this default may be overridden by setting this header to
+  null or to a string identifying the owner of the image.
+
+* ``x-image-meta-property-*``
+
+  When Glance receives any HTTP header whose key begins with the string prefix
+  ``x-image-meta-property-``, Glance adds the key and value to a set of custom,
+  free-form image properties stored with the image. The key is the
+  lower-cased string following the prefix ``x-image-meta-property-`` with dashes
+  and punctuation replaced with underscores.
+
+  For example, if the following HTTP header were sent::
+
+    x-image-meta-property-distro  Ubuntu 10.10
+
+  Then a key/value pair of "distro"/"Ubuntu 10.10" will be stored with the
+  image in Glance.
+
+  There is no limit on the number of free-form key/value attributes that can
+  be attached to the image. However, keep in mind that the 8K limit on the
+  size of all HTTP headers sent in a request will effectively limit the number
+  of image properties.
+
+
+Update an Image
+***************
+
+Glance will view as image metadata any HTTP header that it receives in a
+``PUT`` request where the header key is prefixed with the strings
+``x-image-meta-`` and ``x-image-meta-property-``.
+
+If an image was previously reserved, and thus is in the ``queued`` state, then
+image data can be added by including it as the request body. If the image
+already has data associated with it (e.g. not in the ``queued`` state), then
+including a request body will result in a **409 Conflict** exception.
+
+On success, the ``PUT`` request will return the image metadata encoded as HTTP
+headers.
+
+See more about image statuses here: :doc:`Image Statuses <statuses>`
+
+
+List Image Memberships
+**********************
+
+We want to see a list of the other system tenants (or users, if
+"owner_is_tenant" is False) that may access a given virtual machine image that
+the Glance server knows about. We take the `uri` field of the image data,
+append ``/members`` to it, and issue a ``GET`` request on the resulting URL.
+
+Continuing from the example above, in order to get the memberships for the
+first image returned, we can issue a ``GET`` request to the Glance
+server for
+``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members``.
+What we will get back is JSON data such as the following::
+
+    {'members': [
+     {'member_id': 'tenant1',
+      'can_share': false}
+     ...]}
+
+The `member_id` field identifies a tenant with which the image is shared. If
+that tenant is authorized to further share the image, the `can_share` field is
+`true`.
+
+
+List Shared Images
+******************
+
+We want to see a list of images which are shared with a given tenant. We issue
+a ``GET`` request to ``http://glance.example.com/v1/shared-images/tenant1``. We
+will get back JSON data such as the following::
+
+    {'shared_images': [
+     {'image_id': '71c675ab-d94f-49cd-a114-e12490b328d9',
+      'can_share': false}
+     ...]}
+
+The `image_id` field identifies an image shared with the tenant named by
+*member_id*. If the tenant is authorized to further share the image, the
+`can_share` field is `true`.
+
+
+Add a Member to an Image
+************************
+
+We want to authorize a tenant to access a private image.
We issue a ``PUT`` +request to +``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members/tenant1`` +. With no body, this will add the membership to the image, leaving existing +memberships unmodified and defaulting new memberships to have `can_share` +set to `false`. We may also optionally attach a body of the following form:: + + {'member': + {'can_share': true} + } + +If such a body is provided, both existing and new memberships will have +`can_share` set to the provided value (either `true` or `false`). This query +will return a 204 ("No Content") status code. + + +Remove a Member from an Image +***************************** + +We want to revoke a tenant's right to access a private image. We issue a +``DELETE`` request to ``http://glance.example.com/v1/images/1/members/tenant1``. +This query will return a 204 ("No Content") status code. + + +Replace a Membership List for an Image +************************************** + +The full membership list for a given image may be replaced. We issue a ``PUT`` +request to +``http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9/members`` +with a body of the following form:: + + {'memberships': [ + {'member_id': 'tenant1', + 'can_share': false} + ...]} + +All existing memberships which are not named in the replacement body are +removed, and those which are named have their `can_share` settings changed as +specified. (The `can_share` setting may be omitted, which will cause that +setting to remain unchanged in the existing memberships.) All new memberships +will be created, with `can_share` defaulting to `false` if it is not specified. + + +Image Membership Changes in Version 2.0 +--------------------------------------- + +Version 2.0 of the Images API eliminates the ``can_share`` attribute of image +membership. In the version 2.0 model, image sharing is not transitive. + +In version 2.0, image members have a ``status`` attribute that reflects how the +image should be treated with respect to that image member's image list. + +* The ``status`` attribute may have one of three values: ``pending``, + ``accepted``, or ``rejected``. + +* By default, only those shared images with status ``accepted`` are included in + an image member's image-list. + +* Only an image member may change his/her own membership status. + +* Only an image owner may create members on an image. The status of a newly + created image member is ``pending``. The image owner cannot change the + status of a member. + + +Distinctions from Version 1.x API Calls +*************************************** + +* The response to a request to list the members of an image has changed. + + call: ``GET`` on ``/v2/images/{imageId}/members`` + + response: see the JSON schema at ``/v2/schemas/members`` + +* The request body in the call to create an image member has changed. + + call: ``POST`` to ``/v2/images/{imageId}/members`` + + request body:: + + { "member": "" } + + where the {memberId} is the tenant ID of the image member. + + The member status of a newly created image member is ``pending``. + +New API Calls +************* + +* Change the status of an image member + + call: ``PUT`` on ``/v2/images/{imageId}/members/{memberId}`` + + request body:: + + { "status": "" } + + where is one of ``pending``, ``accepted``, or ``rejected``. + The {memberId} is the tenant ID of the image member. + + +API Message Localization +--------------------------------------- +Glance supports HTTP message localization. 
For example, an HTTP client can +receive API messages in Chinese even if the locale language of the server is +English. + +How to use it +************* +To receive localized API messages, the HTTP client needs to specify the +**Accept-Language** header to indicate the language to use to translate the +message. For more info about Accept-Language, please refer http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +A typical curl API request will be like below:: + + curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json' + http://127.0.0.1:9292/v2/images/aaa + +Then the response will be like the following:: + + HTTP/1.1 404 Not Found + Content-Length: 234 + Content-Type: text/html; charset=UTF-8 + X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a + Date: Sat, 22 Feb 2014 06:26:26 GMT + + + + 404 Not Found + + +

+     <h1>404 Not Found</h1>
+     找不到任何具有标识 aaa 的映像<br /><br />
+    </body>
+   </html>

+ + + +.. note:: + Be sure there is the language package under /usr/share/locale-langpack/ on + the target Glance server. diff --git a/code/daisy/doc/source/daisyclient.rst b/code/daisy/doc/source/daisyclient.rst new file mode 100755 index 00000000..ad90b1a7 --- /dev/null +++ b/code/daisy/doc/source/daisyclient.rst @@ -0,0 +1,26 @@ +.. + Copyright 2011-2012 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Using Glance's Client Tools +=========================== + +The command-line tool and python library for Glance are both installed +through the python-glanceclient project. Explore the following resources +for more information: + +* `Official Docs `_ +* `Pypi Page `_ +* `GitHub Project `_ diff --git a/code/daisy/doc/source/daisymetadefcatalogapi.rst b/code/daisy/doc/source/daisymetadefcatalogapi.rst new file mode 100755 index 00000000..5e0db51e --- /dev/null +++ b/code/daisy/doc/source/daisymetadefcatalogapi.rst @@ -0,0 +1,605 @@ +.. + Copyright (c) 2014 Hewlett-Packard Development Company, L.P. + + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied. + See the License for the specific language governing permissions and + limitations under the License. + +Using Glance's Metadata Definitions Catalog Public APIs +======================================================= + +A common API hosted by the Glance service for vendors, admins, services, and +users to meaningfully define available key / value pair and tag metadata. +The intent is to enable better metadata collaboration across artifacts, +services, and projects for OpenStack users. + +This is about the definition of the available metadata that can be used on +different types of resources (images, artifacts, volumes, flavors, aggregates, +etc). A definition includes the properties type, its key, it's description, +and it's constraints. This catalog will not store the values for specific +instance properties. + +For example, a definition of a virtual CPU topology property for number of +cores will include the key to use, a description, and value constraints like +requiring it to be an integer. So, a user, potentially through Horizon, would +be able to search this catalog to list the available properties they can add to +a flavor or image. They will see the virtual CPU topology property in the list +and know that it must be an integer. In the Horizon example, when the user adds +the property, its key and value will be stored in the service that owns that +resource (Nova for flavors and in Glance for images). 
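+
+As a purely illustrative sketch (the property name and constraint values below
+are invented for this example, not taken from an existing namespace), such a
+definition could look like::
+
+    {
+        "name": "cpu_cores",
+        "title": "vCPU Cores",
+        "description": "Number of virtual CPU cores",
+        "type": "integer",
+        "minimum": 1,
+        "maximum": 32
+    }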
+ +Diagram: https://wiki.openstack.org/w/images/b/bb/Glance-Metadata-API.png + +Glance Metadata Definitions Catalog implementation started with API version v2. + +Authentication +-------------- + +Glance depends on Keystone and the OpenStack Identity API to handle +authentication of clients. You must obtain an authentication token from +Keystone send it along with all API requests to Glance through the +``X-Auth-Token`` header. Glance will communicate back to Keystone to verify +the token validity and obtain your identity credentials. + +See :doc:`authentication` for more information on integrating with Keystone. + +Using v2.X +---------- + +For the purpose of examples, assume there is a Glance API server running +at the URL ``http://glance.example.com`` on the default port 80. + +List Available Namespaces +************************* + +We want to see a list of available namespaces that the authenticated user +has access to. This includes namespaces owned by the user, +namespaces shared with the user and public namespaces. + +We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces`` +to retrieve this list of available namespaces. +The data is returned as a JSON-encoded mapping in the following format:: + + { + "namespaces": [ + { + "namespace": "MyNamespace", + "display_name": "My User Friendly Namespace", + "description": "My description", + "visibility": "public", + "protected": true, + "owner": "The Test Owner", + "self": "/v2/metadefs/namespaces/MyNamespace", + "schema": "/v2/schemas/metadefs/namespace", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z", + "resource_type_associations": [ + { + "name": "OS::Nova::Aggregate", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z" + }, + { + "name": "OS::Nova::Flavor", + "prefix": "aggregate_instance_extra_specs:", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z" + } + ] + } + ], + "first": "/v2/metadefs/namespaces?sort_key=created_at&sort_dir=asc", + "schema": "/v2/schemas/metadefs/namespaces" + } + + +.. note:: + Listing namespaces will only show the summary of each namespace including + counts and resource type associations. Detailed response including all its + objects definitions, property definitions etc. will only be available on + each individual GET namespace request. + +Filtering Namespaces Lists +************************** + +``GET /v2/metadefs/namespaces`` requests take query parameters that serve to +filter the returned list of namespaces. The following +list details these query parameters. + +* ``resource_types=RESOURCE_TYPES`` + + Filters namespaces having a ``resource_types`` within the list of + comma separated ``RESOURCE_TYPES``. + +GET resource also accepts additional query parameters: + +* ``sort_key=KEY`` + + Results will be ordered by the specified sort attribute ``KEY``. Accepted + values include ``namespace``, ``created_at`` (default) and ``updated_at``. + +* ``sort_dir=DIR`` + + Results will be sorted in the direction ``DIR``. Accepted values are ``asc`` + for ascending or ``desc`` (default) for descending. + +* ``marker=NAMESPACE`` + + A namespace identifier marker may be specified. When present only + namespaces which occur after the identifier ``NAMESPACE`` will be listed, + i.e. the namespaces which have a `sort_key` later than that of the marker + ``NAMESPACE`` in the `sort_dir` direction. + +* ``limit=LIMIT`` + + When present the maximum number of results returned will not exceed ``LIMIT``. + +.. 
note:: + + If the specified ``LIMIT`` exceeds the operator defined limit (api_limit_max) + then the number of results returned may be less than ``LIMIT``. + +* ``visibility=PUBLIC`` + + An admin user may use the `visibility` parameter to control which results are + returned (PRIVATE or PUBLIC). + + +Retrieve Namespace +****************** + +We want to see a more detailed information about a namespace that the +authenticated user has access to. The detail includes the properties, objects, +and resource type associations. + +We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces/{namespace}`` +to retrieve the namespace details. +The data is returned as a JSON-encoded mapping in the following format:: + + { + "namespace": "MyNamespace", + "display_name": "My User Friendly Namespace", + "description": "My description", + "visibility": "public", + "protected": true, + "owner": "The Test Owner", + "schema": "/v2/schemas/metadefs/namespace", + "resource_type_associations": [ + { + "name": "OS::Glance::Image", + "prefix": "hw_", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z" + }, + { + "name": "OS::Cinder::Volume", + "prefix": "hw_", + "properties_target": "image", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z" + }, + { + "name": "OS::Nova::Flavor", + "prefix": "filter1:", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z" + } + ], + "properties": { + "nsprop1": { + "title": "My namespace property1", + "description": "More info here", + "type": "boolean", + "default": true + }, + "nsprop2": { + "title": "My namespace property2", + "description": "More info here", + "type": "string", + "default": "value1" + } + }, + "objects": [ + { + "name": "object1", + "description": "my-description", + "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1", + "schema": "/v2/schemas/metadefs/object", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z", + "required": [], + "properties": { + "prop1": { + "title": "My object1 property1", + "description": "More info here", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + { + "name": "object2", + "description": "my-description", + "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2", + "schema": "/v2/schemas/metadefs/object", + "created_at": "2014-08-28T17:13:06Z", + "updated_at": "2014-08-28T17:13:06Z", + "properties": { + "prop1": { + "title": "My object2 property1", + "description": "More info here", + "type": "integer", + "default": 20 + } + } + } + ] + } + +Retrieve available Resource Types +********************************* + +We want to see the list of all resource types that are available in Glance + +We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/resource_types`` +to retrieve all resource types. 
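+
+Such a request might be issued with ``curl`` roughly as follows (the token
+variable is an assumption made for this sketch)::
+
+    curl -i -X GET -H "X-Auth-Token: $TOKEN" \
+         http://glance.example.com/v2/metadefs/resource_types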
+ +The data is returned as a JSON-encoded mapping in the following format:: + + { + "resource_types": [ + { + "created_at": "2014-08-28T17:13:04Z", + "name": "OS::Glance::Image", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "created_at": "2014-08-28T17:13:04Z", + "name": "OS::Cinder::Volume", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "created_at": "2014-08-28T17:13:04Z", + "name": "OS::Nova::Flavor", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "created_at": "2014-08-28T17:13:04Z", + "name": "OS::Nova::Aggregate", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "created_at": "2014-08-28T17:13:04Z", + "name": "OS::Nova::Instance", + "updated_at": "2014-08-28T17:13:04Z" + } + ] + } + + +Retrieve Resource Types associated with a Namespace +*************************************************** + +We want to see the list of resource types that are associated for a specific +namespace + +We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces/{namespace}/resource_types`` +to retrieve resource types. + +The data is returned as a JSON-encoded mapping in the following format:: + + { + "resource_type_associations" : [ + { + "name" : "OS::Glance::Image", + "prefix" : "hw_", + "created_at": "2014-08-28T17:13:04Z", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "name" :"OS::Cinder::Volume", + "prefix" : "hw_", + "properties_target" : "image", + "created_at": "2014-08-28T17:13:04Z", + "updated_at": "2014-08-28T17:13:04Z" + }, + { + "name" : "OS::Nova::Flavor", + "prefix" : "hw:", + "created_at": "2014-08-28T17:13:04Z", + "updated_at": "2014-08-28T17:13:04Z" + } + ] + } + +Add Namespace +************* + +We want to create a new namespace that can contain the properties, objects, +etc. + +We issue a ``POST`` request to add an namespace to Glance:: + + POST http://glance.example.com/v2/metadefs/namespaces/ + +The input data is an JSON-encoded mapping in the following format:: + + { + "namespace": "MyNamespace", + "display_name": "My User Friendly Namespace", + "description": "My description", + "visibility": "public", + "protected": true + } + +.. note:: + Optionally properties, objects and resource type associations could be + added in the same input. See GET Namespace output above(input will be + similar). + +Update Namespace +**************** + +We want to update an existing namespace + +We issue a ``PUT`` request to update an namespace to Glance:: + + PUT http://glance.example.com/v2/metadefs/namespaces/{namespace} + +The input data is similar to Add Namespace + + +Delete Namespace +**************** + +We want to delete an existing namespace including all its objects, +properties etc. 
+
+We issue a ``DELETE`` request to delete a namespace from Glance::
+
+    DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}
+
+
+Associate Resource Type with Namespace
+**************************************
+
+We want to associate a resource type with an existing namespace.
+
+We issue a ``POST`` request to Glance to associate a resource type with the
+namespace::
+
+    POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/resource_types
+
+The input data is a JSON-encoded mapping in the following format::
+
+    {
+        "name": "OS::Cinder::Volume",
+        "prefix": "hw_",
+        "properties_target": "image",
+        "created_at": "2014-08-28T17:13:04Z",
+        "updated_at": "2014-08-28T17:13:04Z"
+    }
+
+
+Remove Resource Type associated with a Namespace
+************************************************
+
+We want to de-associate a namespace from a resource type.
+
+We issue a ``DELETE`` request to Glance to remove the resource type
+association::
+
+    DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}/resource_types/{resource_type}
+
+
+List Objects in Namespace
+*************************
+
+We want to see the list of meta definition objects in a specific namespace.
+
+We issue a ``GET`` request to ``http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects``
+to retrieve objects.
+
+The data is returned as a JSON-encoded mapping in the following format::
+
+    {
+        "objects": [
+            {
+                "name": "object1",
+                "description": "my-description",
+                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object1",
+                "schema": "/v2/schemas/metadefs/object",
+                "created_at": "2014-08-28T17:13:06Z",
+                "updated_at": "2014-08-28T17:13:06Z",
+                "required": [],
+                "properties": {
+                    "prop1": {
+                        "title": "My object1 property1",
+                        "description": "More info here",
+                        "type": "array",
+                        "items": {
+                            "type": "string"
+                        }
+                    }
+                }
+            },
+            {
+                "name": "object2",
+                "description": "my-description",
+                "self": "/v2/metadefs/namespaces/MyNamespace/objects/object2",
+                "schema": "/v2/schemas/metadefs/object",
+                "created_at": "2014-08-28T17:13:06Z",
+                "updated_at": "2014-08-28T17:13:06Z",
+                "properties": {
+                    "prop1": {
+                        "title": "My object2 property1",
+                        "description": "More info here",
+                        "type": "integer",
+                        "default": 20
+                    }
+                }
+            }
+        ],
+        "schema": "/v2/schemas/metadefs/objects"
+    }
+
+Add object in a specific namespace
+**********************************
+
+We want to create a new object which can group the properties.
+
+We issue a ``POST`` request to add an object to a namespace in Glance::
+
+    POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects
+
+The input data is a JSON-encoded mapping in the following format::
+
+    {
+        "name": "StorageQOS",
+        "description": "Our available storage QOS.",
+        "required": [
+            "minIOPS"
+        ],
+        "properties": {
+            "minIOPS": {
+                "type": "integer",
+                "description": "The minimum IOPs required",
+                "default": 100,
+                "minimum": 100,
+                "maximum": 30000369
+            },
+            "burstIOPS": {
+                "type": "integer",
+                "description": "The expected burst IOPs",
+                "default": 1000,
+                "minimum": 100,
+                "maximum": 30000377
+            }
+        }
+    }
+
+Update Object in a specific namespace
+*************************************
+
+We want to update an existing object.
+
+We issue a ``PUT`` request to update an object in Glance::
+
+    PUT http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects/{object_name}
+
+The input data is similar to Add Object.
+
+
+Delete Object in a specific namespace
+*************************************
+
+We want to delete an existing object.
+
+We issue a ``DELETE`` request to delete an object in a namespace from Glance::
+
+    DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}/objects/{object_name}
+
+
+Add property definition in a specific namespace
+***********************************************
+
+We want to create a new property definition in a namespace.
+
+We issue a ``POST`` request to add a property definition to a namespace in
+Glance::
+
+    POST http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties
+
+The input data is a JSON-encoded mapping in the following format::
+
+    {
+        "name": "hypervisor_type",
+        "title": "Hypervisor",
+        "type": "array",
+        "description": "The type of hypervisor required",
+        "items": {
+            "type": "string",
+            "enum": [
+                "hyperv",
+                "qemu",
+                "kvm"
+            ]
+        }
+    }
+
+
+Update property definition in a specific namespace
+**************************************************
+
+We want to update an existing property definition.
+
+We issue a ``PUT`` request to update a property definition in a namespace in
+Glance::
+
+    PUT http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties/{property_name}
+
+The input data is similar to Add property definition.
+
+
+Delete property definition in a specific namespace
+**************************************************
+
+We want to delete an existing property definition.
+
+We issue a ``DELETE`` request to delete a property definition in a namespace
+from Glance::
+
+    DELETE http://glance.example.com/v2/metadefs/namespaces/{namespace}/properties/{property_name}
+
+
+API Message Localization
+------------------------
+Glance supports HTTP message localization. For example, an HTTP client can
+receive API messages in Chinese even if the locale language of the server is
+English.
+
+How to use it
+*************
+To receive localized API messages, the HTTP client needs to specify the
+**Accept-Language** header to indicate the language to use to translate the
+message. For more info about Accept-Language, please refer to
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+A typical curl API request will be like below::
+
+    curl -i -X GET -H 'Accept-Language: zh' -H 'Content-Type: application/json'
+    http://127.0.0.1:9292/v2/metadefs/namespaces/{namespace}
+
+Then the response will be like the following::
+
+    HTTP/1.1 404 Not Found
+    Content-Length: 234
+    Content-Type: text/html; charset=UTF-8
+    X-Openstack-Request-Id: req-54d403a0-064e-4544-8faf-4aeef086f45a
+    Date: Sat, 22 Feb 2014 06:26:26 GMT
+
+    <html>
+     <head>
+      <title>404 Not Found</title>
+     </head>
+     <body>

+      <h1>404 Not Found</h1>
+      找不到任何具有标识 aaa 的映像<br /><br />
+     </body>
+    </html>

+ + + +.. note:: + Be sure there is the language package under /usr/share/locale-langpack/ on + the target Glance server. diff --git a/code/daisy/doc/source/db.rst b/code/daisy/doc/source/db.rst new file mode 100755 index 00000000..542bdd1b --- /dev/null +++ b/code/daisy/doc/source/db.rst @@ -0,0 +1,60 @@ +.. + Copyright 2012 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Database Management +=================== + +The default metadata driver for glance uses sqlalchemy, which implies there +exists a backend database which must be managed. The ``glance-manage`` binary +provides a set of commands for making this easier. + +The commands should be executed as a subcommand of 'db': + + glance-manage db + + +Sync the Database +----------------- + + glance-manage db sync + +Place a database under migration control and upgrade, creating it first if necessary. + + +Determining the Database Version +-------------------------------- + + glance-manage db version + +This will print the current migration level of a glance database. + + +Upgrading an Existing Database +------------------------------ + + glance-manage db upgrade + +This will take an existing database and upgrade it to the specified VERSION. + + +Downgrading an Existing Database +-------------------------------- + + glance-manage db downgrade + +This will downgrade an existing database from the current version to the +specified VERSION. + diff --git a/code/daisy/doc/source/formats.rst b/code/daisy/doc/source/formats.rst new file mode 100755 index 00000000..fd464e4e --- /dev/null +++ b/code/daisy/doc/source/formats.rst @@ -0,0 +1,108 @@ +.. + Copyright 2011 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Disk and Container Formats +========================== + +When adding an image to Glance, you must specify what the virtual +machine image's *disk format* and *container format* are. Disk and container +formats are configurable on a per-deployment basis. This document intends to +establish a global convention for what specific values of *disk_format* and +*container_format* mean. + +Disk Format +----------- + +The disk format of a virtual machine image is the format of the underlying +disk image. Virtual appliance vendors have different formats for laying out +the information contained in a virtual machine disk image. 
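+
+When adding an image through the Images v1 API described in this documentation,
+both values are carried in the image metadata headers; a minimal sketch (the
+values shown are only an illustration)::
+
+    x-image-meta-disk_format      qcow2
+    x-image-meta-container_format bare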
+ +You can set your image's disk format to one of the following: + +* **raw** + + This is an unstructured disk image format + +* **vhd** + + This is the VHD disk format, a common disk format used by virtual machine + monitors from VMWare, Xen, Microsoft, VirtualBox, and others + +* **vmdk** + + Another common disk format supported by many common virtual machine monitors + +* **vdi** + + A disk format supported by VirtualBox virtual machine monitor and the QEMU + emulator + +* **iso** + + An archive format for the data contents of an optical disc (e.g. CDROM). + +* **qcow2** + + A disk format supported by the QEMU emulator that can expand dynamically and + supports Copy on Write + +* **aki** + + This indicates what is stored in Glance is an Amazon kernel image + +* **ari** + + This indicates what is stored in Glance is an Amazon ramdisk image + +* **ami** + + This indicates what is stored in Glance is an Amazon machine image + +Container Format +---------------- + +The container format refers to whether the virtual machine image is in a +file format that also contains metadata about the actual virtual machine. + +Note that the container format string is not currently used by Glance or +other OpenStack components, so it is safe to simply specify **bare** as +the container format if you are unsure. + +You can set your image's container format to one of the following: + +* **bare** + + This indicates there is no container or metadata envelope for the image + +* **ovf** + + This is the OVF container format + +* **aki** + + This indicates what is stored in Glance is an Amazon kernel image + +* **ari** + + This indicates what is stored in Glance is an Amazon ramdisk image + +* **ami** + + This indicates what is stored in Glance is an Amazon machine image + +* **ova** + + This indicates what is stored in Glance is an OVA tar archive file diff --git a/code/daisy/doc/source/identifiers.rst b/code/daisy/doc/source/identifiers.rst new file mode 100755 index 00000000..a6f5f741 --- /dev/null +++ b/code/daisy/doc/source/identifiers.rst @@ -0,0 +1,27 @@ +.. + Copyright 2010 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Image Identifiers +================= + +Images are uniquely identified by way of a URI that +matches the following signature:: + + /v1/images/ + +where `` is the resource location of the Glance service +that knows about an image, and `` is the image's identifier. Image +identifiers in Glance are *uuids*, making them *globally unique*. 
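+
+For example, with the server and image used throughout this guide, the full
+URI of that image would be (illustrative values only)::
+
+    http://glance.example.com/v1/images/71c675ab-d94f-49cd-a114-e12490b328d9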
diff --git a/code/daisy/doc/source/images/architecture.png b/code/daisy/doc/source/images/architecture.png
new file mode 100755
index 00000000..2eacef98
Binary files /dev/null and b/code/daisy/doc/source/images/architecture.png differ
diff --git a/code/daisy/doc/source/images/image_status_transition.png b/code/daisy/doc/source/images/image_status_transition.png
new file mode 100755
index 00000000..ae46dc03
Binary files /dev/null and b/code/daisy/doc/source/images/image_status_transition.png differ
diff --git a/code/daisy/doc/source/images_src/architecture.graphml b/code/daisy/doc/source/images_src/architecture.graphml
new file mode 100755
index 00000000..ae84dbdc
--- /dev/null
+++ b/code/daisy/doc/source/images_src/architecture.graphml
@@ -0,0 +1,931 @@
[931 lines of graphml markup for the architecture diagram omitted: the XML was
stripped during extraction and only the node labels survive. The diagram shows
a client (AuthN) talking to Keystone and to the Glance REST API; the Glance
Domain Controller (Auth, Notifier, Policy, Quota, Location, DB); the Registry
Layer, Database Abstraction Layer and Glance DB; and Glance Store with its
drivers and supported storages (Swift, S3, Ceph, Sheepdog, Filesystem, ...),
plus embedded SVG icon resources.]
diff --git a/code/daisy/doc/source/images_src/image_status_transition.dot b/code/daisy/doc/source/images_src/image_status_transition.dot
new file mode 100755
index 00000000..353fae5e
--- /dev/null
+++ b/code/daisy/doc/source/images_src/image_status_transition.dot
@@ -0,0 +1,51 @@
+/*
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+*/
+
+/*
+This file can be compiled by graphviz with issuing the following command:
+
+    dot -Tpng -oimage_status_transition.png image_status_transition.dot
+
+See http://www.graphviz.org to get more info.
+*/ + +digraph { + node [shape="doublecircle" color="#006699" style="filled" + fillcolor="#33CCFF" fixedsize="True" width="1.5" height="1.5"]; + + "" -> "queued" [label="create image"]; + + "queued" -> "active" [label="add location*"]; + "queued" -> "saving" [label="upload"]; + "queued" -> "deleted" [label="delete"]; + + "saving" -> "active" [label="upload succeed"]; + "saving" -> "killed" [label="upload fail"]; + "saving" -> "deleted" [label="delete"]; + + "active" -> "queued" [label="remove location*"]; + "active" -> "pending_delete" [label="delayed delete"]; + "active" -> "deleted" [label="delete"]; + "active" -> "deactivated" [label="deactivate"]; + + "deactivated" -> "active" [label="reactivate"]; + "deactivated" -> "deleted" [label="delete"]; + + "killed" -> "deleted" [label="delete"]; + + "pending_delete" -> "deleted" [label="after scrub time"]; +} diff --git a/code/daisy/doc/source/images_src/image_status_transition.png b/code/daisy/doc/source/images_src/image_status_transition.png new file mode 100755 index 00000000..a0e4e81a Binary files /dev/null and b/code/daisy/doc/source/images_src/image_status_transition.png differ diff --git a/code/daisy/doc/source/index.rst b/code/daisy/doc/source/index.rst new file mode 100755 index 00000000..c054b204 --- /dev/null +++ b/code/daisy/doc/source/index.rst @@ -0,0 +1,90 @@ +.. + Copyright 2010 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Welcome to Glance's documentation! +================================== + +The Glance project provides a service where users can upload and discover +data assets that are meant to be used with other services. This currently +includes images and metadata definitions. + +Glance image services include discovering, registering, and +retrieving virtual machine images. Glance has a RESTful API that allows +querying of VM image metadata as well as retrieval of the actual image. + +VM images made available through Glance can be stored in a variety of +locations from simple filesystems to object-storage systems like the +OpenStack Swift project. + +Glance, as with all OpenStack projects, is written with the following design +guidelines in mind: + +* **Component based architecture**: Quickly add new behaviors +* **Highly available**: Scale to very serious workloads +* **Fault tolerant**: Isolated processes avoid cascading failures +* **Recoverable**: Failures should be easy to diagnose, debug, and rectify +* **Open standards**: Be a reference implementation for a community-driven api + +This documentation is generated by the Sphinx toolkit and lives in the source +tree. Additional documentation on Glance and other components of OpenStack can +be found on the `OpenStack wiki`_. + +.. _`OpenStack wiki`: http://wiki.openstack.org + +Glance Background Concepts +========================== + +.. toctree:: + :maxdepth: 1 + + architecture + identifiers + statuses + formats + common-image-properties + metadefs-concepts + +Installing/Configuring Glance +============================= + +.. 
toctree:: + :maxdepth: 1 + + installing + configuring + authentication + policies + +Operating Glance +================ + +.. toctree:: + :maxdepth: 1 + + controllingservers + db + cache + notifications + +Using Glance +============ + +.. toctree:: + :maxdepth: 1 + + daisyapi + daisyclient + daisymetadefcatalogapi diff --git a/code/daisy/doc/source/installing.rst b/code/daisy/doc/source/installing.rst new file mode 100755 index 00000000..dd44999a --- /dev/null +++ b/code/daisy/doc/source/installing.rst @@ -0,0 +1,179 @@ +.. + Copyright 2011 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Installation +============ + +Installing from packages +~~~~~~~~~~~~~~~~~~~~~~~~ + +To install the latest released version of Glance, +follow the instructions below. + +Debian, Ubuntu +############## + +1. Add the Glance PPA to your sources.list:: + + $> sudo add-apt-repository ppa:glance-core/trunk + $> sudo apt-get update + +2. Install Glance:: + + $> sudo apt-get install glance + +Red Hat, Fedora +############### + +Only RHEL 6, Fedora 18, and newer releases have the necessary +components packaged. +On RHEL 6, enable the EPEL repository. + +Install Glance:: + + $ su - + # yum install openstack-glance + +openSUSE, SLE +############# + +openSUSE 13.2, SLE 12, and the rolling release Factory need an extra +repository enabled to install all the OpenStack packages. + +Search the proper repository in the `Cloud:OpenStack:Master `_ project. For example, for openSUSE 13.2: + +1. Add the OpenStack master repository:: + + $ sudo zypper ar -f -g http://download.opensuse.org/repositories/Cloud:/OpenStack:/Master/openSUSE_13.2/ OpenStack + $ sudo zypper ref + +2. Install Glance:: + + $ sudo zypper in openstack-glance + +Installing from source tarballs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To install the latest version of Glance from the Launchpad Bazaar repositories, +follow the instructions below. + +1. Grab the source tarball from `Launchpad `_ + +2. Untar the source tarball:: + + $> tar -xzf + +3. Change into the package directory and build/install:: + + $> cd glance- + $> sudo python setup.py install + +Installing from Git +~~~~~~~~~~~~~~~~~~~ + +To install the latest version of Glance from the GitHub Git repositories, +follow the instructions below. + +Debian, Ubuntu +############## + +1. Install Git and build dependencies:: + + $> sudo apt-get install git + $> sudo apt-get build-dep glance + +.. note:: + + If you want to build the Glance documentation locally, you will also want + to install the python-sphinx package + +2. Clone Glance's trunk branch from GitHub:: + + $> git clone git://github.com/openstack/glance + $> cd glance + +3. Install Glance:: + + $> sudo python setup.py install + +Red Hat, Fedora +############### + +On Fedora, most developers and essentially all users install packages. +Instructions below are not commonly used, and even then typically in a +throw-away VM.
+ +Since normal build dependencies are resolved by mechanisms of RPM, +there is no one-line command to install everything needed by +the source repository in git. One common way to discover the dependencies +is to search for *BuildRequires:* in the specfile of openstack-glance +for the appropriate distro. + +In case of Fedora 16, for example, do this:: + + $ su - + # yum install git + # yum install python2-devel python-setuptools python-distutils-extra + # yum install python-webob python-eventlet + # yum install python-virtualenv + +Build Glance:: + + $ python setup.py build + +If any missing modules crop up, install them with yum, then retry the build. + +.. note:: + + If you want to build the Glance documentation, you will also want + to install the packages python-sphinx and graphviz, then run + "python setup.py build_sphinx". Due to required features of + python-sphinx 1.0 or better, documentation can only be built + on Fedora 15 or later. + +Test the build:: + + $ ./run_tests.sh -s + +Once Glance is built and tested, install it:: + + $ su - + # python setup.py install + +openSUSE, SLE +############# + +On openSUSE and SLE (also this is valid for Factory), we can install +all the build dependencies using Zypper. + +1. Install Git and build dependencies:: + + $ sudo zypper install git + $ sudo zypper source-install -d openstack-glance + +.. note:: + + If you want to build the Glance documentation locally, you will also want + to install the packages python-sphinx and graphviz. + +2. Clone Glance's trunk branch from GitHub:: + + $ git clone git://github.com/openstack/glance + $ cd glance + +3. Install Glance:: + + $ sudo python setup.py install diff --git a/code/daisy/doc/source/man/daisyapi.rst b/code/daisy/doc/source/man/daisyapi.rst new file mode 100755 index 00000000..ed30de79 --- /dev/null +++ b/code/daisy/doc/source/man/daisyapi.rst @@ -0,0 +1,39 @@ +========== +daisy-api +========== + +--------------------------------------- +Server for the daisy Image Service API +--------------------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + +daisy-api [options] + +DESCRIPTION +=========== + +daisy-api is a server daemon that serves the daisy API + +OPTIONS +======= + + **General options** + + .. include:: general_options.rst + +FILES +===== + + **/etc/daisy/daisy-api.conf** + Default configuration file for daisy API + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisycachecleaner.rst b/code/daisy/doc/source/man/daisycachecleaner.rst new file mode 100755 index 00000000..7a588d33 --- /dev/null +++ b/code/daisy/doc/source/man/daisycachecleaner.rst @@ -0,0 +1,47 @@ +==================== +daisy-cache-cleaner +==================== + +---------------------------------------------------------------- +daisy Image Cache Invalid Cache Entry and Stalled Image cleaner +---------------------------------------------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + +daisy-cache-cleaner [options] + +DESCRIPTION +=========== + +This is meant to be run as a periodic task from cron. + +If something goes wrong while we're caching an image (for example the fetch +times out, or an exception is raised), we create an 'invalid' entry. These +entires are left around for debugging purposes. 
However, after some period of +time, we want to clean these up. + +Also, if an incomplete image hangs around past the image_cache_stall_time +period, we automatically sweep it up. + +OPTIONS +======= + + **General options** + + .. include:: general_options.rst + +FILES +====== + + **/etc/daisy/daisy-cache.conf** + Default configuration file for the daisy Cache + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisycachemanage.rst b/code/daisy/doc/source/man/daisycachemanage.rst new file mode 100755 index 00000000..388c7cf1 --- /dev/null +++ b/code/daisy/doc/source/man/daisycachemanage.rst @@ -0,0 +1,88 @@ +=================== +daisy-cache-manage +=================== + +------------------------ +Cache management utility +------------------------ + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + daisy-cache-manage [options] [args] + +COMMANDS +======== + + **help ** + Output help for one of the commands below + + **list-cached** + List all images currently cached + + **list-queued** + List all images currently queued for caching + + **queue-image** + Queue an image for caching + + **delete-cached-image** + Purges an image from the cache + + **delete-all-cached-images** + Removes all images from the cache + + **delete-queued-image** + Deletes an image from the cache queue + + **delete-all-queued-images** + Deletes all images from the cache queue + +OPTIONS +======= + + **--version** + show program's version number and exit + + **-h, --help** + show this help message and exit + + **-v, --verbose** + Print more verbose output + + **-d, --debug** + Print more verbose output + + **-H ADDRESS, --host=ADDRESS** + Address of daisy API host. + Default: 0.0.0.0 + + **-p PORT, --port=PORT** + Port the daisy API host listens on. + Default: 9292 + + **-k, --insecure** + Explicitly allow daisy to perform "insecure" SSL + (https) requests. The server's certificate will not be + verified against any certificate authorities. This + option should be used with caution. + + **-A TOKEN, --auth_token=TOKEN** + Authentication token to use to identify the client to the daisy server + + **-f, --force** + Prevent select actions from requesting user confirmation + + **-S STRATEGY, --os-auth-strategy=STRATEGY** + Authentication strategy (keystone or noauth) + + .. include:: openstack_options.rst + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisycacheprefetcher.rst b/code/daisy/doc/source/man/daisycacheprefetcher.rst new file mode 100755 index 00000000..0a116af4 --- /dev/null +++ b/code/daisy/doc/source/man/daisycacheprefetcher.rst @@ -0,0 +1,40 @@ +======================= +daisy-cache-prefetcher +======================= + +------------------------------ +daisy Image Cache Pre-fetcher +------------------------------ + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + daisy-cache-prefetcher [options] + +DESCRIPTION +=========== + +This is meant to be run from the command line after queueing +images to be pretched. + +OPTIONS +======= + + **General options** + + .. include:: general_options.rst + +FILES +===== + + **/etc/daisy/daisy-cache.conf** + Default configuration file for the daisy Cache + + .. 
include:: footer.rst diff --git a/code/daisy/doc/source/man/daisycachepruner.rst b/code/daisy/doc/source/man/daisycachepruner.rst new file mode 100755 index 00000000..2ba30af8 --- /dev/null +++ b/code/daisy/doc/source/man/daisycachepruner.rst @@ -0,0 +1,41 @@ +=================== +daisy-cache-pruner +=================== + +------------------- +daisy cache pruner +------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + daisy-cache-pruner [options] + +DESCRIPTION +=========== + +Prunes images from the daisy cache when the space exceeds the value +set in the image_cache_max_size configuration option. This is meant +to be run as a periodic task, perhaps every half-hour. + +OPTIONS +======== + + **General options** + + .. include:: general_options.rst + +FILES +===== + + **/etc/daisy/daisy-cache.conf** + Default configuration file for the daisy Cache + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisycontrol.rst b/code/daisy/doc/source/man/daisycontrol.rst new file mode 100755 index 00000000..e1f58296 --- /dev/null +++ b/code/daisy/doc/source/man/daisycontrol.rst @@ -0,0 +1,58 @@ +============== +daisy-control +============== + +-------------------------------------- +daisy daemon start/stop/reload helper +-------------------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + daisy-control [options] [CONFPATH] + +Where is one of: + + all, api, daisy-api, registry, daisy-registry, scrubber, daisy-scrubber + +And command is one of: + + start, status, stop, shutdown, restart, reload, force-reload + +And CONFPATH is the optional configuration file to use. + +OPTIONS +======== + + **General Options** + + .. include:: general_options.rst + + **--pid-file=PATH** + File to use as pid file. Default: + /var/run/daisy/$server.pid + + **--await-child DELAY** + Period to wait for service death in order to report + exit code (default is to not wait at all) + + **--capture-output** + Capture stdout/err in syslog instead of discarding + + **--nocapture-output** + The inverse of --capture-output + + **--norespawn** + The inverse of --respawn + + **--respawn** + Restart service on unexpected death + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisymanage.rst b/code/daisy/doc/source/man/daisymanage.rst new file mode 100755 index 00000000..0be6ae85 --- /dev/null +++ b/code/daisy/doc/source/man/daisymanage.rst @@ -0,0 +1,103 @@ +============= +daisy-manage +============= + +------------------------- +daisy Management Utility +------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + + daisy-manage [options] + +DESCRIPTION +=========== + +daisy-manage is a utility for managing and configuring a daisy installation. +One important use of daisy-manage is to setup the database. To do this run:: + + daisy-manage db_sync + +Note: daisy-manage commands can be run either like this:: + + daisy-manage db sync + +or with the db commands concatenated, like this:: + + daisy-manage db_sync + + + +COMMANDS +======== + + **db** + This is the prefix for the commands below when used with a space + rather than a _. For example "db version". 
+ + **db_version** + This will print the current migration level of a daisy database. + + **db_upgrade ** + This will take an existing database and upgrade it to the + specified VERSION. + + **db_downgrade ** + This will take an existing database and downgrade it to the + specified VERSION. + + **db_version_control** + Place the database under migration control. + + **db_sync ** + Place a database under migration control and upgrade, creating + it first if necessary. + + **db_export_metadefs** + Export the metadata definitions into json format. By default the + definitions are exported to /etc/daisy/metadefs directory. + + **db_load_metadefs** + Load the metadata definitions into daisy database. By default the + definitions are imported from /etc/daisy/metadefs directory. + + **db_unload_metadefs** + Unload the metadata definitions. Clears the contents of all the daisy + db tables including metadef_namespace_resource_types, metadef_tags, + metadef_objects, metadef_resource_types, metadef_namespaces and + metadef_properties. + +OPTIONS +======= + + **General Options** + + .. include:: general_options.rst + + **--sql_connection=CONN_STRING** + A proper SQLAlchemy connection string as described + `here `_ + + .. include:: footer.rst + +CONFIGURATION +============= + +The following paths are searched for a ``daisy-manage.conf`` file in the +following order: + +* ``~/.daisy`` +* ``~/`` +* ``/etc/daisy`` +* ``/etc`` + +All options set in ``daisy-manage.conf`` override those set in +``daisy-registry.conf`` and ``daisy-api.conf``. diff --git a/code/daisy/doc/source/man/daisyregistry.rst b/code/daisy/doc/source/man/daisyregistry.rst new file mode 100755 index 00000000..6a5a0cae --- /dev/null +++ b/code/daisy/doc/source/man/daisyregistry.rst @@ -0,0 +1,40 @@ +=============== +daisy-registry +=============== + +-------------------------------------- +Server for the daisy Registry Service +-------------------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + +daisy-registry [options] + +DESCRIPTION +=========== + +daisy-registry is a server daemon that serves image metadata through a +REST-like API. + +OPTIONS +======= + + **General options** + + .. include:: general_options.rst + +FILES +===== + + **/etc/daisy/daisy-registry.conf** + Default configuration file for daisy Registry + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisyreplicator.rst b/code/daisy/doc/source/man/daisyreplicator.rst new file mode 100755 index 00000000..f403c6e9 --- /dev/null +++ b/code/daisy/doc/source/man/daisyreplicator.rst @@ -0,0 +1,90 @@ +================= +daisy-replicator +================= + +--------------------------------------------- +Replicate images across multiple data centers +--------------------------------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + +daisy-replicator [options] [args] + +DESCRIPTION +=========== + +daisy-replicator is a utility can be used to populate a new daisy +server using the images stored in an existing daisy server. The images +in the replicated daisy server preserve the uuids, metadata, and image +data from the original. + +COMMANDS +======== + + **help ** + Output help for one of the commands below + + **compare** + What is missing from the slave daisy? 
+ + **dump** + Dump the contents of a daisy instance to local disk. + + **livecopy** + Load the contents of one daisy instance into another. + + **load** + Load the contents of a local directory into daisy. + + **size** + Determine the size of a daisy instance if dumped to disk. + +OPTIONS +======= + + **-h, --help** + Show this help message and exit + + **-c CHUNKSIZE, --chunksize=CHUNKSIZE** + Amount of data to transfer per HTTP write + + **-d, --debug** + Print debugging information + + **-D DONTREPLICATE, --dontreplicate=DONTREPLICATE** + List of fields to not replicate + + **-m, --metaonly** + Only replicate metadata, not images + + **-l LOGFILE, --logfile=LOGFILE** + Path of file to log to + + **-s, --syslog** + Log to syslog instead of a file + + **-t TOKEN, --token=TOKEN** + Pass in your authentication token if you have one. If + you use this option the same token is used for both + the master and the slave. + + **-M MASTERTOKEN, --mastertoken=MASTERTOKEN** + Pass in your authentication token if you have one. + This is the token used for the master. + + **-S SLAVETOKEN, --slavetoken=SLAVETOKEN** + Pass in your authentication token if you have one. + This is the token used for the slave. + + **-v, --verbose** + Print more verbose output + + .. include:: footer.rst diff --git a/code/daisy/doc/source/man/daisyscrubber.rst b/code/daisy/doc/source/man/daisyscrubber.rst new file mode 100755 index 00000000..3e79b8f6 --- /dev/null +++ b/code/daisy/doc/source/man/daisyscrubber.rst @@ -0,0 +1,63 @@ +=============== +daisy-scrubber +=============== + +-------------------- +daisy scrub service +-------------------- + +:Author: daisy@lists.launchpad.net +:Date: 2014-01-16 +:Copyright: OpenStack LLC +:Version: 2014.1 +:Manual section: 1 +:Manual group: cloud computing + +SYNOPSIS +======== + +daisy-scrubber [options] + +DESCRIPTION +=========== + +daisy-scrubber is a utility that cleans up images that have been deleted. The +mechanics of this differ depending on the backend store and pending_deletion +options chosen. + +Multiple daisy-scrubbers can be run in a single deployment, but only one of +them may be designated as the 'cleanup_scrubber' in the daisy-scrubber.conf +file. The 'cleanup_scrubber' coordinates other daisy-scrubbers by maintaining +the master queue of images that need to be removed. + +The daisy-scubber.conf file also specifies important configuration items such +as the time between runs ('wakeup_time' in seconds), length of time images +can be pending before their deletion ('cleanup_scrubber_time' in seconds) as +well as registry connectivity options. + +daisy-scrubber can run as a periodic job or long-running daemon. + +OPTIONS +======= + + **General options** + + .. include:: general_options.rst + + **-D, --daemon** + Run as a long-running process. When not specified (the + default) run the scrub operation once and then exits. + When specified do not exit and run scrub on + wakeup_time interval as specified in the config. + + **--nodaemon** + The inverse of --daemon. Runs the scrub operation once and + then exits. This is the default. + +FILES +====== + + **/etc/daisy/daisy-scrubber.conf** + Default configuration file for the daisy Scrubber + + .. 
include:: footer.rst diff --git a/code/daisy/doc/source/man/general_options.rst b/code/daisy/doc/source/man/general_options.rst new file mode 100755 index 00000000..c61d0279 --- /dev/null +++ b/code/daisy/doc/source/man/general_options.rst @@ -0,0 +1,67 @@ + **-h, --help** + Show the help message and exit + + **--version** + Print the version number and exit + + **-v, --verbose** + Print more verbose output + + **--noverbose** + Disable verbose output + + **-d, --debug** + Print debugging output (set logging level to DEBUG instead of + default WARNING level) + + **--nodebug** + Disable debugging output + + **--use-syslog** + Use syslog for logging + + **--nouse-syslog** + Disable the use of syslog for logging + + **--syslog-log-facility SYSLOG_LOG_FACILITY** + syslog facility to receive log lines + + **--config-dir DIR** + Path to a config directory to pull \*.conf files from. This + file set is sorted, to provide a predictable parse order + if individual options are over-ridden. The set is parsed after + the file(s) specified via previous --config-file arguments, hence + over-ridden options in the directory take precedence. This means + that configuration from files in a specified config-dir will + always take precedence over configuration from files specified + by --config-file, regardless of argument order. + + **--config-file PATH** + Path to a config file to use. Multiple config files can be + specified by using this flag multiple times, for example, + --config-file --config-file . Values in latter + files take precedence. + + **--log-config-append PATH** + **--log-config PATH** + The name of the logging configuration file. It does not + disable existing loggers, but just appends specified + logging configuration to any other existing logging + options. Please see the Python logging module documentation + for details on logging configuration files. The log-config + name for this option is deprecated. + + **--log-format FORMAT** + A logging.Formatter log message format string which may use any + of the available logging.LogRecord attributes. Default: None + + **--log-date-format DATE_FORMAT** + Format string for %(asctime)s in log records. Default: None + + **--log-file PATH, --logfile PATH** + (Optional) Name of log file to output to. If not set, logging + will go to stdout.
+ + **--log-dir LOG_DIR, --logdir LOG_DIR** + (Optional) The directory to keep log files in (will be prepended + to --log-file) diff --git a/code/daisy/doc/source/man/openstack_options.rst b/code/daisy/doc/source/man/openstack_options.rst new file mode 100755 index 00000000..4602bb9a --- /dev/null +++ b/code/daisy/doc/source/man/openstack_options.rst @@ -0,0 +1,22 @@ + **-os-auth-token=OS_AUTH_TOKEN** + Defaults to env[OS_AUTH_TOKEN] + + **--os-username=OS_USERNAME** + Defaults to env[OS_USERNAME] + + **--os-password=OS_PASSWORD** + Defaults to env[OS_PASSWORD] + + **--os-region-name=OS_REGION_NAME** + Defaults to env[OS_REGION_NAME] + + **--os-tenant-id=OS_TENANT_ID** + Defaults to env[OS_TENANT_ID] + + **--os-tenant-name=OS_TENANT_NAME** + Defaults to env[OS_TENANT_NAME] + + **--os-auth-url=OS_AUTH_URL** + Defaults to env[OS_AUTH_URL] + + diff --git a/code/daisy/doc/source/metadefs-concepts.rst b/code/daisy/doc/source/metadefs-concepts.rst new file mode 100755 index 00000000..0a7cc6f3 --- /dev/null +++ b/code/daisy/doc/source/metadefs-concepts.rst @@ -0,0 +1,185 @@ +.. + Copyright (c) 2014 Hewlett-Packard Development Company, L.P. + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Metadata Definition Concepts +============================ + +The metadata definition service was added to Glance in the Juno release of +OpenStack. + +It provides a common API for vendors, admins, services, and users to +meaningfully **define** available key / value pair metadata that +can be used on different types of resources (images, artifacts, volumes, +flavors, aggregates, etc). A definition includes a property's key, +its description, its constraints, and the resource types to which it can be +associated. + +This catalog does not store the values for specific instance properties. + +For example, a definition of a virtual CPU topology property for the number of +cores will include the base key to use (for example, cpu_cores), a description, +and value constraints like requiring it to be an integer. So, a user, +potentially through Horizon, would be able to search this catalog to list the +available properties they can add to a flavor or image. They will see the +virtual CPU topology property in the list and know that it must be an integer. + +When the user adds the property its key and value will be stored in the +service that owns that resource (for example, Nova for flavors and in Glance +for images). The catalog also includes any additional prefix required when +the property is applied to different types of resources, such as "hw_" for +images and "hw:" for flavors. So, on an image, the user would know to set the +property as "hw_cpu_cores=1". + +Terminology +----------- + +Background +~~~~~~~~~~ +The term *metadata* can become very overloaded and confusing. This +catalog is about the additional metadata that is passed as arbitrary +key / value pairs or tags across various artifacts and OpenStack services. 
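As a concrete illustration of the prefixing behaviour described earlier, the same base key from the catalog surfaces under different property names depending on the resource it is applied to. The short Python sketch below is purely illustrative; the mapping, function name, and resource-type strings are assumptions made for the example and are not part of the Glance code base::

    # Illustrative sketch only -- not the actual metadata definitions code.
    PREFIX_BY_RESOURCE_TYPE = {
        "OS::Glance::Image": "hw_",   # image properties use an underscore-style prefix
        "OS::Nova::Flavor": "hw:",    # flavor extra specs use a colon-style prefix
    }

    def full_property_key(base_key, resource_type):
        """Combine a catalog base key with the prefix for a resource type."""
        return PREFIX_BY_RESOURCE_TYPE.get(resource_type, "") + base_key

    # The "cpu_cores" definition from the example above becomes:
    assert full_property_key("cpu_cores", "OS::Glance::Image") == "hw_cpu_cores"
    assert full_property_key("cpu_cores", "OS::Nova::Flavor") == "hw:cpu_cores"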
+ +Below are a few examples of the various terms used for metadata across +OpenStack services today: + ++-------------------------+---------------------------+----------------------+ +| Nova | Cinder | Glance | ++=========================+===========================+======================+ +| Flavor | Volume & Snapshot | Image & Snapshot | +| + *extra specs* | + *image metadata* | + *properties* | +| Host Aggregate | + *metadata* | + *tags* | +| + *metadata* | VolumeType | | +| Instances | + *extra specs* | | +| + *metadata* | + *qos specs* | | +| + *tags* | | | ++-------------------------+---------------------------+----------------------+ + +Catalog Concepts +~~~~~~~~~~~~~~~~ + +The below figure illustrates the concept terminology used in the metadata +definitions catalog:: + + A namespace is associated with 0 to many resource types, making it visible to + the API / UI for applying to that type of resource. RBAC Permissions are + managed at a namespace level. + + +----------------------------------------------+ + | Namespace | + | | + | +-----------------------------------------+ | + | | Object Definition | | + | | | | +--------------------+ + | | +-------------------------------------+ | | +--> | Resource Type: | + | | | Property Definition A (key=integer) | | | | | e.g. Nova Flavor | + | | +-------------------------------------+ | | | +--------------------+ + | | | | | + | | +-------------------------------------+ | | | + | | | Property Definition B (key=string) | | | | +--------------------+ + | | +-------------------------------------+ | +--+--> | Resource Type: | + | | | | | | e.g. Glance Image | + | +-----------------------------------------+ | | +--------------------+ + | | | + | +-------------------------------------+ | | + | | Property Definition C (key=boolean) | | | +--------------------+ + | +-------------------------------------+ | +--> | Resource Type: | + | | | e.g. Cinder Volume | + +----------------------------------------------+ +--------------------+ + + Properties may be defined standalone or within the context of an object. + + +Catalog Terminology +~~~~~~~~~~~~~~~~~~~ + +The following terminology is used within the metadata definition catalog. + +**Namespaces** + +Metadata definitions are contained in namespaces. + +- Specify the access controls (CRUD) for everything defined in it. Allows for + admin only, different projects, or the entire cloud to define and use the + definitions in the namespace +- Associates the contained definitions to different types of resources + +**Properties** + +A property describes a single property and its primitive constraints. Each +property can ONLY be a primitive type: + +* string, integer, number, boolean, array + +Each primitive type is described using simple JSON schema notation. This +means NO nested objects and no definition referencing. + +**Objects** + +An object describes a group of one to many properties and their primitive +constraints. Each property in the group can ONLY be a primitive type: + +* string, integer, number, boolean, array + +Each primitive type is described using simple JSON schema notation. This +means NO nested objects. + +The object may optionally define required properties under the semantic +understanding that a user who uses the object should provide all required +properties. + +**Resource Type Association** + +Resource type association specifies the relationship between resource +types and the namespaces that are applicable to them. This information can be +used to drive UI and CLI views. 
For example, the same namespace of +objects, properties, and tags may be used for images, snapshots, volumes, and +flavors. Or a namespace may only apply to images. + +Resource types should be aligned with Heat resource types whenever possible. +http://docs.openstack.org/developer/heat/template_guide/openstack.html + +It is important to note that the same base property key can require different +prefixes depending on the target resource type. The API provides a way to +retrieve the correct property based on the target resource type. + +Below are a few examples: + +The desired virtual CPU topology can be set on both images and flavors +via metadata. The keys have different prefixes on images than on flavors. +On flavors keys are prefixed with ``hw:``, but on images the keys are prefixed +with ``hw_``. + +For more: https://github.com/openstack/nova-specs/blob/master/specs/juno/virt-driver-vcpu-topology.rst + +Another example is the AggregateInstanceExtraSpecsFilter and scoped properties +(e.g. properties with something:something=value). For scoped / namespaced +properties, the AggregateInstanceExtraSpecsFilter requires a prefix of +"aggregate_instance_extra_specs:" to be used on flavors but not on the +aggregate itself. Otherwise, the filter will not evaluate the property during +scheduling. + +So, on a host aggregate, you may see: + +companyx:fastio=true + +But then when used on the flavor, the AggregateInstanceExtraSpecsFilter needs: + +aggregate_instance_extra_specs:companyx:fastio=true + +In some cases, there may be multiple different filters that may use +the same property with different prefixes. In this case, the correct prefix +needs to be set based on which filter is enabled. + diff --git a/code/daisy/doc/source/notifications.rst b/code/daisy/doc/source/notifications.rst new file mode 100755 index 00000000..a8be394a --- /dev/null +++ b/code/daisy/doc/source/notifications.rst @@ -0,0 +1,213 @@ +.. + Copyright 2011-2013 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Notifications +============= + +Notifications can be generated for several events in the image lifecycle. +These can be used for auditing, troubleshooting, etc. + +Notification Drivers +-------------------- + +* log + + This driver uses the standard Python logging infrastructure with + the notifications ending up in file specificed by the log_file + configuration directive. + +* messaging + + This strategy sends notifications to a message queue configured + using oslo.messaging configuration options. + +* noop + + This strategy produces no notifications. It is the default strategy. + +Notification Types +------------------ + +* ``image.create`` + + Emitted when an image record is created in Glance. Image record creation is + independent of image data upload. + +* ``image.prepare`` + + Emitted when Glance begins uploading image data to its store. + +* ``image.upload`` + + Emitted when Glance has completed the upload of image data to its store. 
+ +* ``image.activate`` + + Emitted when an image goes to `active` status. This occurs when Glance + knows where the image data is located. + +* ``image.send`` + + Emitted upon completion of an image being sent to a consumer. + +* ``image.update`` + + Emitted when an image record is updated in Glance. + +* ``image.delete`` + + Emitted when an image deleted from Glance. + +* ``task.run`` + + Emitted when a task is picked up by the executor to be run. + +* ``task.processing`` + + Emitted when a task is sent over to the executor to begin processing. + +* ``task.success`` + + Emitted when a task is successfully completed. + +* ``task.failure`` + + Emitted when a task fails. + +Content +------- + +Every message contains a handful of attributes. + +* message_id + + UUID identifying the message. + +* publisher_id + + The hostname of the glance instance that generated the message. + +* event_type + + Event that generated the message. + +* priority + + One of WARN, INFO or ERROR. + +* timestamp + + UTC timestamp of when event was generated. + +* payload + + Data specific to the event type. + +Payload +------- + +* image.send + + The payload for INFO, WARN, and ERROR events contain the following: + + image_id + ID of the image (UUID) + owner_id + Tenant or User ID that owns this image (string) + receiver_tenant_id + Tenant ID of the account receiving the image (string) + receiver_user_id + User ID of the account receiving the image (string) + destination_ip + bytes_sent + The number of bytes actually sent + +* image.create + + For INFO events, it is the image metadata. + WARN and ERROR events contain a text message in the payload. + +* image.prepare + + For INFO events, it is the image metadata. + WARN and ERROR events contain a text message in the payload. + +* image.upload + + For INFO events, it is the image metadata. + WARN and ERROR events contain a text message in the payload. + +* image.activate + + For INFO events, it is the image metadata. + WARN and ERROR events contain a text message in the payload. + +* image.update + + For INFO events, it is the image metadata. + WARN and ERROR events contain a text message in the payload. + +* image.delete + + For INFO events, it is the image id. + WARN and ERROR events contain a text message in the payload. + +* task.run + + The payload for INFO, WARN, and ERROR events contain the following: + + task_id + ID of the task (UUID) + owner + Tenant or User ID that created this task (string) + task_type + Type of the task. Example, task_type is "import". (string) + status, + status of the task. Status can be "pending", "processing", + "success" or "failure". (string) + task_input + Input provided by the user when attempting to create a task. (dict) + result + Resulting output from a successful task. (dict) + message + Message shown in the task if it fails. None if task succeeds. (string) + expires_at + UTC time at which the task would not be visible to the user. (string) + created_at + UTC time at which the task was created. (string) + updated_at + UTC time at which the task was latest updated. (string) + + The exceptions are:- + For INFO events, it is the task dict with result and message as None. + WARN and ERROR events contain a text message in the payload. + +* task.processing + + For INFO events, it is the task dict with result and message as None. + WARN and ERROR events contain a text message in the payload. + +* task.success + + For INFO events, it is the task dict with message as None and result is a + dict. 
+ WARN and ERROR events contain a text message in the payload. + +* task.failure + + For INFO events, it is the task dict with result as None and message is + text. + WARN and ERROR events contain a text message in the payload. diff --git a/code/daisy/doc/source/policies.rst b/code/daisy/doc/source/policies.rst new file mode 100755 index 00000000..626a2d5a --- /dev/null +++ b/code/daisy/doc/source/policies.rst @@ -0,0 +1,193 @@ +.. + Copyright 2012 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Policies +======== + +Glance's public API calls may be restricted to certain sets of users using a +policy configuration file. This document explains exactly how policies are +configured and what they apply to. + +A policy is composed of a set of rules that are used by the policy "Brain" in +determining if a particular action may be performed by the authorized tenant. + +Constructing a Policy Configuration File +---------------------------------------- + +A policy configuration file is a simply JSON object that contain sets of +rules. Each top-level key is the name of a rule. Each rule +is a string that describes an action that may be performed in the Glance API. + +The actions that may have a rule enforced on them are: + +* ``get_images`` - List available image entities + + * ``GET /v1/images`` + * ``GET /v1/images/detail`` + * ``GET /v2/images`` + +* ``get_image`` - Retrieve a specific image entity + + * ``HEAD /v1/images/`` + * ``GET /v1/images/`` + * ``GET /v2/images/`` + +* ``download_image`` - Download binary image data + + * ``GET /v1/images/`` + * ``GET /v2/images//file`` + +* ``upload_image`` - Upload binary image data + + * ``POST /v1/images`` + * ``PUT /v1/images/`` + * ``PUT /v2/images//file`` + +* ``copy_from`` - Copy binary image data from URL + + * ``POST /v1/images`` + * ``PUT /v1/images/`` + +* ``add_image`` - Create an image entity + + * ``POST /v1/images`` + * ``POST /v2/images`` + +* ``modify_image`` - Update an image entity + + * ``PUT /v1/images/`` + * ``PUT /v2/images/`` + +* ``publicize_image`` - Create or update images with attribute + + * ``POST /v1/images`` with attribute ``is_public`` = ``true`` + * ``PUT /v1/images/`` with attribute ``is_public`` = ``true`` + * ``POST /v2/images`` with attribute ``visibility`` = ``public`` + * ``PUT /v2/images/`` with attribute ``visibility`` = ``public`` + +* ``delete_image`` - Delete an image entity and associated binary data + + * ``DELETE /v1/images/`` + * ``DELETE /v2/images/`` + +* ``add_member`` - Add a membership to the member repo of an image + + * ``POST /v2/images//members`` + +* ``get_members`` - List the members of an image + + * ``GET /v1/images//members`` + * ``GET /v2/images//members`` + +* ``delete_member`` - Delete a membership of an image + + * ``DELETE /v1/images//members/`` + * ``DELETE /v2/images//members/`` + +* ``modify_member`` - Create or update the membership of an image + + * ``PUT /v1/images//members/`` + * ``PUT /v1/images//members`` + * ``POST /v2/images//members`` + * ``PUT 
/v2/images//members/`` + +* ``manage_image_cache`` - Allowed to use the image cache management API + + +To limit an action to a particular role or roles, you list the roles like so :: + + { + "delete_image": ["role:admin", "role:superuser"] + } + +The above would add a rule that only allowed users that had roles of either +"admin" or "superuser" to delete an image. + +Writing Rules +------------- + +Role checks are going to continue to work exactly as they already do. If the +role defined in the check is one that the user holds, then that will pass, +e.g., ``role:admin``. + +To write a generic rule, you need to know that there are three values provided +by Glance that can be used in a rule on the left side of the colon (``:``). +Those values are the current user's credentials in the form of: + +- role +- tenant +- owner + +The left side of the colon can also contain any value that Python can +understand, e.g.,: + +- ``True`` +- ``False`` +- ``"a string"`` +- &c. + +Using ``tenant`` and ``owner`` will only work with images. Consider the +following rule:: + + tenant:%(owner)s + +This will use the ``tenant`` value of the currently authenticated user. It +will also use ``owner`` from the image it is acting upon. If those two +values are equivalent the check will pass. All attributes on an image (as well +as extra image properties) are available for use on the right side of the +colon. The most useful are the following: + +- ``owner`` +- ``protected`` +- ``is_public`` + +Therefore, you could construct a set of rules like the following:: + + { + "not_protected": "False:%(protected)s", + "is_owner": "tenant:%(owner)s", + "is_owner_or_admin": "rule:is_owner or role:admin", + "not_protected_and_is_owner": "rule:not_protected and rule:is_owner", + + "get_image": "rule:is_owner_or_admin", + "delete_image": "rule:not_protected_and_is_owner", + "add_member": "rule:not_protected_and_is_owner" + } + +Examples +-------- + +Example 1. (The default policy configuration) + + :: + + { + "default": "" + } + +Note that an empty JSON list means that all methods of the +Glance API are callable by anyone. + +Example 2. Disallow modification calls to non-admins + + :: + + { + "default": "", + "add_image": "role:admin", + "modify_image": "role:admin", + "delete_image": "role:admin" + } diff --git a/code/daisy/doc/source/property-protections.rst b/code/daisy/doc/source/property-protections.rst new file mode 100755 index 00000000..819e725d --- /dev/null +++ b/code/daisy/doc/source/property-protections.rst @@ -0,0 +1,149 @@ +.. + Copyright 2013 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Property Protections +==================== + +There are two types of image properties in Glance: + +* Core Properties, as specified by the image schema. + +* Meta Properties, which are arbitrary key/value pairs that can be added to an + image. + +Access to meta properties through Glance's public API calls may be +restricted to certain sets of users, using a property protections configuration +file. 
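Like the policy rules shown in the previous section, these restrictions ultimately reduce to simple checks against the caller's credentials. The following Python fragment is a rough sketch of how a single check such as ``role:admin`` or ``tenant:%(owner)s`` could be evaluated; it is a simplification for illustration only, not Glance's actual policy engine::

    # Simplified illustration; the real policy engine is far more general.
    def check(rule, credentials, target):
        """Evaluate one 'kind:value' check against the request context."""
        kind, _, value = rule.partition(":")
        value = value % target            # expand e.g. %(owner)s from the image
        if kind == "role":
            return value in credentials.get("roles", [])
        # 'tenant' (and similar keys) compare a credential field with the target
        return str(credentials.get(kind)) == value

    creds = {"roles": ["admin"], "tenant": "project-a"}
    image = {"owner": "project-a", "protected": False}

    assert check("role:admin", creds, image)        # the caller holds the role
    assert check("tenant:%(owner)s", creds, image)  # the caller owns the image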
+ +This document explains exactly how property protections are configured and what +they apply to. + + +Constructing a Property Protections Configuration File +------------------------------------------------------ + +A property protections configuration file follows the format of the Glance API +configuration file, which consists of sections, led by a ``[section]`` header +and followed by ``name = value`` entries. Each section header is a regular +expression matching a set of properties to be protected. + +.. note:: + + Section headers must compile to a valid regular expression, otherwise + glance api service will not start. Regular expressions + will be handled by python's re module which is PERL like. + +Each section describes four key-value pairs, where the key is one of +``create/read/update/delete``, and the value is a comma separated list of user +roles that are permitted to perform that operation in the Glance API. **If any of +the keys are not specified, then the glance api service will not start +successfully.** + +In the list of user roles, ``@`` means all roles and ``!`` means no role. +**If both @ and ! are specified for the same rule then the glance api service +will not start** + +.. note:: + + Only one policy rule is allowed per property operation. **If multiple are + specified, then the glance api service will not start.** + +The path to the file should be specified in the ``[DEFAULT]`` section of +``glance-api.conf`` as follows. + + :: + + property_protection_file=/path/to/file + +If this config value is not specified, property protections are not enforced. +**If the path is invalid, glance api service will not start successfully.** + +The file may use either roles or policies to describe the property protections. +The config value should be specified in the ``[DEFAULT]`` section of +``glance-api.conf`` as follows. + + :: + + property_protection_rule_format= + +The default value for ``property_protection_rule_format`` is ``roles``. + +Property protections are applied in the order specified in the configuration +file. This means that if for example you specify a section with ``[.*]`` at +the top of the file, all proceeding sections will be ignored. + +If a property does not match any of the given rules, all operations will be +disabled for all roles. + +If an operation is misspelled or omitted, that operation will be disabled for +all roles. + +Disallowing ``read`` operations will also disallow ``update/delete`` operations. + +A successful HTTP request will return status ``200 OK``. If the user is not +permitted to perform the requested action, ``403 Forbidden`` will be returned. + +V1 API X-glance-registry-Purge-props +------------------------------------ + +Property protections will still be honoured if +``X-glance-registry-Purge-props`` is set to ``True``. That is, if you request +to modify properties with this header set to ``True``, you will not be able to +delete or update properties for which you do not have the relevant permissions. +Properties which are not included in the request and for which you do have +delete permissions will still be removed. + +Examples +-------- + +**Example 1**. Limit all property interactions to admin only. + + :: + + [.*] + create = admin + read = admin + update = admin + delete = admin + +**Example 2**. Allow both admins and users with the billing role to read +and modify properties prefixed with ``x_billing_code_``. Allow admins to +read and modify any properties. 
+ + :: + + [^x_billing_code_.*] + create = admin,billing + read = admin, billing + update = admin,billing + delete = admin,billing + + [.*] + create = admin + read = admin + update = admin + delete = admin + +**Example 3**. Limit all property interactions to admin only using policy +rule context_is_admin defined in policy.json. + + :: + + [.*] + create = context_is_admin + read = context_is_admin + update = context_is_admin + delete = context_is_admin diff --git a/code/daisy/doc/source/statuses.rst b/code/daisy/doc/source/statuses.rst new file mode 100755 index 00000000..46071369 --- /dev/null +++ b/code/daisy/doc/source/statuses.rst @@ -0,0 +1,95 @@ +.. + Copyright 2010 OpenStack Foundation + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Image Statuses +============== + +Images in Glance can be in one of the following statuses: + +* ``queued`` + + The image identifier has been reserved for an image in the Glance + registry. No image data has been uploaded to Glance and the image + size was not explicitly set to zero on creation. + +* ``saving`` + + Denotes that an image's raw data is currently being uploaded to Glance. + When an image is registered with a call to `POST /images` and there + is an `x-image-meta-location` header present, that image will never be in + the `saving` status (as the image data is already available in some other + location). + +* ``active`` + + Denotes an image that is fully available in Glance. This occurs when + the image data is uploaded, or the image size is explicitly set to + zero on creation. + +* ``killed`` + + Denotes that an error occurred during the uploading of an image's data, + and that the image is not readable. + +* ``deleted`` + + Glance has retained the information about the image, but it is no longer + available to use. An image in this state will be removed automatically + at a later date. + +* ``pending_delete`` + + This is similar to `deleted`, however, Glance has not yet removed the + image data. An image in this state is not recoverable. + + +.. figure:: /images/image_status_transition.png + :figwidth: 100% + :align: center + :alt: Image status transition + + This is a representation of how an image moves from one status to the next. + + * Add location from zero to more than one. + + * Remove location from one or more to zero by the PATCH method which is only + supported in v2. + +Task Statuses +============== + +Tasks in Glance can be in one of the following statuses: + +* ``pending`` + + The task identifier has been reserved for a task in Glance. + No processing has begun on it yet. + +* ``processing`` + + The task has been picked up by the underlying executor and is being run + using the backend Glance execution logic for that task type. + +* ``success`` + + Denotes that the task has had a successful run within Glance. The ``result`` + field of the task shows more details about the outcome. + +* ``failure`` + + Denotes that an error occurred during the execution of the task and it + cannot continue processing.
The ``message`` field of the task shows what the + error was. diff --git a/code/daisy/etc/daisy-api-paste.ini b/code/daisy/etc/daisy-api-paste.ini new file mode 100755 index 00000000..5cde9d73 --- /dev/null +++ b/code/daisy/etc/daisy-api-paste.ini @@ -0,0 +1,23 @@ +# Use this pipeline for no auth - DEFAULT +[pipeline:daisy-api] +pipeline = unauthenticated-context rootapp + +[pipeline:daisy-api-keystone] +pipeline = authtoken context rootapp + +[composite:rootapp] +paste.composite_factory = daisy.api:root_app_factory +/v1: apiv1app + +[app:apiv1app] +paste.app_factory = daisy.api.v1.router:API.factory + +[filter:unauthenticated-context] +paste.filter_factory = daisy.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +delay_auth_decision = true + +[filter:context] +paste.filter_factory = daisy.api.middleware.context:ContextMiddleware.factory diff --git a/code/daisy/etc/daisy-api.conf b/code/daisy/etc/daisy-api.conf new file mode 100755 index 00000000..c3630d93 --- /dev/null +++ b/code/daisy/etc/daisy-api.conf @@ -0,0 +1,818 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Maximum image size (in bytes) that may be uploaded through the +# Glance API server. Defaults to 1 TB. +# WARNING: this value should only be increased after careful consideration +# and must be set to a value under 8 EB (9223372036854775808). +#image_size_cap = 1099511627776 + +# Address to bind the API server +bind_host = 0.0.0.0 + +# Port the bind the API server to +bind_port = 19292 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/api.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +data_api = daisy.db.sqlalchemy.api + +# The number of child process workers that will be +# created to service API requests. The default will be +# equal to the number of CPUs available. (integer value) +#workers = 4 + +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large tokens +# (typically those generated by the Keystone v3 API with big service +# catalogs) +# max_header_line = 16384 + +# Maximum number of hosts install os at the same time. +max_parallel_os_number = 10 + +# Maximum number of hosts upgrade os at the same time. +max_parallel_os_upgrade_number = 10 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. +#allow_anonymous_access = False + +# Allow access to version 1 of glance api +#enable_v1_api = True + +# Allow access to version 2 of glance api +#enable_v2_api = True + +# Return the URL that references where the data is stored on +# the backend storage system. For example, if using the +# file system store a URL of 'file:///path/to/image' will +# be returned to the user in the 'direct_url' meta-data field. 
+# The default value is false. +#show_image_direct_url = False + +# Send headers containing user and tenant information when making requests to +# the v1 glance registry. This allows the registry to function as if a user is +# authenticated without the need to authenticate a user itself using the +# auth_token middleware. +# The default value is false. +#send_identity_headers = False + +# Supported values for the 'container_format' image attribute +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format = roles + +# This value sets what strategy will be used to determine the image location +# order. Currently two strategies are packaged with Glance 'location_order' +# and 'store_type'. +#location_strategy = location_order + + +# Public url to use for versions endpoint. The default is None, +# which will use the request's host_url attribute to populate the URL base. +# If Glance is operating behind a proxy, you will want to change this to +# represent the proxy's URL. +#public_endpoint= + +# http_keepalive option. If False, server will return the header +# "Connection: close", If True, server will return "Connection: Keep-Alive" +# in its responses. In order to close the client socket connection +# explicitly after the response is sent and read successfully by the client, +# you simply have to set this option to False when you create a wsgi server. +#http_keepalive = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + + +# Digest algorithm which will be used for digital signature, the default is +# sha1 in Kilo for a smooth upgrade process, and it will be updated with +# sha256 in next release(L). Use command +# "openssl list-message-digest-algorithms" to get the available algorithms +# supported by the version of OpenSSL on the platform. Examples are 'sha1', +# 'sha256', 'sha512', etc. 
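+# A hedged illustration (the file path is an assumption, nothing here ships
+# such a file): to enforce role-based rules like the property-protection
+# examples in the documentation, point the API at a rules file and keep the
+# default 'roles' format:
+#   property_protection_file = /etc/daisy/property-protections.conf
+#   property_protection_rule_format = roles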
+#digest_algorithm = sha1 + +# ============ Registry Options =============================== + +# Address to find the registry server +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 19191 + +# What protocol to use when connecting to the registry server? +# Set to https for secure HTTP communication +registry_client_protocol = http + +# The path to the key file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file +#registry_client_key_file = /path/to/key/file + +# The path to the cert file to use in SSL connections to the +# registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file +#registry_client_cert_file = /path/to/cert/file + +# The path to the certifying authority cert file to use in SSL connections +# to the registry server, if any. Alternately, you may set the +# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file +#registry_client_ca_file = /path/to/ca/file + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's equivalent of +# specifying --insecure on the command line using glanceclient for the API +# Default: False +#registry_client_insecure = False + +# The period of time, in seconds, that the API server will wait for a registry +# request to complete. A value of '0' implies no timeout. +# Default: 600 +#registry_client_timeout = 600 + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# Pass the user's token through for API requests to the registry. +# Default: True +#use_user_token = True + +# If 'use_user_token' is not in effect then admin credentials +# can be specified. Requests to the registry on behalf of +# the API will use these credentials. +# Admin user name +#admin_user = None +# Admin password +#admin_password = None +# Admin tenant name +#admin_tenant_name = None +# Keystone endpoint +#auth_url = None +# Keystone region +#auth_region = None +# Auth strategy +#auth_strategy = keystone + +# ============ Notification System Options ===================== + +# Driver or drivers to handle sending notifications. Set to +# 'messaging' to send notifications to a message queue. +# notification_driver = noop + +# Default publisher_id for outgoing notifications. +# default_publisher_id = image.localhost + +# List of disabled notifications. A notification can be given either as a +# notification type to disable a single event, or as a notification group +# prefix to disable all events within a group. +# Example: if this config option is set to +# ["image.create", "metadef_namespace"], then "image.create" notification will +# not be sent after image is created and none of the notifications for +# metadefinition namespaces will be sent. 
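+# A hedged example of actually emitting notifications (values mirror the
+# commented defaults in this file; adjust for the real broker):
+#   notification_driver = messaging
+#   rpc_backend = rabbit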
+# disabled_notifications = [] + +# Messaging driver used for 'messaging' notifications driver +# rpc_backend = 'rabbit' + +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +rabbit_host = localhost +rabbit_port = 5672 +rabbit_use_ssl = false +rabbit_userid = guest +rabbit_password = guest +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +qpid_notification_exchange = glance +qpid_notification_topic = notifications +qpid_hostname = localhost +qpid_port = 5672 +qpid_username = +qpid_password = +qpid_sasl_mechanisms = +qpid_reconnect_timeout = 0 +qpid_reconnect_limit = 0 +qpid_reconnect_interval_min = 0 +qpid_reconnect_interval_max = 0 +qpid_reconnect_interval = 0 +qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +qpid_protocol = tcp +qpid_tcp_nodelay = True + +# ============ Delayed Delete Options ============================= + +# Turn on/off delayed delete +delayed_delete = False + +# Delayed delete time in seconds +scrub_time = 43200 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-scrubber.conf +scrubber_datadir = /var/lib/glance/scrubber + +# =============== Quota Options ================================== + +# The maximum number of image members allowed per image +#image_member_quota = 128 + +# The maximum number of image properties allowed per image +#image_property_quota = 128 + +# The maximum number of tags allowed per image +#image_tag_quota = 128 + +# The maximum number of locations allowed per image +#image_location_quota = 10 + +# Set a system wide quota for every user. This value is the total number +# of bytes that a user can use across all storage systems. A value of +# 0 means unlimited. +#user_storage_quota = 0 + +# =============== Image Cache Options ============================= + +# Base directory that the Image Cache uses +image_cache_dir = /var/lib/glance/image-cache/ + +# =============== Policy Options ================================== + +[oslo_policy] +# The JSON file that defines policies. +# Deprecated group/name - [DEFAULT]/policy_file +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. +# Deprecated group/name - [DEFAULT]/policy_default_rule +#policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path +# defined by the config_dir option, or absolute paths. +# The file defined by policy_file must exist for these +# directories to be searched. +# Deprecated group/name - [DEFAULT]/policy_dirs +#policy_dirs = policy.d + +# =============== Database Options ================================= + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. 
To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[oslo_concurrency] + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group/name - [DEFAULT]/disable_process_locking +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified +# directory should only be writable by the user running the processes +# that need locking. It could be read from environment variable +# OSLO_LOCK_PATH. This setting needs to be the same for both +# glance-scrubber and glance-api service. Default to a temp directory. 
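+# For example (the directory itself is illustrative), daisy-api and the
+# scrubber could share one dedicated, locked-down directory:
+#   lock_path = /var/lib/daisy/locks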
+# Deprecated group/name - [DEFAULT]/lock_path (string value) +#lock_path = /tmp + +[keystone_authtoken] +identity_uri = http://127.0.0.1:35357 +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% +revocation_cache_time = 10 + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-api-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone], you would configure the flavor below +# as 'keystone'. +#flavor= + +[store_type_location_strategy] +# The scheme list to use to get store preference order. The scheme must be +# registered by one of the stores defined by the 'stores' config option. +# This option will be applied when you using 'store_type' option as image +# location strategy defined by the 'location_strategy' config option. +#store_type_preference = + +[profiler] +# If False fully disable profiling feature. +#enabled = False + +# If False doesn't trace SQL requests. +#trace_sqlalchemy = False + +[task] +# ================= Glance Tasks Options ============================ + +# Specifies how long (in hours) a task is supposed to live in the tasks DB +# after succeeding or failing before getting soft-deleted. +# The default value for task_time_to_live is 48 hours. +# task_time_to_live = 48 + +# Specifies which task executor to be used to run the task scripts. +# The default value for task_executor is taskflow. +# task_executor = taskflow + +# Work dir for asynchronous task operations. The directory set here +# will be used to operate over images - normally before they are +# imported in the destination store. When providing work dir, make sure +# enough space is provided for concurrent tasks to run efficiently +# without running out of space. A rough estimation can be done by +# multiplying the number of `max_workers` - or the N of workers running +# - by an average image size (e.g 500MB). The image size estimation +# should be done based on the average size in your deployment. Note that +# depending on the tasks running you may need to multiply this number by +# some factor depending on what the task does. For example, you may want +# to double the available size if image conversion is enabled. All this +# being said, remember these are just estimations and you should do them +# based on the worst case scenario and be prepared to act in case they +# were wrong. +# work_dir=None + +# Specifies the maximum number of eventlet threads which can be spun up by +# the eventlet based task executor to perform execution of Glance tasks. +# DEPRECATED: Use [taskflow_executor]/max_workers instead. +# eventlet_executor_pool_size = 1000 + +[taskflow_executor] +# The mode in which the engine will run. Can be 'default', 'serial', +# 'parallel' or 'worker-based' +#engine_mode = serial + +# The number of parallel activities executed at the same time by +# the engine. The value can be greater than one when the engine mode is +# 'parallel' or 'worker-based', otherwise this value will be ignored. +#max_workers = 10 + +[glance_store] +# List of which store classes and store class locations are +# currently known to glance at startup. 
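+# A hedged aside on the [keystone_authtoken] and [paste_deploy] sections above,
+# whose defaults leave the API unauthenticated: to require keystone auth, pick
+# the daisy-api-keystone pipeline from daisy-api-paste.ini and replace the
+# %SERVICE_*% placeholders with real service credentials, e.g.:
+#   [paste_deploy]
+#   flavor = keystone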
+# Deprecated group/name - [DEFAULT]/known_stores +# Existing but disabled stores: +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.gridfs.Store, +# glance.store.vmware_datastore.Store, +#stores = glance.store.filesystem.Store, +# glance.store.http.Store + +# Which backend scheme should Glance use by default is not specified +# in a request to add a new image to Glance? Known schemes are determined +# by the stores option. +# Deprecated group/name - [DEFAULT]/default_store +# Default: 'file' +default_store = file + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/daisy/images/ + +# A list of directories where image data can be stored. +# This option may be specified multiple times for specifying multiple store +# directories. Either one of filesystem_store_datadirs or +# filesystem_store_datadir option is required. A priority number may be given +# after each directory entry, separated by a ":". +# When adding an image, the highest priority directory will be selected, unless +# there is not enough space available in cases where the image size is already +# known. If no priority is given, it is assumed to be zero and the directory +# will be considered for selection last. If multiple directories have the same +# priority, then the one with the most free space available is selected. +# If same store is specified multiple times then BadStoreConfiguration +# exception will be raised. +#filesystem_store_datadirs = /var/lib/glance/images/:1 + +# A path to a JSON file that contains metadata describing the storage +# system. When show_multiple_locations is True the information in this +# file will be returned with any location that is contained in this +# store. +#filesystem_store_metadata_file = None + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# swift_store_config_file = glance-swift.conf +# This file contains references for each of the configured +# Swift accounts/backing stores. If used, this option can prevent +# credentials being stored in the database. 
Using Swift references +# is disabled if this config is left blank. + +# The reference to the default Swift parameters to use for adding new images. +# default_swift_reference = 'ref1' + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# If set, the configured endpoint will be used. If None, the storage URL +# from the auth response will be used. The location of an object is +# obtained by appending the container and object to the configured URL. +# +# swift_store_endpoint = https://www.example.com/v1/not_a_container +#swift_store_endpoint = + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# If set to an integer value between 1 and 32, a single-tenant store will +# use multiple containers to store images. If set to the default value of 0, +# only a single container will be used. Multi-tenant stores are not affected +# by this option. The max number of containers that will be used to store +# images is approximately 16^N where N is the value of this option. Discuss +# the impact of this with your swift deployment team, as this option is only +# beneficial in the largest of deployments where swift rate limiting can lead +# to unwanted throttling on a single container. +#swift_store_multiple_containers_seed = 0 + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). +#swift_store_ssl_compression = True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count = 0 + +# Bypass SSL verification for Swift +#swift_store_auth_insecure = False + +# The path to a CA certificate bundle file to use for SSL verification when +# communicating with Swift. +#swift_store_cacert = + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = s3.amazonaws.com + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. 
Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +#s3_store_object_buffer_dir = /path/to/dir + +# When forming a bucket url, boto will either set the bucket name as the +# subdomain or as the first token of the path. Amazon's S3 service will +# accept it as the subdomain, but Swift's S3 middleware requires it be +# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'. +#s3_store_bucket_url_format = subdomain + +# Size, in MB, should S3 start chunking image files +# and do a multipart upload in S3. The default is 100MB. +#s3_store_large_object_size = 100 + +# Multipart upload part size, in MB, should S3 use when uploading +# parts. The size must be greater than or equal to +# 5MB. The default is 10MB. +#s3_store_large_object_chunk_size = 10 + +# The number of thread pools to perform a multipart upload +# in S3. The default is 10. +#s3_store_thread_pools = 10 + +# ============ RBD Store Options ============================= + +# Ceph configuration file path +# If using cephx authentication, this file should +# include a reference to the right keyring +# in a client. section +#rbd_store_ceph_conf = /etc/ceph/ceph.conf + +# RADOS user to authenticate as (only applicable if using cephx) +# If , a default will be chosen based on the client. section +# in rbd_store_ceph_conf +#rbd_store_user = + +# RADOS pool in which images are stored +#rbd_store_pool = images + +# RADOS images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +#rbd_store_chunk_size = 8 + +# ============ Sheepdog Store Options ============================= + +sheepdog_store_address = localhost + +sheepdog_store_port = 7000 + +# Images will be chunked into objects of this size (in megabytes). +# For best performance, this should be a power of two +sheepdog_store_chunk_size = 64 + +# ============ Cinder Store Options =============================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info = volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file = + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries = 3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure = False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. 
127.0.0.1, 127.0.0.1:443, www.vmware-infra.com +#vmware_server_host = + +# Server username (string value) +#vmware_server_username = + +# Server password (string value) +#vmware_server_password = + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. +# Deprecated in favor of vmware_datastores. +#vmware_datacenter_path = + +# Datastore associated with the datacenter (string value) +# Deprecated in favor of vmware_datastores. +#vmware_datastore_name = + +# A list of datastores where the image can be stored. +# This option may be specified multiple times for specifying multiple +# datastores. Either one of vmware_datastore_name or vmware_datastores is +# required. The datastore name should be specified after its datacenter +# path, separated by ":". An optional weight may be given after the datastore +# name, separated again by ":". Thus, the required format becomes +# ::. +# When adding an image, the datastore with highest weight will be selected, +# unless there is not enough free space available in cases where the image size +# is already known. If no weight is given, it is assumed to be zero and the +# directory will be considered for selection last. If multiple datastores have +# the same weight, then the one with the most free space available is selected. +#vmware_datastores = + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count = 10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval = 5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir = /openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure = False diff --git a/code/daisy/etc/daisy-cache.conf b/code/daisy/etc/daisy-cache.conf new file mode 100755 index 00000000..00f7f497 --- /dev/null +++ b/code/daisy/etc/daisy-cache.conf @@ -0,0 +1,265 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/image-cache.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Directory that the Image Cache writes data to +image_cache_dir = /var/lib/daisy/image-cache/ + +# Number of seconds after which we should consider an incomplete image to be +# stalled and eligible for reaping +image_cache_stall_time = 86400 + +# The upper limit (the maximum size of accumulated cache in bytes) beyond +# which pruner, if running, starts cleaning the images cache. +image_cache_max_size = 10737418240 + +# Address to find the registry server +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# List of which store classes and store class locations are +# currently known to glance at startup. 
+# known_stores = glance.store.filesystem.Store, +# glance.store.http.Store, +# glance.store.rbd.Store, +# glance.store.s3.Store, +# glance.store.swift.Store, +# glance.store.sheepdog.Store, +# glance.store.cinder.Store, +# glance.store.vmware_datastore.Store, + +# ============ Filesystem Store Options ======================== + +# Directory that the Filesystem backend store +# writes image data to +filesystem_store_datadir = /var/lib/glance/images/ + +# ============ Swift Store Options ============================= + +# Version of the authentication service to use +# Valid versions are '2' for keystone and '1' for swauth and rackspace +swift_store_auth_version = 2 + +# Address where the Swift authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'https://' +# For swauth, use something like '127.0.0.1:8080/v1.0/' +swift_store_auth_address = 127.0.0.1:5000/v2.0/ + +# User to authenticate against the Swift authentication service +# If you use Swift authentication service, set it to 'account':'user' +# where 'account' is a Swift storage account and 'user' +# is a user in that account +swift_store_user = jdoe:jdoe + +# Auth key for the user authenticating against the +# Swift authentication service +swift_store_key = a86850deb2742ec3cb41518e26aa2d89 + +# Container within the account that the account should use +# for storing images in Swift +swift_store_container = glance + +# Do we create the container if it does not exist? +swift_store_create_container_on_put = False + +# What size, in MB, should Glance start chunking image files +# and do a large object manifest in Swift? By default, this is +# the maximum object size in Swift, which is 5GB +swift_store_large_object_size = 5120 + +# This file contains references for each of the configured +# Swift accounts/backing stores. If used, this option can prevent +# credentials being stored in the database. Using Swift references +# is disabled if this config is left blank. +#swift_store_config_file = glance-swift.conf + +# The reference to the default Swift parameters to use for adding new images. +#default_swift_reference = 'ref1' + +# When doing a large object manifest, what size, in MB, should +# Glance write chunks to Swift? This amount of data is written +# to a temporary disk buffer during the process of chunking +# the image file, and the default is 200MB +swift_store_large_object_chunk_size = 200 + +# If set, the configured endpoint will be used. If None, the storage URL +# from the auth response will be used. The location of an object is +# obtained by appending the container and object to the configured URL. +# +# swift_store_endpoint = https://www.example.com/v1/not_a_container +swift_store_endpoint = None + +# If set to True enables multi-tenant storage mode which causes Glance images +# to be stored in tenant specific Swift accounts. +#swift_store_multi_tenant = False + +# A list of swift ACL strings that will be applied as both read and +# write ACLs to the containers created by Glance in multi-tenant +# mode. This grants the specified tenants/users read and write access +# to all newly created image objects. The standard swift ACL string +# formats are allowed, including: +# : +# : +# *: +# Multiple ACLs can be combined using a comma separated list, for +# example: swift_store_admin_tenants = service:glance,*:admin +#swift_store_admin_tenants = + +# The region of the swift endpoint to be used for single tenant. 
This setting +# is only necessary if the tenant has multiple swift endpoints. +#swift_store_region = + +# If set to False, disables SSL layer compression of https swift requests. +# Setting to 'False' may improve performance for images which are already +# in a compressed format, eg qcow2. If set to True, enables SSL layer +# compression (provided it is supported by the target swift proxy). +#swift_store_ssl_compression = True + +# The number of times a Swift download will be retried before the +# request fails +#swift_store_retry_get_count = 0 + +# Bypass SSL verification for Swift +#swift_store_auth_insecure = False + +# The path to a CA certificate bundle file to use for SSL verification when +# communicating with Swift. +#swift_store_cacert = + +# ============ S3 Store Options ============================= + +# Address where the S3 authentication service lives +# Valid schemes are 'http://' and 'https://' +# If no scheme specified, default to 'http://' +s3_store_host = s3.amazonaws.com + +# User to authenticate against the S3 authentication service +s3_store_access_key = <20-char AWS access key> + +# Auth key for the user authenticating against the +# S3 authentication service +s3_store_secret_key = <40-char AWS secret key> + +# Container within the account that the account should use +# for storing images in S3. Note that S3 has a flat namespace, +# so you need a unique bucket name for your glance images. An +# easy way to do this is append your AWS access key to "glance". +# S3 buckets in AWS *must* be lowercased, so remember to lowercase +# your AWS access key if you use it in your bucket name below! +s3_store_bucket = glance + +# Do we create the bucket if it does not exist? +s3_store_create_bucket_on_put = False + +# When sending images to S3, the data will first be written to a +# temporary buffer on disk. By default the platform's temporary directory +# will be used. If required, an alternative directory can be specified here. +# s3_store_object_buffer_dir = /path/to/dir + +# ============ Cinder Store Options =========================== + +# Info to match when looking for cinder in the service catalog +# Format is : separated values of the form: +# :: (string value) +#cinder_catalog_info = volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certicates file to use for cinder client requests +# (string value) +#cinder_ca_certificates_file = + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries = 3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure = False + +# ============ VMware Datastore Store Options ===================== + +# ESX/ESXi or vCenter Server target system. +# The server value can be an IP address or a DNS name +# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com +#vmware_server_host = + +# Server username (string value) +#vmware_server_username = + +# Server password (string value) +#vmware_server_password = + +# Inventory path to a datacenter (string value) +# Value optional when vmware_server_ip is an ESX/ESXi host: if specified +# should be `ha-datacenter`. 
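+# A hedged example for a standalone ESXi host (the address is a placeholder):
+#   vmware_server_host = 192.0.2.10
+#   vmware_datacenter_path = ha-datacenter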
+#vmware_datacenter_path = + +# Datastore associated with the datacenter (string value) +#vmware_datastore_name = + +# The number of times we retry on failures +# e.g., socket error, etc (integer value) +#vmware_api_retry_count = 10 + +# The interval used for polling remote tasks +# invoked on VMware ESX/VC server in seconds (integer value) +#vmware_task_poll_interval = 5 + +# Absolute path of the folder containing the images in the datastore +# (string value) +#vmware_store_image_dir = /openstack_glance + +# Allow to perform insecure SSL requests to the target system (boolean value) +#vmware_api_insecure = False + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +# metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# =============== Policy Options ============================== + +[oslo_policy] +# The JSON file that defines policies. +# Deprecated group/name - [DEFAULT]/policy_file +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. +# Deprecated group/name - [DEFAULT]/policy_default_rule +#policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path +# defined by the config_dir option, or absolute paths. +# The file defined by policy_file must exist for these +# directories to be searched. +# Deprecated group/name - [DEFAULT]/policy_dirs +#policy_dirs = policy.d diff --git a/code/daisy/etc/daisy-manage.conf b/code/daisy/etc/daisy-manage.conf new file mode 100755 index 00000000..beccaada --- /dev/null +++ b/code/daisy/etc/daisy-manage.conf @@ -0,0 +1,167 @@ +[DEFAULT] + +# +# From glance.manage +# + +# Print debugging output (set logging level to DEBUG instead of +# default WARNING level). (boolean value) +#debug = false + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Format string for %%(asctime)s in log records. Default: %(default)s +# . (string value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) The base directory used for relative --log-file paths. +# (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# (Optional) Name of log file to output to. If no default is set, +# logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +log_file = /var/log/daisy/manage.log + +# DEPRECATED. A logging.Formatter log message format string which may +# use any of the available logging.LogRecord attributes. This option +# is deprecated. Please use logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format = + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + +# Use syslog for logging. Existing syslog format is DEPRECATED during +# I, and will change in J to honor RFC5424. (boolean value) +#use_syslog = false + +# (Optional) Enables or disables syslog rfc5424 format for logging. If +# enabled, prefixes the MSG part of the syslog message with APP-NAME +# (RFC5424). The format without the APP-NAME is deprecated in I, and +# will be removed in J. 
(boolean value) +#use_syslog_rfc_format = false + +# Print more verbose output (set logging level to INFO instead of +# default WARNING level). (boolean value) +#verbose = false + + +[database] + +# +# From oslo.db +# + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. +# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# Verbosity of SQL debugging information: 0=None, 100=Everything. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If True, increases the interval between database connection retries +# up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# Maximum database connection retries before error is raised. Set to +# -1 to specify an infinite retry count. (integer value) +#db_max_retries = 20 + +# If db_inc_retry_interval is set, the maximum seconds between +# database connection retries. (integer value) +#db_max_retry_interval = 10 + +# Seconds between database connection retries. (integer value) +#db_retry_interval = 1 + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# If set, use this value for max_overflow with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Maximum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Minimum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +#mysql_sql_mode = TRADITIONAL + +# If set, use this value for pool_timeout with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +#slave_connection = + +# The file name to use with SQLite. (string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode. 
(boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous = true + +# Enable the experimental use of database reconnect on connection +# lost. (boolean value) +#use_db_reconnect = false + +# +# From oslo.db.concurrency +# + +# Enable the experimental use of thread pooling for all DB API calls +# (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool = false diff --git a/code/daisy/etc/daisy-orchestration.conf b/code/daisy/etc/daisy-orchestration.conf new file mode 100755 index 00000000..8f25106e --- /dev/null +++ b/code/daisy/etc/daisy-orchestration.conf @@ -0,0 +1,21 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/orchestration.log + +# Backlog requests when creating socket +backlog = 4096 + +# interval second in auto scale +auto_scale_interval=60 + diff --git a/code/daisy/etc/daisy-registry-paste.ini b/code/daisy/etc/daisy-registry-paste.ini new file mode 100755 index 00000000..26bf3e4a --- /dev/null +++ b/code/daisy/etc/daisy-registry-paste.ini @@ -0,0 +1,30 @@ +# Use this pipeline for no auth - DEFAULT +[pipeline:daisy-registry] +pipeline = osprofiler unauthenticated-context registryapp + +# Use this pipeline for keystone auth +[pipeline:daisy-registry-keystone] +pipeline = osprofiler authtoken context registryapp + +# Use this pipeline for authZ only. This means that the registry will treat a +# user as authenticated without making requests to keystone to reauthenticate +# the user. +[pipeline:daisy-registry-trusted-auth] +pipeline = osprofiler context registryapp + +[app:registryapp] +paste.app_factory = daisy.registry.api:API.factory + +[filter:context] +paste.filter_factory = daisy.api.middleware.context:ContextMiddleware.factory + +[filter:unauthenticated-context] +paste.filter_factory = daisy.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory + +[filter:osprofiler] +paste.filter_factory = osprofiler.web:WsgiMiddleware.factory +hmac_keys = SECRET_KEY +enabled = yes diff --git a/code/daisy/etc/daisy-registry.conf b/code/daisy/etc/daisy-registry.conf new file mode 100755 index 00000000..4cfe2adb --- /dev/null +++ b/code/daisy/etc/daisy-registry.conf @@ -0,0 +1,267 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False +verbose = True + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Address to bind the registry server +bind_host = 0.0.0.0 + +# Port the bind the registry server to +bind_port = 19191 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/registry.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# API to use for accessing data. Default value points to sqlalchemy +# package. 
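+# A hedged note: daisy-api.conf overrides this with
+# data_api = daisy.db.sqlalchemy.api, so the registry would presumably use the
+# daisy path as well if this option is uncommented:
+#   data_api = daisy.db.sqlalchemy.api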
+#data_api = glance.db.sqlalchemy.api + +# The number of child process workers that will be +# created to service Registry requests. The default will be +# equal to the number of CPUs available. (integer value) +#workers = None + +# Enable Registry API versions individually or simultaneously +#enable_v1_registry = True +#enable_v2_registry = True + +# Limit the api to return `param_limit_max` items in a call to a container. If +# a larger `limit` query param is provided, it will be reduced to this value. +api_limit_max = 1000 + +# If a `limit` query param is not provided in an api request, it will +# default to `limit_param_default` +limit_param_default = 25 + +# Role used to identify an authenticated user as administrator +#admin_role = admin + +# Enable DEBUG log messages from sqlalchemy which prints every database +# query and response. +# Default: False +#sqlalchemy_debug = True + +# http_keepalive option. If False, server will return the header +# "Connection: close", If True, server will return "Connection: Keep-Alive" +# in its responses. In order to close the client socket connection +# explicitly after the response is sent and read successfully by the client, +# you simply have to set this option to False when you create a wsgi server. +#http_keepalive = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. +#syslog_log_facility = LOG_LOCAL1 + +# ================= SSL Options =============================== + +# Certificate file to use when starting registry server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting registry server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# ============ Notification System Options ===================== + +# Driver or drivers to handle sending notifications. Set to +# 'messaging' to send notifications to a message queue. +# notification_driver = noop + +# Default publisher_id for outgoing notifications. +# default_publisher_id = image.localhost + +# Messaging driver used for 'messaging' notifications driver +# rpc_backend = 'rabbit' + +# Configuration options if sending notifications via rabbitmq (these are +# the defaults) +rabbit_host = localhost +rabbit_port = 5672 +rabbit_use_ssl = false +rabbit_userid = guest +rabbit_password = guest +rabbit_virtual_host = / +rabbit_notification_exchange = glance +rabbit_notification_topic = notifications +rabbit_durable_queues = False + +# Configuration options if sending notifications via Qpid (these are +# the defaults) +qpid_notification_exchange = glance +qpid_notification_topic = notifications +qpid_hostname = localhost +qpid_port = 5672 +qpid_username = +qpid_password = +qpid_sasl_mechanisms = +qpid_reconnect_timeout = 0 +qpid_reconnect_limit = 0 +qpid_reconnect_interval_min = 0 +qpid_reconnect_interval_max = 0 +qpid_reconnect_interval = 0 +qpid_heartbeat = 5 +# Set to 'ssl' to enable SSL +qpid_protocol = tcp +qpid_tcp_nodelay = True + + +# =============== Policy Options ============================== + +[oslo_policy] +# The JSON file that defines policies. +# Deprecated group/name - [DEFAULT]/policy_file +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. 
+# Deprecated group/name - [DEFAULT]/policy_default_rule +#policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path +# defined by the config_dir option, or absolute paths. +# The file defined by policy_file must exist for these +# directories to be searched. +# Deprecated group/name - [DEFAULT]/policy_dirs +#policy_dirs = policy.d + +# ================= Database Options ========================== + +[database] +# The file name to use with SQLite (string value) +#sqlite_db = glance.sqlite + +# If True, SQLite uses synchronous mode (boolean value) +#sqlite_synchronous = True + +# The backend to use for db (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQL mode to be used for MySQL sessions. This option, +# including the default, overrides any server-set SQL mode. To +# use whatever SQL mode is set by the server configuration, +# set this to no value. Example: mysql_sql_mode= (string +# value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle sql connections are reaped (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a sql connection +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information. 
0=None, +# 100=Everything (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on +# connection lost (boolean value) +#use_db_reconnect = False + +# seconds between db connection retries (integer value) +#db_retry_interval = 1 + +# Whether to increase interval between db connection retries, +# up to db_max_retry_interval (boolean value) +#db_inc_retry_interval = True + +# max seconds between db connection retries, if +# db_inc_retry_interval is enabled (integer value) +#db_max_retry_interval = 10 + +# maximum db connection retries before error is raised. +# (setting -1 implies an infinite retry count) (integer value) +#db_max_retries = 20 + +[keystone_authtoken] +identity_uri = http://127.0.0.1:35357 +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +#config_file = glance-registry-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +#flavor= + +[profiler] +# If False fully disable profiling feature. +#enabled = False + +# If False doesn't trace SQL requests. +#trace_sqlalchemy = False diff --git a/code/daisy/etc/daisy-scrubber.conf b/code/daisy/etc/daisy-scrubber.conf new file mode 100755 index 00000000..5e343385 --- /dev/null +++ b/code/daisy/etc/daisy-scrubber.conf @@ -0,0 +1,132 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +#debug = False + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/scrubber.log + +# Send logs to syslog (/dev/log) instead of to file specified by `log_file` +#use_syslog = False + +# Should we run our own loop or rely on cron/scheduler to run us +daemon = False + +# Loop time between checking for new items to schedule for delete +wakeup_time = 300 + +# Directory that the scrubber will use to remind itself of what to delete +# Make sure this is also set in glance-api.conf +scrubber_datadir = /var/lib/daisy/scrubber + +# Only one server in your deployment should be designated the cleanup host +cleanup_scrubber = False + +# pending_delete items older than this time are candidates for cleanup +cleanup_scrubber_time = 86400 + +# Address to find the registry server for cleanups +registry_host = 0.0.0.0 + +# Port the registry server is listening on +registry_port = 9191 + +# Auth settings if using Keystone +# auth_url = http://127.0.0.1:5000/v2.0/ +# admin_tenant_name = %SERVICE_TENANT_NAME% +# admin_user = %SERVICE_USER% +# admin_password = %SERVICE_PASSWORD% + +# API to use for accessing data. 
Default value points to sqlalchemy +# package, it is also possible to use: glance.db.registry.api +#data_api = glance.db.sqlalchemy.api + +# ================= Security Options ========================== + +# AES key for encrypting store 'location' metadata, including +# -- if used -- Swift or S3 credentials +# Should be set to a random string of length 16, 24 or 32 bytes +#metadata_encryption_key = <16, 24 or 32 char registry metadata key> + +# =============== Policy Options ============================== + +# The JSON file that defines policies. +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. +#policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path +# defined by the config_dir option, or absolute paths. +# The file defined by policy_file must exist for these +# directories to be searched. +#policy_dirs = policy.d + +# ================= Database Options ===============+========== + +[database] + +# The SQLAlchemy connection string used to connect to the +# database (string value) +#connection=sqlite:////glance/openstack/common/db/$sqlite_db + +# The SQLAlchemy connection string used to connect to the +# slave database (string value) +#slave_connection= + +# timeout before idle sql connections are reaped (integer +# value) +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool +# (integer value) +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool +# (integer value) +#max_pool_size= + +# maximum db connection retries during startup. (setting -1 +# implies an infinite retry count) (integer value) +#max_retries=10 + +# interval between retries of opening a sql connection +# (integer value) +#retry_interval=10 + +# If set, use this value for max_overflow with sqlalchemy +# (integer value) +#max_overflow= + +# Verbosity of SQL debugging information. 0=None, +# 100=Everything (integer value) +#connection_debug=0 + +# Add python stack traces to SQL as comment strings (boolean +# value) +#connection_trace=false + +# If set, use this value for pool_timeout with sqlalchemy +# (integer value) +#pool_timeout= + +[oslo_concurrency] + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group/name - [DEFAULT]/disable_process_locking +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified +# directory should only be writable by the user running the processes +# that need locking. It could be read from environment variable +# OSLO_LOCK_PATH. This setting needs to be the same for both +# glance-scrubber and glance-api service. Default to a temp directory. 
+# Deprecated group/name - [DEFAULT]/lock_path (string value) +#lock_path = /tmp diff --git a/code/daisy/etc/daisy-search-paste.ini b/code/daisy/etc/daisy-search-paste.ini new file mode 100755 index 00000000..fb2eb712 --- /dev/null +++ b/code/daisy/etc/daisy-search-paste.ini @@ -0,0 +1,23 @@ +# Use this pipeline for no auth - DEFAULT +[pipeline:glance-search] +pipeline = unauthenticated-context rootapp + +[pipeline:glance-search-keystone] +pipeline = authtoken context rootapp + +[composite:rootapp] +paste.composite_factory = glance.api:root_app_factory +/v0.1: apiv0_1app + +[app:apiv0_1app] +paste.app_factory = glance.search.api.v0_1.router:API.factory + +[filter:unauthenticated-context] +paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory + +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +delay_auth_decision = true + +[filter:context] +paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory diff --git a/code/daisy/etc/daisy-search.conf b/code/daisy/etc/daisy-search.conf new file mode 100755 index 00000000..27ce2e4b --- /dev/null +++ b/code/daisy/etc/daisy-search.conf @@ -0,0 +1,116 @@ +[DEFAULT] +# Show more verbose log output (sets INFO log level output) +#verbose = False + +# Show debugging output in logs (sets DEBUG log level output) +debug = True + +# Address to bind the GRAFFITI server +bind_host = 0.0.0.0 + +# Port to bind the server to +bind_port = 9393 + +# Log to this file. Make sure you do not set the same log file for both the API +# and registry servers! +# +# If `log_file` is omitted and `use_syslog` is false, then log messages are +# sent to stdout as a fallback. +log_file = /var/log/daisy/search.log + +# Backlog requests when creating socket +backlog = 4096 + +# TCP_KEEPIDLE value in seconds when creating socket. +# Not supported on OS X. +#tcp_keepidle = 600 + +# Property Protections config file +# This file contains the rules for property protections and the roles/policies +# associated with it. +# If this config value is not specified, by default, property protections +# won't be enforced. +# If a value is specified and the file is not found, then the glance-api +# service will not start. +#property_protection_file = + +# Specify whether 'roles' or 'policies' are used in the +# property_protection_file. +# The default value for property_protection_rule_format is 'roles'. +#property_protection_rule_format = roles + +# http_keepalive option. If False, server will return the header +# "Connection: close", If True, server will return "Connection: Keep-Alive" +# in its responses. In order to close the client socket connection +# explicitly after the response is sent and read successfully by the client, +# you simply have to set this option to False when you create a wsgi server. +#http_keepalive = True + +# ================= Syslog Options ============================ + +# Send logs to syslog (/dev/log) instead of to file specified +# by `log_file` +#use_syslog = False + +# Facility to use. If unset defaults to LOG_USER. 
+#syslog_log_facility = LOG_LOCAL0 + +# ================= SSL Options =============================== + +# Certificate file to use when starting API server securely +#cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +#key_file = /path/to/keyfile + +# CA certificate file to use to verify connecting clients +#ca_file = /path/to/cafile + +# =============== Policy Options ================================== + +# The JSON file that defines policies. +policy_file = search-policy.json + +# Default rule. Enforced when a requested rule is not found. +#policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path +# defined by the config_dir option, or absolute paths. +# The file defined by policy_file must exist for these +# directories to be searched. +#policy_dirs = policy.d + +[paste_deploy] +# Name of the paste configuration file that defines the available pipelines +# config_file = glance-search-paste.ini + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-registry-keystone], you would configure the flavor below +# as 'keystone'. +#flavor= +# + +[database] +# The SQLAlchemy connection string used to connect to the +# database (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +[keystone_authtoken] +identity_uri = http://127.0.0.1:35357 +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% +revocation_cache_time = 10 + +# =============== ElasticSearch Options ======================= + +[elasticsearch] +# List of nodes where Elasticsearch instances are running. A single node +# should be defined as an IP address and port number. +# The default is ['127.0.0.1:9200'] +#hosts = ['127.0.0.1:9200'] diff --git a/code/daisy/etc/daisy-swift.conf.sample b/code/daisy/etc/daisy-swift.conf.sample new file mode 100755 index 00000000..0bd3162e --- /dev/null +++ b/code/daisy/etc/daisy-swift.conf.sample @@ -0,0 +1,21 @@ +# glance-swift.conf.sample +# +# This file is an example config file when +# multiple swift accounts/backing stores are enabled. +# +# Specify the reference name in [] +# For each section, specify the auth_address, user and key. +# +# WARNING: +# * If any of auth_address, user or key is not specified, +# the glance-api's swift store will fail to configure + +[ref1] +user = tenant:user1 +key = key1 +auth_address = auth123@example.com + +[ref2] +user = user2 +key = key2 +auth_address = http://auth345@example.com diff --git a/code/daisy/etc/metadefs/README b/code/daisy/etc/metadefs/README new file mode 100755 index 00000000..39d25b30 --- /dev/null +++ b/code/daisy/etc/metadefs/README @@ -0,0 +1,4 @@ +This directory contains predefined namespaces for Glance Metadata Definitions +Catalog. Files from this directory can be loaded into the database using +db_load_metadefs command for glance-manage. Similarly you can unload the +definitions using db_unload_metadefs command. 
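
The namespace files added below are consumed exactly as the README above describes. As a minimal usage sketch of that load/unload flow (assumptions: the stock glance-manage entry point is installed and already configured to reach the service database, and the definitions live in the conventional /etc/glance/metadefs directory, which may differ in this tree):

    # Load every namespace JSON file from the metadefs directory into the database.
    glance-manage db_load_metadefs /etc/glance/metadefs
    # Remove previously loaded definitions again.
    glance-manage db_unload_metadefs

The path argument is optional; when omitted, glance-manage falls back to its configured default metadata definitions directory.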
diff --git a/code/daisy/etc/metadefs/compute-aggr-disk-filter.json b/code/daisy/etc/metadefs/compute-aggr-disk-filter.json new file mode 100755 index 00000000..3a1037c9 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-aggr-disk-filter.json @@ -0,0 +1,21 @@ +{ + "namespace": "OS::Compute::AggregateDiskFilter", + "display_name": "Disk Allocation per Host", + "description": "Properties related to the Nova scheduler filter AggregateDiskFilter. Filters aggregate hosts based on the available disk space compared to the requested disk space. Hosts in the aggregate with not enough usable disk will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Aggregate" + } + ], + "properties": { + "disk_allocation_ratio": { + "title": "Disk Subscription Ratio", + "description": "Allows the host to be under and over subscribed for the amount of disk space requested for an instance. A ratio greater than 1.0 allows for over subscription (hosts may have less usable disk space than requested). A ratio less than 1.0 allows for under subscription.", + "type": "number", + "readonly": false + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-aggr-iops-filter.json b/code/daisy/etc/metadefs/compute-aggr-iops-filter.json new file mode 100755 index 00000000..31047835 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-aggr-iops-filter.json @@ -0,0 +1,23 @@ +{ + "namespace": "OS::Compute::AggregateIoOpsFilter", + "display_name": "IO Ops per Host", + "description": "Properties related to the Nova scheduler filter AggregateIoOpsFilter. Filters aggregate hosts based on the number of instances currently changing state. Hosts in the aggregate with too many instances changing state will be filtered out. The filter must be enabled in the Nova scheduler to use these properties.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Aggregate" + } + ], + "properties": { + "max_io_ops_per_host": { + "title": "Maximum IO Operations per Host", + "description": "Prevents hosts in the aggregate that have this many or more instances currently in build, resize, snapshot, migrate, rescue or unshelve to be scheduled for new instances.", + "type": "integer", + "readonly": false, + "default": 8, + "minimum": 1 + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-aggr-num-instances.json b/code/daisy/etc/metadefs/compute-aggr-num-instances.json new file mode 100755 index 00000000..3c9f678d --- /dev/null +++ b/code/daisy/etc/metadefs/compute-aggr-num-instances.json @@ -0,0 +1,21 @@ +{ + "namespace": "OS::Compute::AggregateNumInstancesFilter", + "display_name": "Instances per Host", + "description": "Properties related to the Nova scheduler filter AggregateNumInstancesFilter. Filters aggregate hosts by the number of running instances on it. Hosts in the aggregate with too many instances will be filtered out. 
The filter must be enabled in the Nova scheduler to use these properties.", "visibility": "public", + "protected": false, + "resource_type_associations": [ + { + "name": "OS::Nova::Aggregate" + } + ], + "properties": { + "max_instances_per_host": { + "title": "Max Instances Per Host", + "description": "Maximum number of instances allowed to run on a host in the aggregate.", + "type": "integer", + "readonly": false, + "minimum": 0 + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-guest-shutdown.json b/code/daisy/etc/metadefs/compute-guest-shutdown.json new file mode 100755 index 00000000..354ee638 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-guest-shutdown.json @@ -0,0 +1,21 @@ +{ + "namespace": "OS::Compute::GuestShutdownBehavior", + "display_name": "Shutdown Behavior", + "description": "These properties allow modifying the shutdown behavior for stop, rescue, resize, and shelve operations.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + } + ], + "properties": { + "os_shutdown_timeout": { + "title": "Shutdown timeout", + "description": "By default, guests will be given 60 seconds to perform a graceful shutdown. After that, the VM is powered off. This property allows overriding the amount of time (unit: seconds) to allow a guest OS to cleanly shut down before power off. A value of 0 (zero) means the guest will be powered off immediately with no opportunity for guest OS clean-up.", + "type": "integer", + "minimum": 0 + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-host-capabilities.json b/code/daisy/etc/metadefs/compute-host-capabilities.json new file mode 100755 index 00000000..e09de355 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-host-capabilities.json @@ -0,0 +1,185 @@ +{ + "namespace": "OS::Compute::HostCapabilities", + "display_name": "Compute Host Capabilities", + "description": "Hardware capabilities provided by the compute host. This provides the ability to fine tune the hardware specification required when an instance is requested. The ComputeCapabilitiesFilter should be enabled in the Nova scheduler to use these properties. When enabled, this filter checks that the capabilities provided by the compute host satisfy any extra specifications requested. Only hosts that can provide the requested capabilities will be eligible for hosting the instance.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor", + "prefix": "capabilities:" + }, + { + "name": "OS::Nova::Aggregate", + "prefix": "aggregate_instance_extra_specs:" + } + ], + "properties": { + "cpu_info:vendor": { + "title": "Vendor", + "description": "Specifies the CPU manufacturer.", + "operators": [""], + "type": "string", + "enum": [ + "Intel", + "AMD" + ] + }, + "cpu_info:model": { + "title": "Model", + "description": "Specifies the CPU model. 
Use this property to ensure that your vm runs on a a specific cpu model.", + "operators": [""], + "type": "string", + "enum": [ + "Conroe", + "Core2Duo", + "Penryn", + "Nehalem", + "Westmere", + "SandyBridge", + "IvyBridge", + "Haswell", + "Broadwell", + "Delhi", + "Seoul", + "Abu Dhabi", + "Interlagos", + "Kabini", + "Valencia", + "Zurich", + "Budapest", + "Barcelona", + "Suzuka", + "Shanghai", + "Istanbul", + "Lisbon", + "Magny-Cours", + "Valencia", + "Cortex-A57", + "Cortex-A53", + "Cortex-A12", + "Cortex-A17", + "Cortex-A15", + "Coretx-A7", + "X-Gene" + ] + }, + "cpu_info:arch": { + "title": "Architecture", + "description": "Specifies the CPU architecture. Use this property to specify the architecture supported by the hypervisor.", + "operators": [""], + "type": "string", + "enum": [ + "x86", + "x86_64", + "i686", + "ia64", + "ARMv8-A", + "ARMv7-A" + ] + }, + "cpu_info:topology:cores": { + "title": "cores", + "description": "Number of cores.", + "type": "integer", + "readonly": false, + "default": 1 + }, + "cpu_info:topology:threads": { + "title": "threads", + "description": "Number of threads.", + "type": "integer", + "readonly": false, + "default": 1 + }, + "cpu_info:topology:sockets": { + "title": "sockets", + "description": "Number of sockets.", + "type": "integer", + "readonly": false, + "default": 1 + }, + "cpu_info:features": { + "title": "Features", + "description": "Specifies CPU flags/features. Using this property you can specify the required set of instructions supported by a vm.", + "operators": ["", ""], + "type": "array", + "items": { + "type": "string", + "enum": [ + "aes", + "vme", + "de", + "pse", + "tsc", + "msr", + "pae", + "mce", + "cx8", + "apic", + "sep", + "mtrr", + "pge", + "mca", + "cmov", + "pat", + "pse36", + "clflush", + "dts", + "acpi", + "mmx", + "fxsr", + "sse", + "sse2", + "ss", + "ht", + "tm", + "ia64", + "pbe", + "rdtscp", + "pni", + "pclmulqdq", + "dtes64", + "monitor", + "ds_cpl", + "vmx", + "smx", + "est", + "tm2", + "ssse3", + "cid", + "fma", + "cx16", + "xtpr", + "pdcm", + "pcid", + "dca", + "sse4_1", + "sse4_2", + "x2apic", + "movbe", + "popcnt", + "tsc_deadline_timer", + "xsave", + "avx", + "f16c", + "rdrand", + "fsgsbase", + "bmi1", + "hle", + "avx2", + "smep", + "bmi2", + "erms", + "invpcid", + "rtm", + "mpx", + "rdseed", + "adx", + "smap" + ] + } + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-hypervisor.json b/code/daisy/etc/metadefs/compute-hypervisor.json new file mode 100755 index 00000000..d82531bd --- /dev/null +++ b/code/daisy/etc/metadefs/compute-hypervisor.json @@ -0,0 +1,41 @@ +{ + "namespace": "OS::Compute::Hypervisor", + "display_name": "Hypervisor Selection", + "description": "OpenStack Compute supports many hypervisors, although most installations use only one hypervisor. For installations with multiple supported hypervisors, you can schedule different hypervisors using the ImagePropertiesFilter. This filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + } + ], + "properties": { + "hypervisor_type": { + "title": "Hypervisor Type", + "description": "Hypervisor type required by the image. Used with the ImagePropertiesFilter. \n\n KVM - Kernel-based Virtual Machine. LXC - Linux Containers (through libvirt). QEMU - Quick EMUlator. UML - User Mode Linux. hyperv - Microsoft庐 hyperv. 
vmware - VMware庐 vsphere. Baremetal - physical provisioning. For more information, see: http://docs.openstack.org/trunk/config-reference/content/section_compute-hypervisors.html", + "type": "string", + "enum": [ + "baremetal", + "hyperv", + "kvm", + "lxc", + "qemu", + "uml", + "vmware", + "xen" + ] + }, + "vm_mode": { + "title": "VM Mode", + "description": "The virtual machine mode. This represents the host/guest ABI (application binary interface) used for the virtual machine. Used with the ImagePropertiesFilter. \n\n hvm 鈥 Fully virtualized - This is the virtual machine mode (vm_mode) used by QEMU and KVM. \n\n xen - Xen 3.0 paravirtualized. \n\n uml 鈥 User Mode Linux paravirtualized. \n\n exe 鈥 Executables in containers. This is the mode used by LXC.", + "type": "string", + "enum": [ + "hvm", + "xen", + "uml", + "exe" + ] + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-instance-data.json b/code/daisy/etc/metadefs/compute-instance-data.json new file mode 100755 index 00000000..a29af79d --- /dev/null +++ b/code/daisy/etc/metadefs/compute-instance-data.json @@ -0,0 +1,36 @@ +{ + "namespace": "OS::Compute::InstanceData", + "display_name": "Instance Config Data", + "description": "Instances can perform self-configuration based on data made available to the running instance. These properties affect instance configuration.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + } + ], + "properties": { + "img_config_drive": { + "title": "Config Drive", + "description": "This property specifies whether or not Nova should use a config drive when booting the image. Mandatory means that Nova will always use a config drive when booting the image. OpenStack can be configured to write metadata to a special configuration drive that will be attached to the instance when it boots. The instance can retrieve any information from the config drive. One use case for the config drive is to pass network configuration information to the instance. See also: http://docs.openstack.org/user-guide/content/config-drive.html", + "type": "string", + "enum": [ + "optional", + "mandatory" + ] + }, + "os_require_quiesce": { + "title": "Require Quiescent File system", + "description": "This property specifies whether or not the filesystem must be quiesced during snapshot processing. For volume backed and image backed snapshots, yes means that snapshotting is aborted when quiescing fails, whereas, no means quiescing will be skipped and snapshot processing will continue after the quiesce failure.", + "type": "string", + "enum": [ + "yes", + "no" + ] + } + } +} diff --git a/code/daisy/etc/metadefs/compute-libvirt-image.json b/code/daisy/etc/metadefs/compute-libvirt-image.json new file mode 100755 index 00000000..191d1737 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-libvirt-image.json @@ -0,0 +1,89 @@ +{ + "namespace": "OS::Compute::LibvirtImage", + "display_name": "libvirt Driver Options for Images", + "description": "The libvirt Compute Driver Options for Glance Images. \n\nThese are properties specific to compute drivers. 
For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + } + ], + "properties": { + "hw_disk_bus": { + "title": "Disk Bus", + "description": "Specifies the type of disk controller to attach disk devices to.", + "type": "string", + "enum": [ + "scsi", + "virtio", + "uml", + "xen", + "ide", + "usb" + ] + }, + "hw_rng_model": { + "title": "Random Number Generator Device", + "description": "Adds a random-number generator device to the image's instances. The cloud administrator can enable and control device behavior by configuring the instance's flavor. By default: The generator device is disabled. /dev/random is used as the default entropy source. To specify a physical HW RNG device, use the following option in the nova.conf file: rng_dev_path=/dev/hwrng", + "type": "string", + "default": "virtio" + }, + "hw_machine_type": { + "title": "Machine Type", + "description": "Enables booting an ARM system using the specified machine type. By default, if an ARM image is used and its type is not specified, Compute uses vexpress-a15 (for ARMv7) or virt (for AArch64) machine types. Valid types can be viewed by using the virsh capabilities command (machine types are displayed in the machine tag).", + "type": "string" + }, + "hw_scsi_model": { + "title": "SCSI Model", + "description": "Enables the use of VirtIO SCSI (virtio-scsi) to provide block device access for compute instances; by default, instances use VirtIO Block (virtio-blk). VirtIO SCSI is a para-virtualized SCSI controller device that provides improved scalability and performance, and supports advanced SCSI hardware.", + "type": "string", + "default": "virtio-scsi" + }, + "hw_video_model": { + "title": "Video Model", + "description": "The video image driver used.", + "type": "string", + "enum": [ + "vga", + "cirrus", + "vmvga", + "xen", + "qxl" + ] + }, + "hw_video_ram": { + "title": "Max Video Ram", + "description": "Maximum RAM (unit: MB) for the video image. Used only if a hw_video:ram_max_mb value has been set in the flavor's extra_specs and that value is higher than the value set in hw_video_ram.", + "type": "integer", + "minimum": 0 + }, + "os_command_line": { + "title": "Kernel Command Line", + "description": "The kernel command line to be used by the libvirt driver, instead of the default. For linux containers (LXC), the value is used as arguments for initialization. This key is valid only for Amazon kernel, ramdisk, or machine images (aki, ari, or ami).", + "type": "string" + }, + "hw_vif_model": { + "title": "Virtual Network Interface", + "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor configuration. libvirt driver options: KVM and QEMU: e1000, ne2k_pci, pcnet, rtl8139, spapr-vlan, and virtio. Xen: e1000, netfront, ne2k_pci, pcnet, and rtl8139.", + "type": "string", + "enum": [ + "e1000", + "e1000e", + "ne2k_pci", + "netfront", + "pcnet", + "rtl8139", + "spapr-vlan", + "virtio" + ] + }, + "hw_qemu_guest_agent": { + "title": "QEMU Guest Agent", + "description": "This is a background process which helps management applications execute guest OS level commands. For example, freezing and thawing filesystems, entering suspend. 
However, guest agent (GA) is not bullet proof, and hostile guest OS can send spurious replies.", + "type": "string", + "enum": ["yes", "no"] + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-libvirt.json b/code/daisy/etc/metadefs/compute-libvirt.json new file mode 100755 index 00000000..08fd9929 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-libvirt.json @@ -0,0 +1,32 @@ +{ + "namespace": "OS::Compute::Libvirt", + "display_name": "libvirt Driver Options", + "description": "The libvirt compute driver options. \n\nThese are properties that affect the libvirt compute driver and may be specified on flavors and images. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image", + "prefix": "hw_" + }, + { + "name": "OS::Nova::Flavor", + "prefix": "hw:" + } + ], + "properties": { + "serial_port_count": { + "title": "Serial Port Count", + "description": "Specifies the count of serial ports that should be provided. If hw:serial_port_count is not set in the flavor's extra_specs, then any count is permitted. If hw:serial_port_count is set, then this provides the default serial port count. It is permitted to override the default serial port count, but only with a lower value.", + "type": "integer", + "minimum": 0 + }, + "boot_menu": { + "title": "Boot Menu", + "description": "If true, enables the BIOS bootmenu. In cases where both the image metadata and Extra Spec are set, the Extra Spec setting is used. This allows for flexibility in setting/overriding the default behavior as needed.", + "type": "string", + "enum": ["true", "false"] + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-quota.json b/code/daisy/etc/metadefs/compute-quota.json new file mode 100755 index 00000000..ca1bd596 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-quota.json @@ -0,0 +1,109 @@ +{ + "namespace": "OS::Compute::Quota", + "display_name": "Flavor Quota", + "description": "Compute drivers may enable quotas on CPUs available to a VM, disk tuning, bandwidth I/O, and instance VIF traffic control. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor" + } + ], + "objects": [ + { + "name": "CPU Limits", + "description": "You can configure the CPU limits with control parameters.", + "properties": { + "quota:cpu_shares": { + "title": "Quota: CPU Shares", + "description": "Specifies the proportional weighted share for the domain. If this element is omitted, the service defaults to the OS provided defaults. There is no unit for the value; it is a relative measure based on the setting of other VMs. For example, a VM configured with value 2048 gets twice as much CPU time as a VM configured with value 1024.", + "type": "integer" + }, + "quota:cpu_period": { + "title": "Quota: CPU Period", + "description": "Specifies the enforcement interval (unit: microseconds) for QEMU and LXC hypervisors. Within a period, each VCPU of the domain is not allowed to consume more than the quota worth of runtime. The value should be in range [1000, 1000000]. A period with value 0 means no value.", + "type": "integer", + "minimum": 1000, + "maximum": 1000000 + }, + "quota:cpu_quota": { + "title": "Quota: CPU Quota", + "description": "Specifies the maximum allowed bandwidth (unit: microseconds). 
A domain with a negative-value quota indicates that the domain has infinite bandwidth, which means that it is not bandwidth controlled. The value should be in range [1000, 18446744073709551] or less than 0. A quota with value 0 means no value. You can use this feature to ensure that all vCPUs run at the same speed.", + "type": "integer" + } + } + }, + { + "name": "Disk QoS", + "description": "Using disk I/O quotas, you can set maximum disk write to 10 MB per second for a VM user.", + "properties": { + "quota:disk_read_bytes_sec": { + "title": "Quota: Disk read bytes / sec", + "description": "Sets disk I/O quota for disk read bytes / sec.", + "type": "integer" + }, + "quota:disk_read_iops_sec": { + "title": "Quota: Disk read IOPS / sec", + "description": "Sets disk I/O quota for disk read IOPS / sec.", + "type": "integer" + }, + "quota:disk_write_bytes_sec": { + "title": "Quota: Disk Write Bytes / sec", + "description": "Sets disk I/O quota for disk write bytes / sec.", + "type": "integer" + }, + "quota:disk_write_iops_sec": { + "title": "Quota: Disk Write IOPS / sec", + "description": "Sets disk I/O quota for disk write IOPS / sec.", + "type": "integer" + }, + "quota:disk_total_bytes_sec": { + "title": "Quota: Disk Total Bytes / sec", + "description": "Sets disk I/O quota for total disk bytes / sec.", + "type": "integer" + }, + "quota:disk_total_iops_sec": { + "title": "Quota: Disk Total IOPS / sec", + "description": "Sets disk I/O quota for disk total IOPS / sec.", + "type": "integer" + } + } + }, + { + "name": "Virtual Interface QoS", + "description": "Bandwidth QoS tuning for instance virtual interfaces (VIFs) may be specified with these properties. Incoming and outgoing traffic can be shaped independently. If not specified, no quality of service (QoS) is applied on that traffic direction. So, if you want to shape only the network's incoming traffic, use inbound only (and vice versa). The OpenStack Networking service abstracts the physical implementation of the network, allowing plugins to configure and manage physical resources. Virtual Interfaces (VIF) in the logical model are analogous to physical network interface cards (NICs). VIFs are typically owned a managed by an external service; for instance when OpenStack Networking is used for building OpenStack networks, VIFs would be created, owned, and managed in Nova. VIFs are connected to OpenStack Networking networks via ports. A port is analogous to a port on a network switch, and it has an administrative state. When a VIF is attached to a port the OpenStack Networking API creates an attachment object, which specifies the fact that a VIF with a given identifier is plugged into the port.", + "properties": { + "quota:vif_inbound_average": { + "title": "Quota: VIF Inbound Average", + "description": "Network Virtual Interface (VIF) inbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", + "type": "integer" + }, + "quota:vif_inbound_burst": { + "title": "Quota: VIF Inbound Burst", + "description": "Network Virtual Interface (VIF) inbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", + "type": "integer" + }, + "quota:vif_inbound_peak": { + "title": "Quota: VIF Inbound Peak", + "description": "Network Virtual Interface (VIF) inbound peak in kilobytes per second. 
Specifies maximum rate at which an interface can receive data.", + "type": "integer" + }, + "quota:vif_outbound_average": { + "title": "Quota: VIF Outbound Average", + "description": "Network Virtual Interface (VIF) outbound average in kilobytes per second. Specifies average bit rate on the interface being shaped.", + "type": "integer" + }, + "quota:vif_outbound_burst": { + "title": "Quota: VIF Outbound Burst", + "description": "Network Virtual Interface (VIF) outbound burst in total kilobytes. Specifies the amount of bytes that can be burst at peak speed.", + "type": "integer" + }, + "quota:vif_outbound_peak": { + "title": "Quota: VIF Outbound Burst", + "description": "Network Virtual Interface (VIF) outbound peak in kilobytes per second. Specifies maximum rate at which an interface can send data.", + "type": "integer" + } + } + } + ] +} \ No newline at end of file diff --git a/code/daisy/etc/metadefs/compute-randomgen.json b/code/daisy/etc/metadefs/compute-randomgen.json new file mode 100755 index 00000000..2414b844 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-randomgen.json @@ -0,0 +1,29 @@ +{ + "namespace": "OS::Compute::RandomNumberGenerator", + "display_name": "Random Number Generator", + "description": "If a random-number generator device has been added to the instance through its image properties, the device can be enabled and configured.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor" + } + ], + "properties": { + "hw_rng:allowed": { + "title": "Random Number Generator Allowed", + "description": "", + "type": "boolean" + }, + "hw_rng:rate_bytes": { + "title": "Random number generator limits.", + "description": "Allowed amount of bytes that the guest can read from the host's entropy per period.", + "type": "integer" + }, + "hw_rng:rate_period": { + "title": "Random number generator read period.", + "description": "Duration of the read period in seconds.", + "type": "integer" + } + } +} \ No newline at end of file diff --git a/code/daisy/etc/metadefs/compute-trust.json b/code/daisy/etc/metadefs/compute-trust.json new file mode 100755 index 00000000..7df5691b --- /dev/null +++ b/code/daisy/etc/metadefs/compute-trust.json @@ -0,0 +1,24 @@ +{ + "namespace": "OS::Compute::Trust", + "display_name": "Trusted Compute Pools (Intel庐 TXT)", + "description": "Trusted compute pools with Intel庐 Trusted Execution Technology (Intel庐 TXT) support IT compliance by protecting virtualized data centers - private, public, and hybrid clouds against attacks toward hypervisor and BIOS, firmware, and other pre-launch software components. The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor" + } + ], + "properties": { + "trust:trusted_host": { + "title": "Intel庐 TXT attestation", + "description": "Select to ensure that node has been attested by Intel庐 Trusted Execution Technology (Intel庐 TXT). 
The Nova trust scheduling filter must be enabled and configured with the trust attestation service in order to use this feature.", + "type": "string", + "enum": [ + "trusted", + "untrusted", + "unknown" + ] + } + } +} \ No newline at end of file diff --git a/code/daisy/etc/metadefs/compute-vcputopology.json b/code/daisy/etc/metadefs/compute-vcputopology.json new file mode 100755 index 00000000..345a1c26 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-vcputopology.json @@ -0,0 +1,54 @@ +{ + "namespace": "OS::Compute::VirtCPUTopology", + "display_name": "Virtual CPU Topology", + "description": "This provides the preferred socket/core/thread counts for the virtual CPU instance exposed to guests. This enables the ability to avoid hitting limitations on vCPU topologies that OS vendors place on their products. See also: http://git.openstack.org/cgit/openstack/nova-specs/tree/specs/juno/virt-driver-vcpu-topology.rst", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image", + "prefix": "hw_" + }, + { + "name": "OS::Cinder::Volume", + "prefix": "hw_", + "properties_target": "image" + }, + { + "name": "OS::Nova::Flavor", + "prefix": "hw:" + } + ], + "properties": { + "cpu_sockets": { + "title": "vCPU Sockets", + "description": "Preferred number of sockets to expose to the guest.", + "type": "integer" + }, + "cpu_cores": { + "title": "vCPU Cores", + "description": "Preferred number of cores to expose to the guest.", + "type": "integer" + }, + "cpu_threads": { + "title": " vCPU Threads", + "description": "Preferred number of threads to expose to the guest.", + "type": "integer" + }, + "cpu_maxsockets": { + "title": "Max vCPU Sockets", + "description": "Maximum number of sockets to expose to the guest.", + "type": "integer" + }, + "cpu_maxcores": { + "title": "Max vCPU Cores", + "description": "Maximum number of cores to expose to the guest.", + "type": "integer" + }, + "cpu_maxthreads": { + "title": "Max vCPU Threads", + "description": "Maximum number of threads to expose to the guest.", + "type": "integer" + } + } +} diff --git a/code/daisy/etc/metadefs/compute-vmware-flavor.json b/code/daisy/etc/metadefs/compute-vmware-flavor.json new file mode 100755 index 00000000..ae4addc8 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-vmware-flavor.json @@ -0,0 +1,19 @@ +{ + "namespace": "OS::Compute::VMwareFlavor", + "display_name": "VMware Driver Options for Flavors", + "description": "VMware Driver Options for Flavors may be used to customize and manage Nova Flavors. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor" + } + ], + "properties": { + "vmware:hw_version": { + "title": "VMware Hardware Version", + "description": "Specifies the hardware version VMware uses to create images. If the hardware version needs to be compatible with a cluster version, for backward compatibility or other circumstances, the vmware:hw_version key specifies a virtual machine hardware version. 
In the event that a cluster has mixed host version types, the key will enable the VC to place the cluster on the correct host.", + "type": "string" + } + } +} diff --git a/code/daisy/etc/metadefs/compute-vmware-quota-flavor.json b/code/daisy/etc/metadefs/compute-vmware-quota-flavor.json new file mode 100755 index 00000000..ea711ac2 --- /dev/null +++ b/code/daisy/etc/metadefs/compute-vmware-quota-flavor.json @@ -0,0 +1,26 @@ +{ + "namespace": "OS::Compute::VMwareQuotaFlavor", + "display_name": "VMware Quota for Flavors", + "description": "The VMware compute driver allows various compute quotas to be specified on flavors. When specified, the VMWare driver will ensure that the quota is enforced. These are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of hypervisors, see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix. For flavor customization, see: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Nova::Flavor" + } + ], + "properties": { + "quota:cpu_limit": { + "title": "Quota: CPU Limit", + "description": "Specifies the upper limit for CPU allocation in MHz. This parameter ensures that a machine never uses more than the defined amount of CPU time. It can be used to enforce a limit on the machine's CPU performance. The value should be a numerical value in MHz. If zero is supplied then the cpu_limit is unlimited.", + "type": "integer", + "minimum": 0 + }, + "quota:cpu_reservation": { + "title": "Quota: CPU Reservation Limit", + "description": "Specifies the guaranteed minimum CPU reservation in MHz. This means that if needed, the machine will definitely get allocated the reserved amount of CPU cycles. The value should be a numerical value in MHz.", + "type": "integer", + "minimum": 0 + } + } +} diff --git a/code/daisy/etc/metadefs/compute-vmware.json b/code/daisy/etc/metadefs/compute-vmware.json new file mode 100755 index 00000000..38a6149b --- /dev/null +++ b/code/daisy/etc/metadefs/compute-vmware.json @@ -0,0 +1,60 @@ +{ + "namespace": "OS::Compute::VMware", + "display_name": "VMware Driver Options", + "description": "The VMware compute driver options. \n\nThese are properties specific to VMWare compute drivers and will only have an effect if the VMWare compute driver is enabled in Nova. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + } + ], + "properties": { + "vmware_adaptertype": { + "title": "Disk Adapter Type", + "description": "The virtual SCSI or IDE controller used by the hypervisor.", + "type": "string", + "enum": [ + "lsiLogic", + "lsiLogicsas", + "paraVirtual", + "busLogic", + "ide" + ], + "default" : "lsiLogic" + }, + "vmware_disktype": { + "title": "Disk Provisioning Type", + "description": "When performing operations such as creating a virtual disk, cloning, or migrating, the disk provisioning type may be specified. Please refer to VMware documentation for more.", + "type": "string", + "enum": [ + "streamOptimized", + "sparse", + "preallocated" + ], + "default" : "preallocated" + }, + "vmware_ostype": { + "title": "OS Type", + "description": "A VMware GuestID which describes the operating system installed in the image. This value is passed to the hypervisor when creating a virtual machine. 
If not specified, the key defaults to otherGuest. See thinkvirt.com.", + "type": "string", + "default": "otherGuest" + }, + "hw_vif_model": { + "title": "Virtual Network Interface", + "description": "Specifies the model of virtual network interface device to use. The valid options depend on the hypervisor. VMware driver supported options: e1000, e1000e, VirtualE1000, VirtualE1000e, VirtualPCNet32, VirtualSriovEthernetCard, and VirtualVmxnet.", + "type": "string", + "enum": [ + "e1000", + "e1000e", + "VirtualE1000", + "VirtualE1000e", + "VirtualPCNet32", + "VirtualSriovEthernetCard", + "VirtualVmxnet" + ], + "default" : "e1000" + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/compute-watchdog.json b/code/daisy/etc/metadefs/compute-watchdog.json new file mode 100755 index 00000000..7eb32cec --- /dev/null +++ b/code/daisy/etc/metadefs/compute-watchdog.json @@ -0,0 +1,33 @@ +{ + "namespace": "OS::Compute::Watchdog", + "display_name": "Watchdog Behavior", + "description": "Compute drivers may enable watchdog behavior over instances. See: http://docs.openstack.org/admin-guide-cloud/content/customize-flavors.html", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + }, + { + "name": "OS::Nova::Flavor" + } + ], + "properties": { + "hw_watchdog_action": { + "title": "Watchdog Action", + "description": "For the libvirt driver, you can enable and set the behavior of a virtual hardware watchdog device for each flavor. Watchdog devices keep an eye on the guest server, and carry out the configured action, if the server hangs. The watchdog uses the i6300esb device (emulating a PCI Intel 6300ESB). If hw_watchdog_action is not specified, the watchdog is disabled. Watchdog behavior set using a specific image's properties will override behavior set using flavors.", + "type": "string", + "enum": [ + "disabled", + "reset", + "poweroff", + "pause", + "none" + ] + } + } +} diff --git a/code/daisy/etc/metadefs/compute-xenapi.json b/code/daisy/etc/metadefs/compute-xenapi.json new file mode 100755 index 00000000..eda7489f --- /dev/null +++ b/code/daisy/etc/metadefs/compute-xenapi.json @@ -0,0 +1,29 @@ +{ + "namespace": "OS::Compute::XenAPI", + "display_name": "XenAPI Driver Options", + "description": "The XenAPI compute driver options. \n\nThese are properties specific to compute drivers. For a list of all hypervisors, see here: https://wiki.openstack.org/wiki/HypervisorSupportMatrix.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + } + ], + "properties": { + "os_type": { + "title": "OS Type", + "description": "The operating system installed on the image. The XenAPI driver contains logic that takes different actions depending on the value of the os_type parameter of the image. For example, for os_type=windows images, it creates a FAT32-based swap partition instead of a Linux swap partition, and it limits the injected host name to less than 16 characters.", + "type": "string", + "enum": [ + "linux", + "windows" + ] + }, + "auto_disk_config": { + "title": "Disk Adapter Type", + "description": "If true, the root partition on the disk is automatically resized before the instance boots. This value is only taken into account by the Compute service when using a Xen-based hypervisor with the XenAPI driver. 
The Compute service will only attempt to resize if there is a single partition on the image, and only if the partition is in ext3 or ext4 format.", + "type": "boolean" + } + }, + "objects": [] +} diff --git a/code/daisy/etc/metadefs/glance-common-image-props.json b/code/daisy/etc/metadefs/glance-common-image-props.json new file mode 100755 index 00000000..1311a7fd --- /dev/null +++ b/code/daisy/etc/metadefs/glance-common-image-props.json @@ -0,0 +1,42 @@ +{ + "display_name": "Common Image Properties", + "namespace": "OS::Glance::CommonImageProperties", + "description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.", + "protected": true, + "resource_type_associations" : [ + ], + "properties": { + "kernel_id": { + "title": "Kernel ID", + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." + }, + "ramdisk_id": { + "title": "Ramdisk ID", + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." + }, + "instance_uuid": { + "title": "Instance ID", + "type": "string", + "description": "ID of instance used to create this image." + }, + "architecture": { + "title": "CPU Architecture", + "description": "The CPU architecture that must be supported by the hypervisor. For example, x86_64, arm, or ppc64. Run uname -m to get the architecture of a machine. We strongly recommend using the architecture data vocabulary defined by the libosinfo project for this purpose.", + "type": "string" + }, + "os_distro": { + "title": "OS Distro", + "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", + "type": "string" + }, + "os_version": { + "title": "OS Version", + "description": "Operating system version as specified by the distributor. (for example, '11.10')", + "type": "string" + } + } +} diff --git a/code/daisy/etc/metadefs/operating-system.json b/code/daisy/etc/metadefs/operating-system.json new file mode 100755 index 00000000..7a25d279 --- /dev/null +++ b/code/daisy/etc/metadefs/operating-system.json @@ -0,0 +1,27 @@ +{ + "display_name": "Common Operating System Properties", + "namespace": "OS::OperatingSystem", + "description": "When adding an image to Glance, you may specify some common image properties that may prove useful to consumers of your image.", + "protected": true, + "resource_type_associations" : [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + } + ], + "properties": { + "os_distro": { + "title": "OS Distro", + "description": "The common name of the operating system distribution in lowercase (uses the same data vocabulary as the libosinfo project). Specify only a recognized value for this field. Deprecated values are listed to assist you in searching for the recognized value.", + "type": "string" + }, + "os_version": { + "title": "OS Version", + "description": "Operating system version as specified by the distributor. 
(for example, '11.10')", + "type": "string" + } + } +} diff --git a/code/daisy/etc/metadefs/software-databases.json b/code/daisy/etc/metadefs/software-databases.json new file mode 100755 index 00000000..213c1bdf --- /dev/null +++ b/code/daisy/etc/metadefs/software-databases.json @@ -0,0 +1,333 @@ +{ + "namespace": "OS::Software::DBMS", + "display_name": "Database Software", + "description": "A database is an organized collection of data. The data is typically organized to model aspects of reality in a way that supports processes requiring information. Database management systems are computer software applications that interact with the user, other applications, and the database itself to capture and analyze data. (http://en.wikipedia.org/wiki/Database)", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + }, + { + "name": "OS::Nova::Instance" + }, + { + "name": "OS::Trove::Instance" + } + ], + "objects": [ + { + "name": "MySQL", + "description": "MySQL is an object-relational database management system (ORDBMS). The MySQL development project has made its source code available under the terms of the GNU General Public License, as well as under a variety of proprietary agreements. MySQL was owned and sponsored by a single for-profit firm, the Swedish company MySQL AB, now owned by Oracle Corporation. MySQL is a popular choice of database for use in web applications, and is a central component of the widely used LAMP open source web application software stack (and other 'AMP' stacks). (http://en.wikipedia.org/wiki/MySQL)", + "properties": { + "sw_database_mysql_version": { + "title": "Version", + "description": "The specific version of MySQL.", + "type": "string" + }, + "sw_database_mysql_listen_port": { + "title": "Listen Port", + "description": "The configured TCP/IP port which MySQL listens for incoming connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 3606 + }, + "sw_database_mysql_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "root" + } + } + }, + { + "name": "PostgreSQL", + "description": "PostgreSQL, often simply 'Postgres', is an object-relational database management system (ORDBMS) with an emphasis on extensibility and standards-compliance. PostgreSQL is cross-platform and runs on many operating systems. (http://en.wikipedia.org/wiki/PostgreSQL)", + "properties": { + "sw_database_postgresql_version": { + "title": "Version", + "description": "The specific version of PostgreSQL.", + "type": "string" + }, + "sw_database_postgresql_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which PostgreSQL is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 5432 + }, + "sw_database_postgresql_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "postgres" + } + } + }, + { + "name": "SQL Server", + "description": "Microsoft SQL Server is a relational database management system developed by Microsoft. 
There are at least a dozen different editions of Microsoft SQL Server aimed at different audiences and for workloads ranging from small single-machine applications to large Internet-facing applications with many concurrent users. Its primary query languages are T-SQL and ANSI SQL. (http://en.wikipedia.org/wiki/Microsoft_SQL_Server)", + "properties": { + "sw_database_sqlserver_version": { + "title": "Version", + "description": "The specific version of Microsoft SQL Server.", + "type": "string" + }, + "sw_database_sqlserver_edition": { + "title": "Edition", + "description": "SQL Server is available in multiple editions, with different feature sets and targeting different users.", + "type": "string", + "default": "Express", + "enum": [ + "Datacenter", + "Enterprise", + "Standard", + "Web", + "Business Intelligence", + "Workgroup", + "Express", + "Compact (SQL CE)", + "Developer", + "Embedded (SSEE)", + "Express", + "Fast Track", + "LocalDB", + "Parallel Data Warehouse (PDW)", + "Business Intelligence", + "Datawarehouse Appliance Edition" + ] + }, + "sw_database_sqlserver_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which SQL Server is to listen for connections from client applications. The default SQL Server port is 1433, and client ports are assigned a random value between 1024 and 5000.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 1433 + }, + "sw_database_postsqlserver_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "sa" + } + } + }, + { + "name": "Oracle", + "description": "Oracle Database (commonly referred to as Oracle RDBMS or simply as Oracle) is an object-relational database management system produced and marketed by Oracle Corporation. (http://en.wikipedia.org/wiki/Oracle_Database)", + "properties": { + "sw_database_oracle_version": { + "title": "Version", + "description": "The specific version of Oracle.", + "type": "string" + }, + "sw_database_oracle_edition": { + "title": "Edition", + "description": "Over and above the different versions of the Oracle database management software developed over time, Oracle Corporation subdivides its product into varying editions.", + "type": "string", + "default": "Express", + "enum": [ + "Enterprise", + "Standard", + "Standard Edition One", + "Express (XE)", + "Workgroup", + "Lite" + ] + }, + "sw_database_oracle_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Oracle is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 1521 + } + } + }, + { + "name": "DB2", + "description": "IBM DB2 is a family of database server products developed by IBM. These products all support the relational model, but in recent years some products have been extended to support object-relational features and non-relational structures, in particular XML. 
(http://en.wikipedia.org/wiki/IBM_DB2)", + "properties": { + "sw_database_db2_version": { + "title": "Version", + "description": "The specific version of DB2.", + "type": "string" + }, + "sw_database_db2_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which DB2 is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 5432 + }, + "sw_database_db2_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string" + } + } + }, + { + "name": "MongoDB", + "description": "MongoDB is a cross-platform document-oriented database. Classified as a NoSQL database, MongoDB uses JSON-like documents with dynamic schemas (MongoDB calls the format BSON), making the integration of data in certain types of applications easier and faster. Released under a combination of the GNU Affero General Public License and the Apache License, MongoDB is free and open-source software. (http://en.wikipedia.org/wiki/MongoDB)", + "properties": { + "sw_database_mongodb_version": { + "title": "Version", + "description": "The specific version of MongoDB.", + "type": "string" + }, + "sw_database_mongodb_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which MongoDB is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 27017 + }, + "sw_database_mongodb_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string" + } + } + }, + { + "name": "Couchbase Server", + "description": "Couchbase Server, originally known as Membase, is an open source, distributed (shared-nothing architecture) NoSQL document-oriented database that is optimized for interactive applications. These applications must serve many concurrent users by creating, storing, retrieving, aggregating, manipulating and presenting data. In support of these kinds of application needs, Couchbase is designed to provide easy-to-scale key-value or document access with low latency and high sustained throughput. (http://en.wikipedia.org/wiki/Couchbase_Server)", + "properties": { + "sw_database_couchbaseserver_version": { + "title": "Version", + "description": "The specific version of Couchbase Server.", + "type": "string" + }, + "sw_database_couchbaseserver_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Couchbase is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 11211 + }, + "sw_database_couchbaseserver_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "admin" + } + } + }, + { + "name": "Redis", + "description": "Redis is a data structure server (NoSQL). It is open-source, networked, in-memory, and stores keys with optional durability. The development of Redis has been sponsored by Pivotal Software since May 2013; before that, it was sponsored by VMware. The name Redis means REmote DIctionary Server. 
(http://en.wikipedia.org/wiki/Redis)", + "properties": { + "sw_database_redis_version": { + "title": "Version", + "description": "The specific version of Redis.", + "type": "string" + }, + "sw_database_redis_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Redis is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 6379 + }, + "sw_database_redis_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "admin" + } + } + }, + { + "name": "CouchDB", + "description": "Apache CouchDB, commonly referred to as CouchDB, is an open source NoSQL database that uses JSON to store data, JavaScript as its query language using MapReduce, and HTTP for an API. One of its distinguishing features is multi-master replication. CouchDB was first released in 2005 and later became an Apache project in 2008. (http://en.wikipedia.org/wiki/CouchDB)", + "properties": { + "sw_database_couchdb_version": { + "title": "Version", + "description": "The specific version of CouchDB.", + "type": "string" + }, + "sw_database_couchdb_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which CouchDB is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 5984 + }, + "sw_database_couchdb_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string" + } + } + }, + { + "name": "Apache Cassandra", + "description": "Apache Cassandra is an open source distributed NoSQL database management system designed to handle large amounts of data across many commodity servers, providing high availability with no single point of failure. (http://en.wikipedia.org/wiki/Apache_Cassandra)", + "properties": { + "sw_database_cassandra_version": { + "title": "Version", + "description": "The specific version of Apache Cassandra.", + "type": "string" + }, + "sw_database_cassandra_listen_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Cassandra is to listen for connections from client applications.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 9160 + }, + "sw_database_cassandra_admin": { + "title": "Admin User", + "description": "The primary user with privileges to perform administrative operations.", + "type": "string", + "default": "cassandra" + } + } + }, + { + "name": "HBase", + "description": "HBase is an open source, non-relational (NoSQL), distributed database modeled after Google's BigTable and written in Java. It is developed as part of Apache Software Foundation's Apache Hadoop project and runs on top of HDFS (Hadoop Distributed Filesystem), providing BigTable-like capabilities for Hadoop. (http://en.wikipedia.org/wiki/Apache_HBase)", + "properties": { + "sw_database_hbase_version": { + "title": "Version", + "description": "The specific version of HBase.", + "type": "string" + } + } + }, + { + "name": "Hazelcast", + "description": "In computing, Hazelcast is an in-memory open source software data grid based on Java. By having multiple nodes form a cluster, data is evenly distributed among the nodes. 
This allows for horizontal scaling both in terms of available storage space and processing power. Backups are also distributed in a similar fashion to other nodes, based on configuration, thereby protecting against single node failure. (http://en.wikipedia.org/wiki/Hazelcast)", + "properties": { + "sw_database_hazelcast_version": { + "title": "Version", + "description": "The specific version of Hazelcast.", + "type": "string" + }, + "sw_database_hazelcast_port": { + "title": "Listen Port", + "description": "Specifies the TCP/IP port or local Unix domain socket file extension on which Hazelcast is to listen for connections between members.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 5701 + } + } + } + ] +} diff --git a/code/daisy/etc/metadefs/software-runtimes.json b/code/daisy/etc/metadefs/software-runtimes.json new file mode 100755 index 00000000..67c52353 --- /dev/null +++ b/code/daisy/etc/metadefs/software-runtimes.json @@ -0,0 +1,76 @@ +{ + "namespace": "OS::Software::Runtimes", + "display_name": "Runtime Environment", + "description": "Software is written in a specific programming language and the language must execute within a runtime environment. The runtime environment provides an abstraction for using a computer's processor, memory (RAM), and other system resources.", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + }, + { + "name": "OS::Nova::Instance" + } + ], + "objects": [ + { + "name": "PHP", + "description": "PHP is a server-side scripting language designed for web development but also used as a general-purpose programming language. PHP code can be simply mixed with HTML code, or it can be used in combination with various templating engines and web frameworks. PHP code is usually processed by a PHP interpreter, which is usually implemented as a web server's native module or a Common Gateway Interface (CGI) executable. After the PHP code is interpreted and executed, the web server sends resulting output to its client, usually in form of a part of the generated web page - for example, PHP code can generate a web page's HTML code, an image, or some other data. PHP has also evolved to include a command-line interface (CLI) capability and can be used in standalone graphical applications. (http://en.wikipedia.org/wiki/PHP)", + "properties": { + "sw_runtime_php_version": { + "title": "Version", + "description": "The specific version of PHP.", + "type": "string" + } + } + }, + { + "name": "Python", + "description": "Python is a widely used general-purpose, high-level programming language. Its design philosophy emphasizes code readability, and its syntax allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java. The language provides constructs intended to enable clear programs on both a small and large scale. Python supports multiple programming paradigms, including object-oriented, imperative and functional programming or procedural styles. It features a dynamic type system and automatic memory management and has a large and comprehensive standard library. 
(http://en.wikipedia.org/wiki/Python_(programming_language))", + "properties": { + "sw_runtime_python_version": { + "title": "Version", + "description": "The specific version of Python.", + "type": "string" + } + } + }, + { + "name": "Java", + "description": "Java is a general-purpose computer programming language that is concurrent, class-based, object-oriented, and specifically designed to have as few implementation dependencies as possible. It is intended to let application developers write once, run anywhere (WORA), meaning that code that runs on one platform does not need to be recompiled to run on another. Java applications are typically compiled to bytecode that can run on any Java virtual machine (JVM) regardless of computer architecture. (http://en.wikipedia.org/wiki/Java_(programming_language))", + "properties": { + "sw_runtime_java_version": { + "title": "Version", + "description": "The specific version of Java.", + "type": "string" + } + } + }, + { + "name": "Ruby", + "description": "Ruby is a dynamic, reflective, object-oriented, general-purpose programming language. It was designed and developed in the mid-1990s by Yukihiro Matsumoto in Japan. According to its authors, Ruby was influenced by Perl, Smalltalk, Eiffel, Ada, and Lisp. It supports multiple programming paradigms, including functional, object-oriented, and imperative. It also has a dynamic type system and automatic memory management. (http://en.wikipedia.org/wiki/Ruby_(programming_language))", + "properties": { + "sw_runtime_ruby_version": { + "title": "Version", + "description": "The specific version of Ruby.", + "type": "string" + } + } + }, + { + "name": "Perl", + "description": "Perl is a family of high-level, general-purpose, interpreted, dynamic programming languages. The languages in this family include Perl 5 and Perl 6. Though Perl is not officially an acronym, there are various backronyms in use, the most well-known being Practical Extraction and Reporting Language. (http://en.wikipedia.org/wiki/Perl)", + "properties": { + "sw_runtime_perl_version": { + "title": "Version", + "description": "The specific version of Perl.", + "type": "string" + } + } + } + ] +} \ No newline at end of file diff --git a/code/daisy/etc/metadefs/software-webservers.json b/code/daisy/etc/metadefs/software-webservers.json new file mode 100755 index 00000000..ac0cc848 --- /dev/null +++ b/code/daisy/etc/metadefs/software-webservers.json @@ -0,0 +1,102 @@ +{ + "namespace": "OS::Software::WebServers", + "display_name": "Web Servers", + "description": "A web server is a computer system that processes requests via HTTP, the basic network protocol used to distribute information on the World Wide Web. The most common use of web servers is to host websites, but there are other uses such as gaming, data storage, running enterprise applications, handling email, FTP, or other web uses. (http://en.wikipedia.org/wiki/Web_server)", + "visibility": "public", + "protected": true, + "resource_type_associations": [ + { + "name": "OS::Glance::Image" + }, + { + "name": "OS::Cinder::Volume", + "properties_target": "image" + }, + { + "name": "OS::Nova::Instance" + } + ], + "objects": [ + { + "name": "Apache HTTP Server", + "description": "The Apache HTTP Server, colloquially called Apache, is a Web server application notable for playing a key role in the initial growth of the World Wide Web. Apache is developed and maintained by an open community of developers under the auspices of the Apache Software Foundation. 
Most commonly used on a Unix-like system, the software is available for a wide variety of operating systems, including Unix, FreeBSD, Linux, Solaris, Novell NetWare, OS X, Microsoft Windows, OS/2, TPF, OpenVMS and eComStation. Released under the Apache License, Apache is open-source software. (http://en.wikipedia.org/wiki/Apache_HTTP_Server)", + "properties": { + "sw_webserver_apache_version": { + "title": "Version", + "description": "The specific version of Apache.", + "type": "string" + }, + "sw_webserver_apache_http_port": { + "title": "HTTP Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 80 + }, + "sw_webserver_apache_https_port": { + "title": "HTTPS Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 443 + } + } + }, + { + "name": "Nginx", + "description": "Nginx (pronounced 'engine-x') is an open source reverse proxy server for HTTP, HTTPS, SMTP, POP3, and IMAP protocols, as well as a load balancer, HTTP cache, and a web server (origin server). The nginx project started with a strong focus on high concurrency, high performance and low memory usage. It is licensed under the 2-clause BSD-like license and it runs on Linux, BSD variants, Mac OS X, Solaris, AIX, HP-UX, as well as on other *nix flavors. It also has a proof of concept port for Microsoft Windows. (http://en.wikipedia.org/wiki/Nginx)", + "properties": { + "sw_webserver_nginx_version": { + "title": "Version", + "description": "The specific version of Nginx.", + "type": "string" + }, + "sw_webserver_nginx_http_port": { + "title": "HTTP Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 80 + }, + "sw_webserver_nginx_https_port": { + "title": "HTTPS Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 443 + } + } + }, + { + "name": "IIS", + "description": "Internet Information Services (IIS, formerly Internet Information Server) is an extensible web server created by Microsoft. IIS supports HTTP, HTTPS, FTP, FTPS, SMTP and NNTP. IIS is not turned on by default when Windows is installed. The IIS Manager is accessed through the Microsoft Management Console or Administrative Tools in the Control Panel. 
(http://en.wikipedia.org/wiki/Internet_Information_Services)", + "properties": { + "sw_webserver_iis_version": { + "title": "Version", + "description": "The specific version of IIS.", + "type": "string" + }, + "sw_webserver_iis_http_port": { + "title": "HTTP Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTP connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 80 + }, + "sw_webserver_iis_https_port": { + "title": "HTTPS Port", + "description": "The configured TCP/IP port on which the web server listens for incoming HTTPS connections.", + "type": "integer", + "minimum": 1, + "maximum": 65535, + "default": 443 + } + } + } + ] +} \ No newline at end of file diff --git a/code/daisy/etc/oslo-config-generator/daisy-api.conf b/code/daisy/etc/oslo-config-generator/daisy-api.conf new file mode 100755 index 00000000..8ad50573 --- /dev/null +++ b/code/daisy/etc/oslo-config-generator/daisy-api.conf @@ -0,0 +1,11 @@ +[DEFAULT] +output_file = etc/daisy-api.conf.sample +namespace = daisy.api +namespace = daisy.store +namespace = oslo.concurrency +namespace = oslo.messaging +namespace = oslo.db +namespace = oslo.db.concurrency +namespace = oslo.policy +namespace = keystoneclient.middleware.auth_token +namespace = oslo.log diff --git a/code/daisy/etc/oslo-config-generator/daisy-cache.conf b/code/daisy/etc/oslo-config-generator/daisy-cache.conf new file mode 100755 index 00000000..0297eb34 --- /dev/null +++ b/code/daisy/etc/oslo-config-generator/daisy-cache.conf @@ -0,0 +1,5 @@ +[DEFAULT] +output_file = etc/daisy-cache.conf.sample +namespace = daisy.cache +namespace = oslo.log +namespace = oslo.policy diff --git a/code/daisy/etc/oslo-config-generator/daisy-manage.conf b/code/daisy/etc/oslo-config-generator/daisy-manage.conf new file mode 100755 index 00000000..6df2b938 --- /dev/null +++ b/code/daisy/etc/oslo-config-generator/daisy-manage.conf @@ -0,0 +1,6 @@ +[DEFAULT] +output_file = etc/daisy-manage.conf.sample +namespace = daisy.manage +namespace = oslo.db +namespace = oslo.db.concurrency +namespace = oslo.log diff --git a/code/daisy/etc/oslo-config-generator/daisy-registry.conf b/code/daisy/etc/oslo-config-generator/daisy-registry.conf new file mode 100755 index 00000000..d23fdad0 --- /dev/null +++ b/code/daisy/etc/oslo-config-generator/daisy-registry.conf @@ -0,0 +1,10 @@ +[DEFAULT] +output_file = etc/daisy-registry.conf.sample +namespace = daisy.registry +namespace = daisy.store +namespace = oslo.messaging +namespace = oslo.db +namespace = oslo.db.concurrency +namespace = oslo.policy +namespace = keystoneclient.middleware.auth_token +namespace = oslo.log diff --git a/code/daisy/etc/oslo-config-generator/daisy-scrubber.conf b/code/daisy/etc/oslo-config-generator/daisy-scrubber.conf new file mode 100755 index 00000000..46ce6931 --- /dev/null +++ b/code/daisy/etc/oslo-config-generator/daisy-scrubber.conf @@ -0,0 +1,8 @@ +[DEFAULT] +output_file = etc/daisy-scrubber.conf.sample +namespace = daisy.scrubber +namespace = oslo.concurrency +namespace = oslo.db +namespace = oslo.db.concurrency +namespace = oslo.log +namespace = oslo.policy diff --git a/code/daisy/etc/policy.json b/code/daisy/etc/policy.json new file mode 100755 index 00000000..4bbc8b46 --- /dev/null +++ b/code/daisy/etc/policy.json @@ -0,0 +1,61 @@ +{ + "context_is_admin": "role:admin", + "default": "", + + "add_image": "", + "delete_image": "", + "get_image": "", + "get_images": "", + "modify_image": "", + "publicize_image": "role:admin", + "copy_from": "", + + 
"download_image": "", + "upload_image": "", + + "delete_image_location": "", + "get_image_location": "", + "set_image_location": "", + + "add_member": "", + "delete_member": "", + "get_member": "", + "get_members": "", + "modify_member": "", + + "manage_image_cache": "role:admin", + + "get_task": "", + "get_tasks": "", + "add_task": "", + "modify_task": "", + + "deactivate": "", + "reactivate": "", + + "get_metadef_namespace": "", + "get_metadef_namespaces":"", + "modify_metadef_namespace":"", + "add_metadef_namespace":"", + + "get_metadef_object":"", + "get_metadef_objects":"", + "modify_metadef_object":"", + "add_metadef_object":"", + + "list_metadef_resource_types":"", + "get_metadef_resource_type":"", + "add_metadef_resource_type_association":"", + + "get_metadef_property":"", + "get_metadef_properties":"", + "modify_metadef_property":"", + "add_metadef_property":"", + + "get_metadef_tag":"", + "get_metadef_tags":"", + "modify_metadef_tag":"", + "add_metadef_tag":"", + "add_metadef_tags":"" + +} diff --git a/code/daisy/etc/property-protections-policies.conf.sample b/code/daisy/etc/property-protections-policies.conf.sample new file mode 100755 index 00000000..38f611e5 --- /dev/null +++ b/code/daisy/etc/property-protections-policies.conf.sample @@ -0,0 +1,34 @@ +# property-protections-policies.conf.sample +# +# This file is an example config file for when +# property_protection_rule_format=policies is enabled. +# +# Specify regular expression for which properties will be protected in [] +# For each section, specify CRUD permissions. You may refer to policies defined +# in policy.json. +# The property rules will be applied in the order specified. Once +# a match is found the remaining property rules will not be applied. +# +# WARNING: +# * If the reg ex specified below does not compile, then +# the glance-api service fails to start. (Guide for reg ex python compiler +# used: +# http://docs.python.org/2/library/re.html#regular-expression-syntax) +# * If an operation(create, read, update, delete) is not specified or misspelt +# then the glance-api service fails to start. +# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! +# +# NOTE: Only one policy can be specified per action. If multiple policies are +# specified, then the glance-api service fails to start. + +[^x_.*] +create = default +read = default +update = default +delete = default + +[.*] +create = context_is_admin +read = context_is_admin +update = context_is_admin +delete = context_is_admin diff --git a/code/daisy/etc/property-protections-roles.conf.sample b/code/daisy/etc/property-protections-roles.conf.sample new file mode 100755 index 00000000..634b5820 --- /dev/null +++ b/code/daisy/etc/property-protections-roles.conf.sample @@ -0,0 +1,32 @@ +# property-protections-roles.conf.sample +# +# This file is an example config file for when +# property_protection_rule_format=roles is enabled. +# +# Specify regular expression for which properties will be protected in [] +# For each section, specify CRUD permissions. +# The property rules will be applied in the order specified. Once +# a match is found the remaining property rules will not be applied. +# +# WARNING: +# * If the reg ex specified below does not compile, then +# glance-api service will not start. (Guide for reg ex python compiler used: +# http://docs.python.org/2/library/re.html#regular-expression-syntax) +# * If an operation(create, read, update, delete) is not specified or misspelt +# then the glance-api service will not start. 
+# So, remember, with GREAT POWER comes GREAT RESPONSIBILITY! +# +# NOTE: Multiple roles can be specified for a given operation. These roles must +# be comma separated. + +[^x_.*] +create = admin,member +read = admin,member +update = admin,member +delete = admin,member + +[.*] +create = admin +read = admin +update = admin +delete = admin diff --git a/code/daisy/etc/schema-image.json b/code/daisy/etc/schema-image.json new file mode 100755 index 00000000..5aafd6b3 --- /dev/null +++ b/code/daisy/etc/schema-image.json @@ -0,0 +1,28 @@ +{ + "kernel_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." + }, + "ramdisk_id": { + "type": "string", + "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", + "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." + }, + "instance_uuid": { + "type": "string", + "description": "ID of instance used to create this image." + }, + "architecture": { + "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_distro": { + "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", + "type": "string" + }, + "os_version": { + "description": "Operating system version as specified by the distributor", + "type": "string" + } +} diff --git a/code/daisy/etc/search-policy.json b/code/daisy/etc/search-policy.json new file mode 100755 index 00000000..dc324e25 --- /dev/null +++ b/code/daisy/etc/search-policy.json @@ -0,0 +1,8 @@ +{ + "context_is_admin": "role:admin", + "default": "", + + "catalog_index": "role:admin", + "catalog_search": "", + "catalog_plugins": "" +} diff --git a/code/daisy/openstack-common.conf b/code/daisy/openstack-common.conf new file mode 100755 index 00000000..b149287f --- /dev/null +++ b/code/daisy/openstack-common.conf @@ -0,0 +1,8 @@ +[DEFAULT] + +# The list of modules to copy from oslo-incubator +module=install_venv_common +module=service + +# The base module to hold the copy of openstack.common +base=daisy diff --git a/code/daisy/pylintrc b/code/daisy/pylintrc new file mode 100755 index 00000000..6b073fd9 --- /dev/null +++ b/code/daisy/pylintrc @@ -0,0 +1,27 @@ +[Messages Control] +# W0511: TODOs in code comments are fine. +# W0142: *args and **kwargs are fine. +# W0622: Redefining id is fine. +disable-msg=W0511,W0142,W0622 + +[Basic] +# Variable names can be 1 to 31 characters long, with lowercase and underscores +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Argument names can be 2 to 31 characters long, with lowercase and underscores +argument-rgx=[a-z_][a-z0-9_]{1,30}$ + +# Method names should be at least 3 characters long +# and be lowercased with underscores +method-rgx=[a-z_][a-z0-9_]{2,50}$ + +# Module names matching nova-* are ok (files in bin/) +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(nova-[a-z0-9_-]+))$ + +# Don't require docstrings on tests. 
+no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ + +[Design] +max-public-methods=100 +min-public-methods=0 +max-args=6 diff --git a/code/daisy/rally-jobs/README.rst b/code/daisy/rally-jobs/README.rst new file mode 100755 index 00000000..fc822c8f --- /dev/null +++ b/code/daisy/rally-jobs/README.rst @@ -0,0 +1,30 @@ +Rally job related files +======================= + +This directory contains rally tasks and plugins that are run by OpenStack CI. + +Structure +--------- + +* plugins - directory where you can add rally plugins. Almost everything in + Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic + cleanup resources, .... + +* extra - all files from this directory will be copy pasted to gates, so you + are able to use absolute paths in rally tasks. + Files will be located in ~/.rally/extra/* + +* glance.yaml is a task that is run in gates against OpenStack (nova network) + deployed by DevStack + + +Useful links +------------ + +* More about Rally: https://rally.readthedocs.org/en/latest/ + +* How to add rally-gates: https://rally.readthedocs.org/en/latest/rally_gatejob.html + +* About plugins: https://rally.readthedocs.org/en/latest/plugins.html + +* Plugin samples: https://github.com/stackforge/rally/tree/master/doc/samples/plugins \ No newline at end of file diff --git a/code/daisy/rally-jobs/extra/README.rst b/code/daisy/rally-jobs/extra/README.rst new file mode 100755 index 00000000..836f35a0 --- /dev/null +++ b/code/daisy/rally-jobs/extra/README.rst @@ -0,0 +1,5 @@ +Extra files +=========== + +All files from this directory will be copy pasted to gates, so you are able to +use absolute path in rally tasks. Files will be in ~/.rally/extra/* diff --git a/code/daisy/rally-jobs/extra/fake.img b/code/daisy/rally-jobs/extra/fake.img new file mode 100755 index 00000000..e69de29b diff --git a/code/daisy/rally-jobs/glance.yaml b/code/daisy/rally-jobs/glance.yaml new file mode 100755 index 00000000..8e5db0e9 --- /dev/null +++ b/code/daisy/rally-jobs/glance.yaml @@ -0,0 +1,45 @@ +--- + GlanceImages.create_and_list_image: + - + args: + image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" + container_format: "bare" + disk_format: "qcow2" + runner: + type: "constant" + times: 20 + concurrency: 5 + context: + users: + tenants: 1 + users_per_tenant: 1 + + GlanceImages.create_and_delete_image: + - + args: + image_location: "http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" + container_format: "bare" + disk_format: "qcow2" + runner: + type: "constant" + times: 20 + concurrency: 5 + context: + users: + tenants: 5 + users_per_tenant: 2 + + GlancePlugin.create_and_list: + - + args: + image_location: "~/.rally/extra/fake.img" + container_format: "bare" + disk_format: "qcow2" + runner: + type: "constant" + times: 700 + concurrency: 7 + context: + users: + tenants: 1 + users_per_tenant: 1 diff --git a/code/daisy/rally-jobs/plugins/README.rst b/code/daisy/rally-jobs/plugins/README.rst new file mode 100755 index 00000000..9b989240 --- /dev/null +++ b/code/daisy/rally-jobs/plugins/README.rst @@ -0,0 +1,9 @@ +Rally plugins +============= + +All *.py modules from this directory will be auto-loaded by Rally and all +plugins will be discoverable. There is no need of any extra configuration +and there is no difference between writing them here and in rally code base. + +Note that it is better to push all interesting and useful benchmarks to Rally +code base, this simplifies administration for Operators. 
\ No newline at end of file diff --git a/code/daisy/rally-jobs/plugins/plugin_sample.py b/code/daisy/rally-jobs/plugins/plugin_sample.py new file mode 100755 index 00000000..7a5676eb --- /dev/null +++ b/code/daisy/rally-jobs/plugins/plugin_sample.py @@ -0,0 +1,91 @@ +# Copyright 2014 Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" Sample of plugin for Glance. + +For more Glance related benchmarks take a look here: +github.com/stackforge/rally/blob/master/rally/benchmark/scenarios/glance/ + +About plugins: https://rally.readthedocs.org/en/latest/plugins.html + +Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts +""" + +import os + +from rally.benchmark.scenarios import base +from rally.benchmark import utils as bench_utils + + +class GlancePlugin(base.Scenario): + + @base.atomic_action_timer("glance.create_image_label") + def _create_image(self, image_name, container_format, + image_location, disk_format, **kwargs): + """Create a new image. + + :param image_name: String used to name the image + :param container_format: Container format of image. + Acceptable formats: ami, ari, aki, bare, and ovf. + :param image_location: image file location used to upload + :param disk_format: Disk format of image. Acceptable formats: + ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso. + :param **kwargs: optional parameters to create image + + returns: object of image + """ + + kw = { + "name": image_name, + "container_format": container_format, + "disk_format": disk_format, + } + + kw.update(kwargs) + + try: + if os.path.isfile(os.path.expanduser(image_location)): + kw["data"] = open(os.path.expanduser(image_location)) + else: + kw["copy_from"] = image_location + + image = self.clients("glance").images.create(**kw) + + image = bench_utils.wait_for( + image, + is_ready=bench_utils.resource_is("active"), + update_resource=bench_utils.get_from_manager(), + timeout=100, + check_interval=0.5) + + finally: + if "data" in kw: + kw["data"].close() + + return image + + @base.atomic_action_timer("glance.list_images_label") + def _list_images(self): + return list(self.clients("glance").images.list()) + + @base.scenario(context={"cleanup": ["glance"]}) + def create_and_list(self, container_format, + image_location, disk_format, **kwargs): + self._create_image(self._generate_random_name(), + container_format, + image_location, + disk_format, + **kwargs) + self._list_images() diff --git a/code/daisy/requirements.txt b/code/daisy/requirements.txt new file mode 100755 index 00000000..2856fd1c --- /dev/null +++ b/code/daisy/requirements.txt @@ -0,0 +1,65 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +pbr>=0.6,!=0.7,<1.0 +# +# The greenlet package must be compiled with gcc and needs +# the Python.h headers. Make sure you install the python-dev +# package to get the right headers... 
+greenlet>=0.3.2 + +# < 0.8.0/0.8 does not work, see https://bugs.launchpad.net/bugs/1153983 +SQLAlchemy>=0.9.7,<=0.9.99 +anyjson>=0.3.3 +eventlet>=0.16.1,!=0.17.0 +PasteDeploy>=1.5.0 +Routes>=1.12.3,!=2.0 +WebOb>=1.2.3 +sqlalchemy-migrate>=0.9.5 +httplib2>=0.7.5 +kombu>=2.5.0 +pycrypto>=2.6 +iso8601>=0.1.9 +ordereddict +oslo.config>=1.9.3,<1.10.0 # Apache-2.0 +oslo.concurrency>=1.8.0,<1.9.0 # Apache-2.0 +oslo.context>=0.2.0,<0.3.0 # Apache-2.0 +oslo.utils>=1.4.0,<1.5.0 # Apache-2.0 +stevedore>=1.3.0,<1.4.0 # Apache-2.0 +taskflow>=0.7.1,<0.8.0 +keystonemiddleware>=1.5.0,<1.6.0 +WSME>=0.6 +# For openstack/common/lockutils +posix_ipc + +# For Swift storage backend. +python-swiftclient>=2.2.0,<2.5.0 + +# For VMware storage backed. +oslo.vmware>=0.11.1,<0.12.0 # Apache-2.0 + +# For paste.util.template used in keystone.common.template +Paste + +jsonschema>=2.0.0,<3.0.0 +python-keystoneclient>=1.1.0,<1.4.0 +pyOpenSSL>=0.11 +# Required by openstack.common libraries +six>=1.9.0 + +oslo.db>=1.7.0,<1.8.0 # Apache-2.0 +oslo.i18n>=1.5.0,<1.6.0 # Apache-2.0 +oslo.log>=1.0.0,<1.1.0 # Apache-2.0 +oslo.messaging>=1.8.0,<1.9.0 # Apache-2.0 +oslo.policy>=0.3.1,<0.4.0 # Apache-2.0 +oslo.serialization>=1.4.0,<1.5.0 # Apache-2.0 + +retrying>=1.2.3,!=1.3.0 # Apache-2.0 +osprofiler>=0.3.0 # Apache-2.0 + +# Glance Store +glance_store>=0.3.0,<0.5.0 # Apache-2.0 + +# Artifact repository +semantic_version>=2.3.1 diff --git a/code/daisy/run_tests.sh b/code/daisy/run_tests.sh new file mode 100755 index 00000000..41a5f784 --- /dev/null +++ b/code/daisy/run_tests.sh @@ -0,0 +1,251 @@ +#!/bin/bash + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run Glance's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run PEP8 and HACKING compliance check" + echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1" + echo " -P, --no-pep8 Don't run static code checks" + echo " -c, --coverage Generate coverage report" + echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." + echo " -h, --help Print this usage message" + echo " --virtual-env-path Location of the virtualenv directory" + echo " Default: \$(pwd)" + echo " --virtual-env-name Name of the virtualenv directory" + echo " Default: .venv" + echo " --tools-path Location of the tools directory" + echo " Default: \$(pwd)" + echo " --concurrency How many processes to use when running the tests. A value of 0 autodetects concurrency from your CPU count" + echo " Default: 0" + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
+ exit +} + +function process_options { + i=1 + while [ $i -le $# ]; do + case "${!i}" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -s|--no-site-packages) no_site_packages=1;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_pep8=1;; + -8|--pep8-only-changed) just_pep8_changed=1;; + -P|--no-pep8) no_pep8=1;; + -c|--coverage) coverage=1;; + -d|--debug) debug=1;; + --virtual-env-path) + (( i++ )) + venv_path=${!i} + ;; + --virtual-env-name) + (( i++ )) + venv_dir=${!i} + ;; + --tools-path) + (( i++ )) + tools_path=${!i} + ;; + --concurrency) + (( i++ )) + concurrency=${!i} + ;; + -*) testropts="$testropts ${!i}";; + *) testrargs="$testrargs ${!i}" + esac + (( i++ )) + done +} + +tool_path=${tools_path:-$(pwd)} +venv_path=${venv_path:-$(pwd)} +venv_dir=${venv_name:-.venv} +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +no_site_packages=0 +installvenvopts= +testrargs= +testropts= +wrapper="" +just_pep8=0 +just_pep8_changed=0 +no_pep8=0 +coverage=0 +debug=0 +update=0 +concurrency=0 + +LANG=en_US.UTF-8 +LANGUAGE=en_US:en +LC_ALL=C + +process_options $@ +# Make our paths available to other scripts we call +export venv_path +export venv_dir +export venv_name +export tools_dir +export venv=${venv_path}/${venv_dir} + +if [ $no_site_packages -eq 1 ]; then + installvenvopts="--no-site-packages" +fi + +function run_tests { + # Cleanup *pyc + ${wrapper} find . -type f -name "*.pyc" -delete + + if [ $debug -eq 1 ]; then + if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then + # Default to running all tests if specific test is not + # provided. + testrargs="discover ./glance/tests" + fi + ${wrapper} python -m testtools.run $testropts $testrargs + + # Short circuit because all of the testr and coverage stuff + # below does not make sense when running testtools.run for + # debugging purposes. + return $? + fi + + if [ $coverage -eq 1 ]; then + TESTRTESTS="$TESTRTESTS --coverage" + else + TESTRTESTS="$TESTRTESTS" + fi + + # Just run the test suites in current environment + set +e + testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` + TESTRTESTS="$TESTRTESTS --testr-args='--subunit --concurrency $concurrency $testropts $testrargs'" + if [ setup.cfg -nt glance.egg-info/entry_points.txt ] + then + ${wrapper} python setup.py egg_info + fi + echo "Running \`${wrapper} $TESTRTESTS\`" + if ${wrapper} which subunit-2to1 2>&1 > /dev/null + then + # subunit-2to1 is present, testr subunit stream should be in version 2 + # format. Convert to version one before colorizing. + bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit-2to1 | ${wrapper} tools/colorizer.py" + else + bash -c "${wrapper} $TESTRTESTS | ${wrapper} tools/colorizer.py" + fi + RESULT=$? + set -e + + copy_subunit_log + + if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + # Don't compute coverage for common code, which is tested elsewhere + ${wrapper} coverage combine + ${wrapper} coverage html --include='glance/*' --omit='glance/openstack/common/*' -d covhtml -i + fi + + return $RESULT +} + +function copy_subunit_log { + LOGNAME=`cat .testrepository/next-stream` + LOGNAME=$(($LOGNAME - 1)) + LOGNAME=".testrepository/${LOGNAME}" + cp $LOGNAME subunit.log +} + +function warn_on_flake8_without_venv { + if [ $never_venv -eq 1 ]; then + echo "**WARNING**:" + echo "Running flake8 without virtual env may miss OpenStack HACKING detection" + fi +} + +function run_pep8 { + echo "Running flake8 ..." 
+ warn_on_flake8_without_venv + bash -c "${wrapper} flake8" +} + + +TESTRTESTS="lockutils-wrapper python setup.py testr" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py $installvenvopts + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py $installvenvopts + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py $installvenvopts + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + +if [ $just_pep8_changed -eq 1 ]; then + # NOTE(gilliard) We want use flake8 to check the entirety of every file that has + # a change in it. Unfortunately the --filenames argument to flake8 only accepts + # file *names* and there are no files named (eg) "nova/compute/manager.py". The + # --diff argument behaves surprisingly as well, because although you feed it a + # diff, it actually checks the file on disk anyway. + files=$(git diff --name-only HEAD~1 | tr '\n' ' ') + echo "Running flake8 on ${files}" + warn_on_flake8_without_venv + bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff" + exit +fi + +run_tests + +# NOTE(sirp): we only want to run pep8 when we're running the full-test suite, +# not when we're running tests individually. To handle this, we need to +# distinguish between options (testropts), which begin with a '-', and +# arguments (testrargs). 
+if [ -z "$testrargs" ]; then + if [ $no_pep8 -eq 0 ]; then + run_pep8 + fi +fi diff --git a/code/daisy/setup.cfg b/code/daisy/setup.cfg new file mode 100755 index 00000000..d24eef29 --- /dev/null +++ b/code/daisy/setup.cfg @@ -0,0 +1,84 @@ +[metadata] +name = daisy +version = 2015.1 +summary = OpenStack Image Service +description-file = + README.rst +author = OpenStack +author-email = openstack-dev@lists.openstack.org +home-page = http://www.openstack.org/ +classifier = + Environment :: OpenStack + Intended Audience :: Information Technology + Intended Audience :: System Administrators + License :: OSI Approved :: Apache Software License + Operating System :: POSIX :: Linux + Programming Language :: Python + Programming Language :: Python :: 2 + Programming Language :: Python :: 2.7 + +[global] +setup-hooks = + pbr.hooks.setup_hook + +[entry_points] +console_scripts = + daisy-api = daisy.cmd.api:main + daisy-cache-prefetcher = daisy.cmd.cache_prefetcher:main + daisy-cache-pruner = daisy.cmd.cache_pruner:main + daisy-cache-manage = daisy.cmd.cache_manage:main + daisy-cache-cleaner = daisy.cmd.cache_cleaner:main + daisy-control = daisy.cmd.control:main + daisy-search = daisy.cmd.search:main + daisy-index = daisy.cmd.index:main + daisy-manage = daisy.cmd.manage:main + daisy-registry = daisy.cmd.registry:main + daisy-replicator = daisy.cmd.replicator:main + daisy-scrubber = daisy.cmd.scrubber:main + daisy-orchestration = daisy.cmd.orchestration:main +daisy.common.image_location_strategy.modules = + location_order_strategy = daisy.common.location_strategy.location_order + store_type_strategy = daisy.common.location_strategy.store_type +oslo.config.opts = + daisy.api = daisy.opts:list_api_opts + daisy.registry = daisy.opts:list_registry_opts + daisy.scrubber = daisy.opts:list_scrubber_opts + daisy.cache= daisy.opts:list_cache_opts + daisy.manage = daisy.opts:list_manage_opts +daisy.database.migration_backend = + sqlalchemy = oslo.db.sqlalchemy.migration +daisy.database.metadata_backend = + sqlalchemy = daisy.db.sqlalchemy.metadata +daisy.search.index_backend = + image = daisy.search.plugins.images:ImageIndex + metadef = daisy.search.plugins.metadefs:MetadefIndex +daisy.flows = + import = daisy.async.flows.base_import:get_flow +daisy.flows.import = + convert = daisy.async.flows.convert:get_flow + introspect = daisy.async.flows.introspect:get_flow + +[build_sphinx] +all_files = 1 +build-dir = doc/build +source-dir = doc/source + +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + +[compile_catalog] +directory = daisy/locale +domain = daisy + +[update_catalog] +domain = daisy +output_dir = daisy/locale +input_file = daisy/locale/daisy.pot + +[extract_messages] +keywords = _ gettext ngettext l_ lazy_gettext +mapping_file = babel.cfg +output_file = daisy/locale/daisy.pot + diff --git a/code/daisy/setup.py b/code/daisy/setup.py new file mode 100755 index 00000000..73637574 --- /dev/null +++ b/code/daisy/setup.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +import setuptools + +# In python < 2.7.4, a lazy loading of package `pbr` will break +# setuptools if some other modules registered functions in `atexit`. +# solution from: http://bugs.python.org/issue15881#msg170215 +try: + import multiprocessing # noqa +except ImportError: + pass + +setuptools.setup( + setup_requires=['pbr'], + pbr=True) diff --git a/code/daisy/test-requirements.txt b/code/daisy/test-requirements.txt new file mode 100755 index 00000000..217038a5 --- /dev/null +++ b/code/daisy/test-requirements.txt @@ -0,0 +1,34 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +# Hacking already pins down pep8, pyflakes and flake8 +hacking>=0.10.0,<0.11 + +# For translations processing +Babel>=1.3 + +# Needed for testing +coverage>=3.6 +discover +fixtures>=0.3.14 +mox3>=0.7.0 +mock>=1.0 +sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 +requests>=2.2.0,!=2.4.0 +testrepository>=0.0.18 +testtools>=0.9.36,!=1.2.0 +psutil>=1.1.1,<2.0.0 +oslotest>=1.5.1,<1.6.0 # Apache-2.0 +# Optional packages that should be installed when testing +MySQL-python +psycopg2 +pysendfile==2.0.0 +qpid-python +xattr>=0.4 + +# Documentation +oslosphinx>=2.5.0,<2.6.0 # Apache-2.0 + +# Glance catalog index +elasticsearch>=1.3.0 diff --git a/code/daisy/tools/colorizer.py b/code/daisy/tools/colorizer.py new file mode 100755 index 00000000..ad4f8983 --- /dev/null +++ b/code/daisy/tools/colorizer.py @@ -0,0 +1,330 @@ +#!/usr/bin/env python + +# Copyright (c) 2013, Nebula, Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# Colorizer Code is borrowed from Twisted: +# Copyright (c) 2001-2010 Twisted Matrix Laboratories. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +"""Display a subunit stream through a colorized unittest test runner.""" + +import heapq +import sys +import unittest + +import subunit +import testtools + + +class _AnsiColorizer(object): + """A colorizer is an object that loosely wraps around a stream. + + That allows callers to write text to the stream in a particular color. + Colorizer classes must implement C{supported()} and C{write(text, color)}. + """ + _colors = dict(black=30, red=31, green=32, yellow=33, + blue=34, magenta=35, cyan=36, white=37) + + def __init__(self, stream): + self.stream = stream + + @staticmethod + def supported(stream=sys.stdout): + """Method that checks if the current terminal supports coloring. + + Returns True or False. + """ + if not stream.isatty(): + return False # auto color only on TTYs + try: + import curses + except ImportError: + return False + else: + try: + try: + return curses.tigetnum("colors") > 2 + except curses.error: + curses.setupterm() + return curses.tigetnum("colors") > 2 + except Exception: + # guess false in case of error + return False + + def write(self, text, color): + """Write the given text to the stream in the given color. + + @param text: Text to be written to the stream. + + @param color: A string label for a color. e.g. 'red', 'white'. + + """ + color = self._colors[color] + self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text)) + + +class _Win32Colorizer(object): + """See _AnsiColorizer docstring.""" + def __init__(self, stream): + import win32console + red, green, blue, bold = (win32console.FOREGROUND_RED, + win32console.FOREGROUND_GREEN, + win32console.FOREGROUND_BLUE, + win32console.FOREGROUND_INTENSITY) + self.stream = stream + self.screenBuffer = win32console.GetStdHandle( + win32console.STD_OUT_HANDLE) + self._colors = { + 'normal': red | green | blue, + 'red': red | bold, + 'green': green | bold, + 'blue': blue | bold, + 'yellow': red | green | bold, + 'magenta': red | blue | bold, + 'cyan': green | blue | bold, + 'white': red | green | blue | bold + } + + @staticmethod + def supported(stream=sys.stdout): + try: + import win32console + screenBuffer = win32console.GetStdHandle( + win32console.STD_OUT_HANDLE) + except ImportError: + return False + import pywintypes + try: + screenBuffer.SetConsoleTextAttribute( + win32console.FOREGROUND_RED | + win32console.FOREGROUND_GREEN | + win32console.FOREGROUND_BLUE) + except pywintypes.error: + return False + else: + return True + + def write(self, text, color): + color = self._colors[color] + self.screenBuffer.SetConsoleTextAttribute(color) + self.stream.write(text) + self.screenBuffer.SetConsoleTextAttribute(self._colors['normal']) + + +class _NullColorizer(object): + """See _AnsiColorizer docstring.""" + def __init__(self, stream): + self.stream = stream + + @staticmethod + def supported(stream=sys.stdout): + return True + + def write(self, text, color): + self.stream.write(text) + + +def get_elapsed_time_color(elapsed_time): + if elapsed_time > 1.0: + return 'red' + elif elapsed_time > 0.25: + return 'yellow' + else: + return 'green' + + +class SubunitTestResult(testtools.TestResult): + def __init__(self, stream, descriptions, verbosity): + super(SubunitTestResult, self).__init__() + self.stream = stream + self.showAll = verbosity > 1 + self.num_slow_tests 
= 10 + self.slow_tests = [] # this is a fixed-sized heap + self.colorizer = None + # NOTE(vish): reset stdout for the terminal check + stdout = sys.stdout + sys.stdout = sys.__stdout__ + for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]: + if colorizer.supported(): + self.colorizer = colorizer(self.stream) + break + sys.stdout = stdout + self.start_time = None + self.last_time = {} + self.results = {} + self.last_written = None + + def _writeElapsedTime(self, elapsed): + color = get_elapsed_time_color(elapsed) + self.colorizer.write(" %.2f" % elapsed, color) + + def _addResult(self, test, *args): + try: + name = test.id() + except AttributeError: + name = 'Unknown.unknown' + test_class, test_name = name.rsplit('.', 1) + + elapsed = (self._now() - self.start_time).total_seconds() + item = (elapsed, test_class, test_name) + if len(self.slow_tests) >= self.num_slow_tests: + heapq.heappushpop(self.slow_tests, item) + else: + heapq.heappush(self.slow_tests, item) + + self.results.setdefault(test_class, []) + self.results[test_class].append((test_name, elapsed) + args) + self.last_time[test_class] = self._now() + self.writeTests() + + def _writeResult(self, test_name, elapsed, long_result, color, + short_result, success): + if self.showAll: + self.stream.write(' %s' % str(test_name).ljust(66)) + self.colorizer.write(long_result, color) + if success: + self._writeElapsedTime(elapsed) + self.stream.writeln() + else: + self.colorizer.write(short_result, color) + + def addSuccess(self, test): + super(SubunitTestResult, self).addSuccess(test) + self._addResult(test, 'OK', 'green', '.', True) + + def addFailure(self, test, err): + if test.id() == 'process-returncode': + return + super(SubunitTestResult, self).addFailure(test, err) + self._addResult(test, 'FAIL', 'red', 'F', False) + + def addError(self, test, err): + super(SubunitTestResult, self).addFailure(test, err) + self._addResult(test, 'ERROR', 'red', 'E', False) + + def addSkip(self, test, reason=None, details=None): + super(SubunitTestResult, self).addSkip(test, reason, details) + self._addResult(test, 'SKIP', 'blue', 'S', True) + + def startTest(self, test): + self.start_time = self._now() + super(SubunitTestResult, self).startTest(test) + + def writeTestCase(self, cls): + if not self.results.get(cls): + return + if cls != self.last_written: + self.colorizer.write(cls, 'white') + self.stream.writeln() + for result in self.results[cls]: + self._writeResult(*result) + del self.results[cls] + self.stream.flush() + self.last_written = cls + + def writeTests(self): + time = self.last_time.get(self.last_written, self._now()) + if not self.last_written or (self._now() - time).total_seconds() > 2.0: + diff = 3.0 + while diff > 2.0: + classes = self.results.keys() + oldest = min(classes, key=lambda x: self.last_time[x]) + diff = (self._now() - self.last_time[oldest]).total_seconds() + self.writeTestCase(oldest) + else: + self.writeTestCase(self.last_written) + + def done(self): + self.stopTestRun() + + def stopTestRun(self): + for cls in list(self.results.iterkeys()): + self.writeTestCase(cls) + self.stream.writeln() + self.writeSlowTests() + + def writeSlowTests(self): + # Pare out 'fast' tests + slow_tests = [item for item in self.slow_tests + if get_elapsed_time_color(item[0]) != 'green'] + if slow_tests: + slow_total_time = sum(item[0] for item in slow_tests) + slow = ("Slowest %i tests took %.2f secs:" + % (len(slow_tests), slow_total_time)) + self.colorizer.write(slow, 'yellow') + self.stream.writeln() + last_cls = None + # 
sort by name + for elapsed, cls, name in sorted(slow_tests, + key=lambda x: x[1] + x[2]): + if cls != last_cls: + self.colorizer.write(cls, 'white') + self.stream.writeln() + last_cls = cls + self.stream.write(' %s' % str(name).ljust(68)) + self._writeElapsedTime(elapsed) + self.stream.writeln() + + def printErrors(self): + if self.showAll: + self.stream.writeln() + self.printErrorList('ERROR', self.errors) + self.printErrorList('FAIL', self.failures) + + def printErrorList(self, flavor, errors): + for test, err in errors: + self.colorizer.write("=" * 70, 'red') + self.stream.writeln() + self.colorizer.write(flavor, 'red') + self.stream.writeln(": %s" % test.id()) + self.colorizer.write("-" * 70, 'red') + self.stream.writeln() + self.stream.writeln("%s" % err) + + +test = subunit.ProtocolTestCase(sys.stdin, passthrough=None) + +if sys.version_info[0:2] <= (2, 6): + runner = unittest.TextTestRunner(verbosity=2) +else: + runner = unittest.TextTestRunner( + verbosity=2, resultclass=SubunitTestResult) + +if runner.run(test).wasSuccessful(): + exit_code = 0 +else: + exit_code = 1 +sys.exit(exit_code) diff --git a/code/daisy/tools/install_venv.py b/code/daisy/tools/install_venv.py new file mode 100755 index 00000000..f523f3e1 --- /dev/null +++ b/code/daisy/tools/install_venv.py @@ -0,0 +1,73 @@ +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Copyright 2010 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Installation script for Glance's development virtualenv +""" + +from __future__ import print_function + +import os +import sys + +import install_venv_common as install_venv # noqa + + +def print_help(): + help = """ + Glance development environment setup is complete. + + Glance development uses virtualenv to track and manage Python dependencies + while in development and testing. + + To activate the Glance virtualenv for the extent of your current shell session + you can run: + + $ source .venv/bin/activate + + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: + + $ tools/with_venv.sh + + Also, make test will automatically use the virtualenv. 
+ """ + print(help) + + +def main(argv): + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + venv = os.path.join(root, '.venv') + pip_requires = os.path.join(root, 'requirements.txt') + test_requires = os.path.join(root, 'test-requirements.txt') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'Glance' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + install.run_command([os.path.join(venv, 'bin/python'), + 'setup.py', 'develop']) + print_help() + +if __name__ == '__main__': + main(sys.argv) diff --git a/code/daisy/tools/install_venv_common.py b/code/daisy/tools/install_venv_common.py new file mode 100755 index 00000000..e279159a --- /dev/null +++ b/code/daisy/tools/install_venv_common.py @@ -0,0 +1,172 @@ +# Copyright 2013 OpenStack Foundation +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. + +Since this script is used to bootstrap a virtualenv from the system's Python +environment, it should be kept strictly compatible with Python 2.6. + +Synced in from openstack-common +""" + +from __future__ import print_function + +import optparse +import os +import subprocess +import sys + + +class InstallVenv(object): + + def __init__(self, root, venv, requirements, + test_requirements, py_version, + project): + self.root = root + self.venv = venv + self.requirements = requirements + self.test_requirements = test_requirements + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print(message % args, file=sys.stderr) + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is self.root. 
+ """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + else: + return Distro( + self.root, self.venv, self.requirements, + self.test_requirements, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print('Creating venv...', end=' ') + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print('done.') + else: + print("venv already exists...") + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print('Installing dependencies with pip (this can take a while)...') + + # First things first, make sure our venv has the latest pip and + # setuptools and pbr + self.pip_install('pip>=1.4') + self.pip_install('setuptools') + self.pip_install('pbr') + + self.pip_install('-r', self.requirements, '-r', self.test_requirements) + + def parse_args(self, argv): + """Parses command-line arguments.""" + parser = optparse.OptionParser() + parser.add_option('-n', '--no-site-packages', + action='store_true', + help="Do not inherit packages from global Python " + "install.") + return parser.parse_args(argv[1:])[0] + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print('Installing virtualenv via easy_install...', end=' ') + if self.run_command(['easy_install', 'virtualenv']): + print('Succeeded') + return + else: + print('Failed') + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + +class Fedora(Distro): + """This covers all Fedora-based distributions. + + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.die("Please install 'python-virtualenv'.") + + super(Fedora, self).install_virtualenv() diff --git a/code/daisy/tools/migrate_image_owners.py b/code/daisy/tools/migrate_image_owners.py new file mode 100755 index 00000000..62fd9842 --- /dev/null +++ b/code/daisy/tools/migrate_image_owners.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +import keystoneclient.v2_0.client +from oslo_config import cfg +from oslo_log import log as logging + +import glance.context +import glance.db.sqlalchemy.api as db_api +from glance import i18n +import glance.registry.context + +_ = i18n._ +_LC = i18n._LC +_LE = i18n._LE +_LI = i18n._LI + +LOG = logging.getLogger(__name__) +LOG.addHandler(logging.StreamHandler()) +LOG.setLevel(logging.DEBUG) + + +def get_owner_map(ksclient, owner_is_tenant=True): + if owner_is_tenant: + entities = ksclient.tenants.list() + else: + entities = ksclient.users.list() + # build mapping of (user or tenant) name to id + return dict([(entity.name, entity.id) for entity in entities]) + + +def build_image_owner_map(owner_map, db, context): + image_owner_map = {} + for image in db.image_get_all(context): + image_id = image['id'] + owner_name = image['owner'] + + if not owner_name: + LOG.info(_LI('Image %s has no owner. Skipping.') % image_id) + continue + + try: + owner_id = owner_map[owner_name] + except KeyError: + msg = (_LE('Image "%(image)s" owner "%(owner)s" was not found. ' + 'Skipping.'), + {'image': image_id, 'owner': owner_name}) + LOG.error(msg) + continue + + image_owner_map[image_id] = owner_id + + msg = (_LI('Image "%(image)s" owner "%(owner)s" -> "%(owner_id)s"'), + {'image': image_id, 'owner': owner_name, 'owner_id': owner_id}) + LOG.info(msg) + + return image_owner_map + + +def update_image_owners(image_owner_map, db, context): + for (image_id, image_owner) in image_owner_map.items(): + db.image_update(context, image_id, {'owner': image_owner}) + LOG.info(_LI('Image %s successfully updated.') % image_id) + + +if __name__ == "__main__": + config = cfg.CONF + extra_cli_opts = [ + cfg.BoolOpt('dry-run', + help='Print output but do not make db changes.'), + cfg.StrOpt('keystone-auth-uri', + help='Authentication endpoint'), + cfg.StrOpt('keystone-admin-tenant-name', + help='Administrative user\'s tenant name'), + cfg.StrOpt('keystone-admin-user', + help='Administrative user\'s id'), + cfg.StrOpt('keystone-admin-password', + help='Administrative user\'s password', + secret=True), + ] + config.register_cli_opts(extra_cli_opts) + config(project='glance', prog='glance-registry') + + db_api.configure_db() + + context = glance.common.context.RequestContext(is_admin=True) + + auth_uri = config.keystone_auth_uri + admin_tenant_name = config.keystone_admin_tenant_name + admin_user = config.keystone_admin_user + admin_password = config.keystone_admin_password + + if not (auth_uri and admin_tenant_name and admin_user and admin_password): + LOG.critical(_LC('Missing authentication arguments')) + sys.exit(1) + + ks = keystoneclient.v2_0.client.Client(username=admin_user, + password=admin_password, + tenant_name=admin_tenant_name, + auth_url=auth_uri) + + owner_map = get_owner_map(ks, config.owner_is_tenant) + image_updates = build_image_owner_map(owner_map, db_api, context) + if not config.dry_run: + update_image_owners(image_updates, db_api, context) diff 
--git a/code/daisy/tools/with_venv.sh b/code/daisy/tools/with_venv.sh new file mode 100755 index 00000000..7303990b --- /dev/null +++ b/code/daisy/tools/with_venv.sh @@ -0,0 +1,7 @@ +#!/bin/bash +TOOLS_PATH=${TOOLS_PATH:-$(dirname $0)} +VENV_PATH=${VENV_PATH:-${TOOLS_PATH}} +VENV_DIR=${VENV_NAME:-/../.venv} +TOOLS=${TOOLS_PATH} +VENV=${VENV:-${VENV_PATH}/${VENV_DIR}} +source ${VENV}/bin/activate && "$@" diff --git a/code/daisy/tox.ini b/code/daisy/tox.ini new file mode 100755 index 00000000..9b03bf30 --- /dev/null +++ b/code/daisy/tox.ini @@ -0,0 +1,56 @@ +[tox] +minversion = 1.6 +envlist = py27,py33,py34,pep8 +skipsdist = True + +[testenv] +setenv = VIRTUAL_ENV={envdir} +usedevelop = True +install_command = pip install -U {opts} {packages} +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = lockutils-wrapper python setup.py testr --slowest --testr-args='{posargs}' +whitelist_externals = bash + +[tox:jenkins] +downloadcache = ~/cache/pip + +[testenv:pep8] +commands = + flake8 {posargs} + # Check that .po and .pot files are valid: + bash -c "find daisy -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" + +[testenv:cover] +setenv = VIRTUAL_ENV={envdir} +commands = python setup.py testr --coverage --testr-args='^(?!.*test.*coverage).*$' + +[testenv:venv] +commands = {posargs} + +[testenv:genconfig] +commands = + oslo-config-generator --config-file etc/oslo-config-generator/daisy-api.conf + oslo-config-generator --config-file etc/oslo-config-generator/daisy-registry.conf + oslo-config-generator --config-file etc/oslo-config-generator/daisy-scrubber.conf + oslo-config-generator --config-file etc/oslo-config-generator/daisy-cache.conf + oslo-config-generator --config-file etc/oslo-config-generator/daisy-manage.conf + oslo-config-generator --config-file etc/oslo-config-generator/daisy-search.conf + +[testenv:docs] +commands = python setup.py build_sphinx + +[flake8] +# TODO(dmllr): Analyze or fix the warnings blacklisted below +# E711 comparison to None should be 'if cond is not None:' +# E712 comparison to True should be 'if cond is True:' or 'if cond:' +# H302 import only modules +# H402 one line docstring needs punctuation. 
+# H404 multi line docstring should start with a summary +# H405 multi line docstring summary not separated with an empty line +# H904 Wrap long lines in parentheses instead of a backslash +ignore = E711,E712,H302,H402,H404,H405,H904 +exclude = .venv,.git,.tox,dist,doc,etc,*daisy/locale*,*openstack/common*,*lib/python*,*egg,build + +[hacking] +local-check-factory = daisy.hacking.checks.factory diff --git a/code/daisyclient/AUTHORS b/code/daisyclient/AUTHORS new file mode 100755 index 00000000..2ede94ae --- /dev/null +++ b/code/daisyclient/AUTHORS @@ -0,0 +1,145 @@ +Abhishek Talwar +Adam Gandelman +Alan Meadows +Alessandro Pilotti +Alessio Ababilov +Alex Gaynor +Alex Meade +AmalaBasha +Andre Naehring +Andreas Jaeger +Andrew Laski +Andrey Kurilin +Andy McCrae +Anita Kuno +Bhuvan Arumugam +Bob Thyne +Boris Pavlovic +Brian Lamar +Brian Rosmaita +Brian Waldon +Chang Bo Guo +ChangBo Guo(gcb) +Chris Behrens +Chris Buccella +Chris Yeoh +Christian Berendt +Chuck Short +Cindy Pallares +Clark Boylan +Cyril Roelandt +Dan Prince +Davanum Srinivas +David Koo +David Peraza +David Wittman +Dazhao +Dean Troyer +Diego Parrilla +Dirk Mueller +Dominik Heidler +Doug Hellmann +Edward Hope-Morley +Eiichi Aikawa +Erno Kuvaja +Fei Long Wang +Fei Long Wang +Flaper Fesp +Flavio Percoco +Flavio Percoco +Florian Haas +Frederic Lepied +Gabe Westmaas +Gabriel Hurley +Ghe Rivero +Hugh Saunders +Ian Cordasco +Jakub Ruzicka +James E. Blair +James Li +James Page +Jamie Lennox +Jared Culp +Jay Pipes +Jeremy Stanley +Jimmy McCrory +Joe Gordon +John Bresnahan +John Trowbridge +Jon Bernard +Juan Manuel Olle +Justin Santa Barbara +Kamil Rykowski +Ken'ichi Ohmichi +Kevin McDonald +Kirill +Lakshmi N Sampath +Lars Gellrich +Le Tian Ren +Longgeek +Louis Taylor +Louis Taylor +Maithem +Manuel Desbonnet +Mark J. Washenberger +Mark McLoughlin +Markus Zoeller +Matt Riedemann +Matthew Booth +MattieuPuel +Michael Basnight +Michael Still +Michal Dulko +Mike Fedosin +Monty Taylor +Nikhil Komawar +Noboru arai +Oleksii Chuprykov +Pawel Koniszewski +Rakesh H S +Rob Crittenden +Russell Bryant +Sabari Kumar Murugesan +Sascha Peilicke +Sean Dague +Sean Dague +Shane Wang +Stanislaw Pitucha +Steve Lewis +Steve Martinelli +Stuart McLaren +Sudipta Biswas +Sulochan Acharya +Sushil Kumar +Tatyana Leontovich +Thierry Carrez +Thomas Leaman +Tom Leaman +Travis Tripp +Unmesh Gurjar +Venkatesh Sampath +Victor Morales +Vincent Untz +Vishvananda Ishaya +Wu Wenxiang +Yamini Sardana +Yang Yu +Yassine Lamgarchal +Yvonne Stachowski +Zhenguo Niu +Zhi Yan Liu +ZhiQiang Fan +amalaba +d34dh0r53 +eddie-sheffield +iccha-sethi +iccha.sethi +isethi +jaypipes +liuqing +llg8212 +lrqrun +m.benchchaoui@cloudbau.de +mouad benchchaoui +sridhargaddam +wanghong diff --git a/code/daisyclient/CONTRIBUTING.rst b/code/daisyclient/CONTRIBUTING.rst new file mode 100755 index 00000000..35564c59 --- /dev/null +++ b/code/daisyclient/CONTRIBUTING.rst @@ -0,0 +1,16 @@ +If you would like to contribute to the development of OpenStack, +you must follow the steps documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Once those steps have been completed, changes to OpenStack +should be submitted for review via the Gerrit tool, following +the workflow documented at: + + http://docs.openstack.org/infra/manual/developers.html#development-workflow + +Pull requests submitted through GitHub will be ignored. 
+ +Bugs should be filed on Launchpad, not GitHub: + + https://bugs.launchpad.net/python-glanceclient diff --git a/code/daisyclient/ChangeLog b/code/daisyclient/ChangeLog new file mode 100755 index 00000000..07faa8b0 --- /dev/null +++ b/code/daisyclient/ChangeLog @@ -0,0 +1,521 @@ +CHANGES +======= + +0.17.0 +------ + +* Add release notes for 0.17.0 +* Updated help for v2 member-update api +* Extend images CLI v2 with new sorting syntax +* Add the ability to specify the sort dir for each key +* Import sys module +* Adds the ability to sort images with multiple keys +* add examples for properties and doc build script +* Apply expected patch format when updating tags in v2.images +* v2: read limit for list from --limit in shell +* Fix leaking sockets after v2 list operation +* Fix leaking sockets after v1 list operation + +0.16.1 +------ + +* Add release notes for 0.16.1 +* removed excessive call to os.path.exists +* Fix tests failing if keystone is running locally +* Unify using six.moves.range rename everywhere + +0.16.0 +------ + +* Add release notes for 0.16.0 +* Show error on trying to upload to non-queued image +* https: Prevent leaking sockets for some operations +* Glance image delete output +* Strip json and html from error messages +* Unit tests covering missing username or password +* Register our own ConnectionPool without globals +* Updated from global requirements +* Change oslo.utils to oslo_utils +* Return 130 for keyboard interrupt +* Ignore NoneType when encoding headers +* Remove graduated gettextutils from openstack/common +* Use utils.exit rather than print+sys.exit +* Remove uuidutils from openstack-common +* Add a `--limit` parameter to list operations +* Fixed CLI help for bash-completion +* Remove openstack.common.importutils +* Remove openstack.common.strutils +* Adds basic examples of v2 API usage +* Sync latest apiclient from oslo-inc +* Remove duplicate 'a' in the help string of --os-image-url +* Close streamed requests explicitly +* Handle HTTP byte returns in python 3 +* Updated from global requirements +* Add validation to --property-filter in v1 shell +* v2: Allow upload from stdin on image-create +* Fix v2 image create --file documentation +* Make non-boolean check strict +* Disable progress bar if image is piped into client +* Fix Requests breaking download progress bar +* Fix broken-pipe seen in glance-api +* Update HTTPS certificate handling for pep-0476 + +0.15.0 +------ + +* Add release notes for 0.15.0 +* Support Pagination for namespace list +* Output clear error message on invalid api version +* Support schema types with non-str value +* Don't require version to create Client instance +* Add os_ prefix to project_domain_name/id +* Workflow documentation is now in infra-manual +* Allow --file in image-create with v2 Image API +* Add useful error on invalid --os-image-api-version +* Add release notes for 0.14.0 - 0.14.2 +* Fix minor typo in version error message +* Send `identity_headers` through the wire +* Curl statements to include globoff for IPv6 URLs +* Remove readonly options from v2 shell commands +* Add --property-filter option to v2 image-list +* Fix py34 failure for glance client + +0.14.2 +------ + +* Don't set X-Auth-Token key in http session header if no token provided +* Don't replace the https handler in the poolmanager +* Refactor method of constructing dicts in some tests +* Adds tty password entry for glanceclient +* '--public' ignored on image create +* Remove network_utils +* Skip non-base properties in patch method +* Adds support 
for Glance Tasks calls +* Reduce the set of supported client SSL ciphers +* Fix the ordering of assertEqual arguments + +0.14.1 +------ + +* Update how tokens are redacted +* Handle UnicodeDecodeError in log_http_response +* Print traceback to stderr if --debug is set +* Stop using intersphinx +* Updated from global requirements +* Fix v2 requests to non-bleeding edge servers +* Fix to ensure endpoint_type is used by _get_endpoint() +* Work toward Python 3.4 support and testing + +0.14.0 +------ + +* Support for Metadata Definitions Catalog API +* Catch new urllib3 exception: ProtocolError +* Default to system CA bundle if no CA certificate is provided +* Import missing gettextutils._ in shell.py +* Fix error when logging http response with python 3 +* Fix indentation in tox.ini +* Add bash completion to glance client +* Ensure server's SSL cert is validated +* Enable osprofiler interface in glanceclient shell +* Hide stderr noise in test output +* Remove deprecated commands from shell +* Normalize glanceclient requested service url +* Fix glance-client to work with IPv6 controllers +* Add support for Keystone v3 +* Downgrade log message for http request failures +* Update theme for docs +* Add a tox job for generating docs +* Don't stream non-binary requests +* Use a correctly formatted example location in help +* Replace old httpclient with requests +* CLI image-update gives a wrong help on '--tags' param +* Enable F841 +* Resolving the performance issue for image listing of v2 API +* Add profiling support to glanceclinet +* Use immutable arg rather mutable arg +* Add CONTRIBUTING.rst + +0.13.1 +------ + +* Added release notes for 0.13.0 +* Add wheels section to the setup.cfg +* Add missing classifiers +* Add license to setup.cfg +* Fix CA certificate handling +* Add the six module dependency +* Prepend '/' to the delete url for the v2 client +* Set purge-props header correctly in image update +* Updated from global requirements +* Change a debug line to prevent UnicodeDecodeError issue +* Add support for location parameters in v2 commands +* Convert passed integer values into int in v1 shell +* Reuse class Manager from common code +* Fix help text in image-create +* Python 3: use next(foo) instead of foo.next() +* Remove auth token from http logging +* Finalize Python3 support +* fixed typos found by RETF rules +* Updated from global requirements +* Remove py3k module +* Return request ID to callers +* progress flag not supported in v2 API +* Fix for invalid literal ValueError parsing ipv6 url(s) +* Adding network_utils module from oslo-incubator +* Sync with oslo-incubator +* Fix the parameter order of assertEqual in glanceclient v1 test +* Sync with Oslo +* Python 3: do not use __builtin__ +* Change assertTrue(isinstance()) by optimal assert +* Updated from global requirements +* Python3: do not use the 'file' type +* Python 3: do not use the unicode() function +* Fix the parameter order of assertEqual in glanceclient v2 test +* Improve help strings +* Fix the parameter order of assertEqual in glanceclient test +* Python3: define a __next__() method for VerboseIteratorWrapper +* test_shell: remove a deprecated keyword argument +* Python 3: Fix JsonPatch-related issues +* Pass bytes to tempfile.NamedTemporaryFile().write() +* Replace file with open, which is Python 3 compatible +* Remove tox locale overrides +* Fix misspellings in python-glanceclient +* Update my mailmap +* Add support for image size in v2 api upload +* Only show progress bar for local image files +* Using common 
method 'bool_from_string' from oslo strutils +* Handle endpoints with versions consistently +* Allow updating empty created v2 images from v1 +* server 500 should not be a client error +* It was removed urllib, urllib2 & urlparse modules +* python3: Switch to mox3 instead of mox +* Remove vim header +* Python 3: use six.iteritems and six.string_types +* Python3: use six.StringIO rather than StringIO.StringIO +* Python3: use six.StringIO rather than StringIO.StringIO +* Replace file.write and os.path.exists by mock +* Python 3: use six.iteritems() instead of iteritems() +* Python 3: use six.iteritems() instead of iteritems() +* Fix glanceclient http.py string formatting error +* Reuse Resource from oslo +* Get better format for long lines with PrettyTable +* Remove unused imports +* Sync apiclient and py3kcompat from oslo +* Fix and enable gating on H306 +* SSL: Handle wildcards in Subject Alternative Names +* Updated from global requirements +* Replace inheritance hierarchy with composition +* Updates tox.ini to use new features +* Updates .gitignore +* Readd missing Babel dependency after merge from Oslo(master/bdda833) +* Fix extra new line that break from progress bar + +0.12.0 +------ + +* Add release notes for 0.12.0 +* Make HACKING.rst DRYer +* change assertEquals to assertEqual +* Fix Pep8 errors found by Pep8 1.4.6 +* python3: use six.moves for httplib imports +* Sync from oslo-incubator +* python3: xrange no longer exists +* Fix misused assertTrue in unit tests +* Add CLI for V2 image create, update, and upload +* Fix regression bug after removing posixpath in http.py +* Fix getting header in redirect processing +* Fix default value for a header +* Replace OpenStack LLC with OpenStack Foundation +* Support glance client can get ipv6 image url correctly +* Added support for running the tests under PyPy with tox +* Enable query image by tag +* Fix python 3.x related Hacking warnings +* Fix glanceclient usage inconsistences for options +* Add 0.11.0 doc notes +* Use openstack-images-v2.1-json-patch for update method +* Allow single-wildcard SSL common name matching +* Revert "removed deprecated parameter --public" +* \Allow removal of properties using glance v2 api +* Updated from global requirements + +0.10.0 +------ + +* Revert 02116565d358a4fa254217779fef82b14b38d8ca +* Add 0.10.0 docs update +* Show a pretty progressbar when uploading and downloading an image +* Raise warlock requirement +* Cast image_id to string before calling urllib.quote +* Don't use posixpath for URLs +* Changes to allow image upload with V2 api +* removed deprecated parameter --public +* Encode error messages before sending them to stdout +* Allow v1 client to list all users' images +* Add v1 client side owner based filtering +* Enable client library V2 to create an image +* Provide glance CLI man page +* Fix test assertions & test cases for V2 Shell Unit test +* HTTPS response issues +* Increase default page_size value +* Pass all identity headers received to glance +* Fix SSL certificate CNAME checking +* uncap python-keystoneclient version requirement +* Expose checksum index image property in client +* Flake8 should ignore build folder +* Enable client V2 to update/delete tags for a given image +* Rename invalid domain name to be RFC compliant +* Start using Pyflakes and Hacking +* Removes extra slash on endpoints without a path +* Remove explicit distribute depend +* Replace utils.ensure_(str|unicode) with strutils.safe(decode|encode) +* Do not decode headers in v1/images.py +* Fix problem where image 
data is not read from a pipe +* Add tests for encodings +* python3: Introduce py33 to tox.ini +* Rename requires files to standard names +* Don't attempt to read stdin if it is empty +* Update importutils and openstack-common.conf format +* Convert non-ascii characters within image property to unicode +* Migrate to pbr +* Migrate to flake8 +* Add test for glanceclient shells +* Improve unit tests for python-glanceclient.glanceclient.common.base +* Image Members for glance v2 api +* Fix inconsistent --debug messages on image-update +* Expand HACKING with commit message guidelines +* Prevent WantReadError when using https +* Improve Python 3.x compatibility +* Sync with oslo-incubator copy of setup.py and version.py +* bug 1166263 image-update handling for closed stdin +* Test that copy_from is used properly in old API +* Fix "glance add" parsing of "copy_from" option +* Fix problem running glance --version +* Improve unit tests for python-glanceclient.glanceclient.common.http + +0.9.0 +----- + +* Add docs for 0.9.0 +* Filter images list by public=True|False +* Trapping KeyboardInterrupt sooner +* Allow for prettytable 0.7.x as well +* Implements filters: visibility, owner, member_status. Includes tests +* Add missing spaces in help msg +* Control C does not cancel the CLI cleanly +* Replace SchemaNotFound with HTTPNotFound +* Use getattr properly in legacy shell + +0.8.0 +----- + +* Add docs for v0.8.0 +* Report name resolution errors properly +* Decode input and encode output +* Add library support for v2 image update +* Expect minumum warlock version of 0.7.0 +* Update to latest oslo-version +* Update .coveragerc +* Make effective ssl callback behaviour more obvious +* Quote image ids before passing them to glance +* Fix typo in image-update help page +* Adds image-delete functionality +* Change https port to be an optional parameter +* Migrate to testr +* Add image names to glance command arguments +* Use testtools instead of unittest +* Add details to stdout error message + +0.7.0 +----- + +* Document v0.7.0 release +* Support --os-cacert +* Update --location help to reference swift store +* Change default image sort to use name +* Add --sort-key and --sort-dir to image-list +* Pin pep8 to 1.3.3 +* Allow setting x-image-meta-store through shell on image creation +* Verify that host matches certificate + +0.6.0 +----- + +* Document bugs/features for v0.6.0 +* Hook up region_name argument +* Simplify human-readable size output +* Make image sizes more readable for humans +* Set useful boolean flag metavars +* Unpin keystoneclient dependency +* Fixes bug on Windows related to a wrong API url +* Enhance --checksum help with algorithm +* added --version as new parameter +* Fixes setup compatibility issue on Windows +* Allow deletion of multiple images through CLI +* Fixes shell command for member-delete +* Add OpenStack trove classifier for PyPI +* Implement blueprint ssl-connect-rework +* Handle create/update of images with unknown size +* Display acceptable disk/container formats in help text +* Simplify http(s) connection instantiation +* Add happy path tests for ResponseBodyIterator +* Use full URI path from Glance endpoint in HTTP requests +* Typo in image-create help page +* Fixes glance add / update / image-create / image-update on Windows +* Fix weird "None" displayed on some errors +* Make ConnectionRefused error more informative + +0.5.1 +----- + +* Document remaining bug for v0.5.1 +* Update docs for v0.5.1 release +* Corrects URI to display hostname, port properly +* Catches HTTP 
300 while printing responses +* get_connection should raise httplib.InvalidURL +* Fix PEP8 issues +* Specified Content-Length in update request header +* Sync importutils changes from openstack-common + +0.5.0 +----- + +* Update release notes for v0.5.0 +* Add nosehtmloutput as a test dependency +* Update command descriptions +* Update pip-requires with warlock<2 +* Enable client V1 to download images +* Simplify docs and provide 'News' on index.rst + +0.4.2 +----- + +* Ensure v1 'limit' query parameter works correctly + +0.4.1 +----- + +* Allow 'deleted' to be passed through image update +* Cast is_public, protected, deleted to bool +* Return known int values as int, not str +* Use system CA certificate file + +0.4.0 +----- + +* socket errors and timeouts should be CommunicationErrors +* Handle communication failures cleanly +* Enable client V2 to download images +* Refactor HTTP-related exceptions +* Simplify v2 schema lookup +* legacy_shell.py shouldn't be executable +* Client-side SSL Connection +* SSL Certificate Validation + +0.3.0 +----- + +* Add missing copyright headers +* Add legacy compat layer to v1 shell +* Allow CLI opts to override auth token and endpoint +* Update python-keystoneclient version dependency +* Stop looking for v2 image in container + +0.2.0 +----- + +* Add exceptions for 500 and 503 HTTP status codes +* Refactor http request/response logging +* Fix --debug CLI option +* Fix coverage reporting test +* Honor '--insecure' commandline flag also for keystone authentication +* Replace httplib2 with httplib as http driver +* Clarify usage of --insecure flag +* Add pagination to v1 image-list +* Update README usage examples +* Relax prettytable dependency to v0.6.X from v0.6 +* Add pagination to v2 image-list +* Prevent links from being printed in v2 CLI +* Align print_dict to the left +* Convert v2 images list method to generator +* Replace static v2 Image model with warlock model +* Add support for viewing a single image through v2 +* Rewrite link parsing for finding v2 schemas +* Establish the supported importable interface +* Add --is-public to image-create +* Wrap image data in iterator +* Translate is_protected to protected +* Change --protected to --is-protected in create +* Properly map boolean-like arguments to True/False +* Add ability to get version information in python +* Latest setup goodness +* Remove AuthorizationFailure exception +* Preserve image properties on update +* Add --file to image-update and correct bad name +* Allow image filtering by custom properties +* Expand v1 image-list filters +* Add --timeout option to cli +* Add size filtering to image-list action +* Allow image upload from local file to v1 API +* Use PyPI for keystoneclient +* Switch CLI to support underscores and dashes + +0.1.1 +----- + +* Split reading of versioninfo out into a method +* Add support for tag-based version numbers +* Support --os-endpoint-type in glanceclient +* Hook up GET /v1/images/ + +0.1.0 +----- + +* Add initial docs +* Edit build_sphinx options +* Minimize tox.ini +* Add 'explain' command to v2 that describes schemas +* Stick prettytable at v0.6 +* Add tests dir to pep8 command +* Set pep8 dependency at v1.2 +* Add minimal support for the v2 API +* Auto generate AUTHORS file for glanceclient component +* Include ChangeLog in tarball +* Properly install from zipball +* Adds support for --insecure +* Fix the zipball change +* Replace git url with github zipball +* Refactor HTTPClient to use two request methods +* Add missing files to MANIFEST.in +* Add 
importutils from openstack-common +* Adding service type as configurable shell option +* Remove printt +* Added condition requirement to simplejson +* Use tox for running tests locally +* Adds filter support to images.list() +* Add '.tox' to .gitignore +* Add fields to image-list +* Strip version from service catalog endpoint +* Fix image-create using pipelines +* Allow tenant name to be used in authentication +* Make tox cover output coverage.xml +* Add Sphinx to test-requires +* Updated depend processing to norms +* Fixing pep8 errors +* Add AUTHORS test case +* Added gitreview file +* Adding id for image members +* image membership management works +* Adding support for passing image data through cli +* Image update works +* More complete image creation +* Correct keystoneclient egg name in pip-requires +* Adding image-create action +* Adding shared-images support +* Image members bones +* Basic testing +* Update version to 2012.2 +* Further cleanup +* Basic get/list operations work +* All the latest OpenStack hotness +* Initial checkin for new CLI and client package diff --git a/code/daisyclient/HACKING.rst b/code/daisyclient/HACKING.rst new file mode 100755 index 00000000..fa8a64a6 --- /dev/null +++ b/code/daisyclient/HACKING.rst @@ -0,0 +1,12 @@ +Glance Style Commandments +========================= + +- Step 1: Read the OpenStack Style Commandments + http://docs.openstack.org/developer/hacking/ +- Step 2: Read on + + +Glance Specific Commandments +---------------------------- + +None so far diff --git a/code/daisyclient/LICENSE b/code/daisyclient/LICENSE new file mode 100755 index 00000000..67db8588 --- /dev/null +++ b/code/daisyclient/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/code/daisyclient/MANIFEST.in b/code/daisyclient/MANIFEST.in new file mode 100755 index 00000000..5be0f94c --- /dev/null +++ b/code/daisyclient/MANIFEST.in @@ -0,0 +1,4 @@ +include AUTHORS +include ChangeLog +exclude .gitignore +exclude .gitreview diff --git a/code/daisyclient/PKG-INFO b/code/daisyclient/PKG-INFO new file mode 100755 index 00000000..488b9148 --- /dev/null +++ b/code/daisyclient/PKG-INFO @@ -0,0 +1,32 @@ +Metadata-Version: 1.1 +Name: python-daisyclient +Version: 0.17.0 +Summary: OpenStack Image API Client Library +Home-page: http://www.openstack.org/ +Author: OpenStack +Author-email: openstack-dev@lists.openstack.org +License: Apache License, Version 2.0 +Description: Python bindings to the OpenStack Images API + ============================================= + + This is a client library for Glance built on the OpenStack Images API. It provides a Python API (the ``daisyclient`` module) and a command-line tool (``daisy``). This library fully supports the v1 Images API, while support for the v2 API is in progress. + + Development takes place via the usual OpenStack processes as outlined in the `developer guide `_. The master repository is in `Git `_. + + See release notes and more at ``_. 
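+        A minimal usage sketch, assuming a reachable v1 endpoint and that the
+        v1 client mirrors the glanceclient interface (the endpoint and token
+        below are placeholders)::
+
+            from daisyclient import client
+
+            # The API version is parsed from the endpoint URL; the token is
+            # passed through to the underlying HTTP client.
+            daisy = client.Client(endpoint='http://HOST:PORT/v1',
+                                  token='AUTH_TOKEN')
+            for image in daisy.images.list():
+                print(image.name)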
+ + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Environment :: OpenStack +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: POSIX :: Linux +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 diff --git a/code/daisyclient/README.rst b/code/daisyclient/README.rst new file mode 100755 index 00000000..09bbdbb8 --- /dev/null +++ b/code/daisyclient/README.rst @@ -0,0 +1,8 @@ +Python bindings to the OpenStack Images API +============================================= + +This is a client library for Glance built on the OpenStack Images API. It provides a Python API (the ``glanceclient`` module) and a command-line tool (``glance``). This library fully supports the v1 Images API, while support for the v2 API is in progress. + +Development takes place via the usual OpenStack processes as outlined in the `developer guide `_. The master repository is in `Git `_. + +See release notes and more at ``_. diff --git a/code/daisyclient/daisyclient/__init__.py b/code/daisyclient/daisyclient/__init__.py new file mode 100755 index 00000000..efe3ea6a --- /dev/null +++ b/code/daisyclient/daisyclient/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +#NOTE(bcwaldon): this try/except block is needed to run setup.py due to +# its need to import local code before installing required dependencies +try: + import daisyclient.client + Client = daisyclient.client.Client +except ImportError: + import warnings + warnings.warn("Could not import daisyclient.client", ImportWarning) + +import pbr.version + +version_info = pbr.version.VersionInfo('python-daisyclient') + +try: + __version__ = version_info.version_string() +except AttributeError: + __version__ = None diff --git a/code/daisyclient/daisyclient/_i18n.py b/code/daisyclient/daisyclient/_i18n.py new file mode 100755 index 00000000..6963c076 --- /dev/null +++ b/code/daisyclient/daisyclient/_i18n.py @@ -0,0 +1,34 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
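+# NOTE: oslo.i18n dropped its 'oslo' namespace package in newer releases;
+# prefer the new-style 'oslo_i18n' import and fall back to the old namespace
+# import for older versions of the library.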
+try: + import oslo_i18n as i18n +except ImportError: + from oslo import i18n + + +_translators = i18n.TranslatorFactory(domain='glanceclient') + +# The primary translation function using the well-known name "_" +_ = _translators.primary + +# Translators for log levels. +# +# The abbreviated names are meant to reflect the usual use of a short +# name like '_'. The "L" is for "log" and the other letter comes from +# the level. +_LI = _translators.log_info +_LW = _translators.log_warning +_LE = _translators.log_error +_LC = _translators.log_critical diff --git a/code/daisyclient/daisyclient/client.py b/code/daisyclient/daisyclient/client.py new file mode 100755 index 00000000..19351f0c --- /dev/null +++ b/code/daisyclient/daisyclient/client.py @@ -0,0 +1,39 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from daisyclient.common import utils + + +def Client(version=None, endpoint=None, *args, **kwargs): + if version is not None: + warnings.warn(("`version` keyword is being deprecated. Please pass the" + " version as part of the URL. " + "http://$HOST:$PORT/v$VERSION_NUMBER"), + DeprecationWarning) + + endpoint, url_version = utils.strip_version(endpoint) + + if not url_version and not version: + msg = ("Please provide either the version or an url with the form " + "http://$HOST:$PORT/v$VERSION_NUMBER") + raise RuntimeError(msg) + + version = int(version or url_version) + + module = utils.import_versioned_module(version, 'client') + client_class = getattr(module, 'Client') + return client_class(endpoint, *args, **kwargs) diff --git a/code/daisyclient/daisyclient/common/__init__.py b/code/daisyclient/daisyclient/common/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisyclient/daisyclient/common/base.py b/code/daisyclient/daisyclient/common/base.py new file mode 100755 index 00000000..66171e7f --- /dev/null +++ b/code/daisyclient/daisyclient/common/base.py @@ -0,0 +1,35 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base utilities to build API operation managers and objects on top of. + +DEPRECATED post v.0.12.0. Use 'daisyclient.openstack.common.apiclient.base' +instead of this module." +""" + +import warnings + +from daisyclient.openstack.common.apiclient import base + + +warnings.warn("The 'daisyclient.common.base' module is deprecated post " + "v.0.12.0. 
Use 'daisyclient.openstack.common.apiclient.base' " + "instead of this one.", DeprecationWarning) + + +getid = base.getid +Manager = base.ManagerWithFind +Resource = base.Resource diff --git a/code/daisyclient/daisyclient/common/exceptions.py b/code/daisyclient/daisyclient/common/exceptions.py new file mode 100755 index 00000000..b6f58935 --- /dev/null +++ b/code/daisyclient/daisyclient/common/exceptions.py @@ -0,0 +1,3 @@ +# This is here for compatibility purposes. Once all known OpenStack clients +# are updated to use daisyclient.exc, this file should be removed +from daisyclient.exc import * diff --git a/code/daisyclient/daisyclient/common/http.py b/code/daisyclient/daisyclient/common/http.py new file mode 100755 index 00000000..8a746a65 --- /dev/null +++ b/code/daisyclient/daisyclient/common/http.py @@ -0,0 +1,285 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import logging +import socket + +from oslo_utils import importutils +from oslo_utils import netutils +import requests +try: + from requests.packages.urllib3.exceptions import ProtocolError +except ImportError: + ProtocolError = requests.exceptions.ConnectionError +import six +from six.moves.urllib import parse + +try: + import json +except ImportError: + import simplejson as json + +# Python 2.5 compat fix +if not hasattr(parse, 'parse_qsl'): + import cgi + parse.parse_qsl = cgi.parse_qsl + +from oslo_utils import encodeutils + +from daisyclient.common import https +from daisyclient.common.utils import safe_header +from daisyclient import exc + +osprofiler_web = importutils.try_import("osprofiler.web") + +LOG = logging.getLogger(__name__) +USER_AGENT = 'python-daisyclient' +CHUNKSIZE = 1024 * 64 # 64kB + + +class HTTPClient(object): + + def __init__(self, endpoint, **kwargs): + self.endpoint = endpoint + self.identity_headers = kwargs.get('identity_headers') + self.auth_token = kwargs.get('token') + if self.identity_headers: + if self.identity_headers.get('X-Auth-Token'): + self.auth_token = self.identity_headers.get('X-Auth-Token') + del self.identity_headers['X-Auth-Token'] + + self.session = requests.Session() + self.session.headers["User-Agent"] = USER_AGENT + + if self.auth_token: + self.session.headers["X-Auth-Token"] = self.auth_token + + self.timeout = float(kwargs.get('timeout', 600)) + + if self.endpoint.startswith("https"): + compression = kwargs.get('ssl_compression', True) + + if not compression: + self.session.mount("daisy+https://", https.HTTPSAdapter()) + self.endpoint = 'daisy+' + self.endpoint + + self.session.verify = ( + kwargs.get('cacert', requests.certs.where()), + kwargs.get('insecure', False)) + + else: + if kwargs.get('insecure', False) is True: + self.session.verify = False + else: + if kwargs.get('cacert', None) is not '': + self.session.verify = kwargs.get('cacert', True) + + self.session.cert = (kwargs.get('cert_file'), + kwargs.get('key_file')) + + @staticmethod + def parse_endpoint(endpoint): + return 
netutils.urlsplit(endpoint) + + def log_curl_request(self, method, url, headers, data, kwargs): + curl = ['curl -g -i -X %s' % method] + + headers = copy.deepcopy(headers) + headers.update(self.session.headers) + + for (key, value) in six.iteritems(headers): + header = '-H \'%s: %s\'' % safe_header(key, value) + curl.append(header) + + if not self.session.verify: + curl.append('-k') + else: + if isinstance(self.session.verify, six.string_types): + curl.append(' --cacert %s' % self.session.verify) + + if self.session.cert: + curl.append(' --cert %s --key %s' % self.session.cert) + + if data and isinstance(data, six.string_types): + curl.append('-d \'%s\'' % data) + + curl.append(url) + + msg = ' '.join([encodeutils.safe_decode(item, errors='ignore') + for item in curl]) + LOG.debug(msg) + + @staticmethod + def log_http_response(resp, body=None): + status = (resp.raw.version / 10.0, resp.status_code, resp.reason) + dump = ['\nHTTP/%.1f %s %s' % status] + headers = resp.headers.items() + dump.extend(['%s: %s' % safe_header(k, v) for k, v in headers]) + dump.append('') + if body: + body = encodeutils.safe_decode(body) + dump.extend([body, '']) + LOG.debug('\n'.join([encodeutils.safe_decode(x, errors='ignore') + for x in dump])) + + @staticmethod + def encode_headers(headers): + """Encodes headers. + + Note: This should be used right before + sending anything out. + + :param headers: Headers to encode + :returns: Dictionary with encoded headers' + names and values + """ + return dict((encodeutils.safe_encode(h), encodeutils.safe_encode(v)) + for h, v in six.iteritems(headers) if v is not None) + + def _request(self, method, url, **kwargs): + """Send an http request with the specified characteristics. + Wrapper around httplib.HTTP(S)Connection.request to handle tasks such + as setting headers and error handling. + """ + # Copy the kwargs so we can reuse the original in case of redirects + headers = kwargs.pop("headers", {}) + headers = headers and copy.deepcopy(headers) or {} + + if self.identity_headers: + for k, v in six.iteritems(self.identity_headers): + headers.setdefault(k, v) + + # Default Content-Type is octet-stream + content_type = headers.get('Content-Type', 'application/octet-stream') + + def chunk_body(body): + chunk = body + while chunk: + chunk = body.read(CHUNKSIZE) + if chunk == '': + break + yield chunk + + data = kwargs.pop("data", None) + if data is not None and not isinstance(data, six.string_types): + try: + data = json.dumps(data) + content_type = 'application/json' + except TypeError: + # Here we assume it's + # a file-like object + # and we'll chunk it + data = chunk_body(data) + + headers['Content-Type'] = content_type + stream = True if content_type == 'application/octet-stream' else False + + if osprofiler_web: + headers.update(osprofiler_web.get_trace_id_headers()) + + # Note(flaper87): Before letting headers / url fly, + # they should be encoded otherwise httplib will + # complain. 
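+        # encode_headers() also drops any header whose value is None, so
+        # optional headers that were never set are not sent on the wire.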
+ headers = self.encode_headers(headers) + + try: + if self.endpoint.endswith("/") or url.startswith("/"): + conn_url = "%s%s" % (self.endpoint, url) + else: + conn_url = "%s/%s" % (self.endpoint, url) + self.log_curl_request(method, conn_url, headers, data, kwargs) + resp = self.session.request(method, + conn_url, + data=data, + stream=stream, + headers=headers, + **kwargs) + except requests.exceptions.Timeout as e: + message = ("Error communicating with %(endpoint)s %(e)s" % + dict(url=conn_url, e=e)) + raise exc.InvalidEndpoint(message=message) + except (requests.exceptions.ConnectionError, ProtocolError) as e: + message = ("Error finding address for %(url)s: %(e)s" % + dict(url=conn_url, e=e)) + raise exc.CommunicationError(message=message) + except socket.gaierror as e: + message = "Error finding address for %s: %s" % ( + self.endpoint_hostname, e) + raise exc.InvalidEndpoint(message=message) + except (socket.error, socket.timeout) as e: + endpoint = self.endpoint + message = ("Error communicating with %(endpoint)s %(e)s" % + {'endpoint': endpoint, 'e': e}) + raise exc.CommunicationError(message=message) + + if not resp.ok: + LOG.debug("Request returned failure status %s." % resp.status_code) + raise exc.from_response(resp, resp.text) + elif resp.status_code == requests.codes.MULTIPLE_CHOICES: + raise exc.from_response(resp) + + content_type = resp.headers.get('Content-Type') + + # Read body into string if it isn't obviously image data + if content_type == 'application/octet-stream': + # Do not read all response in memory when + # downloading an image. + body_iter = _close_after_stream(resp, CHUNKSIZE) + self.log_http_response(resp) + else: + content = resp.text + self.log_http_response(resp, content) + if content_type and content_type.startswith('application/json'): + # Let's use requests json method, + # it should take care of response + # encoding + body_iter = resp.json() + else: + body_iter = six.StringIO(content) + try: + body_iter = json.loads(''.join([c for c in body_iter])) + except ValueError: + body_iter = None + return resp, body_iter + + def head(self, url, **kwargs): + return self._request('HEAD', url, **kwargs) + + def get(self, url, **kwargs): + return self._request('GET', url, **kwargs) + + def post(self, url, **kwargs): + return self._request('POST', url, **kwargs) + + def put(self, url, **kwargs): + return self._request('PUT', url, **kwargs) + + def patch(self, url, **kwargs): + return self._request('PATCH', url, **kwargs) + + def delete(self, url, **kwargs): + return self._request('DELETE', url, **kwargs) + + +def _close_after_stream(response, chunk_size): + """Iterate over the content and ensure the response is closed after.""" + # Yield each chunk in the response body + for chunk in response.iter_content(chunk_size=chunk_size): + yield chunk + # Once we're done streaming the body, ensure everything is closed. + # This will return the connection to the HTTPConnectionPool in urllib3 + # and ideally reduce the number of HTTPConnectionPool full warnings. + response.close() diff --git a/code/daisyclient/daisyclient/common/https.py b/code/daisyclient/daisyclient/common/https.py new file mode 100755 index 00000000..d42566f2 --- /dev/null +++ b/code/daisyclient/daisyclient/common/https.py @@ -0,0 +1,348 @@ +# Copyright 2014 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import socket +import ssl +import struct + +import OpenSSL +from requests import adapters +from requests import compat +try: + from requests.packages.urllib3 import connectionpool +except ImportError: + from urllib3 import connectionpool + +from oslo_utils import encodeutils +import six +# NOTE(jokke): simplified transition to py3, behaves like py2 xrange +from six.moves import range + +from daisyclient.common import utils + +try: + from eventlet import patcher + # Handle case where we are running in a monkey patched environment + if patcher.is_monkey_patched('socket'): + from eventlet.green.httplib import HTTPSConnection + from eventlet.green.OpenSSL.SSL import GreenConnection as Connection + from eventlet.greenio import GreenSocket + # TODO(mclaren): A getsockopt workaround: see 'getsockopt' doc string + GreenSocket.getsockopt = utils.getsockopt + else: + raise ImportError +except ImportError: + try: + from httplib import HTTPSConnection + except ImportError: + from http.client import HTTPSConnection + from OpenSSL.SSL import Connection as Connection + + +from daisyclient import exc + + +def verify_callback(host=None): + """ + We use a partial around the 'real' verify_callback function + so that we can stash the host value without holding a + reference on the VerifiedHTTPSConnection. + """ + def wrapper(connection, x509, errnum, + depth, preverify_ok, host=host): + return do_verify_callback(connection, x509, errnum, + depth, preverify_ok, host=host) + return wrapper + + +def do_verify_callback(connection, x509, errnum, + depth, preverify_ok, host=None): + """ + Verify the server's SSL certificate. + + This is a standalone function rather than a method to avoid + issues around closing sockets if a reference is held on + a VerifiedHTTPSConnection by the callback function. + """ + if x509.has_expired(): + msg = "SSL Certificate expired on '%s'" % x509.get_notAfter() + raise exc.SSLCertificateError(msg) + + if depth == 0 and preverify_ok: + # We verify that the host matches against the last + # certificate in the chain + return host_matches_cert(host, x509) + else: + # Pass through OpenSSL's default result + return preverify_ok + + +def host_matches_cert(host, x509): + """ + Verify that the x509 certificate we have received + from 'host' correctly identifies the server we are + connecting to, ie that the certificate's Common Name + or a Subject Alternative Name matches 'host'. 
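    Illustrative restatement (an editorial sketch, not in the original
    docstring) of the rule implemented by check_match() below: for host
    'api.example.com' a name matches if it is exactly 'api.example.com' or
    the single-level wildcard '*.example.com'; '*.com' does not match,
    because only the left-most label may be replaced, e.g.:

        name, host = '*.example.com', 'api.example.com'
        assert name.startswith('*.') and name[2:] == host.split('.', 1)[1]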
+ """ + def check_match(name): + # Directly match the name + if name == host: + return True + + # Support single wildcard matching + if name.startswith('*.') and host.find('.') > 0: + if name[2:] == host.split('.', 1)[1]: + return True + + common_name = x509.get_subject().commonName + + # First see if we can match the CN + if check_match(common_name): + return True + # Also try Subject Alternative Names for a match + san_list = None + for i in range(x509.get_extension_count()): + ext = x509.get_extension(i) + if ext.get_short_name() == b'subjectAltName': + san_list = str(ext) + for san in ''.join(san_list.split()).split(','): + if san.startswith('DNS:'): + if check_match(san.split(':', 1)[1]): + return True + + # Server certificate does not match host + msg = ('Host "%s" does not match x509 certificate contents: ' + 'CommonName "%s"' % (host, common_name)) + if san_list is not None: + msg = msg + ', subjectAltName "%s"' % san_list + raise exc.SSLCertificateError(msg) + + +def to_bytes(s): + if isinstance(s, six.string_types): + return six.b(s) + else: + return s + + +class HTTPSAdapter(adapters.HTTPAdapter): + """ + This adapter will be used just when + ssl compression should be disabled. + + The init method overwrites the default + https pool by setting daisyclient's + one. + """ + + def request_url(self, request, proxies): + # NOTE(flaper87): Make sure the url is encoded, otherwise + # python's standard httplib will fail with a TypeError. + url = super(HTTPSAdapter, self).request_url(request, proxies) + return encodeutils.safe_encode(url) + + def _create_daisy_httpsconnectionpool(self, url): + kw = self.poolmanager.connection_kw + # Parse the url to get the scheme, host, and port + parsed = compat.urlparse(url) + # If there is no port specified, we should use the standard HTTPS port + port = parsed.port or 443 + pool = HTTPSConnectionPool(parsed.host, port, **kw) + + with self.poolmanager.pools.lock: + self.poolmanager.pools[(parsed.scheme, parsed.host, port)] = pool + + return pool + + def get_connection(self, url, proxies=None): + try: + return super(HTTPSAdapter, self).get_connection(url, proxies) + except KeyError: + # NOTE(sigamvirus24): This works around modifying a module global + # which fixes bug #1396550 + # The scheme is most likely daisy+https but check anyway + if not url.startswith('daisy+https://'): + raise + + return self._create_daisy_httpsconnectionpool(url) + + def cert_verify(self, conn, url, verify, cert): + super(HTTPSAdapter, self).cert_verify(conn, url, verify, cert) + conn.ca_certs = verify[0] + conn.insecure = verify[1] + + +class HTTPSConnectionPool(connectionpool.HTTPSConnectionPool): + """ + HTTPSConnectionPool will be instantiated when a new + connection is requested to the HTTPSAdapter.This + implementation overwrites the _new_conn method and + returns an instances of daisyclient's VerifiedHTTPSConnection + which handles no compression. + + ssl_compression is hard-coded to False because this will + be used just when the user sets --no-ssl-compression. + """ + + scheme = 'daisy+https' + + def _new_conn(self): + self.num_connections += 1 + return VerifiedHTTPSConnection(host=self.host, + port=self.port, + key_file=self.key_file, + cert_file=self.cert_file, + cacert=self.ca_certs, + insecure=self.insecure, + ssl_compression=False) + + +class OpenSSLConnectionDelegator(object): + """ + An OpenSSL.SSL.Connection delegator. + + Supplies an additional 'makefile' method which httplib requires + and is not present in OpenSSL.SSL.Connection. 
+ + Note: Since it is not possible to inherit from OpenSSL.SSL.Connection + a delegator must be used. + """ + def __init__(self, *args, **kwargs): + self.connection = Connection(*args, **kwargs) + + def __getattr__(self, name): + return getattr(self.connection, name) + + def makefile(self, *args, **kwargs): + return socket._fileobject(self.connection, *args, **kwargs) + + +class VerifiedHTTPSConnection(HTTPSConnection): + """ + Extended HTTPSConnection which uses the OpenSSL library + for enhanced SSL support. + Note: Much of this functionality can eventually be replaced + with native Python 3.3 code. + """ + # Restrict the set of client supported cipher suites + CIPHERS = 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:'\ + 'eCDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:'\ + 'RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS' + + def __init__(self, host, port=None, key_file=None, cert_file=None, + cacert=None, timeout=None, insecure=False, + ssl_compression=True): + # List of exceptions reported by Python3 instead of + # SSLConfigurationError + if six.PY3: + excp_lst = (TypeError, FileNotFoundError, ssl.SSLError) + else: + # NOTE(jamespage) + # Accomodate changes in behaviour for pep-0467, introduced + # in python 2.7.9. + # https://github.com/python/peps/blob/master/pep-0476.txt + excp_lst = (TypeError, IOError, ssl.SSLError) + try: + HTTPSConnection.__init__(self, host, port, + key_file=key_file, + cert_file=cert_file) + self.key_file = key_file + self.cert_file = cert_file + self.timeout = timeout + self.insecure = insecure + # NOTE(flaper87): `is_verified` is needed for + # requests' urllib3. If insecure is True then + # the request is not `verified`, hence `not insecure` + self.is_verified = not insecure + self.ssl_compression = ssl_compression + self.cacert = None if cacert is None else str(cacert) + self.set_context() + # ssl exceptions are reported in various form in Python 3 + # so to be compatible, we report the same kind as under + # Python2 + except excp_lst as e: + raise exc.SSLConfigurationError(str(e)) + + def set_context(self): + """ + Set up the OpenSSL context. 
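        A rough standalone equivalent of the verifying, compression-disabled
        case (an illustrative sketch only; the host name and CA bundle path
        are hypothetical):

            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
            ctx.set_cipher_list(VerifiedHTTPSConnection.CIPHERS)
            ctx.set_options(0x20000)  # SSL_OP_NO_COMPRESSION
            ctx.set_verify(OpenSSL.SSL.VERIFY_PEER,
                           verify_callback(host='daisy.example.com'))
            ctx.load_verify_locations(to_bytes('/etc/pki/tls/certs/ca.crt'))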
+ """ + self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) + self.context.set_cipher_list(self.CIPHERS) + + if self.ssl_compression is False: + self.context.set_options(0x20000) # SSL_OP_NO_COMPRESSION + + if self.insecure is not True: + self.context.set_verify(OpenSSL.SSL.VERIFY_PEER, + verify_callback(host=self.host)) + else: + self.context.set_verify(OpenSSL.SSL.VERIFY_NONE, + lambda *args: True) + + if self.cert_file: + try: + self.context.use_certificate_file(self.cert_file) + except Exception as e: + msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e) + raise exc.SSLConfigurationError(msg) + if self.key_file is None: + # We support having key and cert in same file + try: + self.context.use_privatekey_file(self.cert_file) + except Exception as e: + msg = ('No key file specified and unable to load key ' + 'from "%s" %s' % (self.cert_file, e)) + raise exc.SSLConfigurationError(msg) + + if self.key_file: + try: + self.context.use_privatekey_file(self.key_file) + except Exception as e: + msg = 'Unable to load key from "%s" %s' % (self.key_file, e) + raise exc.SSLConfigurationError(msg) + + if self.cacert: + try: + self.context.load_verify_locations(to_bytes(self.cacert)) + except Exception as e: + msg = 'Unable to load CA from "%s" %s' % (self.cacert, e) + raise exc.SSLConfigurationError(msg) + else: + self.context.set_default_verify_paths() + + def connect(self): + """ + Connect to an SSL port using the OpenSSL library and apply + per-connection parameters. + """ + result = socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM) + if result: + socket_family = result[0][0] + if socket_family == socket.AF_INET6: + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + else: + # If due to some reason the address lookup fails - we still connect + # to IPv4 socket. This retains the older behavior. + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.timeout is not None: + # '0' microseconds + sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, + struct.pack('LL', self.timeout, 0)) + self.sock = OpenSSLConnectionDelegator(self.context, sock) + self.sock.connect((self.host, self.port)) diff --git a/code/daisyclient/daisyclient/common/progressbar.py b/code/daisyclient/daisyclient/common/progressbar.py new file mode 100755 index 00000000..cd4ffe50 --- /dev/null +++ b/code/daisyclient/daisyclient/common/progressbar.py @@ -0,0 +1,96 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sys + +import six + + +class _ProgressBarBase(object): + """ + Base abstract class used by specific class wrapper to show a progress bar + when the wrapped object are consumed. + + :param wrapped: Object to wrap that hold data to be consumed. + :param totalsize: The total size of the data in the wrapped object. + + :note: The progress will be displayed only if sys.stdout is a tty. 
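    Illustrative example (an editorial sketch; the image path is
    hypothetical):

        import os
        path = '/tmp/image.qcow2'
        image = VerboseFileWrapper(open(path, 'rb'), os.path.getsize(path))
        while image.read(65536):
            pass  # the progress bar advances as the wrapped file is read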
+ """ + + def __init__(self, wrapped, totalsize): + self._wrapped = wrapped + self._totalsize = float(totalsize) + self._show_progress = sys.stdout.isatty() and self._totalsize != 0 + self._percent = 0 + + def _display_progress_bar(self, size_read): + if self._show_progress: + self._percent += size_read / self._totalsize + # Output something like this: [==========> ] 49% + sys.stdout.write('\r[{0:<30}] {1:.0%}'.format( + '=' * int(round(self._percent * 29)) + '>', self._percent + )) + sys.stdout.flush() + + def __getattr__(self, attr): + # Forward other attribute access to the wrapped object. + return getattr(self._wrapped, attr) + + +class VerboseFileWrapper(_ProgressBarBase): + """ + A file wrapper that show and advance a progress bar whenever file's read + method is called. + """ + + def read(self, *args, **kwargs): + data = self._wrapped.read(*args, **kwargs) + if data: + self._display_progress_bar(len(data)) + else: + if self._show_progress: + # Break to a new line from the progress bar for incoming + # output. + sys.stdout.write('\n') + return data + + +class VerboseIteratorWrapper(_ProgressBarBase): + """ + An iterator wrapper that show and advance a progress bar whenever + data is consumed from the iterator. + + :note: Use only with iterator that yield strings. + """ + + def __iter__(self): + return self + + def next(self): + try: + data = six.next(self._wrapped) + # NOTE(mouad): Assuming that data is a string b/c otherwise calling + # len function will not make any sense. + self._display_progress_bar(len(data)) + return data + except StopIteration: + if self._show_progress: + # Break to a new line from the progress bar for incoming + # output. + sys.stdout.write('\n') + raise + + # In Python 3, __next__() has replaced next(). + __next__ = next diff --git a/code/daisyclient/daisyclient/common/utils.py b/code/daisyclient/daisyclient/common/utils.py new file mode 100755 index 00000000..c2a41070 --- /dev/null +++ b/code/daisyclient/daisyclient/common/utils.py @@ -0,0 +1,451 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import errno +import hashlib +import json +import os +import re +import sys +import threading +import uuid + +from oslo_utils import importutils +import six + +if os.name == 'nt': + import msvcrt +else: + msvcrt = None + +from oslo_utils import encodeutils +from oslo_utils import strutils +import prettytable +import six + +from daisyclient import exc + +_memoized_property_lock = threading.Lock() + +SENSITIVE_HEADERS = ('X-Auth-Token', ) + + +# Decorator for cli-args +def arg(*args, **kwargs): + def _decorator(func): + # Because of the sematics of decorator composition if we just append + # to the options list positional options will appear to be backwards. 
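        # Illustrative sketch (not part of the original patch): with stacked
        # decorators such as
        #
        #     @arg('--name', metavar='<NAME>', help='Filter by name.')
        #     @arg('--page-size', type=int, help='Items per request.')
        #     def do_image_list(gc, args):
        #         ...
        #
        # decorators are applied bottom-up, so inserting at index 0 below
        # keeps func.arguments in the order they are written: '--name'
        # first, then '--page-size'.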
+ func.__dict__.setdefault('arguments', []).insert(0, (args, kwargs)) + return func + return _decorator + + +def schema_args(schema_getter, omit=None): + omit = omit or [] + typemap = { + 'string': str, + 'integer': int, + 'boolean': strutils.bool_from_string, + 'array': list + } + + def _decorator(func): + schema = schema_getter() + if schema is None: + param = '' + kwargs = { + 'help': ("Please run with connection parameters set to " + "retrieve the schema for generating help for this " + "command") + } + func.__dict__.setdefault('arguments', []).insert(0, ((param, ), + kwargs)) + else: + properties = schema.get('properties', {}) + for name, property in six.iteritems(properties): + if name in omit: + continue + param = '--' + name.replace('_', '-') + kwargs = {} + + type_str = property.get('type', 'string') + + if isinstance(type_str, list): + # NOTE(flaper87): This means the server has + # returned something like `['null', 'string']`, + # therfore we use the first non-`null` type as + # the valid type. + for t in type_str: + if t != 'null': + type_str = t + break + + if type_str == 'array': + items = property.get('items') + kwargs['type'] = typemap.get(items.get('type')) + kwargs['nargs'] = '+' + else: + kwargs['type'] = typemap.get(type_str) + + if type_str == 'boolean': + kwargs['metavar'] = '[True|False]' + else: + kwargs['metavar'] = '<%s>' % name.upper() + + description = property.get('description', "") + if 'enum' in property: + if len(description): + description += " " + + # NOTE(flaper87): Make sure all values are `str/unicode` + # for the `join` to succeed. Enum types can also be `None` + # therfore, join's call would fail without the following + # list comprehension + vals = [six.text_type(val) for val in property.get('enum')] + description += ('Valid values: ' + ', '.join(vals)) + kwargs['help'] = description + + func.__dict__.setdefault('arguments', + []).insert(0, ((param, ), kwargs)) + return func + + return _decorator + + +def pretty_choice_list(l): + return ', '.join("'%s'" % i for i in l) + + +def print_list(objs, fields, formatters=None, field_settings=None): + formatters = formatters or {} + field_settings = field_settings or {} + pt = prettytable.PrettyTable([f for f in fields], caching=False) + pt.align = 'l' + + for o in objs: + row = [] + for field in fields: + if field in field_settings: + for setting, value in six.iteritems(field_settings[field]): + setting_dict = getattr(pt, setting) + setting_dict[field] = value + + if field in formatters: + row.append(formatters[field](o)) + else: + field_name = field.lower().replace(' ', '_') + data = getattr(o, field_name, None) + row.append(data) + pt.add_row(row) + + print(encodeutils.safe_decode(pt.get_string())) + + +def print_dict(d, max_column_width=80): + pt = prettytable.PrettyTable(['Property', 'Value'], caching=False) + pt.align = 'l' + pt.max_width = max_column_width + for k, v in six.iteritems(d): + if isinstance(v, (dict, list)): + v = json.dumps(v) + pt.add_row([k, v ]) + print(encodeutils.safe_decode(pt.get_string(sortby='Property'))) + + +def find_resource(manager, id): + """Helper for the _find_* methods.""" + # first try to get entity as integer id + try: + if isinstance(id, int) or id.isdigit(): + return manager.get(int(id)) + except exc.NotFound: + pass + + # now try to get entity as uuid + try: + # This must be unicode for Python 3 compatibility. 
+ # If you pass a bytestring to uuid.UUID, you will get a TypeError + uuid.UUID(encodeutils.safe_decode(id)) + return manager.get(id) + except (ValueError, exc.NotFound): + msg = ("id %s is error " % id) + raise exc.CommandError(msg) + + # finally try to find entity by name + matches = list(manager.list(filters={'name': id})) + num_matches = len(matches) + if num_matches == 0: + msg = "No %s with a name or ID of '%s' exists." % \ + (manager.resource_class.__name__.lower(), id) + raise exc.CommandError(msg) + elif num_matches > 1: + msg = ("Multiple %s matches found for '%s', use an ID to be more" + " specific." % (manager.resource_class.__name__.lower(), + id)) + raise exc.CommandError(msg) + else: + return matches[0] + + +def skip_authentication(f): + """Function decorator used to indicate a caller may be unauthenticated.""" + f.require_authentication = False + return f + + +def is_authentication_required(f): + """Checks to see if the function requires authentication. + + Use the skip_authentication decorator to indicate a caller may + skip the authentication step. + """ + return getattr(f, 'require_authentication', True) + + +def env(*vars, **kwargs): + """Search for the first defined of possibly many env vars + + Returns the first environment variable defined in vars, or + returns the default defined in kwargs. + """ + for v in vars: + value = os.environ.get(v, None) + if value: + return value + return kwargs.get('default', '') + + +def import_versioned_module(version, submodule=None): + module = 'daisyclient.v%s' % version + if submodule: + module = '.'.join((module, submodule)) + return importutils.import_module(module) + + +def exit(msg='', exit_code=1): + if msg: + print(encodeutils.safe_decode(msg), file=sys.stderr) + sys.exit(exit_code) + + +def save_image(data, path): + """ + Save an image to the specified path. + + :param data: binary data of the image + :param path: path to save the image to + """ + if path is None: + image = sys.stdout + else: + image = open(path, 'wb') + try: + for chunk in data: + image.write(chunk) + finally: + if path is not None: + image.close() + + +def make_size_human_readable(size): + suffix = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB'] + base = 1024.0 + + index = 0 + while size >= base: + index = index + 1 + size = size / base + + padded = '%.1f' % size + stripped = padded.rstrip('0').rstrip('.') + + return '%s%s' % (stripped, suffix[index]) + + +def getsockopt(self, *args, **kwargs): + """ + A function which allows us to monkey patch eventlet's + GreenSocket, adding a required 'getsockopt' method. + TODO: (mclaren) we can remove this once the eventlet fix + (https://bitbucket.org/eventlet/eventlet/commits/609f230) + lands in mainstream packages. + """ + return self.fd.getsockopt(*args, **kwargs) + + +def exception_to_str(exc): + try: + error = six.text_type(exc) + except UnicodeError: + try: + error = str(exc) + except UnicodeError: + error = ("Caught '%(exception)s' exception." % + {"exception": exc.__class__.__name__}) + return encodeutils.safe_decode(error, errors='ignore') + + +def get_file_size(file_obj): + """ + Analyze file-like object and attempt to determine its size. + + :param file_obj: file-like object. + :retval The file's size or None if it cannot be determined. 
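    Illustrative example (an editorial sketch, not in the original
    docstring):

        >>> import io
        >>> get_file_size(io.BytesIO(b'abcd'))
        4
        >>> get_file_size(object()) is None
        True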
+ """ + if (hasattr(file_obj, 'seek') and hasattr(file_obj, 'tell') and + (six.PY2 or six.PY3 and file_obj.seekable())): + try: + curr = file_obj.tell() + file_obj.seek(0, os.SEEK_END) + size = file_obj.tell() + file_obj.seek(curr) + return size + except IOError as e: + if e.errno == errno.ESPIPE: + # Illegal seek. This means the file object + # is a pipe (e.g. the user is trying + # to pipe image data to the client, + # echo testdata | bin/glance add blah...), or + # that file object is empty, or that a file-like + # object which doesn't support 'seek/tell' has + # been supplied. + return + else: + raise + + +def get_data_file(args): + if args.file: + return open(args.file, 'rb') + else: + # distinguish cases where: + # (1) stdin is not valid (as in cron jobs): + # glance ... <&- + # (2) image data is provided through standard input: + # glance ... < /tmp/file or cat /tmp/file | glance ... + # (3) no image data provided: + # glance ... + try: + os.fstat(0) + except OSError: + # (1) stdin is not valid (closed...) + return None + if not sys.stdin.isatty(): + # (2) image data is provided through standard input + if msvcrt: + msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) + return sys.stdin + else: + # (3) no image data provided + return None + + +def strip_version(endpoint): + """Strip version from the last component of endpoint if present.""" + # NOTE(flaper87): This shouldn't be necessary if + # we make endpoint the first argument. However, we + # can't do that just yet because we need to keep + # backwards compatibility. + if not isinstance(endpoint, six.string_types): + raise ValueError("Expected endpoint") + + version = None + # Get rid of trailing '/' if present + endpoint = endpoint.rstrip('/') + url_bits = endpoint.split('/') + # regex to match 'v1' or 'v2.0' etc + if re.match('v\d+\.?\d*', url_bits[-1]): + version = float(url_bits[-1].lstrip('v')) + endpoint = '/'.join(url_bits[:-1]) + return endpoint, version + + +def print_image(image_obj, max_col_width=None): + ignore = ['self', 'access', 'file', 'schema'] + image = dict([item for item in six.iteritems(image_obj) + if item[0] not in ignore]) + if str(max_col_width).isdigit(): + print_dict(image, max_column_width=max_col_width) + else: + print_dict(image) + + +def integrity_iter(iter, checksum): + """ + Check image data integrity. + + :raises: IOError + """ + md5sum = hashlib.md5() + for chunk in iter: + yield chunk + if isinstance(chunk, six.string_types): + chunk = six.b(chunk) + md5sum.update(chunk) + md5sum = md5sum.hexdigest() + if md5sum != checksum: + raise IOError(errno.EPIPE, + 'Corrupt image download. 
Checksum was %s expected %s' % + (md5sum, checksum)) + + +def memoized_property(fn): + attr_name = '_lazy_once_' + fn.__name__ + + @property + def _memoized_property(self): + if hasattr(self, attr_name): + return getattr(self, attr_name) + else: + with _memoized_property_lock: + if not hasattr(self, attr_name): + setattr(self, attr_name, fn(self)) + return getattr(self, attr_name) + return _memoized_property + + +def safe_header(name, value): + if name in SENSITIVE_HEADERS: + v = value.encode('utf-8') + h = hashlib.sha1(v) + d = h.hexdigest() + return name, "{SHA1}%s" % d + else: + return name, value + + +def to_str(value): + if value is None: + return value + if not isinstance(value, six.string_types): + return str(value) + return value + + +class IterableWithLength(object): + def __init__(self, iterable, length): + self.iterable = iterable + self.length = length + + def __iter__(self): + return self.iterable + + def next(self): + return next(self.iterable) + + def __len__(self): + return self.length diff --git a/code/daisyclient/daisyclient/exc.py b/code/daisyclient/daisyclient/exc.py new file mode 100755 index 00000000..06a91262 --- /dev/null +++ b/code/daisyclient/daisyclient/exc.py @@ -0,0 +1,201 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
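# Usage sketch (not part of the original module): callers are expected to
# let from_response() (defined below) choose the exception class from the
# HTTP status code, e.g.
#
#     resp = session.get(url)                    # any requests.Response
#     if not resp.ok:
#         raise from_response(resp, resp.text)   # 404 -> HTTPNotFound, ...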
+ +import re +import sys + + +class BaseException(Exception): + """An error occurred.""" + def __init__(self, message=None): + self.message = message + + def __str__(self): + return self.message or self.__class__.__doc__ + + +class CommandError(BaseException): + """Invalid usage of CLI.""" + + +class InvalidEndpoint(BaseException): + """The provided endpoint is invalid.""" + + +class CommunicationError(BaseException): + """Unable to communicate with server.""" + + +class ClientException(Exception): + """DEPRECATED!""" + + +class HTTPException(ClientException): + """Base exception for all HTTP-derived exceptions.""" + code = 'N/A' + + def __init__(self, details=None): + self.details = details or self.__class__.__name__ + + def __str__(self): + return "%s (HTTP %s)" % (self.details, self.code) + + +class HTTPMultipleChoices(HTTPException): + code = 300 + + def __str__(self): + self.details = ("Requested version of OpenStack Images API is not " + "available.") + return "%s (HTTP %s) %s" % (self.__class__.__name__, self.code, + self.details) + + +class BadRequest(HTTPException): + """DEPRECATED!""" + code = 400 + + +class HTTPBadRequest(BadRequest): + pass + + +class Unauthorized(HTTPException): + """DEPRECATED!""" + code = 401 + + +class HTTPUnauthorized(Unauthorized): + pass + + +class Forbidden(HTTPException): + """DEPRECATED!""" + code = 403 + + +class HTTPForbidden(Forbidden): + pass + + +class NotFound(HTTPException): + """DEPRECATED!""" + code = 404 + + +class HTTPNotFound(NotFound): + pass + + +class HTTPMethodNotAllowed(HTTPException): + code = 405 + + +class Conflict(HTTPException): + """DEPRECATED!""" + code = 409 + + +class HTTPConflict(Conflict): + pass + + +class OverLimit(HTTPException): + """DEPRECATED!""" + code = 413 + + +class HTTPOverLimit(OverLimit): + pass + + +class HTTPInternalServerError(HTTPException): + code = 500 + + +class HTTPNotImplemented(HTTPException): + code = 501 + + +class HTTPBadGateway(HTTPException): + code = 502 + + +class ServiceUnavailable(HTTPException): + """DEPRECATED!""" + code = 503 + + +class HTTPServiceUnavailable(ServiceUnavailable): + pass + + +# NOTE(bcwaldon): Build a mapping of HTTP codes to corresponding exception +# classes +_code_map = {} +for obj_name in dir(sys.modules[__name__]): + if obj_name.startswith('HTTP'): + obj = getattr(sys.modules[__name__], obj_name) + _code_map[obj.code] = obj + + +def from_response(response, body=None): + """Return an instance of an HTTPException based on httplib response.""" + cls = _code_map.get(response.status_code, HTTPException) + if body and 'json' in response.headers['content-type']: + # Iterate over the nested objects and retreive the "message" attribute. + messages = [obj.get('message') for obj in response.json().values()] + # Join all of the messages together nicely and filter out any objects + # that don't have a "message" attr. + details = '\n'.join(i for i in messages if i is not None) + return cls(details=details) + elif body and 'html' in response.headers['content-type']: + # Split the lines, strip whitespace and inline HTML from the response. + details = [re.sub(r'<.+?>', '', i.strip()) + for i in response.text.splitlines()] + details = [i for i in details if i] + # Remove duplicates from the list. + details_seen = set() + details_temp = [] + for i in details: + if i not in details_seen: + details_temp.append(i) + details_seen.add(i) + # Return joined string separated by colons. 
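        # Worked example (illustrative only, not part of the original patch):
        # if the stripped HTML lines were
        #     ['404 Not Found', 'Not Found', '404 Not Found']
        # the de-duplication above leaves ['404 Not Found', 'Not Found'] and
        # the join below yields '404 Not Found: Not Found'.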
+ details = ': '.join(details_temp) + return cls(details=details) + elif body: + details = body.replace('\n\n', '\n') + return cls(details=details) + + return cls() + + +class NoTokenLookupException(Exception): + """DEPRECATED!""" + pass + + +class EndpointNotFound(Exception): + """DEPRECATED!""" + pass + + +class SSLConfigurationError(BaseException): + pass + + +class SSLCertificateError(BaseException): + pass diff --git a/code/daisyclient/daisyclient/openstack/__init__.py b/code/daisyclient/daisyclient/openstack/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisyclient/daisyclient/openstack/common/__init__.py b/code/daisyclient/daisyclient/openstack/common/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisyclient/daisyclient/openstack/common/_i18n.py b/code/daisyclient/daisyclient/openstack/common/_i18n.py new file mode 100755 index 00000000..cee8f013 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/_i18n.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""oslo.i18n integration module. + +See http://docs.openstack.org/developer/oslo.i18n/usage.html + +""" + +try: + import oslo.i18n + + # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the + # application name when this module is synced into the separate + # repository. It is OK to have more than one translation function + # using the same domain, since there will still only be one message + # catalog. + _translators = oslo.i18n.TranslatorFactory(domain='glanceclient') + + # The primary translation function using the well-known name "_" + _ = _translators.primary + + # Translators for log levels. + # + # The abbreviated names are meant to reflect the usual use of a short + # name like '_'. The "L" is for "log" and the other letter comes from + # the level. + _LI = _translators.log_info + _LW = _translators.log_warning + _LE = _translators.log_error + _LC = _translators.log_critical +except ImportError: + # NOTE(dims): Support for cases where a project wants to use + # code from oslo-incubator, but is not ready to be internationalized + # (like tempest) + _ = _LI = _LW = _LE = _LC = lambda x: x diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/__init__.py b/code/daisyclient/daisyclient/openstack/common/apiclient/__init__.py new file mode 100755 index 00000000..e69de29b diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/auth.py b/code/daisyclient/daisyclient/openstack/common/apiclient/auth.py new file mode 100755 index 00000000..03ad9e70 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/auth.py @@ -0,0 +1,234 @@ +# Copyright 2013 OpenStack Foundation +# Copyright 2013 Spanish National Research Council. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# E0202: An attribute inherited from %s hide this method +# pylint: disable=E0202 + +######################################################################## +# +# THIS MODULE IS DEPRECATED +# +# Please refer to +# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for +# the discussion leading to this deprecation. +# +# We recommend checking out the python-openstacksdk project +# (https://launchpad.net/python-openstacksdk) instead. +# +######################################################################## + +import abc +import argparse +import os + +import six +from stevedore import extension + +from daisyclient.openstack.common.apiclient import exceptions + + +_discovered_plugins = {} + + +def discover_auth_systems(): + """Discover the available auth-systems. + + This won't take into account the old style auth-systems. + """ + global _discovered_plugins + _discovered_plugins = {} + + def add_plugin(ext): + _discovered_plugins[ext.name] = ext.plugin + + ep_namespace = "daisyclient.openstack.common.apiclient.auth" + mgr = extension.ExtensionManager(ep_namespace) + mgr.map(add_plugin) + + +def load_auth_system_opts(parser): + """Load options needed by the available auth-systems into a parser. + + This function will try to populate the parser with options from the + available plugins. + """ + group = parser.add_argument_group("Common auth options") + BaseAuthPlugin.add_common_opts(group) + for name, auth_plugin in six.iteritems(_discovered_plugins): + group = parser.add_argument_group( + "Auth-system '%s' options" % name, + conflict_handler="resolve") + auth_plugin.add_opts(group) + + +def load_plugin(auth_system): + try: + plugin_class = _discovered_plugins[auth_system] + except KeyError: + raise exceptions.AuthSystemNotFound(auth_system) + return plugin_class(auth_system=auth_system) + + +def load_plugin_from_args(args): + """Load required plugin and populate it with options. + + Try to guess auth system if it is not specified. Systems are tried in + alphabetical order. + + :type args: argparse.Namespace + :raises: AuthPluginOptionsMissing + """ + auth_system = args.os_auth_system + if auth_system: + plugin = load_plugin(auth_system) + plugin.parse_opts(args) + plugin.sufficient_options() + return plugin + + for plugin_auth_system in sorted(six.iterkeys(_discovered_plugins)): + plugin_class = _discovered_plugins[plugin_auth_system] + plugin = plugin_class() + plugin.parse_opts(args) + try: + plugin.sufficient_options() + except exceptions.AuthPluginOptionsMissing: + continue + return plugin + raise exceptions.AuthPluginOptionsMissing(["auth_system"]) + + +@six.add_metaclass(abc.ABCMeta) +class BaseAuthPlugin(object): + """Base class for authentication plugins. + + An authentication plugin needs to override at least the authenticate + method to be a valid plugin. 
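    A minimal concrete plugin might look as follows (an illustrative sketch
    only, not a plugin shipped with this package):

        class TokenEndpointAuth(BaseAuthPlugin):
            opt_names = ['token', 'endpoint']

            def _do_authenticate(self, http_client):
                pass  # the token is supplied directly, nothing to fetch

            def token_and_endpoint(self, endpoint_type, service_type):
                return self.opts.get('token'), self.opts.get('endpoint')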
+ """ + + auth_system = None + opt_names = [] + common_opt_names = [ + "auth_system", + "username", + "password", + "tenant_name", + "token", + "auth_url", + ] + + def __init__(self, auth_system=None, **kwargs): + self.auth_system = auth_system or self.auth_system + self.opts = dict((name, kwargs.get(name)) + for name in self.opt_names) + + @staticmethod + def _parser_add_opt(parser, opt): + """Add an option to parser in two variants. + + :param opt: option name (with underscores) + """ + dashed_opt = opt.replace("_", "-") + env_var = "OS_%s" % opt.upper() + arg_default = os.environ.get(env_var, "") + arg_help = "Defaults to env[%s]." % env_var + parser.add_argument( + "--os-%s" % dashed_opt, + metavar="<%s>" % dashed_opt, + default=arg_default, + help=arg_help) + parser.add_argument( + "--os_%s" % opt, + metavar="<%s>" % dashed_opt, + help=argparse.SUPPRESS) + + @classmethod + def add_opts(cls, parser): + """Populate the parser with the options for this plugin. + """ + for opt in cls.opt_names: + # use `BaseAuthPlugin.common_opt_names` since it is never + # changed in child classes + if opt not in BaseAuthPlugin.common_opt_names: + cls._parser_add_opt(parser, opt) + + @classmethod + def add_common_opts(cls, parser): + """Add options that are common for several plugins. + """ + for opt in cls.common_opt_names: + cls._parser_add_opt(parser, opt) + + @staticmethod + def get_opt(opt_name, args): + """Return option name and value. + + :param opt_name: name of the option, e.g., "username" + :param args: parsed arguments + """ + return (opt_name, getattr(args, "os_%s" % opt_name, None)) + + def parse_opts(self, args): + """Parse the actual auth-system options if any. + + This method is expected to populate the attribute `self.opts` with a + dict containing the options and values needed to make authentication. + """ + self.opts.update(dict(self.get_opt(opt_name, args) + for opt_name in self.opt_names)) + + def authenticate(self, http_client): + """Authenticate using plugin defined method. + + The method usually analyses `self.opts` and performs + a request to authentication server. + + :param http_client: client object that needs authentication + :type http_client: HTTPClient + :raises: AuthorizationFailure + """ + self.sufficient_options() + self._do_authenticate(http_client) + + @abc.abstractmethod + def _do_authenticate(self, http_client): + """Protected method for authentication. + """ + + def sufficient_options(self): + """Check if all required options are present. + + :raises: AuthPluginOptionsMissing + """ + missing = [opt + for opt in self.opt_names + if not self.opts.get(opt)] + if missing: + raise exceptions.AuthPluginOptionsMissing(missing) + + @abc.abstractmethod + def token_and_endpoint(self, endpoint_type, service_type): + """Return token and endpoint. + + :param service_type: Service type of the endpoint + :type service_type: string + :param endpoint_type: Type of endpoint. 
+ Possible values: public or publicURL, + internal or internalURL, + admin or adminURL + :type endpoint_type: string + :returns: tuple of token and endpoint strings + :raises: EndpointException + """ diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/base.py b/code/daisyclient/daisyclient/openstack/common/apiclient/base.py new file mode 100755 index 00000000..01b74722 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/base.py @@ -0,0 +1,532 @@ +# Copyright 2010 Jacob Kaplan-Moss +# Copyright 2011 OpenStack Foundation +# Copyright 2012 Grid Dynamics +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Base utilities to build API operation managers and objects on top of. +""" + +######################################################################## +# +# THIS MODULE IS DEPRECATED +# +# Please refer to +# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for +# the discussion leading to this deprecation. +# +# We recommend checking out the python-openstacksdk project +# (https://launchpad.net/python-openstacksdk) instead. +# +######################################################################## + + +# E1102: %s is not callable +# pylint: disable=E1102 + +import abc +import copy + +from oslo_utils import strutils +import six +from six.moves.urllib import parse + +from daisyclient.openstack.common._i18n import _ +from daisyclient.openstack.common.apiclient import exceptions + + +def getid(obj): + """Return id if argument is a Resource. + + Abstracts the common pattern of allowing both an object or an object's ID + (UUID) as a parameter when dealing with relationships. + """ + try: + if obj.uuid: + return obj.uuid + except AttributeError: + pass + try: + return obj.id + except AttributeError: + return obj + + +# TODO(aababilov): call run_hooks() in HookableMixin's child classes +class HookableMixin(object): + """Mixin so classes can register and run hooks.""" + _hooks_map = {} + + @classmethod + def add_hook(cls, hook_type, hook_func): + """Add a new hook of specified type. + + :param cls: class that registers hooks + :param hook_type: hook type, e.g., '__pre_parse_args__' + :param hook_func: hook function + """ + if hook_type not in cls._hooks_map: + cls._hooks_map[hook_type] = [] + + cls._hooks_map[hook_type].append(hook_func) + + @classmethod + def run_hooks(cls, hook_type, *args, **kwargs): + """Run all hooks of specified type. + + :param cls: class that registers hooks + :param hook_type: hook type, e.g., '__pre_parse_args__' + :param args: args to be passed to every hook function + :param kwargs: kwargs to be passed to every hook function + """ + hook_funcs = cls._hooks_map.get(hook_type) or [] + for hook_func in hook_funcs: + hook_func(*args, **kwargs) + + +class BaseManager(HookableMixin): + """Basic manager type providing common operations. + + Managers interact with a particular type of API (servers, flavors, images, + etc.) 
and provide CRUD operations for them. + """ + resource_class = None + + def __init__(self, client): + """Initializes BaseManager with `client`. + + :param client: instance of BaseClient descendant for HTTP requests + """ + super(BaseManager, self).__init__() + self.client = client + + def _list(self, url, response_key=None, obj_class=None, json=None): + """List the collection. + + :param url: a partial URL, e.g., '/servers' + :param response_key: the key to be looked up in response dictionary, + e.g., 'servers'. If response_key is None - all response body + will be used. + :param obj_class: class for constructing the returned objects + (self.resource_class will be used by default) + :param json: data that will be encoded as JSON and passed in POST + request (GET will be sent by default) + """ + if json: + body = self.client.post(url, json=json).json() + else: + body = self.client.get(url).json() + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] if response_key is not None else body + # NOTE(ja): keystone returns values as list as {'values': [ ... ]} + # unlike other services which just return the list... + try: + data = data['values'] + except (KeyError, TypeError): + pass + + return [obj_class(self, res, loaded=True) for res in data if res] + + def _get(self, url, response_key=None): + """Get an object from collection. + + :param url: a partial URL, e.g., '/servers' + :param response_key: the key to be looked up in response dictionary, + e.g., 'server'. If response_key is None - all response body + will be used. + """ + body = self.client.get(url).json() + data = body[response_key] if response_key is not None else body + return self.resource_class(self, data, loaded=True) + + def _head(self, url): + """Retrieve request headers for an object. + + :param url: a partial URL, e.g., '/servers' + """ + resp = self.client.head(url) + return resp.status_code == 204 + + def _post(self, url, json, response_key=None, return_raw=False): + """Create an object. + + :param url: a partial URL, e.g., '/servers' + :param json: data that will be encoded as JSON and passed in POST + request (GET will be sent by default) + :param response_key: the key to be looked up in response dictionary, + e.g., 'server'. If response_key is None - all response body + will be used. + :param return_raw: flag to force returning raw JSON instead of + Python object of self.resource_class + """ + body = self.client.post(url, json=json).json() + data = body[response_key] if response_key is not None else body + if return_raw: + return data + return self.resource_class(self, data) + + def _put(self, url, json=None, response_key=None): + """Update an object with PUT method. + + :param url: a partial URL, e.g., '/servers' + :param json: data that will be encoded as JSON and passed in POST + request (GET will be sent by default) + :param response_key: the key to be looked up in response dictionary, + e.g., 'servers'. If response_key is None - all response body + will be used. + """ + resp = self.client.put(url, json=json) + # PUT requests may not return a body + if resp.content: + body = resp.json() + if response_key is not None: + return self.resource_class(self, body[response_key]) + else: + return self.resource_class(self, body) + + def _patch(self, url, json=None, response_key=None): + """Update an object with PATCH method. 
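        Illustrative call (a sketch; URL, payload and keys are hypothetical):

            self._patch('/servers/%s' % server_id,
                        json={'server': {'name': 'renamed'}},
                        response_key='server')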
+ + :param url: a partial URL, e.g., '/servers' + :param json: data that will be encoded as JSON and passed in POST + request (GET will be sent by default) + :param response_key: the key to be looked up in response dictionary, + e.g., 'servers'. If response_key is None - all response body + will be used. + """ + body = self.client.patch(url, json=json).json() + if response_key is not None: + return self.resource_class(self, body[response_key]) + else: + return self.resource_class(self, body) + + def _delete(self, url): + """Delete an object. + + :param url: a partial URL, e.g., '/servers/my-server' + """ + return self.client.delete(url) + + +@six.add_metaclass(abc.ABCMeta) +class ManagerWithFind(BaseManager): + """Manager with additional `find()`/`findall()` methods.""" + + @abc.abstractmethod + def list(self): + pass + + def find(self, **kwargs): + """Find a single item with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + matches = self.findall(**kwargs) + num_matches = len(matches) + if num_matches == 0: + msg = _("No %(name)s matching %(args)s.") % { + 'name': self.resource_class.__name__, + 'args': kwargs + } + raise exceptions.NotFound(msg) + elif num_matches > 1: + raise exceptions.NoUniqueMatch() + else: + return matches[0] + + def findall(self, **kwargs): + """Find all items with attributes matching ``**kwargs``. + + This isn't very efficient: it loads the entire list then filters on + the Python side. + """ + found = [] + searches = kwargs.items() + + for obj in self.list(): + try: + if all(getattr(obj, attr) == value + for (attr, value) in searches): + found.append(obj) + except AttributeError: + continue + + return found + + +class CrudManager(BaseManager): + """Base manager class for manipulating entities. + + Children of this class are expected to define a `collection_key` and `key`. + + - `collection_key`: Usually a plural noun by convention (e.g. `entities`); + used to refer collections in both URL's (e.g. `/v3/entities`) and JSON + objects containing a list of member resources (e.g. `{'entities': [{}, + {}, {}]}`). + - `key`: Usually a singular noun by convention (e.g. `entity`); used to + refer to an individual member of the collection. + + """ + collection_key = None + key = None + + def build_url(self, base_url=None, **kwargs): + """Builds a resource URL for the given kwargs. + + Given an example collection where `collection_key = 'entities'` and + `key = 'entity'`, the following URL's could be generated. + + By default, the URL will represent a collection of entities, e.g.:: + + /entities + + If kwargs contains an `entity_id`, then the URL will represent a + specific member, e.g.:: + + /entities/{entity_id} + + :param base_url: if provided, the generated URL will be appended to it + """ + url = base_url if base_url is not None else '' + + url += '/%s' % self.collection_key + + # do we have a specific entity? 
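        # Illustrative sketch (not part of the original patch): with
        # collection_key='entities' and key='entity',
        #     build_url()                   returns '/entities'
        #     build_url(entity_id='abc123') returns '/entities/abc123'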
+ entity_id = kwargs.get('%s_id' % self.key) + if entity_id is not None: + url += '/%s' % entity_id + + return url + + def _filter_kwargs(self, kwargs): + """Drop null values and handle ids.""" + for key, ref in six.iteritems(kwargs.copy()): + if ref is None: + kwargs.pop(key) + else: + if isinstance(ref, Resource): + kwargs.pop(key) + kwargs['%s_id' % key] = getid(ref) + return kwargs + + def create(self, **kwargs): + kwargs = self._filter_kwargs(kwargs) + return self._post( + self.build_url(**kwargs), + {self.key: kwargs}, + self.key) + + def get(self, **kwargs): + kwargs = self._filter_kwargs(kwargs) + return self._get( + self.build_url(**kwargs), + self.key) + + def head(self, **kwargs): + kwargs = self._filter_kwargs(kwargs) + return self._head(self.build_url(**kwargs)) + + def list(self, base_url=None, **kwargs): + """List the collection. + + :param base_url: if provided, the generated URL will be appended to it + """ + kwargs = self._filter_kwargs(kwargs) + + return self._list( + '%(base_url)s%(query)s' % { + 'base_url': self.build_url(base_url=base_url, **kwargs), + 'query': '?%s' % parse.urlencode(kwargs) if kwargs else '', + }, + self.collection_key) + + def put(self, base_url=None, **kwargs): + """Update an element. + + :param base_url: if provided, the generated URL will be appended to it + """ + kwargs = self._filter_kwargs(kwargs) + + return self._put(self.build_url(base_url=base_url, **kwargs)) + + def update(self, **kwargs): + kwargs = self._filter_kwargs(kwargs) + params = kwargs.copy() + params.pop('%s_id' % self.key) + + return self._patch( + self.build_url(**kwargs), + {self.key: params}, + self.key) + + def delete(self, **kwargs): + kwargs = self._filter_kwargs(kwargs) + + return self._delete( + self.build_url(**kwargs)) + + def find(self, base_url=None, **kwargs): + """Find a single item with attributes matching ``**kwargs``. + + :param base_url: if provided, the generated URL will be appended to it + """ + kwargs = self._filter_kwargs(kwargs) + + rl = self._list( + '%(base_url)s%(query)s' % { + 'base_url': self.build_url(base_url=base_url, **kwargs), + 'query': '?%s' % parse.urlencode(kwargs) if kwargs else '', + }, + self.collection_key) + num = len(rl) + + if num == 0: + msg = _("No %(name)s matching %(args)s.") % { + 'name': self.resource_class.__name__, + 'args': kwargs + } + raise exceptions.NotFound(404, msg) + elif num > 1: + raise exceptions.NoUniqueMatch + else: + return rl[0] + + +class Extension(HookableMixin): + """Extension descriptor.""" + + SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__') + manager_class = None + + def __init__(self, name, module): + super(Extension, self).__init__() + self.name = name + self.module = module + self._parse_extension_module() + + def _parse_extension_module(self): + self.manager_class = None + for attr_name, attr_value in self.module.__dict__.items(): + if attr_name in self.SUPPORTED_HOOKS: + self.add_hook(attr_name, attr_value) + else: + try: + if issubclass(attr_value, BaseManager): + self.manager_class = attr_value + except TypeError: + pass + + def __repr__(self): + return "" % self.name + + +class Resource(object): + """Base class for OpenStack resources (tenant, user, etc.). + + This is pretty much just a bag for attributes. + """ + + HUMAN_ID = False + NAME_ATTR = 'name' + + def __init__(self, manager, info, loaded=False): + """Populate and bind to a manager. 
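        Illustrative example (an editorial sketch; the attribute values are
        hypothetical):

            server = Resource(manager, {'id': 'abc123', 'name': 'db1'})
            server.name       # -> 'db1', exposed as an attribute
            server.to_dict()  # -> {'id': 'abc123', 'name': 'db1'}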
+ + :param manager: BaseManager object + :param info: dictionary representing resource attributes + :param loaded: prevent lazy-loading if set to True + """ + self.manager = manager + self._info = info + self._add_details(info) + self._loaded = loaded + + def __repr__(self): + reprkeys = sorted(k + for k in self.__dict__.keys() + if k[0] != '_' and k != 'manager') + info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys) + return "<%s %s>" % (self.__class__.__name__, info) + + @property + def human_id(self): + """Human-readable ID which can be used for bash completion. + """ + if self.HUMAN_ID: + name = getattr(self, self.NAME_ATTR, None) + if name is not None: + return strutils.to_slug(name) + return None + + def _add_details(self, info): + for (k, v) in six.iteritems(info): + try: + setattr(self, k, v) + self._info[k] = v + except AttributeError: + # In this case we already defined the attribute on the class + pass + + def __getattr__(self, k): + if k not in self.__dict__: + # NOTE(bcwaldon): disallow lazy-loading if already loaded once + if not self.is_loaded(): + self.get() + return self.__getattr__(k) + + raise AttributeError(k) + else: + return self.__dict__[k] + + def get(self): + """Support for lazy loading details. + + Some clients, such as novaclient have the option to lazy load the + details, details which can be loaded with this function. + """ + # set_loaded() first ... so if we have to bail, we know we tried. + self.set_loaded(True) + if not hasattr(self.manager, 'get'): + return + + new = self.manager.get(self.id) + if new: + self._add_details(new._info) + self._add_details( + {'x_request_id': self.manager.client.last_request_id}) + + def __eq__(self, other): + if not isinstance(other, Resource): + return NotImplemented + # two resources of different types are not equal + if not isinstance(other, self.__class__): + return False + if hasattr(self, 'id') and hasattr(other, 'id'): + return self.id == other.id + return self._info == other._info + + def is_loaded(self): + return self._loaded + + def set_loaded(self, val): + self._loaded = val + + def to_dict(self): + return copy.deepcopy(self._info) diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/client.py b/code/daisyclient/daisyclient/openstack/common/apiclient/client.py new file mode 100755 index 00000000..e1d95f67 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/client.py @@ -0,0 +1,388 @@ +# Copyright 2010 Jacob Kaplan-Moss +# Copyright 2011 OpenStack Foundation +# Copyright 2011 Piston Cloud Computing, Inc. +# Copyright 2013 Alessio Ababilov +# Copyright 2013 Grid Dynamics +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +OpenStack Client interface. Handles the REST calls and responses. 
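Illustrative wiring sketch (the keystoneclient plugin mirrors the
add_client() example further down in this module; credentials and URL are
placeholders):

    from keystoneclient.auth import keystone
    auth = keystone.KeystoneAuthPlugin(
        username='user', password='pass', tenant_name='tenant',
        auth_url='http://auth:5000/v2.0')
    http_client = HTTPClient(auth)
    # BaseClient subclasses (defined below) then route their calls through
    # http_client.client_request(), which attaches the token and endpoint.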
+""" + +# E0202: An attribute inherited from %s hide this method +# pylint: disable=E0202 + +import hashlib +import logging +import time + +try: + import simplejson as json +except ImportError: + import json + +from oslo_utils import encodeutils +from oslo_utils import importutils +import requests + +from daisyclient.openstack.common._i18n import _ +from daisyclient.openstack.common.apiclient import exceptions + +_logger = logging.getLogger(__name__) +SENSITIVE_HEADERS = ('X-Auth-Token', 'X-Subject-Token',) + + +class HTTPClient(object): + """This client handles sending HTTP requests to OpenStack servers. + + Features: + + - share authentication information between several clients to different + services (e.g., for compute and image clients); + - reissue authentication request for expired tokens; + - encode/decode JSON bodies; + - raise exceptions on HTTP errors; + - pluggable authentication; + - store authentication information in a keyring; + - store time spent for requests; + - register clients for particular services, so one can use + `http_client.identity` or `http_client.compute`; + - log requests and responses in a format that is easy to copy-and-paste + into terminal and send the same request with curl. + """ + + user_agent = "daisyclient.openstack.common.apiclient" + + def __init__(self, + auth_plugin, + region_name=None, + endpoint_type="publicURL", + original_ip=None, + verify=True, + cert=None, + timeout=None, + timings=False, + keyring_saver=None, + debug=False, + user_agent=None, + http=None): + self.auth_plugin = auth_plugin + + self.endpoint_type = endpoint_type + self.region_name = region_name + + self.original_ip = original_ip + self.timeout = timeout + self.verify = verify + self.cert = cert + + self.keyring_saver = keyring_saver + self.debug = debug + self.user_agent = user_agent or self.user_agent + + self.times = [] # [("item", starttime, endtime), ...] + self.timings = timings + + # requests within the same session can reuse TCP connections from pool + self.http = http or requests.Session() + + self.cached_token = None + self.last_request_id = None + + def _safe_header(self, name, value): + if name in SENSITIVE_HEADERS: + # because in python3 byte string handling is ... ug + v = value.encode('utf-8') + h = hashlib.sha1(v) + d = h.hexdigest() + return encodeutils.safe_decode(name), "{SHA1}%s" % d + else: + return (encodeutils.safe_decode(name), + encodeutils.safe_decode(value)) + + def _http_log_req(self, method, url, kwargs): + if not self.debug: + return + + string_parts = [ + "curl -g -i", + "-X '%s'" % method, + "'%s'" % url, + ] + + for element in kwargs['headers']: + header = ("-H '%s: %s'" % + self._safe_header(element, kwargs['headers'][element])) + string_parts.append(header) + + _logger.debug("REQ: %s" % " ".join(string_parts)) + if 'data' in kwargs: + _logger.debug("REQ BODY: %s\n" % (kwargs['data'])) + + def _http_log_resp(self, resp): + if not self.debug: + return + _logger.debug( + "RESP: [%s] %s\n", + resp.status_code, + resp.headers) + if resp._content_consumed: + _logger.debug( + "RESP BODY: %s\n", + resp.text) + + def serialize(self, kwargs): + if kwargs.get('json') is not None: + kwargs['headers']['Content-Type'] = 'application/json' + kwargs['data'] = json.dumps(kwargs['json']) + try: + del kwargs['json'] + except KeyError: + pass + + def get_timings(self): + return self.times + + def reset_timings(self): + self.times = [] + + def request(self, method, url, **kwargs): + """Send an http request with the specified characteristics. 
+ + Wrapper around `requests.Session.request` to handle tasks such as + setting headers, JSON encoding/decoding, and error handling. + + :param method: method of HTTP request + :param url: URL of HTTP request + :param kwargs: any other parameter that can be passed to + requests.Session.request (such as `headers`) or `json` + that will be encoded as JSON and used as `data` argument + """ + kwargs.setdefault("headers", {}) + kwargs["headers"]["User-Agent"] = self.user_agent + if self.original_ip: + kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % ( + self.original_ip, self.user_agent) + if self.timeout is not None: + kwargs.setdefault("timeout", self.timeout) + kwargs.setdefault("verify", self.verify) + if self.cert is not None: + kwargs.setdefault("cert", self.cert) + self.serialize(kwargs) + + self._http_log_req(method, url, kwargs) + if self.timings: + start_time = time.time() + resp = self.http.request(method, url, **kwargs) + if self.timings: + self.times.append(("%s %s" % (method, url), + start_time, time.time())) + self._http_log_resp(resp) + + self.last_request_id = resp.headers.get('x-openstack-request-id') + + if resp.status_code >= 400: + _logger.debug( + "Request returned failure status: %s", + resp.status_code) + raise exceptions.from_response(resp, method, url) + + return resp + + @staticmethod + def concat_url(endpoint, url): + """Concatenate endpoint and final URL. + + E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to + "http://keystone/v2.0/tokens". + + :param endpoint: the base URL + :param url: the final URL + """ + return "%s/%s" % (endpoint.rstrip("/"), url.strip("/")) + + def client_request(self, client, method, url, **kwargs): + """Send an http request using `client`'s endpoint and specified `url`. + + If request was rejected as unauthorized (possibly because the token is + expired), issue one authorization attempt and send the request once + again. + + :param client: instance of BaseClient descendant + :param method: method of HTTP request + :param url: URL of HTTP request + :param kwargs: any other parameter that can be passed to + `HTTPClient.request` + """ + + filter_args = { + "endpoint_type": client.endpoint_type or self.endpoint_type, + "service_type": client.service_type, + } + token, endpoint = (self.cached_token, client.cached_endpoint) + just_authenticated = False + if not (token and endpoint): + try: + token, endpoint = self.auth_plugin.token_and_endpoint( + **filter_args) + except exceptions.EndpointException: + pass + if not (token and endpoint): + self.authenticate() + just_authenticated = True + token, endpoint = self.auth_plugin.token_and_endpoint( + **filter_args) + if not (token and endpoint): + raise exceptions.AuthorizationFailure( + _("Cannot find endpoint or token for request")) + + old_token_endpoint = (token, endpoint) + kwargs.setdefault("headers", {})["X-Auth-Token"] = token + self.cached_token = token + client.cached_endpoint = endpoint + # Perform the request once. If we get Unauthorized, then it + # might be because the auth token expired, so try to + # re-authenticate and try again. If it still fails, bail. 
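(Reviewer sketch, not part of the patch: how the request() helper above is typically driven. The endpoint and body are invented and no auth plugin is wired up. The json kwarg is turned into a data payload by serialize(), and any response with status >= 400 is converted into a typed exception by exceptions.from_response().)

    from daisyclient.openstack.common.apiclient import exceptions

    http = HTTPClient(auth_plugin=None, timings=True)
    url = http.concat_url('http://127.0.0.1:19292/', '/v1/clusters')
    try:
        resp = http.request('POST', url, json={'cluster': {'name': 'demo'}})
        body = resp.json()
    except exceptions.HttpError as e:       # raised for any status >= 400
        print('%s (%s)' % (e.message, e.http_status))
    print(http.get_timings())               # [('POST http://...', t0, t1)]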
+ try: + return self.request( + method, self.concat_url(endpoint, url), **kwargs) + except exceptions.Unauthorized as unauth_ex: + if just_authenticated: + raise + self.cached_token = None + client.cached_endpoint = None + if self.auth_plugin.opts.get('token'): + self.auth_plugin.opts['token'] = None + if self.auth_plugin.opts.get('endpoint'): + self.auth_plugin.opts['endpoint'] = None + self.authenticate() + try: + token, endpoint = self.auth_plugin.token_and_endpoint( + **filter_args) + except exceptions.EndpointException: + raise unauth_ex + if (not (token and endpoint) or + old_token_endpoint == (token, endpoint)): + raise unauth_ex + self.cached_token = token + client.cached_endpoint = endpoint + kwargs["headers"]["X-Auth-Token"] = token + return self.request( + method, self.concat_url(endpoint, url), **kwargs) + + def add_client(self, base_client_instance): + """Add a new instance of :class:`BaseClient` descendant. + + `self` will store a reference to `base_client_instance`. + + Example: + + >>> def test_clients(): + ... from keystoneclient.auth import keystone + ... from openstack.common.apiclient import client + ... auth = keystone.KeystoneAuthPlugin( + ... username="user", password="pass", tenant_name="tenant", + ... auth_url="http://auth:5000/v2.0") + ... openstack_client = client.HTTPClient(auth) + ... # create nova client + ... from novaclient.v1_1 import client + ... client.Client(openstack_client) + ... # create keystone client + ... from keystoneclient.v2_0 import client + ... client.Client(openstack_client) + ... # use them + ... openstack_client.identity.tenants.list() + ... openstack_client.compute.servers.list() + """ + service_type = base_client_instance.service_type + if service_type and not hasattr(self, service_type): + setattr(self, service_type, base_client_instance) + + def authenticate(self): + self.auth_plugin.authenticate(self) + # Store the authentication results in the keyring for later requests + if self.keyring_saver: + self.keyring_saver.save(self) + + +class BaseClient(object): + """Top-level object to access the OpenStack API. + + This client uses :class:`HTTPClient` to send requests. :class:`HTTPClient` + will handle a bunch of issues such as authentication. + """ + + service_type = None + endpoint_type = None # "publicURL" will be used + cached_endpoint = None + + def __init__(self, http_client, extensions=None): + self.http_client = http_client + http_client.add_client(self) + + # Add in any extensions... + if extensions: + for extension in extensions: + if extension.manager_class: + setattr(self, extension.name, + extension.manager_class(self)) + + def client_request(self, method, url, **kwargs): + return self.http_client.client_request( + self, method, url, **kwargs) + + @property + def last_request_id(self): + return self.http_client.last_request_id + + def head(self, url, **kwargs): + return self.client_request("HEAD", url, **kwargs) + + def get(self, url, **kwargs): + return self.client_request("GET", url, **kwargs) + + def post(self, url, **kwargs): + return self.client_request("POST", url, **kwargs) + + def put(self, url, **kwargs): + return self.client_request("PUT", url, **kwargs) + + def delete(self, url, **kwargs): + return self.client_request("DELETE", url, **kwargs) + + def patch(self, url, **kwargs): + return self.client_request("PATCH", url, **kwargs) + + @staticmethod + def get_class(api_name, version, version_map): + """Returns the client class for the requested API version + + :param api_name: the name of the API, e.g. 
'compute', 'image', etc + :param version: the requested API version + :param version_map: a dict of client classes keyed by version + :rtype: a client class for the requested API version + """ + try: + client_path = version_map[str(version)] + except (KeyError, ValueError): + msg = _("Invalid %(api_name)s client version '%(version)s'. " + "Must be one of: %(version_map)s") % { + 'api_name': api_name, + 'version': version, + 'version_map': ', '.join(version_map.keys())} + raise exceptions.UnsupportedVersion(msg) + + return importutils.import_class(client_path) diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/exceptions.py b/code/daisyclient/daisyclient/openstack/common/apiclient/exceptions.py new file mode 100755 index 00000000..3fcfe6a2 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/exceptions.py @@ -0,0 +1,479 @@ +# Copyright 2010 Jacob Kaplan-Moss +# Copyright 2011 Nebula, Inc. +# Copyright 2013 Alessio Ababilov +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Exception definitions. +""" + +######################################################################## +# +# THIS MODULE IS DEPRECATED +# +# Please refer to +# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for +# the discussion leading to this deprecation. +# +# We recommend checking out the python-openstacksdk project +# (https://launchpad.net/python-openstacksdk) instead. +# +######################################################################## + +import inspect +import sys + +import six + +from daisyclient.openstack.common._i18n import _ + + +class ClientException(Exception): + """The base exception class for all exceptions this library raises. + """ + pass + + +class ValidationError(ClientException): + """Error in validation on API client side.""" + pass + + +class UnsupportedVersion(ClientException): + """User is trying to use an unsupported version of the API.""" + pass + + +class CommandError(ClientException): + """Error in CLI tool.""" + pass + + +class AuthorizationFailure(ClientException): + """Cannot authorize API client.""" + pass + + +class ConnectionError(ClientException): + """Cannot connect to API service.""" + pass + + +class ConnectionRefused(ConnectionError): + """Connection refused while trying to connect to API service.""" + pass + + +class AuthPluginOptionsMissing(AuthorizationFailure): + """Auth plugin misses some options.""" + def __init__(self, opt_names): + super(AuthPluginOptionsMissing, self).__init__( + _("Authentication failed. 
Missing options: %s") % + ", ".join(opt_names)) + self.opt_names = opt_names + + +class AuthSystemNotFound(AuthorizationFailure): + """User has specified an AuthSystem that is not installed.""" + def __init__(self, auth_system): + super(AuthSystemNotFound, self).__init__( + _("AuthSystemNotFound: %r") % auth_system) + self.auth_system = auth_system + + +class NoUniqueMatch(ClientException): + """Multiple entities found instead of one.""" + pass + + +class EndpointException(ClientException): + """Something is rotten in Service Catalog.""" + pass + + +class EndpointNotFound(EndpointException): + """Could not find requested endpoint in Service Catalog.""" + pass + + +class AmbiguousEndpoints(EndpointException): + """Found more than one matching endpoint in Service Catalog.""" + def __init__(self, endpoints=None): + super(AmbiguousEndpoints, self).__init__( + _("AmbiguousEndpoints: %r") % endpoints) + self.endpoints = endpoints + + +class HttpError(ClientException): + """The base exception class for all HTTP exceptions. + """ + http_status = 0 + message = _("HTTP Error") + + def __init__(self, message=None, details=None, + response=None, request_id=None, + url=None, method=None, http_status=None): + self.http_status = http_status or self.http_status + self.message = message or self.message + self.details = details + self.request_id = request_id + self.response = response + self.url = url + self.method = method + formatted_string = "%s (HTTP %s)" % (self.message, self.http_status) + if request_id: + formatted_string += " (Request-ID: %s)" % request_id + super(HttpError, self).__init__(formatted_string) + + +class HTTPRedirection(HttpError): + """HTTP Redirection.""" + message = _("HTTP Redirection") + + +class HTTPClientError(HttpError): + """Client-side HTTP error. + + Exception for cases in which the client seems to have erred. + """ + message = _("HTTP Client Error") + + +class HttpServerError(HttpError): + """Server-side HTTP error. + + Exception for cases in which the server is aware that it has + erred or is incapable of performing the request. + """ + message = _("HTTP Server Error") + + +class MultipleChoices(HTTPRedirection): + """HTTP 300 - Multiple Choices. + + Indicates multiple options for the resource that the client may follow. + """ + + http_status = 300 + message = _("Multiple Choices") + + +class BadRequest(HTTPClientError): + """HTTP 400 - Bad Request. + + The request cannot be fulfilled due to bad syntax. + """ + http_status = 400 + message = _("Bad Request") + + +class Unauthorized(HTTPClientError): + """HTTP 401 - Unauthorized. + + Similar to 403 Forbidden, but specifically for use when authentication + is required and has failed or has not yet been provided. + """ + http_status = 401 + message = _("Unauthorized") + + +class PaymentRequired(HTTPClientError): + """HTTP 402 - Payment Required. + + Reserved for future use. + """ + http_status = 402 + message = _("Payment Required") + + +class Forbidden(HTTPClientError): + """HTTP 403 - Forbidden. + + The request was a valid request, but the server is refusing to respond + to it. + """ + http_status = 403 + message = _("Forbidden") + + +class NotFound(HTTPClientError): + """HTTP 404 - Not Found. + + The requested resource could not be found but may be available again + in the future. + """ + http_status = 404 + message = _("Not Found") + + +class MethodNotAllowed(HTTPClientError): + """HTTP 405 - Method Not Allowed. + + A request was made of a resource using a request method not supported + by that resource. 
+ """ + http_status = 405 + message = _("Method Not Allowed") + + +class NotAcceptable(HTTPClientError): + """HTTP 406 - Not Acceptable. + + The requested resource is only capable of generating content not + acceptable according to the Accept headers sent in the request. + """ + http_status = 406 + message = _("Not Acceptable") + + +class ProxyAuthenticationRequired(HTTPClientError): + """HTTP 407 - Proxy Authentication Required. + + The client must first authenticate itself with the proxy. + """ + http_status = 407 + message = _("Proxy Authentication Required") + + +class RequestTimeout(HTTPClientError): + """HTTP 408 - Request Timeout. + + The server timed out waiting for the request. + """ + http_status = 408 + message = _("Request Timeout") + + +class Conflict(HTTPClientError): + """HTTP 409 - Conflict. + + Indicates that the request could not be processed because of conflict + in the request, such as an edit conflict. + """ + http_status = 409 + message = _("Conflict") + + +class Gone(HTTPClientError): + """HTTP 410 - Gone. + + Indicates that the resource requested is no longer available and will + not be available again. + """ + http_status = 410 + message = _("Gone") + + +class LengthRequired(HTTPClientError): + """HTTP 411 - Length Required. + + The request did not specify the length of its content, which is + required by the requested resource. + """ + http_status = 411 + message = _("Length Required") + + +class PreconditionFailed(HTTPClientError): + """HTTP 412 - Precondition Failed. + + The server does not meet one of the preconditions that the requester + put on the request. + """ + http_status = 412 + message = _("Precondition Failed") + + +class RequestEntityTooLarge(HTTPClientError): + """HTTP 413 - Request Entity Too Large. + + The request is larger than the server is willing or able to process. + """ + http_status = 413 + message = _("Request Entity Too Large") + + def __init__(self, *args, **kwargs): + try: + self.retry_after = int(kwargs.pop('retry_after')) + except (KeyError, ValueError): + self.retry_after = 0 + + super(RequestEntityTooLarge, self).__init__(*args, **kwargs) + + +class RequestUriTooLong(HTTPClientError): + """HTTP 414 - Request-URI Too Long. + + The URI provided was too long for the server to process. + """ + http_status = 414 + message = _("Request-URI Too Long") + + +class UnsupportedMediaType(HTTPClientError): + """HTTP 415 - Unsupported Media Type. + + The request entity has a media type which the server or resource does + not support. + """ + http_status = 415 + message = _("Unsupported Media Type") + + +class RequestedRangeNotSatisfiable(HTTPClientError): + """HTTP 416 - Requested Range Not Satisfiable. + + The client has asked for a portion of the file, but the server cannot + supply that portion. + """ + http_status = 416 + message = _("Requested Range Not Satisfiable") + + +class ExpectationFailed(HTTPClientError): + """HTTP 417 - Expectation Failed. + + The server cannot meet the requirements of the Expect request-header field. + """ + http_status = 417 + message = _("Expectation Failed") + + +class UnprocessableEntity(HTTPClientError): + """HTTP 422 - Unprocessable Entity. + + The request was well-formed but was unable to be followed due to semantic + errors. + """ + http_status = 422 + message = _("Unprocessable Entity") + + +class InternalServerError(HttpServerError): + """HTTP 500 - Internal Server Error. + + A generic error message, given when no more specific message is suitable. 
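(Reviewer note, not part of the patch: each concrete class in this hierarchy only pins http_status and message; the shared HttpError.__init__ above assembles the final text. A small illustration with invented values.)

    err = NotFound(details="no cluster with id 'c1'",
                   request_id='req-0714', url='/v1/clusters/c1', method='GET')
    str(err)          # "Not Found (HTTP 404) (Request-ID: req-0714)"
    err.http_status   # 404
    err.details       # "no cluster with id 'c1'"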
+ """ + http_status = 500 + message = _("Internal Server Error") + + +# NotImplemented is a python keyword. +class HttpNotImplemented(HttpServerError): + """HTTP 501 - Not Implemented. + + The server either does not recognize the request method, or it lacks + the ability to fulfill the request. + """ + http_status = 501 + message = _("Not Implemented") + + +class BadGateway(HttpServerError): + """HTTP 502 - Bad Gateway. + + The server was acting as a gateway or proxy and received an invalid + response from the upstream server. + """ + http_status = 502 + message = _("Bad Gateway") + + +class ServiceUnavailable(HttpServerError): + """HTTP 503 - Service Unavailable. + + The server is currently unavailable. + """ + http_status = 503 + message = _("Service Unavailable") + + +class GatewayTimeout(HttpServerError): + """HTTP 504 - Gateway Timeout. + + The server was acting as a gateway or proxy and did not receive a timely + response from the upstream server. + """ + http_status = 504 + message = _("Gateway Timeout") + + +class HttpVersionNotSupported(HttpServerError): + """HTTP 505 - HttpVersion Not Supported. + + The server does not support the HTTP protocol version used in the request. + """ + http_status = 505 + message = _("HTTP Version Not Supported") + + +# _code_map contains all the classes that have http_status attribute. +_code_map = dict( + (getattr(obj, 'http_status', None), obj) + for name, obj in six.iteritems(vars(sys.modules[__name__])) + if inspect.isclass(obj) and getattr(obj, 'http_status', False) +) + + +def from_response(response, method, url): + """Returns an instance of :class:`HttpError` or subclass based on response. + + :param response: instance of `requests.Response` class + :param method: HTTP method used for request + :param url: URL used for request + """ + + req_id = response.headers.get("x-openstack-request-id") + # NOTE(hdd) true for older versions of nova and cinder + if not req_id: + req_id = response.headers.get("x-compute-request-id") + kwargs = { + "http_status": response.status_code, + "response": response, + "method": method, + "url": url, + "request_id": req_id, + } + if "retry-after" in response.headers: + kwargs["retry_after"] = response.headers["retry-after"] + + content_type = response.headers.get("Content-Type", "") + if content_type.startswith("application/json"): + try: + body = response.json() + except ValueError: + pass + else: + if isinstance(body, dict): + error = body.get(list(body)[0]) + if isinstance(error, dict): + kwargs["message"] = (error.get("message") or + error.get("faultstring")) + kwargs["details"] = (error.get("details") or + six.text_type(body)) + elif content_type.startswith("text/"): + kwargs["details"] = response.text + + try: + cls = _code_map[response.status_code] + except KeyError: + if 500 <= response.status_code < 600: + cls = HttpServerError + elif 400 <= response.status_code < 500: + cls = HTTPClientError + else: + cls = HttpError + return cls(**kwargs) diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/fake_client.py b/code/daisyclient/daisyclient/openstack/common/apiclient/fake_client.py new file mode 100755 index 00000000..71e7f72b --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/fake_client.py @@ -0,0 +1,190 @@ +# Copyright 2013 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A fake server that "responds" to API methods with pre-canned responses. + +All of these responses come from the spec, so if for some reason the spec's +wrong the tests might raise AssertionError. I've indicated in comments the +places where actual behavior differs from the spec. +""" + +######################################################################## +# +# THIS MODULE IS DEPRECATED +# +# Please refer to +# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for +# the discussion leading to this deprecation. +# +# We recommend checking out the python-openstacksdk project +# (https://launchpad.net/python-openstacksdk) instead. +# +######################################################################## + +# W0102: Dangerous default value %s as argument +# pylint: disable=W0102 + +import json + +import requests +import six +from six.moves.urllib import parse + +from daisyclient.openstack.common.apiclient import client + + +def assert_has_keys(dct, required=None, optional=None): + required = required or [] + optional = optional or [] + for k in required: + try: + assert k in dct + except AssertionError: + extra_keys = set(dct.keys()).difference(set(required + optional)) + raise AssertionError("found unexpected keys: %s" % + list(extra_keys)) + + +class TestResponse(requests.Response): + """Wrap requests.Response and provide a convenient initialization. + """ + + def __init__(self, data): + super(TestResponse, self).__init__() + self._content_consumed = True + if isinstance(data, dict): + self.status_code = data.get('status_code', 200) + # Fake the text attribute to streamline Response creation + text = data.get('text', "") + if isinstance(text, (dict, list)): + self._content = json.dumps(text) + default_headers = { + "Content-Type": "application/json", + } + else: + self._content = text + default_headers = {} + if six.PY3 and isinstance(self._content, six.string_types): + self._content = self._content.encode('utf-8', 'strict') + self.headers = data.get('headers') or default_headers + else: + self.status_code = data + + def __eq__(self, other): + return (self.status_code == other.status_code and + self.headers == other.headers and + self._content == other._content) + + +class FakeHTTPClient(client.HTTPClient): + + def __init__(self, *args, **kwargs): + self.callstack = [] + self.fixtures = kwargs.pop("fixtures", None) or {} + if not args and "auth_plugin" not in kwargs: + args = (None, ) + super(FakeHTTPClient, self).__init__(*args, **kwargs) + + def assert_called(self, method, url, body=None, pos=-1): + """Assert than an API method was just called. + """ + expected = (method, url) + called = self.callstack[pos][0:2] + assert self.callstack, \ + "Expected %s %s but no calls were made." % expected + + assert expected == called, 'Expected %s %s; got %s %s' % \ + (expected + called) + + if body is not None: + if self.callstack[pos][3] != body: + raise AssertionError('%r != %r' % + (self.callstack[pos][3], body)) + + def assert_called_anytime(self, method, url, body=None): + """Assert than an API method was called anytime in the test. 
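(Reviewer sketch, not part of the patch: TestResponse above accepts either a dict describing the canned reply or a bare status code; the fixture dispatch in client_request further down wraps canned (headers, body) pairs in it. Values here are invented.)

    ok = TestResponse({'status_code': 200, 'text': {'clusters': []}})
    ok.status_code          # 200
    ok.json()               # {'clusters': []} (dict bodies are json.dumps'ed)
    missing = TestResponse(404)
    missing.status_code     # 404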
+ """ + expected = (method, url) + + assert self.callstack, \ + "Expected %s %s but no calls were made." % expected + + found = False + entry = None + for entry in self.callstack: + if expected == entry[0:2]: + found = True + break + + assert found, 'Expected %s %s; got %s' % \ + (method, url, self.callstack) + if body is not None: + assert entry[3] == body, "%s != %s" % (entry[3], body) + + self.callstack = [] + + def clear_callstack(self): + self.callstack = [] + + def authenticate(self): + pass + + def client_request(self, client, method, url, **kwargs): + # Check that certain things are called correctly + if method in ["GET", "DELETE"]: + assert "json" not in kwargs + + # Note the call + self.callstack.append( + (method, + url, + kwargs.get("headers") or {}, + kwargs.get("json") or kwargs.get("data"))) + try: + fixture = self.fixtures[url][method] + except KeyError: + pass + else: + return TestResponse({"headers": fixture[0], + "text": fixture[1]}) + + # Call the method + args = parse.parse_qsl(parse.urlparse(url)[4]) + kwargs.update(args) + munged_url = url.rsplit('?', 1)[0] + munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_') + munged_url = munged_url.replace('-', '_') + + callback = "%s_%s" % (method.lower(), munged_url) + + if not hasattr(self, callback): + raise AssertionError('Called unknown API method: %s %s, ' + 'expected fakes method name: %s' % + (method, url, callback)) + + resp = getattr(self, callback)(**kwargs) + if len(resp) == 3: + status, headers, body = resp + else: + status, body = resp + headers = {} + self.last_request_id = headers.get('x-openstack-request-id', + 'req-test') + return TestResponse({ + "status_code": status, + "text": body, + "headers": headers, + }) diff --git a/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py b/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py new file mode 100755 index 00000000..de0a34d4 --- /dev/null +++ b/code/daisyclient/daisyclient/openstack/common/apiclient/utils.py @@ -0,0 +1,100 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +######################################################################## +# +# THIS MODULE IS DEPRECATED +# +# Please refer to +# https://etherpad.openstack.org/p/kilo-glanceclient-library-proposals for +# the discussion leading to this deprecation. +# +# We recommend checking out the python-openstacksdk project +# (https://launchpad.net/python-openstacksdk) instead. +# +######################################################################## + +from oslo_utils import encodeutils +from oslo_utils import uuidutils +import six + +from daisyclient.openstack.common._i18n import _ +from daisyclient.openstack.common.apiclient import exceptions + + +def find_resource(manager, name_or_id, **find_args): + """Look for resource in a given manager. + + Used as a helper for the _find_* methods. + Example: + + .. code-block:: python + + def _find_hypervisor(cs, hypervisor): + #Get a hypervisor by name or ID. 
+ return cliutils.find_resource(cs.hypervisors, hypervisor) + """ + # first try to get entity as integer id + try: + return manager.get(int(name_or_id)) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # now try to get entity as uuid + try: + if six.PY2: + tmp_id = encodeutils.safe_encode(name_or_id) + else: + tmp_id = encodeutils.safe_decode(name_or_id) + + if uuidutils.is_uuid_like(tmp_id): + return manager.get(tmp_id) + except (TypeError, ValueError, exceptions.NotFound): + pass + + # for str id which is not uuid + if getattr(manager, 'is_alphanum_id_allowed', False): + try: + return manager.get(name_or_id) + except exceptions.NotFound: + pass + + try: + try: + return manager.find(human_id=name_or_id, **find_args) + except exceptions.NotFound: + pass + + # finally try to find entity by name + try: + resource = getattr(manager, 'resource_class', None) + name_attr = resource.NAME_ATTR if resource else 'name' + kwargs = {name_attr: name_or_id} + kwargs.update(find_args) + return manager.find(**kwargs) + except exceptions.NotFound: + msg = _("No %(name)s with a name or " + "ID of '%(name_or_id)s' exists.") % \ + { + "name": manager.resource_class.__name__.lower(), + "name_or_id": name_or_id + } + raise exceptions.CommandError(msg) + except exceptions.NoUniqueMatch: + msg = _("Multiple %(name)s matches found for " + "'%(name_or_id)s', use an ID to be more specific.") % \ + { + "name": manager.resource_class.__name__.lower(), + "name_or_id": name_or_id + } + raise exceptions.CommandError(msg) diff --git a/code/daisyclient/daisyclient/shell.py b/code/daisyclient/daisyclient/shell.py new file mode 100755 index 00000000..548ecef4 --- /dev/null +++ b/code/daisyclient/daisyclient/shell.py @@ -0,0 +1,714 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Command-line interface to the OpenStack Images API. +""" + +from __future__ import print_function + +import argparse +import copy +import getpass +import json +import logging +import os +from os.path import expanduser +import sys +import traceback + +from oslo_utils import encodeutils +from oslo_utils import importutils +import six.moves.urllib.parse as urlparse + +import daisyclient +from daisyclient import _i18n +from daisyclient.common import utils +from daisyclient import exc + +from keystoneclient.auth.identity import v2 as v2_auth +from keystoneclient.auth.identity import v3 as v3_auth +from keystoneclient import discover +from keystoneclient.openstack.common.apiclient import exceptions as ks_exc +from keystoneclient import session + +osprofiler_profiler = importutils.try_import("osprofiler.profiler") +_ = _i18n._ + + +class DaisyShell(object): + + def _append_global_identity_args(self, parser): + # FIXME(bobt): these are global identity (Keystone) arguments which + # should be consistent and shared by all service clients. Therefore, + # they should be provided by python-keystoneclient. 
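(Reviewer note, not part of the patch: find_resource() in the apiclient utils module above walks a fixed lookup chain, integer id, then UUID, then plain string id if the manager allows it, then human_id, then NAME_ATTR, so one call resolves ids and names alike. The manager and values below are invented.)

    from daisyclient.openstack.common.apiclient.utils import find_resource

    find_resource(client.clusters, 3)                        # integer id
    find_resource(client.clusters,
                  '6f7c4d2e-91a4-4d7a-9b1a-0f2c3d4e5f6a')    # UUID
    find_resource(client.clusters, 'cluster-demo')           # human_id / name
    # nothing matches -> exceptions.CommandError:
    #   "No cluster with a name or ID of 'cluster-demo' exists."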
We will need to + # refactor this code once this functionality is avaible in + # python-keystoneclient. See + # + # https://bugs.launchpad.net/python-keystoneclient/+bug/1332337 + # + parser.add_argument('-k', '--insecure', + default=False, + action='store_true', + help='Explicitly allow daisyclient to perform ' + '\"insecure SSL\" (https) requests. The server\'s ' + 'certificate will not be verified against any ' + 'certificate authorities. This option should ' + 'be used with caution.') + + parser.add_argument('--os-cert', + help='Path of certificate file to use in SSL ' + 'connection. This file can optionally be ' + 'prepended with the private key.') + + parser.add_argument('--cert-file', + dest='os_cert', + help='DEPRECATED! Use --os-cert.') + + parser.add_argument('--os-key', + help='Path of client key to use in SSL ' + 'connection. This option is not necessary ' + 'if your key is prepended to your cert file.') + + parser.add_argument('--key-file', + dest='os_key', + help='DEPRECATED! Use --os-key.') + + parser.add_argument('--os-cacert', + metavar='', + dest='os_cacert', + default=utils.env('OS_CACERT'), + help='Path of CA TLS certificate(s) used to ' + 'verify the remote server\'s certificate. ' + 'Without this option glance looks for the ' + 'default system CA certificates.') + + parser.add_argument('--ca-file', + dest='os_cacert', + help='DEPRECATED! Use --os-cacert.') + + parser.add_argument('--os-username', + default=utils.env('OS_USERNAME'), + help='Defaults to env[OS_USERNAME].') + + parser.add_argument('--os_username', + help=argparse.SUPPRESS) + + parser.add_argument('--os-user-id', + default=utils.env('OS_USER_ID'), + help='Defaults to env[OS_USER_ID].') + + parser.add_argument('--os-user-domain-id', + default=utils.env('OS_USER_DOMAIN_ID'), + help='Defaults to env[OS_USER_DOMAIN_ID].') + + parser.add_argument('--os-user-domain-name', + default=utils.env('OS_USER_DOMAIN_NAME'), + help='Defaults to env[OS_USER_DOMAIN_NAME].') + + parser.add_argument('--os-project-id', + default=utils.env('OS_PROJECT_ID'), + help='Another way to specify tenant ID. ' + 'This option is mutually exclusive with ' + ' --os-tenant-id. ' + 'Defaults to env[OS_PROJECT_ID].') + + parser.add_argument('--os-project-name', + default=utils.env('OS_PROJECT_NAME'), + help='Another way to specify tenant name. ' + 'This option is mutually exclusive with ' + ' --os-tenant-name. 
' + 'Defaults to env[OS_PROJECT_NAME].') + + parser.add_argument('--os-project-domain-id', + default=utils.env('OS_PROJECT_DOMAIN_ID'), + help='Defaults to env[OS_PROJECT_DOMAIN_ID].') + + parser.add_argument('--os-project-domain-name', + default=utils.env('OS_PROJECT_DOMAIN_NAME'), + help='Defaults to env[OS_PROJECT_DOMAIN_NAME].') + + parser.add_argument('--os-password', + default=utils.env('OS_PASSWORD'), + help='Defaults to env[OS_PASSWORD].') + + parser.add_argument('--os_password', + help=argparse.SUPPRESS) + + parser.add_argument('--os-tenant-id', + default=utils.env('OS_TENANT_ID'), + help='Defaults to env[OS_TENANT_ID].') + + parser.add_argument('--os_tenant_id', + help=argparse.SUPPRESS) + + parser.add_argument('--os-tenant-name', + default=utils.env('OS_TENANT_NAME'), + help='Defaults to env[OS_TENANT_NAME].') + + parser.add_argument('--os_tenant_name', + help=argparse.SUPPRESS) + + parser.add_argument('--os-auth-url', + default=utils.env('OS_AUTH_URL'), + help='Defaults to env[OS_AUTH_URL].') + + parser.add_argument('--os_auth_url', + help=argparse.SUPPRESS) + + parser.add_argument('--os-region-name', + default=utils.env('OS_REGION_NAME'), + help='Defaults to env[OS_REGION_NAME].') + + parser.add_argument('--os_region_name', + help=argparse.SUPPRESS) + + parser.add_argument('--os-auth-token', + default=utils.env('OS_AUTH_TOKEN'), + help='Defaults to env[OS_AUTH_TOKEN].') + + parser.add_argument('--os_auth_token', + help=argparse.SUPPRESS) + + parser.add_argument('--os-service-type', + default=utils.env('OS_SERVICE_TYPE'), + help='Defaults to env[OS_SERVICE_TYPE].') + + parser.add_argument('--os_service_type', + help=argparse.SUPPRESS) + + parser.add_argument('--os-endpoint-type', + default=utils.env('OS_ENDPOINT_TYPE'), + help='Defaults to env[OS_ENDPOINT_TYPE].') + + parser.add_argument('--os_endpoint_type', + help=argparse.SUPPRESS) + + parser.add_argument('--os-endpoint', + default=utils.env('OS_ENDPOINT'), + help='Defaults to env[OS_ENDPOINT].') + + parser.add_argument('--os_endpoint', + help=argparse.SUPPRESS) + + def get_base_parser(self): + parser = argparse.ArgumentParser( + prog='daisy', + description=__doc__.strip(), + epilog='See "daisy help COMMAND" ' + 'for help on a specific command.', + add_help=False, + formatter_class=HelpFormatter, + ) + + # Global arguments + parser.add_argument('-h', '--help', + action='store_true', + help=argparse.SUPPRESS, + ) + + parser.add_argument('--version', + action='version', + version=daisyclient.__version__) + + parser.add_argument('-d', '--debug', + default=bool(utils.env('GLANCECLIENT_DEBUG')), + action='store_true', + help='Defaults to env[GLANCECLIENT_DEBUG].') + + parser.add_argument('-v', '--verbose', + default=False, action="store_true", + help="Print more verbose output") + + parser.add_argument('--get-schema', + default=False, action="store_true", + dest='get_schema', + help='Ignores cached copy and forces retrieval ' + 'of schema that generates portions of the ' + 'help text. 
Ignored with API version 1.') + + parser.add_argument('--timeout', + default=600, + help='Number of seconds to wait for a response') + + parser.add_argument('--no-ssl-compression', + dest='ssl_compression', + default=True, action='store_false', + help='Disable SSL compression when using https.') + + parser.add_argument('-f', '--force', + dest='force', + default=False, action='store_true', + help='Prevent select actions from requesting ' + 'user confirmation.') + + parser.add_argument('--os-image-url', + default=utils.env('OS_IMAGE_URL'), + help=('Defaults to env[OS_IMAGE_URL]. ' + 'If the provided image url contains ' + 'a version number and ' + '`--os-image-api-version` is omitted ' + 'the version of the URL will be picked as ' + 'the image api version to use.')) + + parser.add_argument('--os_image_url', + help=argparse.SUPPRESS) + + parser.add_argument('--os-image-api-version', + default=utils.env('OS_IMAGE_API_VERSION', + default=None), + help='Defaults to env[OS_IMAGE_API_VERSION] or 1.') + + parser.add_argument('--os_image_api_version', + help=argparse.SUPPRESS) + + if osprofiler_profiler: + parser.add_argument('--profile', + metavar='HMAC_KEY', + help='HMAC key to use for encrypting context ' + 'data for performance profiling of operation. ' + 'This key should be the value of HMAC key ' + 'configured in osprofiler middleware in ' + 'glance, it is specified in paste ' + 'configuration file at ' + '/etc/glance/api-paste.ini and ' + '/etc/glance/registry-paste.ini. Without key ' + 'the profiling will not be triggered even ' + 'if osprofiler is enabled on server side.') + + # FIXME(bobt): this method should come from python-keystoneclient + self._append_global_identity_args(parser) + + return parser + + def get_subcommand_parser(self, version): + parser = self.get_base_parser() + + self.subcommands = {} + subparsers = parser.add_subparsers(metavar='') + try: + submodule = utils.import_versioned_module(version, 'shell') + except ImportError: + print('"%s" is not a supported API version. Example ' + 'values are "1" or "2".' % version) + utils.exit() + + self._find_actions(subparsers, submodule) + self._find_actions(subparsers, self) + + self._add_bash_completion_subparser(subparsers) + + return parser + + def _find_actions(self, subparsers, actions_module): + for attr in (a for a in dir(actions_module) if a.startswith('do_')): + # I prefer to be hypen-separated instead of underscores. + command = attr[3:].replace('_', '-') + callback = getattr(actions_module, attr) + desc = callback.__doc__ or '' + help = desc.strip().split('\n')[0] + arguments = getattr(callback, 'arguments', []) + + subparser = subparsers.add_parser(command, + help=help, + description=desc, + add_help=False, + formatter_class=HelpFormatter + ) + subparser.add_argument('-h', '--help', + action='help', + help=argparse.SUPPRESS, + ) + self.subcommands[command] = subparser + for (args, kwargs) in arguments: + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=callback) + + def _add_bash_completion_subparser(self, subparsers): + subparser = subparsers.add_parser('bash_completion', + add_help=False, + formatter_class=HelpFormatter) + self.subcommands['bash_completion'] = subparser + subparser.set_defaults(func=self.do_bash_completion) + + def _get_image_url(self, args): + """Translate the available url-related options into a single string. + + Return the endpoint that should be used to talk to Glance if a + clear decision can be made. Otherwise, return None. 
+        """
+        if args.os_image_url:
+            return args.os_image_url
+        else:
+            return None
+
+    def _discover_auth_versions(self, session, auth_url):
+        # discover the API versions the server is supporting based on the
+        # given URL
+        v2_auth_url = None
+        v3_auth_url = None
+        try:
+            ks_discover = discover.Discover(session=session, auth_url=auth_url)
+            v2_auth_url = ks_discover.url_for('2.0')
+            v3_auth_url = ks_discover.url_for('3.0')
+        except ks_exc.ClientException as e:
+            # Identity service may not support API version discovery.
+            # Let's try to figure out the API version from the original URL.
+            url_parts = urlparse.urlparse(auth_url)
+            (scheme, netloc, path, params, query, fragment) = url_parts
+            path = path.lower()
+            if path.startswith('/v3'):
+                v3_auth_url = auth_url
+            elif path.startswith('/v2'):
+                v2_auth_url = auth_url
+            else:
+                # not enough information to determine the auth version
+                msg = ('Unable to determine the Keystone version '
+                       'to authenticate with using the given '
+                       'auth_url. Identity service may not support API '
+                       'version discovery. Please provide a versioned '
+                       'auth_url instead. error=%s') % (e)
+                raise exc.CommandError(msg)
+
+        return (v2_auth_url, v3_auth_url)
+
+    def _get_keystone_session(self, **kwargs):
+        ks_session = session.Session.construct(kwargs)
+
+        # discover the supported keystone versions using the given auth url
+        auth_url = kwargs.pop('auth_url', None)
+        (v2_auth_url, v3_auth_url) = self._discover_auth_versions(
+            session=ks_session,
+            auth_url=auth_url)
+
+        # Determine which authentication plugin to use. First inspect the
+        # auth_url to see the supported version. If both v3 and v2 are
+        # supported, then use the highest version if possible.
+        user_id = kwargs.pop('user_id', None)
+        username = kwargs.pop('username', None)
+        password = kwargs.pop('password', None)
+        user_domain_name = kwargs.pop('user_domain_name', None)
+        user_domain_id = kwargs.pop('user_domain_id', None)
+        # project and tenant can be used interchangeably
+        project_id = (kwargs.pop('project_id', None) or
+                      kwargs.pop('tenant_id', None))
+        project_name = (kwargs.pop('project_name', None) or
+                        kwargs.pop('tenant_name', None))
+        project_domain_id = kwargs.pop('project_domain_id', None)
+        project_domain_name = kwargs.pop('project_domain_name', None)
+        auth = None
+
+        use_domain = (user_domain_id or
+                      user_domain_name or
+                      project_domain_id or
+                      project_domain_name)
+        use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
+        use_v2 = v2_auth_url and not use_domain
+
+        if use_v3:
+            auth = v3_auth.Password(
+                v3_auth_url,
+                user_id=user_id,
+                username=username,
+                password=password,
+                user_domain_id=user_domain_id,
+                user_domain_name=user_domain_name,
+                project_id=project_id,
+                project_name=project_name,
+                project_domain_id=project_domain_id,
+                project_domain_name=project_domain_name)
+        elif use_v2:
+            auth = v2_auth.Password(
+                v2_auth_url,
+                username,
+                password,
+                tenant_id=project_id,
+                tenant_name=project_name)
+        else:
+            # if we get here it means domain information is provided
+            # (caller meant to use Keystone V3) but the auth url is
+            # actually Keystone V2. Obviously we can't authenticate a V3
+            # user using V2.
+            raise exc.CommandError(
+                "Credential and auth_url mismatch. The given "
+                "auth_url is using Keystone V2 endpoint, which "
+                "may not be able to handle Keystone V3 credentials. 
" + "Please provide a correct Keystone V3 auth_url.") + + ks_session.auth = auth + return ks_session + + def _get_endpoint_and_token(self, args, force_auth=False): + image_url = self._get_image_url(args) + auth_token = args.os_auth_token + + auth_reqd = force_auth or (utils.is_authentication_required(args.func) + and not (auth_token and image_url)) + + if not auth_reqd: + endpoint = image_url + token = args.os_auth_token + else: + + if not args.os_username: + raise exc.CommandError( + _("You must provide a username via" + " either --os-username or " + "env[OS_USERNAME]")) + + if not args.os_password: + # No password, If we've got a tty, try prompting for it + if hasattr(sys.stdin, 'isatty') and sys.stdin.isatty(): + # Check for Ctl-D + try: + args.os_password = getpass.getpass('OS Password: ') + except EOFError: + pass + # No password because we didn't have a tty or the + # user Ctl-D when prompted. + if not args.os_password: + raise exc.CommandError( + _("You must provide a password via " + "either --os-password, " + "env[OS_PASSWORD], " + "or prompted response")) + + # Validate password flow auth + project_info = ( + args.os_tenant_name or args.os_tenant_id or ( + args.os_project_name and ( + args.os_project_domain_name or + args.os_project_domain_id + ) + ) or args.os_project_id + ) + + if not project_info: + # tenant is deprecated in Keystone v3. Use the latest + # terminology instead. + raise exc.CommandError( + _("You must provide a project_id or project_name (" + "with project_domain_name or project_domain_id) " + "via " + " --os-project-id (env[OS_PROJECT_ID])" + " --os-project-name (env[OS_PROJECT_NAME])," + " --os-project-domain-id " + "(env[OS_PROJECT_DOMAIN_ID])" + " --os-project-domain-name " + "(env[OS_PROJECT_DOMAIN_NAME])")) + + if not args.os_auth_url: + raise exc.CommandError( + _("You must provide an auth url via" + " either --os-auth-url or " + "via env[OS_AUTH_URL]")) + + kwargs = { + 'auth_url': args.os_auth_url, + 'username': args.os_username, + 'user_id': args.os_user_id, + 'user_domain_id': args.os_user_domain_id, + 'user_domain_name': args.os_user_domain_name, + 'password': args.os_password, + 'tenant_name': args.os_tenant_name, + 'tenant_id': args.os_tenant_id, + 'project_name': args.os_project_name, + 'project_id': args.os_project_id, + 'project_domain_name': args.os_project_domain_name, + 'project_domain_id': args.os_project_domain_id, + 'insecure': args.insecure, + 'cacert': args.os_cacert, + 'cert': args.os_cert, + 'key': args.os_key + } + ks_session = self._get_keystone_session(**kwargs) + token = args.os_auth_token or ks_session.get_token() + + endpoint_type = args.os_endpoint_type or 'public' + service_type = args.os_service_type or 'image' + endpoint = args.os_image_url or ks_session.get_endpoint( + service_type=service_type, + interface=endpoint_type, + region_name=args.os_region_name) + + return endpoint, token + + def _get_versioned_client(self, api_version, args, force_auth=False): + #endpoint, token = self._get_endpoint_and_token(args,force_auth=force_auth) + #endpoint = "http://10.43.175.62:19292" + endpoint = args.os_endpoint + #print endpoint + kwargs = { + #'token': token, + 'insecure': args.insecure, + 'timeout': args.timeout, + 'cacert': args.os_cacert, + 'cert': args.os_cert, + 'key': args.os_key, + 'ssl_compression': args.ssl_compression + } + client = daisyclient.Client(api_version, endpoint, **kwargs) + return client + + def _cache_schemas(self, options, home_dir='~/.daisyclient'): + homedir = expanduser(home_dir) + if not 
os.path.exists(homedir): + os.makedirs(homedir) + + resources = ['image', 'metadefs/namespace', 'metadefs/resource_type'] + schema_file_paths = [homedir + os.sep + x + '_schema.json' + for x in ['image', 'namespace', 'resource_type']] + + client = None + for resource, schema_file_path in zip(resources, schema_file_paths): + if (not os.path.exists(schema_file_path)) or options.get_schema: + try: + if not client: + client = self._get_versioned_client('2', options, + force_auth=True) + schema = client.schemas.get(resource) + + with open(schema_file_path, 'w') as f: + f.write(json.dumps(schema.raw())) + except Exception: + # NOTE(esheffield) do nothing here, we'll get a message + # later if the schema is missing + pass + + def main(self, argv): + # Parse args once to find version + + # NOTE(flepied) Under Python3, parsed arguments are removed + # from the list so make a copy for the first parsing + base_argv = copy.deepcopy(argv) + parser = self.get_base_parser() + (options, args) = parser.parse_known_args(base_argv) + + try: + # NOTE(flaper87): Try to get the version from the + # image-url first. If no version was specified, fallback + # to the api-image-version arg. If both of these fail then + # fallback to the minimum supported one and let keystone + # do the magic. + endpoint = self._get_image_url(options) + endpoint, url_version = utils.strip_version(endpoint) + except ValueError: + # NOTE(flaper87): ValueError is raised if no endpoint is povided + url_version = None + + # build available subcommands based on version + try: + api_version = int(options.os_image_api_version or url_version or 1) + except ValueError: + print("Invalid API version parameter") + utils.exit() + + if api_version == 2: + self._cache_schemas(options) + + subcommand_parser = self.get_subcommand_parser(api_version) + self.parser = subcommand_parser + + # Handle top-level --help/-h before attempting to parse + # a command off the command line + if options.help or not argv: + self.do_help(options) + return 0 + + # Parse args again and call whatever callback was selected + args = subcommand_parser.parse_args(argv) + + # Short-circuit and deal with help command right away. + if args.func == self.do_help: + self.do_help(args) + return 0 + elif args.func == self.do_bash_completion: + self.do_bash_completion(args) + return 0 + + LOG = logging.getLogger('daisyclient') + LOG.addHandler(logging.StreamHandler()) + LOG.setLevel(logging.DEBUG if args.debug else logging.INFO) + + profile = osprofiler_profiler and options.profile + if profile: + osprofiler_profiler.init(options.profile) + + client = self._get_versioned_client(api_version, args, + force_auth=False) + + try: + args.func(client, args) + except exc.Unauthorized: + raise exc.CommandError("Invalid OpenStack Identity credentials.") + except Exception: + # NOTE(kragniz) Print any exceptions raised to stderr if the + # --debug flag is set + if args.debug: + traceback.print_exc() + raise + finally: + if profile: + trace_id = osprofiler_profiler.get().get_base_id() + print("Profiling trace ID: %s" % trace_id) + print("To display trace use next command:\n" + "osprofiler trace show --html %s " % trace_id) + + @utils.arg('command', metavar='', nargs='?', + help='Display help for .') + def do_help(self, args): + """ + Display help about this program or one of its subcommands. 
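(Reviewer sketch, not part of the patch: the dispatch implemented in main() above, driven programmatically. The endpoint value is invented, and the cluster-list subcommand name is an assumption about daisyclient.v1.shell, which is not shown in this hunk.)

    shell = DaisyShell()
    shell.main(['--os-endpoint', 'http://127.0.0.1:19292',
                '--os-image-api-version', '1',
                'cluster-list'])
    # api_version resolves to 1, get_subcommand_parser(1) imports
    # daisyclient.v1.shell, and args.func(client, args) runs the matching
    # do_* handler with the client built by _get_versioned_client().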
+ """ + if getattr(args, 'command', None): + if args.command in self.subcommands: + self.subcommands[args.command].print_help() + else: + raise exc.CommandError("'%s' is not a valid subcommand" % + args.command) + else: + self.parser.print_help() + + def do_bash_completion(self, _args): + """Prints arguments for bash_completion. + + Prints all of the commands and options to stdout so that the + daisy.bash_completion script doesn't have to hard code them. + """ + commands = set() + options = set() + for sc_str, sc in self.subcommands.items(): + commands.add(sc_str) + for option in sc._optionals._option_string_actions.keys(): + options.add(option) + + commands.remove('bash_completion') + commands.remove('bash-completion') + print(' '.join(commands | options)) + + +class HelpFormatter(argparse.HelpFormatter): + def start_section(self, heading): + # Title-case the headings + heading = '%s%s' % (heading[0].upper(), heading[1:]) + super(HelpFormatter, self).start_section(heading) + + +def main(): + try: + DaisyShell().main(map(encodeutils.safe_decode, sys.argv[1:])) + except KeyboardInterrupt: + utils.exit('... terminating glance client', exit_code=130) + except Exception as e: + utils.exit(utils.exception_to_str(e)) diff --git a/code/daisyclient/daisyclient/v1/__init__.py b/code/daisyclient/daisyclient/v1/__init__.py new file mode 100755 index 00000000..3fb105d8 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from daisyclient.v1.client import Client # noqa diff --git a/code/daisyclient/daisyclient/v1/client.py b/code/daisyclient/daisyclient/v1/client.py new file mode 100755 index 00000000..3c065b8c --- /dev/null +++ b/code/daisyclient/daisyclient/v1/client.py @@ -0,0 +1,66 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from daisyclient.common import http +from daisyclient.common import utils +from daisyclient.v1.image_members import ImageMemberManager +from daisyclient.v1.images import ImageManager +from daisyclient.v1.hosts import HostManager +from daisyclient.v1.clusters import ClusterManager +from daisyclient.v1.components import ComponentManager +from daisyclient.v1.services import ServiceManager +from daisyclient.v1.roles import RoleManager +from daisyclient.v1.cluster_hosts import ClusterHostManager +from daisyclient.v1.config_files import Config_fileManager +from daisyclient.v1.config_sets import Config_setManager +from daisyclient.v1.networks import NetworkManager +from daisyclient.v1.configs import ConfigManager +from daisyclient.v1.install import InstallManager +from daisyclient.v1.uninstall import UninstallManager +from daisyclient.v1.update import UpdateManager +from daisyclient.v1.disk_array import DiskArrayManager +from daisyclient.v1.template import TemplateManager +class Client(object): + """Client for the OpenStack Images v1 API. + + :param string endpoint: A user-supplied endpoint URL for the glance + service. + :param string token: Token for authentication. + :param integer timeout: Allows customization of the timeout for client + http requests. (optional) + """ + + def __init__(self, endpoint, *args, **kwargs): + """Initialize a new client for the daisy v1 API.""" + endpoint, version = utils.strip_version(endpoint) + self.version = version or 1.0 + self.http_client = http.HTTPClient(endpoint, *args, **kwargs) + self.images = ImageManager(self.http_client) + self.image_members = ImageMemberManager(self.http_client) + self.hosts = HostManager(self.http_client) + self.clusters = ClusterManager(self.http_client) + self.components = ComponentManager(self.http_client) + self.services = ServiceManager(self.http_client) + self.roles = RoleManager(self.http_client) + self.cluster_hosts = ClusterHostManager(self.http_client) + self.config_files = Config_fileManager(self.http_client) + self.config_sets = Config_setManager(self.http_client) + self.networks = NetworkManager(self.http_client) + self.configs = ConfigManager(self.http_client) + self.install = InstallManager(self.http_client) + self.uninstall = UninstallManager(self.http_client) + self.update = UpdateManager(self.http_client) + self.disk_array = DiskArrayManager(self.http_client) + self.template = TemplateManager(self.http_client) diff --git a/code/daisyclient/daisyclient/v1/cluster_hosts.py b/code/daisyclient/daisyclient/v1/cluster_hosts.py new file mode 100755 index 00000000..7064774f --- /dev/null +++ b/code/daisyclient/daisyclient/v1/cluster_hosts.py @@ -0,0 +1,80 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
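(Reviewer sketch, not part of the patch: the v1 Client defined above only needs the daisy endpoint and exposes one ready-made manager per attribute. The endpoint and id values are invented; list() and get() are the ClusterManager calls shown in clusters.py further down.)

    from daisyclient.v1 import Client

    client = Client('http://127.0.0.1:19292')
    for cluster in client.clusters.list(page_size=20):
        print('%s  %s' % (cluster.id, cluster.name))
    detail = client.clusters.get('6f7c4d2e-91a4-4d7a-9b1a-0f2c3d4e5f6a')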
+ +from daisyclient.openstack.common.apiclient import base + + +class ClusterHost(base.Resource): + def __repr__(self): + return "" % self._info + + def delete(self): + self.manager.delete(self) + + +class ClusterHostManager(base.ManagerWithFind): + resource_class = ClusterHost + + def get(self, cluster_id, host_id): + url = '/v1/clusters/%s/nodes/%s' % (cluster_id, host_id) + resp, body = self.client.get(url) + member = body['member'] + member['cluster_id'] = cluster_id + return ClusterHost(self, member, loaded=True) + + def list(self, cluster=None, host=None): + pass + # out = [] + # if cluster and host: + # out.extend(self._list_by_cluster_and_host(cluster, host)) + # elif cluster: + # out.extend(self._list_by_cluster(cluster)) + # elif host: + # out.extend(self._list_by_host(host)) + # else: + # pass + # return out + + # def _list_by_cluster_and_host(self, cluster, host): + # url = '/v1/clusters/%s/nodes/%s' % (cluster, host) + # resp, body = self.client.get(url) +# out = [] +# for member in body['members']: +# member['cluster'] = cluster + # out.append(ClusterHost(self, member, loaded=True)) + # return out + # + # def _list_by_cluster(self, cluster): + # url = '/v1/clusters/%s/nodes' % cluster +# resp, body = self.client.get(url) +# out = [] +# for member in body['members']: +# member['cluster_id'] = cluster +# out.append(ClusterHost(self, member, loaded=True)) +# return out + +# def _list_by_host(self, host): + # url = '/v1/multi-clusters/nodes/%s' % host + # resp, body = self.client.get(url) + # out = [] + # for member in body['multi-clusters']: + # member['host_id'] = host + # out.append(ClusterHost(self, member, loaded=True)) + # return out + + def delete(self, cluster_id, host_id): + self._delete("/v1/clusters/%s/nodes/%s" % (cluster_id, host_id)) + + diff --git a/code/daisyclient/daisyclient/v1/clusters.py b/code/daisyclient/daisyclient/v1/clusters.py new file mode 100755 index 00000000..530e9ccb --- /dev/null +++ b/code/daisyclient/daisyclient/v1/clusters.py @@ -0,0 +1,322 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
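(Reviewer note, not part of the patch: ClusterHostManager above addresses members by the (cluster_id, host_id) pair, and its list() is currently a stub whose branches are all commented out. Continuing the invented client from the sketch above.)

    cluster_id, host_id = 'invented-cluster-uuid', 'invented-host-uuid'

    member = client.cluster_hosts.get(cluster_id, host_id)
    member.cluster_id                        # injected into the returned object
    client.cluster_hosts.delete(cluster_id, host_id)
    client.cluster_hosts.list()              # stub: currently returns None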
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ( + 'name', 'description', 'networks', 'deleted', 'nodes','floating_ranges', + 'dns_nameservers','net_l23_provider','base_mac','internal_gateway', + 'internal_cidr', 'external_cidr','gre_id_range', 'vlan_range', + 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', + 'networking_parameters', 'routers', 'auto_scale', 'use_dns' +) + +CREATE_PARAMS = ( + 'id', 'name', 'nodes', 'description', 'networks','floating_ranges', + 'dns_nameservers','net_l23_provider','base_mac','internal_gateway', + 'internal_cidr', 'external_cidr', 'gre_id_range', 'vlan_range', + 'vni_range', 'segmentation_type', 'public_vip', 'logic_networks', + 'networking_parameters', 'routers', 'auto_scale', 'use_dns' +) + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name','auto_scale', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Cluster(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class ClusterManager(base.ManagerWithFind): + resource_class = Cluster + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _cluster_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_cluster_meta_for_user(meta) + + def _cluster_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_cluster_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, cluster, **kwargs): + """Get the metadata for a specific cluster. 
+ + :param cluster: host object or id to look up + :rtype: :class:`Cluster` + """ + cluster_id = base.getid(cluster) + resp, body = self.client.get('/v1/clusters/%s' + % urlparse.quote(str(cluster_id))) + #meta = self._cluster_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. + + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of clusters. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of clusters to return + :param marker: begin returning clusters that appear later in the cluster + list than that represented by this cluster id + :param filters: dict of direct comparison filters that mimics the + structure of an cluster object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Cluster` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/clusters?%s' % urlparse.urlencode(qp) + clusters, resp = self._list(url, "clusters") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for cluster in clusters: + yield cluster + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for cluster in paginate(params, return_request_id): + last_cluster = cluster.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield cluster + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any clusters in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of clusters to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the clusters + return + + # Note(kragniz): there are more clusters to come + params['marker'] = last_cluster + seen_last_page = 0 + + def add(self, **kwargs): + """Add a cluster + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._cluster_meta_to_headers(fields) + resp, body = self.client.post('/v1/clusters', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) + + def delete(self, cluster, **kwargs): + """Delete an cluster.""" + url = "/v1/clusters/%s" % base.getid(cluster) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, cluster, **kwargs): + """Update an cluster + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + #else: + # msg = 'update() got an unexpected keyword argument \'%s\'' + # raise TypeError(msg % field) + + hdrs.update(self._cluster_meta_to_headers(fields)) + url = '/v1/clusters/%s' % base.getid(cluster) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Cluster(self, self._format_cluster_meta_for_user(body['cluster'])) diff --git a/code/daisyclient/daisyclient/v1/components.py b/code/daisyclient/daisyclient/v1/components.py new file mode 100755 index 00000000..fe51b095 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/components.py @@ -0,0 +1,315 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'description', + #NOTE(bcwaldon: an attempt to update 'deleted' will be + # ignored, but we need to support it for backwards- + # compatibility with the legacy client library + 'deleted') + +CREATE_PARAMS = ('id', 'name','description') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Component(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class ComponentManager(base.ManagerWithFind): + resource_class = Component + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _component_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_component_meta_for_user(meta) + + def _component_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_component_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, component, **kwargs): + """Get the metadata for a specific component. 
+ + :param component: host object or id to look up + :rtype: :class:`Component` + """ + component_id = base.getid(component) + resp, body = self.client.get('/v1/components/%s' + % urlparse.quote(str(component_id))) + #meta = self._component_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Component(self, self._format_component_meta_for_user(body['component'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. + + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of components. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of components to return + :param marker: begin returning components that appear later in the component + list than that represented by this component id + :param filters: dict of direct comparison filters that mimics the + structure of an component object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Component` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/components/detail?%s' % urlparse.urlencode(qp) + components, resp = self._list(url, "components") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for component in components: + yield component + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for component in paginate(params, return_request_id): + last_component = component.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield component + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any components in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of components to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the components + return + + # Note(kragniz): there are more components to come + params['marker'] = last_component + seen_last_page = 0 + + def add(self, **kwargs): + """Add a component + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._component_meta_to_headers(fields) + resp, body = self.client.post('/v1/components', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Component(self, self._format_component_meta_for_user(body['component'])) + + def delete(self, component, **kwargs): + """Delete an component.""" + url = "/v1/components/%s" % base.getid(component) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, component, **kwargs): + """Update an component + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._component_meta_to_headers(fields)) + + url = '/v1/components/%s' % base.getid(component) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Component(self, self._format_component_meta_for_user(body['component_meta'])) diff --git a/code/daisyclient/daisyclient/v1/config_files.py b/code/daisyclient/daisyclient/v1/config_files.py new file mode 100755 index 00000000..5862a3c6 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/config_files.py @@ -0,0 +1,282 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'description', 'deleted') + +CREATE_PARAMS = ('id', 'name', 'description') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Config_file(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class Config_fileManager(base.ManagerWithFind): + resource_class = Config_file + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _config_file_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_config_file_meta_for_user(meta) + + def _config_file_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_config_file_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, config_file, **kwargs): + """Get the metadata for a specific config_file. 
+ + :param config_file: image object or id to look up + :rtype: :class:`Config_file` + """ + config_file_id = base.getid(config_file) + resp, body = self.client.get('/v1/config_files/%s' + % urlparse.quote(str(config_file_id))) + #meta = self._config_file_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Config_file(self, meta) + return Config_file(self, self._format_config_file_meta_for_user(body['config_file'])) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of config_files. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of config_files to return + :param marker: begin returning config_files that appear later in the config_file + list than that represented by this config_file id + :param filters: dict of direct comparison filters that mimics the + structure of an config_file object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Config_file` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/config_files/detail?%s' % urlparse.urlencode(qp) + config_files, resp = self._list(url, "config_files") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for config_file in config_files: + yield config_file + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for config_file in paginate(params, return_request_id): + last_config_file = config_file.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield config_file + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_config_file + seen_last_page = 0 + + def add(self, **kwargs): + """Add a config_file + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._config_file_meta_to_headers(fields) + resp, body = self.client.post('/v1/config_files', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Config_file(self, self._format_config_file_meta_for_user(body['config_file'])) + + def delete(self, config_file, **kwargs): + """Delete an config_file.""" + url = "/v1/config_files/%s" % base.getid(config_file) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, config_file, **kwargs): + """Update an config_file + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._config_file_meta_to_headers(fields)) + + url = '/v1/config_files/%s' % base.getid(config_file) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Config_file(self, self._format_config_file_meta_for_user(body['config_file_meta'])) + diff --git a/code/daisyclient/daisyclient/v1/config_sets.py b/code/daisyclient/daisyclient/v1/config_sets.py new file mode 100755 index 00000000..84629313 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/config_sets.py @@ -0,0 +1,321 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'description', 'deleted','cluster','role') + +CREATE_PARAMS = ('id', 'name', 'description','cluster','role') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Config_set(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class Config_setManager(base.ManagerWithFind): + resource_class = Config_set + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _config_set_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_config_set_meta_for_user(meta) + + def _config_set_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_config_set_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, config_set, **kwargs): + """Get the metadata for a specific config_set. 
+ + :param config_set: image object or id to look up + :rtype: :class:`Config_set` + """ + config_set_id = base.getid(config_set) + resp, body = self.client.get('/v1/config_sets/%s' + % urlparse.quote(str(config_set_id))) + #meta = self._config_set_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Config_set(self, meta) + return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of config_sets. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of config_sets to return + :param marker: begin returning config_sets that appear later in the config_set + list than that represented by this config_set id + :param filters: dict of direct comparison filters that mimics the + structure of an config_set object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Config_set` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/config_sets/detail?%s' % urlparse.urlencode(qp) + config_sets, resp = self._list(url, "config_sets") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for config_set in config_sets: + yield config_set + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for config_set in paginate(params, return_request_id): + last_config_set = config_set.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield config_set + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_config_set + seen_last_page = 0 + + def add(self, **kwargs): + """Add a config_set + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._config_set_meta_to_headers(fields) + resp, body = self.client.post('/v1/config_sets', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + + def delete(self, config_set, **kwargs): + """Delete an config_set.""" + url = "/v1/config_sets/%s" % base.getid(config_set) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, config_set, **kwargs): + """Update an config_set + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._config_set_meta_to_headers(fields)) + + url = '/v1/config_sets/%s' % base.getid(config_set) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Config_set(self, self._format_config_set_meta_for_user(body['config_set_meta'])) + + def cluster_config_set_update(self, **kwargs): + """config_interface effect + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs 
= self._config_set_meta_to_headers(fields) + resp, body = self.client.post('/v1/cluster_config_set_update', + headers=hdrs, + data=hdrs) + return Config_set(self, self._format_config_set_meta_for_user(body['config_set'])) + + def cluster_config_set_progress(self, **kwargs): + """config_interface effect + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._config_set_meta_to_headers(fields) + resp, body = self.client.post('/v1/cluster_config_set_progress', + headers=hdrs, + data=hdrs) + return Config_set(self, self._format_config_set_meta_for_user(body)) diff --git a/code/daisyclient/daisyclient/v1/configs.py b/code/daisyclient/daisyclient/v1/configs.py new file mode 100755 index 00000000..f9954a96 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/configs.py @@ -0,0 +1,261 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('section', 'description', 'deleted','config_set_id','config_file_id','key','value','cluster','role','config_set','config') + +CREATE_PARAMS = ('id', 'section', 'description','config_set_id','config_file_id','key','value','cluster','role','config_set','config') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Config(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class ConfigManager(base.ManagerWithFind): + resource_class = Config + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _config_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = 
strutils.bool_from_string(meta[key]) + + return self._format_config_meta_for_user(meta) + + def _config_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_config_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, config, **kwargs): + """Get the metadata for a specific config. + + :param config: image object or id to look up + :rtype: :class:`Config` + """ + config_id = base.getid(config) + resp, body = self.client.get('/v1/configs/%s' + % urlparse.quote(str(config_id))) + #meta = self._config_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Config(self, meta) + return Config(self, self._format_config_meta_for_user(body['config'])) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of configs. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of configs to return + :param marker: begin returning configs that appear later in the config + list than that represented by this config id + :param filters: dict of direct comparison filters that mimics the + structure of an config object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Config` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/configs/detail?%s' % urlparse.urlencode(qp) + configs, resp = self._list(url, "configs") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for config in configs: + yield config + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for config in paginate(params, return_request_id): + last_config = config.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield config + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_config + seen_last_page = 0 + + def add(self, **kwargs): + """Add a config + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + # else: + # msg = 'create() got an unexpected keyword argument \'%s\'' + # raise TypeError(msg % field) + + hdrs = self._config_meta_to_headers(fields) + resp, body = self.client.post('/v1/configs', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + return Config(self, body) + + def delete(self, **kwargs): + """Delete an config.""" + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + hdrs = self._config_meta_to_headers(fields) + url = "/v1/configs_delete" + resp, body = self.client.delete(url,headers=hdrs,data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) diff --git a/code/daisyclient/daisyclient/v1/disk_array.py b/code/daisyclient/daisyclient/v1/disk_array.py new file mode 100755 index 00000000..3d8795c2 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/disk_array.py @@ -0,0 +1,456 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse +from webob.exc import HTTPBadRequest +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +CREATE_SERVICE_DISK_PARAMS = ('service', 'data_ips', 'size', + 'disk_location', 'role_id','lun') +CREATE_CINDER_BACKEND_PARAMS = ('disk_array', 'role_id') +CREATE_CINDER_BACKEND_INTER_PARAMS = ('management_ips', 'data_ips', + 'pools', 'volume_driver', + 'volume_type', 'role_id', + 'user_name','user_pwd') +UPDATE_CINDER_BACKEND_PARAMS = ('id', 'disk_array', 'role_id') +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('id', 'role_id', 'created_at', 'updated_at', 'status') +SERVICE_DISK_UPDATE_PARAMS = CREATE_SERVICE_DISK_PARAMS +OS_REQ_ID_HDR = 'x-openstack-request-id' + + + +class Disk_array(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class DiskArrayManager(base.ManagerWithFind): + resource_class = Disk_array + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + + def _service_disk_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + def _cinder_volume_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_service_disk_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def list(self, **kwargs): + pass + + def get(self, service_disk, **kwargs): + """Get the metadata for a specific service_disk. + + :param service_disk: host object or id to look up + :rtype: :class:`service_disk` + """ + service_disk_id = base.getid(service_disk) + resp, body = self.client.get('/v1/service_disk/%s' + % urlparse.quote(str(service_disk_id))) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. 
+ + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def service_disk_add(self, **kwargs): + """Disk_array a cluster + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_SERVICE_DISK_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'Disk_array() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/service_disk' + + hdrs = self._service_disk_meta_to_headers(fields) + resp, body = self.client.post(url,headers=hdrs,data=hdrs) + return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + + def service_disk_delete(self, id, **kwargs): + """Delete an service_disk.""" + url = "/v1/service_disk/%s" % base.getid(id) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def service_disk_update(self, id, **kwargs): + """Update an service_disk + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in SERVICE_DISK_UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._service_disk_meta_to_headers(fields)) + + url = '/v1/service_disk/%s' % base.getid(id) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + + def service_disk_detail(self, id, **kwargs): + """Get the metadata for a specific service_disk. 
+ + :param service_disk: host object or id to look up + :rtype: :class:`service_disk` + """ + service_disk_id = base.getid(id) + resp, body = self.client.get('/v1/service_disk/%s' + % urlparse.quote(str(service_disk_id))) + + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + + def service_disk_list(self, **kwargs): + """Get a list of service_disks. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of service_disks to return + :param marker: begin returning service_disks that appear later in the service_disk + list than that represented by this service_disk id + :param filters: dict of direct comparison filters that mimics the + structure of an service_disk object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`service_disk` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. + # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/service_disk/list?%s' % urlparse.urlencode(qp) + service_disks, resp = self._list(url, "disk_meta") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for service_disk in service_disks: + yield service_disk + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for service_disk in paginate(params, return_request_id): + last_service_disk = service_disk.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield service_disk + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any service_disks in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of service_disks to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the service_disks + return + + # Note(kragniz): there are more service_disks to come + params['marker'] = last_service_disk + seen_last_page = 0 + + + def cinder_volume_add(self, **kwargs): + """Disk_array a cluster + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_CINDER_BACKEND_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'Disk_array() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + url = '/v1/cinder_volume' + + hdrs = self._service_disk_meta_to_headers(fields) + resp, body = self.client.post(url,headers=hdrs,data=hdrs) + return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta'])) + + def cinder_volume_delete(self, id, **kwargs): + """Delete an cinder_volume.""" + url = "/v1/cinder_volume/%s" % base.getid(id) + resp, body = self.client.delete(url) 
+        return_request_id = kwargs.get('return_req_id', None)
+        if return_request_id is not None:
+            return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+    def cinder_volume_update(self, id, **kwargs):
+        """Update a cinder_volume.
+
+        TODO(bcwaldon): document accepted params
+        """
+        hdrs = {}
+        fields = {}
+        for field in kwargs:
+            if field in CREATE_CINDER_BACKEND_INTER_PARAMS:
+                fields[field] = kwargs[field]
+            elif field == 'return_req_id':
+                continue
+            else:
+                msg = 'update() got an unexpected keyword argument \'%s\''
+                raise TypeError(msg % field)
+
+        hdrs.update(self._cinder_volume_meta_to_headers(fields))
+
+        url = '/v1/cinder_volume/%s' % base.getid(id)
+        resp, body = self.client.put(url, headers=None, data=hdrs)
+        return_request_id = kwargs.get('return_req_id', None)
+        if return_request_id is not None:
+            return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+        return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta']))
+
+    def cinder_volume_detail(self, id, **kwargs):
+        """Get the metadata for a specific cinder_volume.
+
+        :param cinder_volume: cinder_volume object or id to look up
+        :rtype: :class:`Disk_array`
+        """
+
+        cinder_volume_id = base.getid(id)
+        resp, body = self.client.get('/v1/cinder_volume/%s'
+                                     % urlparse.quote(str(cinder_volume_id)))
+
+        return_request_id = kwargs.get('return_req_id', None)
+        if return_request_id is not None:
+            return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None))
+
+        return Disk_array(self, self._format_service_disk_meta_for_user(body['disk_meta']))
+
+    def cinder_volume_list(self, **kwargs):
+        """Get a list of cinder_volumes.
+
+        :param page_size: number of items to request in each paginated request
+        :param limit: maximum number of cinder_volumes to return
+        :param marker: begin returning cinder_volumes that appear later in the
+                       cinder_volume list than that represented by this
+                       cinder_volume id
+        :param filters: dict of direct comparison filters that mimics the
+                        structure of a cinder_volume object
+        :param return_request_id: If an empty list is provided, populate this
+                              list with the request ID value from the header
+                              x-openstack-request-id
+        :rtype: list of :class:`Disk_array`
+        """
+        absolute_limit = kwargs.get('limit')
+        page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE)
+
+        def paginate(qp, return_request_id=None):
+            for param, value in six.iteritems(qp):
+                if isinstance(value, six.string_types):
+                    # Note(flaper87) Url encoding should
+                    # be moved inside http utils, at least
+                    # shouldn't be here.
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/cinder_volume/list?%s' % urlparse.urlencode(qp) + cinder_volumes, resp = self._list(url, "disk_meta") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for cinder_volume in cinder_volumes: + yield cinder_volume + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for cinder_volume in paginate(params, return_request_id): + last_cinder_volume = cinder_volume.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield cinder_volume + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any service_disks in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of service_disks to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the service_disks + return + + # Note(kragniz): there are more service_disks to come + params['marker'] = last_cinder_volume + seen_last_page = 0 \ No newline at end of file diff --git a/code/daisyclient/daisyclient/v1/hosts.py b/code/daisyclient/daisyclient/v1/hosts.py new file mode 100755 index 00000000..f85a33ad --- /dev/null +++ b/code/daisyclient/daisyclient/v1/hosts.py @@ -0,0 +1,446 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
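+
+# Usage sketch (illustrative only): assuming the daisy v1 client mirrors
+# glanceclient and exposes this manager as `client.hosts` -- the import
+# path, attribute name, endpoint and field values below are assumptions,
+# not part of this module:
+#
+#     from daisyclient.v1.client import Client
+#     client = Client(endpoint='http://127.0.0.1:19292')
+#     host = client.hosts.add(name='host-1', description='compute node',
+#                             ipmi_addr='10.0.0.10')
+#     for h in client.hosts.list(page_size=20):
+#         print(h.id, h.name)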
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'resource_type', 'dmi_uuid', 'role', 'cluster', 'root_disk','root_lv_size','swap_lv_size','isolcpus','hugepagesize','hugepages','root_pwd','os_version', 'os_status', 'interfaces', 'is_deployment', 'description', 'deleted', 'status','ipmi_user','ipmi_passwd','ipmi_addr', 'ip', 'status', 'user', 'passwd') + +CREATE_PARAMS = ('id', 'name', 'description', 'resource_type', 'dmi_uuid','role', 'cluster', 'os_version', 'os_status', 'interfaces', 'is_deployment','status','ipmi_user','ipmi_passwd','ipmi_addr', 'ip', 'status', 'user', 'passwd') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'cluster_id', 'created_at', 'updated_at', 'status') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Host(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class HostManager(base.ManagerWithFind): + resource_class = Host + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _host_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_host_meta_for_user(meta) + + def _host_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_host_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, host, **kwargs): + """Get the metadata for a specific host. 
+ + :param host: image object or id to look up + :rtype: :class:`Host` + """ + host_id = base.getid(host) + resp, body = self.client.get('/v1/nodes/%s' + % urlparse.quote(str(host_id))) + #meta = self._host_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Host(self, self._format_host_meta_for_user(body['host'])) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of hosts. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of hosts to return + :param marker: begin returning hosts that appear later in the host + list than that represented by this host id + :param filters: dict of direct comparison filters that mimics the + structure of an host object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Host` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/nodes?%s' % urlparse.urlencode(qp) + hosts, resp = self._list(url, "nodes") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for host in hosts: + yield host + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for host in paginate(params, return_request_id): + last_host = host.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield host + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_host + seen_last_page = 0 + + def add(self, **kwargs): + """Add a host + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._host_meta_to_headers(fields) + + resp, body = self.client.post('/v1/nodes', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Host(self, self._format_host_meta_for_user(body['host'])) + + def delete(self, host, **kwargs): + """Delete an host.""" + url = "/v1/nodes/%s" % base.getid(host) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, host, **kwargs): + """Update an host + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + #else: + # msg = 'update() got an unexpected keyword argument \'%s\'' + # raise TypeError(msg % field) + + hdrs.update(self._host_meta_to_headers(fields)) + + url = '/v1/nodes/%s' % base.getid(host) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Host(self, self._format_host_meta_for_user(body['host_meta'])) + + def discover_host(self, **kwargs): + """discovery host + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + + url = '/v1/discover_host/' + resp, body = self.client.post(url, headers=hdrs, data=hdrs) + + return Host(self, self._format_host_meta_for_user(body)) + + def add_discover_host(self, **kwargs): + """Add a discover host + + TODO(bcwaldon): document accepted params + """ + + fields = {} + 
for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._host_meta_to_headers(fields) + + resp, body = self.client.post('/v1/discover/nodes', + headers=hdrs, + data=hdrs) + + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Host(self, self._format_host_meta_for_user(body['host'])) + + def delete_discover_host(self, host, **kwargs): + """Delete a discover host.""" + url = "/v1/discover/nodes/%s" % base.getid(host) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def list_discover_host(self, **kwargs): + """Get a list of hosts. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of hosts to return + :param marker: begin returning hosts that appear later in the host + list than that represented by this host id + :param filters: dict of direct comparison filters that mimics the + structure of an host object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Host` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/discover/nodes?%s' % urlparse.urlencode(qp) + hosts, resp = self._list(url, "nodes") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for host in hosts: + yield host + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for host in paginate(params, return_request_id): + last_host = host.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield host + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any hosts in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of hosts to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the hosts + return + + # Note(kragniz): there are more hosts to come + params['marker'] = last_host + seen_last_page = 0 + + def update_discover_host(self, host, **kwargs): + """Add a discover host + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + #else: + # msg = 'update() got an unexpected keyword argument \'%s\'' + # raise TypeError(msg % field) + + hdrs.update(self._host_meta_to_headers(fields)) + + url = '/v1/discover/nodes/%s' % base.getid(host) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Host(self, self._format_host_meta_for_user(body['host'])) + + def get_discover_host_detail(self, host_id, **kwargs): + ''' + ''' + resp, body = self.client.get('/v1/discover/nodes/%s' % host_id) + #meta = self._host_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Host(self, self._format_host_meta_for_user(body['host'])) \ No newline at end of file diff --git a/code/daisyclient/daisyclient/v1/image_members.py b/code/daisyclient/daisyclient/v1/image_members.py new file mode 100755 index 00000000..f78fe513 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/image_members.py @@ -0,0 +1,103 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
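+
+# Usage sketch (illustrative only): assuming the v1 client exposes this
+# manager as `client.image_members`; the attribute name, image id and
+# tenant id below are assumptions:
+#
+#     client.image_members.create(image_id, tenant_id, can_share=True)
+#     for member in client.image_members.list(image=image_id):
+#         print(member.id, member.can_share)
+#     client.image_members.delete(image_id, tenant_id)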
+ +from daisyclient.openstack.common.apiclient import base + + +class ImageMember(base.Resource): + def __repr__(self): + return "" % self._info + + @property + def id(self): + return self.member_id + + def delete(self): + self.manager.delete(self) + + +class ImageMemberManager(base.ManagerWithFind): + resource_class = ImageMember + + def get(self, image, member_id): + image_id = base.getid(image) + url = '/v1/images/%s/members/%s' % (image_id, member_id) + resp, body = self.client.get(url) + member = body['member'] + member['image_id'] = image_id + return ImageMember(self, member, loaded=True) + + def list(self, image=None, member=None): + out = [] + if image and member: + try: + out.append(self.get(image, member)) + #TODO(bcwaldon): narrow this down to 404 + except Exception: + pass + elif image: + out.extend(self._list_by_image(image)) + elif member: + out.extend(self._list_by_member(member)) + else: + #TODO(bcwaldon): figure out what is appropriate to do here as we + # are unable to provide the requested response + pass + return out + + def _list_by_image(self, image): + image_id = base.getid(image) + url = '/v1/images/%s/members' % image_id + resp, body = self.client.get(url) + out = [] + for member in body['members']: + member['image_id'] = image_id + out.append(ImageMember(self, member, loaded=True)) + return out + + def _list_by_member(self, member): + member_id = base.getid(member) + url = '/v1/shared-images/%s' % member_id + resp, body = self.client.get(url) + out = [] + for member in body['shared_images']: + member['member_id'] = member_id + out.append(ImageMember(self, member, loaded=True)) + return out + + def delete(self, image_id, member_id): + self._delete("/v1/images/%s/members/%s" % (image_id, member_id)) + + def create(self, image, member_id, can_share=False): + """Creates an image.""" + url = '/v1/images/%s/members/%s' % (base.getid(image), member_id) + body = {'member': {'can_share': can_share}} + self.client.put(url, data=body) + + def replace(self, image, members): + memberships = [] + for member in members: + try: + obj = { + 'member_id': member.member_id, + 'can_share': member.can_share, + } + except AttributeError: + obj = {'member_id': member['member_id']} + if 'can_share' in member: + obj['can_share'] = member['can_share'] + memberships.append(obj) + url = '/v1/images/%s/members' % base.getid(image) + self.client.put(url, data={'memberships': memberships}) diff --git a/code/daisyclient/daisyclient/v1/images.py b/code/daisyclient/daisyclient/v1/images.py new file mode 100755 index 00000000..39ee24aa --- /dev/null +++ b/code/daisyclient/daisyclient/v1/images.py @@ -0,0 +1,361 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
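+
+# Usage sketch (illustrative only): assuming the v1 client exposes this
+# manager as `client.images`; the attribute name, file path and field
+# values below are assumptions:
+#
+#     with open('/tmp/cirros.qcow2', 'rb') as f:
+#         image = client.images.create(name='cirros', disk_format='qcow2',
+#                                      container_format='bare',
+#                                      is_public=True, data=f)
+#     for img in client.images.list(filters={'status': 'active'}):
+#         print(img.id, img.name)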
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'disk_format', 'container_format', 'min_disk', + 'min_ram', 'owner', 'size', 'is_public', 'protected', + 'location', 'checksum', 'copy_from', 'properties', + #NOTE(bcwaldon: an attempt to update 'deleted' will be + # ignored, but we need to support it for backwards- + # compatibility with the legacy client library + 'deleted') + +CREATE_PARAMS = UPDATE_PARAMS + ('id', 'store') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'status', 'container_format', 'disk_format', + 'size', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Image(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class ImageManager(base.ManagerWithFind): + resource_class = Image + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _image_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_image_meta_for_user(meta) + + def _image_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy.pop('properties', {})): + headers['x-image-meta-property-%s' % key] = utils.to_str(value) + for key, value in six.iteritems(fields_copy): + headers['x-image-meta-%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, image, **kwargs): + """Get the metadata for a specific image. + + :param image: image object or id to look up + :rtype: :class:`Image` + """ + image_id = base.getid(image) + resp, body = self.client.head('/v1/images/%s' + % urlparse.quote(str(image_id))) + meta = self._image_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + return Image(self, meta) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. 
+ + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + properties = filters.pop('properties', {}) + for key, value in properties.items(): + params['property-%s' % key] = value + params.update(filters) + if parameters.get('owner') is not None: + params['is_public'] = None + if 'is_public' in parameters: + params['is_public'] = parameters['is_public'] + + return params + + def list(self, **kwargs): + """Get a list of images. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of images to return + :param marker: begin returning images that appear later in the image + list than that represented by this image id + :param filters: dict of direct comparison filters that mimics the + structure of an image object + :param owner: If provided, only images with this owner (tenant id) + will be listed. An empty string ('') matches ownerless + images. + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Image` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + owner = kwargs.get('owner', None) + + def filter_owner(owner, image): + # If client side owner 'filter' is specified + # only return images that match 'owner'. + if owner is None: + # Do not filter based on owner + return False + if (not hasattr(image, 'owner')) or image.owner is None: + # ownerless image + return not (owner == '') + else: + return not (image.owner == owner) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/images/detail?%s' % urlparse.urlencode(qp) + images, resp = self._list(url, "images") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for image in images: + yield image + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for image in paginate(params, return_request_id): + last_image = image.id + + if filter_owner(owner, image): + # Note(kragniz): ignore this image + filtered += 1 + continue + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield image + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any images in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of images to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the images + return + + # Note(kragniz): there are more images to come + params['marker'] = last_image + seen_last_page = 0 + + def delete(self, image, **kwargs): + """Delete an image.""" + url = "/v1/images/%s" % base.getid(image) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def create(self, **kwargs): + """Create an image + + TODO(bcwaldon): document accepted params + """ + image_data = kwargs.pop('data', None) + if image_data is not None: + image_size = utils.get_file_size(image_data) + if image_size is not None: + kwargs.setdefault('size', image_size) + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + copy_from = fields.pop('copy_from', None) + hdrs = self._image_meta_to_headers(fields) + if copy_from is not None: + hdrs['x-glance-api-copy-from'] = copy_from + + resp, body = self.client.post('/v1/images', + headers=hdrs, + data=image_data) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Image(self, self._format_image_meta_for_user(body['image'])) + + def update(self, image, **kwargs): + """Update an image + + TODO(bcwaldon): document accepted params + """ + image_data = kwargs.pop('data', None) + if image_data is not None: + image_size = utils.get_file_size(image_data) + if image_size is not None: + kwargs.setdefault('size', image_size) + + hdrs = {} + purge_props = 'false' + purge_props_bool = kwargs.pop('purge_props', None) + if purge_props_bool: + purge_props = 'true' + + hdrs['x-glance-registry-purge-props'] = purge_props + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + copy_from = fields.pop('copy_from', None) + hdrs.update(self._image_meta_to_headers(fields)) + if copy_from is not None: + 
hdrs['x-glance-api-copy-from'] = copy_from + + url = '/v1/images/%s' % base.getid(image) + resp, body = self.client.put(url, headers=hdrs, data=image_data) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Image(self, self._format_image_meta_for_user(body['image'])) diff --git a/code/daisyclient/daisyclient/v1/install.py b/code/daisyclient/daisyclient/v1/install.py new file mode 100755 index 00000000..bac60a8a --- /dev/null +++ b/code/daisyclient/daisyclient/v1/install.py @@ -0,0 +1,129 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import os + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base +#import daisy.queue_process as queue +#from daisy.queue_process import exec_cmd + +CREATE_PARAMS = ('cluster_id', 'version_id','deployment_interface') +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Install(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class InstallManager(base.ManagerWithFind): + resource_class = Install + + def _install_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. 
+ + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_install_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def list(self, **kwargs): + pass + + def install(self, **kwargs): + """Install a cluster + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'install() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + # if fields.has_key("version_id"): + # url = '/v1/install/%s/version/%s' % (fields['cluster_id'], fields['version_id']) + # else: + url = '/v1/install' + + hdrs = self._install_meta_to_headers(fields) + resp, body = self.client.post(url,headers=hdrs,data=hdrs) + return Install(self, self._format_install_meta_for_user(body)) + + def export_db(self, **kwargs): + """export database to tecs and HA config file + + TODO(bcwaldon): document accepted params + """ + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'export_db() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/export_db' + hdrs = self._install_meta_to_headers(fields) + resp, body = self.client.post(url,headers=hdrs,data=hdrs) + return Install(self, self._format_install_meta_for_user(body)) + + def disk_array_update(self, cluster, **kwargs): + UPDATE_DISK_ARRAY_PARAMS = [] + fields = {} + for field in kwargs: + if field in UPDATE_DISK_ARRAY_PARAMS: + fields[field] = kwargs[field] + else: + msg = 'disk_array_update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + url = '/v1/disk_array/%s' % base.getid(cluster) + hdrs = self._install_meta_to_headers(fields) + resp, body = self.client.post(url,headers=hdrs,data=hdrs) + return Install(self, self._format_install_meta_for_user(body)) \ No newline at end of file diff --git a/code/daisyclient/daisyclient/v1/networks.py b/code/daisyclient/daisyclient/v1/networks.py new file mode 100755 index 00000000..49f0be9c --- /dev/null +++ b/code/daisyclient/daisyclient/v1/networks.py @@ -0,0 +1,309 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
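+
+# Usage sketch (illustrative only): assuming the v1 client exposes this
+# manager as `client.networks`; the attribute name and field values below
+# are assumptions:
+#
+#     net = client.networks.add(name='MANAGEMENT', cluster_id=cluster_id,
+#                               cidr='192.168.1.0/24', vlan_id='100')
+#     for net in client.networks.list(filters={'cluster_id': cluster_id}):
+#         print(net.id, net.name)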
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'name', 'cluster_id','ip_ranges', 'vlan_start','vlan_end','gateway','cidr', 'description', 'type','ml2_type','network_type','physnet_name','capability') + +CREATE_PARAMS = ('alias', 'mtu', 'vlan_id', 'ip', 'id', 'name', 'cluster_id','ip_ranges', 'vlan_start','vlan_end','gateway','cidr', 'description', 'type', 'ml2_type','network_type','physnet_name','capability') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'cluster_id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Network(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class NetworkManager(base.ManagerWithFind): + resource_class = Network + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _network_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_network_meta_for_user(meta) + + def _network_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_network_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, network, **kwargs): + """Get the metadata for a specific network. 
+ + :param network: host object or id to look up + :rtype: :class:`Network` + """ + network_id = base.getid(network) + resp, body = self.client.get('/v1/networks/%s' + % urlparse.quote(str(network_id))) + #meta = self._network_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + return Network(self, self._format_network_meta_for_user(body['network'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. + + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of networks. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of networks to return + :param marker: begin returning networks that appear later in the network + list than that represented by this network id + :param filters: dict of direct comparison filters that mimics the + structure of an network object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Network` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + url = '/v1/clusters/%s/networks?%s' % (qp['cluster_id'], urlparse.urlencode(qp)) \ + if qp.get('cluster_id', None) else '/v1/networks?%s' % urlparse.urlencode(qp) + networks, resp = self._list(url, "networks") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for network in networks: + yield network + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for network in paginate(params, return_request_id): + last_network = network.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield network + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any networks in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of networks to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the networks + return + + # Note(kragniz): there are more networks to come + params['marker'] = last_network + seen_last_page = 0 + + def add(self, **kwargs): + """Add a network + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._network_meta_to_headers(fields) + resp, body = self.client.post('/v1/networks', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Network(self, self._format_network_meta_for_user(body['network'])) + + def delete(self, network, **kwargs): + """Delete an network.""" + url = "/v1/networks/%s" % base.getid(network) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, network, **kwargs): + """Update an network + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._network_meta_to_headers(fields)) + url = '/v1/networks/%s' % base.getid(network) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Network(self, self._format_network_meta_for_user(body['network_meta'])) diff --git a/code/daisyclient/daisyclient/v1/param_helper.py b/code/daisyclient/daisyclient/v1/param_helper.py new file mode 100755 index 00000000..3e860db5 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/param_helper.py @@ -0,0 +1,69 @@ +# -*- coding:utf-8 -*- + +import os + +def _read_template_file(args): + template_file = 
args.params_file_path + if not os.path.exists(template_file): + print("Params_file not exist or permission deiny.") + return + with open(template_file) as tfp: + params = ''.join(tfp.read().replace("\\'", "").split(" ")).replace("\n", "") + return dict(eval(params)) + +CLUSTER_ADD_PARAMS_FILE = { + 'description': 'desc', + 'name': "test", + 'routers': [{ + 'description': 'router1', + 'external_logic_network': 'flat1', + 'name': 'router1', + 'subnets': ['subnet2', 'subnet10']}], + 'networks': [], + 'nodes': [], + 'logic_networks': [{ + 'name': 'internal1', + 'physnet_name': 'PRIVATE1', + 'segmentation_id': 200, + 'segmentation_type': 'vlan', + 'shared': True, + 'subnets': [{'cidr': '192.168.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.1.2', + '192.168.1.200']], + 'gateway': '192.168.1.1', + 'name': 'subnet2'}, + {'cidr': '172.16.1.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['172.16.1.130', + '172.16.1.150'], + ['172.16.1.151', + '172.16.1.254']], + 'gateway': '172.16.1.1', + 'name': 'subnet10'}], + 'type': 'internal'}, + {'name': 'flat1', + 'physnet_name': 'physnet1', + 'segmentation_type': 'flat', + 'segmentation_id': -1, + 'shared': True, + 'subnets': [{'cidr': '192.168.2.0/24', + 'dns_nameservers': ['8.8.4.4', + '8.8.8.8'], + 'floating_ranges': [['192.168.2.130', + '192.168.2.254']], + 'gateway': '192.168.2.1', + 'name': 'subnet123'}], + 'type': 'external'} + ], + 'networking_parameters':{ + 'base_mac': 'fa:16:3e:00:00:00', + 'gre_id_range': [2, 4094], + 'net_l23_provider': 'ovs', + 'public_vip': '172.16.0.3', + 'segmentation_type': 'vlan,flat,vxlan,gre', + 'vlan_range': [2, 4094], + 'vni_range': [2, 4094]} +} diff --git a/code/daisyclient/daisyclient/v1/roles.py b/code/daisyclient/daisyclient/v1/roles.py new file mode 100755 index 00000000..4bd708f0 --- /dev/null +++ b/code/daisyclient/daisyclient/v1/roles.py @@ -0,0 +1,321 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
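+
+# Usage sketch (illustrative only): assuming the v1 client exposes this
+# manager as `client.roles`; the attribute name and field values below are
+# assumptions:
+#
+#     role = client.roles.add(name='CONTROLLER_HA', cluster_id=cluster_id,
+#                             description='controller role')
+#     for r in client.roles.list(filters={'cluster_id': cluster_id}):
+#         print(r.id, r.name, r.status)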
+ +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse +from webob.exc import HTTPBadRequest +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'description','status','progress','config_set_id', + 'nodes','services', 'cluster_id','type','vip', 'glance_lv_size', + 'deployment_backend', + #NOTE(bcwaldon: an attempt to update 'deleted' will be + # ignored, but we need to support it for backwards- + # compatibility with the legacy client library + 'deleted', 'db_lv_size', 'nova_lv_size', 'disk_location', + 'ntp_server', 'role_type', 'db_vip', 'glance_vip', 'public_vip', 'mongodb_vip') + +CREATE_PARAMS = ('id', 'name','description','status','progress','config_set_id', + 'nodes', 'services', 'cluster_id', 'type', 'vip', + 'glance_lv_size', 'db_vip', 'glance_vip', 'public_vip', 'mongodb_vip', 'deployment_backend', + 'db_lv_size', 'nova_lv_size', 'disk_location', 'role_type') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Role(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class RoleManager(base.ManagerWithFind): + resource_class = Role + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _role_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_role_meta_for_user(meta) + + def _role_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_role_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, role, **kwargs): + """Get the metadata for a specific role. 
+ + :param role: host object or id to look up + :rtype: :class:`Role` + """ + role_id = base.getid(role) + resp, body = self.client.get('/v1/roles/%s' + % urlparse.quote(str(role_id))) + #meta = self._role_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Role(self, self._format_role_meta_for_user(body['role'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. + + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of roles. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of roles to return + :param marker: begin returning roles that appear later in the role + list than that represented by this role id + :param filters: dict of direct comparison filters that mimics the + structure of an role object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Role` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/roles/detail?%s' % urlparse.urlencode(qp) + roles, resp = self._list(url, "roles") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for role in roles: + yield role + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for role in paginate(params, return_request_id): + last_role = role.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield role + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any roles in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of roles to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the roles + return + + # Note(kragniz): there are more roles to come + params['marker'] = last_role + seen_last_page = 0 + + def add(self, **kwargs): + """Add a role + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._role_meta_to_headers(fields) + resp, body = self.client.post('/v1/roles', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Role(self, self._format_role_meta_for_user(body['role'])) + + def delete(self, role, **kwargs): + """Delete an role.""" + url = "/v1/roles/%s" % base.getid(role) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, role, **kwargs): + """Update an role + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._role_meta_to_headers(fields)) + + url = '/v1/roles/%s' % base.getid(role) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Role(self, self._format_role_meta_for_user(body['role_meta'])) diff --git a/code/daisyclient/daisyclient/v1/services.py b/code/daisyclient/daisyclient/v1/services.py new file mode 100755 index 00000000..9ad0015c --- /dev/null +++ b/code/daisyclient/daisyclient/v1/services.py @@ -0,0 +1,315 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name', 'description','component_id','backup_type', + #NOTE(bcwaldon: an attempt to update 'deleted' will be + # ignored, but we need to support it for backwards- + # compatibility with the legacy client library + 'deleted') + +CREATE_PARAMS = ('id', 'name','description','component_id','backup_type') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Service(base.Resource): + def __repr__(self): + return "" % self._info + + def update(self, **fields): + self.manager.update(self, **fields) + + def delete(self, **kwargs): + return self.manager.delete(self) + + def data(self, **kwargs): + return self.manager.data(self, **kwargs) + + +class ServiceManager(base.ManagerWithFind): + resource_class = Service + + def _list(self, url, response_key, obj_class=None, body=None): + resp, body = self.client.get(url) + + if obj_class is None: + obj_class = self.resource_class + + data = body[response_key] + return ([obj_class(self, res, loaded=True) for res in data if res], + resp) + + def _service_meta_from_headers(self, headers): + meta = {'properties': {}} + safe_decode = encodeutils.safe_decode + for key, value in six.iteritems(headers): + value = safe_decode(value, incoming='utf-8') + if key.startswith('x-image-meta-property-'): + _key = safe_decode(key[22:], incoming='utf-8') + meta['properties'][_key] = value + elif key.startswith('x-image-meta-'): + _key = safe_decode(key[13:], incoming='utf-8') + meta[_key] = value + + for key in ['is_public', 'protected', 'deleted']: + if key in meta: + meta[key] = strutils.bool_from_string(meta[key]) + + return self._format_service_meta_for_user(meta) + + def _service_meta_to_headers(self, fields): + headers = {} + fields_copy = copy.deepcopy(fields) + + # NOTE(flaper87): Convert to str, headers + # that are not instance of basestring. All + # headers will be encoded later, before the + # request is sent. + + for key, value in six.iteritems(fields_copy): + headers['%s' % key] = utils.to_str(value) + return headers + + @staticmethod + def _format_image_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + @staticmethod + def _format_service_meta_for_user(meta): + for key in ['size', 'min_ram', 'min_disk']: + if key in meta: + try: + meta[key] = int(meta[key]) if meta[key] else 0 + except ValueError: + pass + return meta + + def get(self, service, **kwargs): + """Get the metadata for a specific service. 
+ + :param service: host object or id to look up + :rtype: :class:`Service` + """ + service_id = base.getid(service) + resp, body = self.client.get('/v1/services/%s' + % urlparse.quote(str(service_id))) + #meta = self._service_meta_from_headers(resp.headers) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + #return Host(self, meta) + return Service(self, self._format_service_meta_for_user(body['service'])) + + def data(self, image, do_checksum=True, **kwargs): + """Get the raw data for a specific image. + + :param image: image object or id to look up + :param do_checksum: Enable/disable checksum validation + :rtype: iterable containing image data + """ + image_id = base.getid(image) + resp, body = self.client.get('/v1/images/%s' + % urlparse.quote(str(image_id))) + content_length = int(resp.headers.get('content-length', 0)) + checksum = resp.headers.get('x-image-meta-checksum', None) + if do_checksum and checksum is not None: + body = utils.integrity_iter(body, checksum) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return utils.IterableWithLength(body, content_length) + + def _build_params(self, parameters): + params = {'limit': parameters.get('page_size', DEFAULT_PAGE_SIZE)} + + if 'marker' in parameters: + params['marker'] = parameters['marker'] + + sort_key = parameters.get('sort_key') + if sort_key is not None: + if sort_key in SORT_KEY_VALUES: + params['sort_key'] = sort_key + else: + raise ValueError('sort_key must be one of the following: %s.' + % ', '.join(SORT_KEY_VALUES)) + + sort_dir = parameters.get('sort_dir') + if sort_dir is not None: + if sort_dir in SORT_DIR_VALUES: + params['sort_dir'] = sort_dir + else: + raise ValueError('sort_dir must be one of the following: %s.' + % ', '.join(SORT_DIR_VALUES)) + + filters = parameters.get('filters', {}) + params.update(filters) + + return params + + def list(self, **kwargs): + """Get a list of services. + + :param page_size: number of items to request in each paginated request + :param limit: maximum number of services to return + :param marker: begin returning services that appear later in the service + list than that represented by this service id + :param filters: dict of direct comparison filters that mimics the + structure of an service object + :param return_request_id: If an empty list is provided, populate this + list with the request ID value from the header + x-openstack-request-id + :rtype: list of :class:`Service` + """ + absolute_limit = kwargs.get('limit') + page_size = kwargs.get('page_size', DEFAULT_PAGE_SIZE) + + def paginate(qp, return_request_id=None): + for param, value in six.iteritems(qp): + if isinstance(value, six.string_types): + # Note(flaper87) Url encoding should + # be moved inside http utils, at least + # shouldn't be here. 
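(Editorial sketch; not part of the patch.) `ServiceManager.list()` pages through `/v1/services/detail` with marker-based pagination: it requests `page_size` items per call and stops once `limit` items have been yielded or a short page comes back. A consumption sketch, assuming `gc` is a constructed daisyclient v1 client and that `component_id` is an accepted filter field (both assumptions):

    # Iterate services 20 per request, stopping after 50 in total.
    for svc in gc.services.list(page_size=20, limit=50,
                                filters={'component_id': component_id}):
        print(svc.id, svc.name)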
+ # + # Making sure all params are str before + # trying to encode them + qp[param] = encodeutils.safe_decode(value) + + url = '/v1/services/detail?%s' % urlparse.urlencode(qp) + services, resp = self._list(url, "services") + + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + for service in services: + yield service + + return_request_id = kwargs.get('return_req_id', None) + + params = self._build_params(kwargs) + + seen = 0 + while True: + seen_last_page = 0 + filtered = 0 + for service in paginate(params, return_request_id): + last_service = service.id + + if (absolute_limit is not None and + seen + seen_last_page >= absolute_limit): + # Note(kragniz): we've seen enough images + return + else: + seen_last_page += 1 + yield service + + seen += seen_last_page + + if seen_last_page + filtered == 0: + # Note(kragniz): we didn't get any services in the last page + return + + if absolute_limit is not None and seen >= absolute_limit: + # Note(kragniz): reached the limit of services to return + return + + if page_size and seen_last_page + filtered < page_size: + # Note(kragniz): we've reached the last page of the services + return + + # Note(kragniz): there are more services to come + params['marker'] = last_service + seen_last_page = 0 + + def add(self, **kwargs): + """Add a service + + TODO(bcwaldon): document accepted params + """ + + fields = {} + for field in kwargs: + if field in CREATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'create() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs = self._service_meta_to_headers(fields) + resp, body = self.client.post('/v1/services', + headers=hdrs, + data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Service(self, self._format_service_meta_for_user(body['service'])) + + def delete(self, service, **kwargs): + """Delete an service.""" + url = "/v1/services/%s" % base.getid(service) + resp, body = self.client.delete(url) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + def update(self, service, **kwargs): + """Update an service + + TODO(bcwaldon): document accepted params + """ + hdrs = {} + fields = {} + for field in kwargs: + if field in UPDATE_PARAMS: + fields[field] = kwargs[field] + elif field == 'return_req_id': + continue + else: + msg = 'update() got an unexpected keyword argument \'%s\'' + raise TypeError(msg % field) + + hdrs.update(self._service_meta_to_headers(fields)) + + url = '/v1/services/%s' % base.getid(service) + resp, body = self.client.put(url, headers=None, data=hdrs) + return_request_id = kwargs.get('return_req_id', None) + if return_request_id is not None: + return_request_id.append(resp.headers.get(OS_REQ_ID_HDR, None)) + + return Service(self, self._format_service_meta_for_user(body['service_meta'])) diff --git a/code/daisyclient/daisyclient/v1/shell.py b/code/daisyclient/daisyclient/v1/shell.py new file mode 100755 index 00000000..739a171f --- /dev/null +++ b/code/daisyclient/daisyclient/v1/shell.py @@ -0,0 +1,2046 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import print_function + +import copy +import functools +import pprint +import os +import six +import sys +import json + +from oslo_utils import encodeutils +from oslo_utils import strutils + +from daisyclient.common import progressbar +from daisyclient.common import utils +from daisyclient import exc +import daisyclient.v1.hosts +import daisyclient.v1.clusters +import daisyclient.v1.cluster_hosts +import daisyclient.v1.template +import daisyclient.v1.components +import daisyclient.v1.services +import daisyclient.v1.roles +import daisyclient.v1.config_files +import daisyclient.v1.config_sets +import daisyclient.v1.networks +import daisyclient.v1.configs +import daisyclient.v1.uninstall +import daisyclient.v1.update +import daisyclient.v1.disk_array +import daisyclient.v1.template +from daisyclient.v1 import param_helper + +_bool_strict = functools.partial(strutils.bool_from_string, strict=True) + + +def _daisy_show(daisy, max_column_width=80): + info = copy.deepcopy(daisy._info) + exclusive_field = ('deleted', 'deleted_at') + for field in exclusive_field: + if info.has_key(field): + info.pop(field) + utils.print_dict(info, max_column_width=max_column_width) + + +@utils.arg('name', metavar='', + help='node name to be added.') +@utils.arg('description', metavar='', + help='node description to be added.') +@utils.arg('--resource-type', metavar='', + help='node resource type to be added, supported type are "baremetal", "server" and "docker".\ + "baremetal" is traditional physical server ,\ + "server" is virtual machine and \ + "docker" is container created by docker.') +@utils.arg('--dmi-uuid', metavar='', + help='node dmi uuid to be added.') +@utils.arg('--ipmi-user', metavar='', + help='ipmi user name to be added.') +@utils.arg('--ipmi-passwd', metavar='', + help='ipmi user of password to be added.') +@utils.arg('--ipmi-addr', metavar='', + help='ipmi ip to be added.') +@utils.arg('--role', metavar='',nargs='+', + help='name of node role to be added.') +#@utils.arg('--status', metavar='', +# help='node status to be added.') +@utils.arg('--cluster', metavar='', + help='id of cluster that the node will be added.') +@utils.arg('--os-version', metavar='', + help='os version of the host.') +@utils.arg('--os-status', metavar='', + help='os status of the host.') +@utils.arg('--interfaces', metavar='', + nargs='+', + help='node network interface detail, ip must be given if assigned_networks is empty,\ + and cluster must be given if assigned_networks is not empty.') +def do_host_add(gc, args): + """Add a host.""" + if args.cluster: + cluster = utils.find_resource(gc.clusters, args.cluster) + if cluster and cluster.deleted: + msg = "No cluster with an ID of '%s' exists." % cluster.id + raise exc.CommandError(msg) + # if args.role: + # role = utils.find_resource(gc.roles, args.role) + # if role and role.deleted: + # msg = "No role with an ID of '%s' exists." 
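(Editorial note; not part of the patch.) Each `--interfaces` value accepted by `do_host_add` is parsed just below as comma-separated `key=value` pairs; `assigned_networks` entries are `name:ip` pairs joined by `_`, and `slaves` is a `_`-separated NIC list. As a sketch, a value such as

    name=eth0,mac=00:0c:29:aa:bb:cc,ip=192.168.1.10,netmask=255.255.255.0,assigned_networks=MANAGEMENT:192.168.1.10_STORAGE:172.16.1.10

(all addresses are placeholders) would become roughly this structure before `gc.hosts.add()` is called:

    interface = {
        'name': 'eth0',
        'mac': '00:0c:29:aa:bb:cc',
        'ip': '192.168.1.10',
        'netmask': '255.255.255.0',
        'assigned_networks': [
            {'name': 'MANAGEMENT', 'ip': '192.168.1.10'},
            {'name': 'STORAGE', 'ip': '172.16.1.10'},
        ],
        # pci, mode, gateway, type, slaves, is_deployment, vswitch_type stay ''
    }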
% role.id + # raise exc.CommandError(msg) + interface_list = [] + if args.interfaces: + for interface in args.interfaces: + interface_info = {"pci":"", "mode":"", "gateway":"", "type": "", "name": "", "mac": "", "ip": "", "netmask": "", "assigned_networks": "", "slaves":"", "is_deployment":"", "vswitch_type":""} + for kv_str in interface.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError: + raise exc.CommandError("interface error") + + if k in interface_info: + interface_info[k] = v + if k == "assigned_networks": + networks_list_obj = interface_info['assigned_networks'].split("_") + networks_list=[] + for network in networks_list_obj: + network_dict={} + name, ip = network.split(":", 1) + network_dict={'name':name,'ip':ip} + networks_list.append(network_dict) + interface_info['assigned_networks'] = networks_list + if k == "slaves": + slaves_list = interface_info['slaves'].split("_", 1) + interface_info['slaves'] = slaves_list + interface_list.append(interface_info) + args.interfaces = interface_list + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + host = gc.hosts.add(**fields) + + _daisy_show(host) + +@utils.arg('hosts', metavar='', nargs='+', + help='ID of host(s) to delete.') +def do_host_delete(gc, args): + """Delete specified host(s).""" + + for args_host in args.hosts: + host = utils.find_resource(gc.hosts, args_host) + if host and host.deleted: + msg = "No host with an ID of '%s' exists." % host.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting host delete for %s ...' % + encodeutils.safe_decode(args_host), end=' ') + gc.hosts.delete(host) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete host %s' % (e, args_host)) + +@utils.arg('host', metavar='', help='ID of host to modify.') +@utils.arg('--name', metavar='', + help='Name of host.') +@utils.arg('--resource-type', metavar='', + help='node resource type to be added, supported type are "baremetal", "server" and "docker".\ + "baremetal" is traditional physical server ,\ + "server" is virtual machine and \ + "docker" is container created by docker.') +@utils.arg('--dmi-uuid', metavar='', + help='node dmi uuid for the host.') +@utils.arg('--ipmi-user', metavar='', + help='ipmi user name for the host.') +@utils.arg('--ipmi-passwd', metavar='', + help='ipmi user of password for the host.') +@utils.arg('--ipmi-addr', metavar='', + help='ipmi ip for the host.') +@utils.arg('--description', metavar='', + help='Description of host.') +@utils.arg('--root-disk', metavar='', + help='the disk used to install OS.') +@utils.arg('--root-lv-size', metavar='', + help='the size of root_lv(M).') +@utils.arg('--swap-lv-size', metavar='', + help='the size of swap_lv(M).') +@utils.arg('--root-pwd', metavar='', + help='the passward of os.') +@utils.arg('--isolcpus', metavar='', + help='the cpus to be isolated.') +@utils.arg('--cluster', metavar='', + help='id of cluster that the node will be added.') +@utils.arg('--os-version', metavar='', + help='os version for the host.') +@utils.arg('--os-status', metavar='', + help='os status for the host.') +#@utils.arg('--status', metavar='', +# help='node status for the host.') +@utils.arg('--role', metavar='',nargs='+', + help='name of node role for the host.') +@utils.arg('--interfaces', 
metavar='', + nargs='+', + help='node network interface detail, ip must be given if assigned_networks is empty,\ + and cluster must be given if assigned_networks is not empty.') +@utils.arg('--hugepagesize', metavar='', + help='size of hugepage.') +@utils.arg('--hugepages', metavar='', + help='number of hugepages.') +def do_host_update(gc, args): + """Update a specific host.""" + # Filter out None values + if args.cluster: + cluster = utils.find_resource(gc.clusters, args.cluster) + if cluster and cluster.deleted: + msg = "No cluster with an ID of '%s' exists." % cluster.id + raise exc.CommandError(msg) + interface_list = [] + if args.interfaces: + for interfaces in args.interfaces: + interface_info = {"pci":"", "mode":"", "gateway":"", "type": "", "name": "", "mac": "", "ip": "", "netmask": "", "mode": "","assigned_networks": "", "slaves":"", "is_deployment":"", "vswitch_type":""} + for kv_str in interfaces.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError: + raise exc.CommandError("interface error") + if k in interface_info: + interface_info[k] = v + if k == "assigned_networks": + networks_list_obj = interface_info['assigned_networks'].split("_") + networks_list=[] + for network in networks_list_obj: + network_dict={} + name, ip = network.split(":", 1) + network_dict={'name':name,'ip':ip} + networks_list.append(network_dict) + interface_info['assigned_networks'] = networks_list + if k == "slaves": + slaves_list = interface_info['slaves'].split("_", 1) + interface_info['slaves'] = slaves_list + interface_list.append(interface_info) + args.interfaces = interface_list + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + host_arg = fields.pop('host') + host = utils.find_resource(gc.hosts, host_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.hosts.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + host = gc.hosts.update(host, **fields) + _daisy_show(host) + + +@utils.arg('--name', metavar='', + help='Filter hosts to those that have this name.') +@utils.arg('--status', metavar='', + help='Filter hosts status.') +@utils.arg('--cluster-id', metavar='', + help='Filter by cluster_id.') +@utils.arg('--page-size', metavar='', default=None, type=int, + help='Number of hosts to request in each paginated request.') +@utils.arg('--sort-key', default='name', + choices=daisyclient.v1.hosts.SORT_KEY_VALUES, + help='Sort host list by specified field.') +@utils.arg('--sort-dir', default='asc', + choices=daisyclient.v1.hosts.SORT_DIR_VALUES, + help='Sort host list in specified direction.') +def do_host_list(gc, args): + """List hosts you can access.""" + filter_keys = ['name', 'status', 'cluster_id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + + kwargs = {'filters': filters} + if args.page_size is not None: + kwargs['page_size'] = args.page_size + + kwargs['sort_key'] = args.sort_key + kwargs['sort_dir'] = args.sort_dir + + hosts = gc.hosts.list(**kwargs) + + columns = ['ID', 'Name','Description', 'Resource_type', 'Status', 'Os_progress','Os_status','Messages'] + if filters.has_key('cluster_id'): + role_columns = ['Role_progress','Role_status', 'Role_messages'] + columns += role_columns + + utils.print_list(hosts, columns) + +@utils.arg('id', metavar='', + help='Filter host to those that have this id.') +def do_host_detail(gc, args): + """List host you can access.""" + host = utils.find_resource(gc.hosts, 
args.id) + _daisy_show(host) + +# @utils.arg('name', metavar='', +# help='Cluster name to be added.') +# @utils.arg('--nodes', metavar='',nargs='+', +# help='id of cluster nodes to be added.') +# @utils.arg('description', metavar='', +# help='Cluster description to be added.') +# @utils.arg('--networks', metavar='',nargs='+', +# help='id of cluster networks.') +# @utils.arg('--floating_ranges', metavar='',nargs='+', +# help='Cluster floating ranges:"172.16.0.130","172.16.0.254"') +# @utils.arg('--dns_nameservers', metavar='',nargs='+', +# help='Cluster dns nameservers:"8.8.4.4" "8.8.8.8" ') +# @utils.arg('--net_l23_provider', metavar='', +# help='Cluster net_l23_provider.') +# @utils.arg('--base_mac', metavar='', +# help='Cluster base_mac.') +# @utils.arg('--internal_gateway', metavar='', +# help='Cluster internal gateway.') +# @utils.arg('--internal_cidr', metavar='', +# help='Cluster internal_cidr.') +# @utils.arg('--external_cidr', metavar='', +# help='Cluster external cidr.') +# @utils.arg('--gre_id_range', metavar='',nargs='+', +# help='Cluster gre_id_range. 2 65535') +# @utils.arg('--vlan_range', metavar='',nargs='+', +# help='Cluster vlan_range.1000 1030') +# @utils.arg('--vni_range', metavar='',nargs='+', +# help='Cluster vNI range.1000 1030') +# @utils.arg('--segmentation_type', metavar='', +# help='Cluster segmentation_type.') +# @utils.arg('--public_vip', metavar='', +# help='Cluster public vip.') + +@utils.arg('ip', metavar='', + help='ip of the host will be discovered.') +@utils.arg('passwd', metavar='', + help='passwd of the host.') +@utils.arg('--user', metavar='', + help='user name of the host.') +def do_discover_host_add(gc, args): + """Add a discover host.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + host = gc.hosts.add_discover_host(**fields) + _daisy_show(host) + +@utils.arg('id', metavar='', nargs='+', + help='ID of discover host(s) to delete.') +def do_discover_host_delete(gc, args): + """Delete specified host(s).""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + hosts = fields.get('id', None) + for args_host in hosts: + host = args_host + try: + if args.verbose: + print('Requesting host delete for %s ...' 
% + encodeutils.safe_decode(args_host), end=' ') + gc.hosts.delete_discover_host(host) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete host %s' % (e, args_host)) + +@utils.arg('--ip', metavar='', + help='Filter hosts to those that have this ip.') +@utils.arg('--user', metavar='', + help='Filter by user.') +def do_discover_host_list(gc, args): + """List hosts you can access.""" + + filter_keys = ['ip', 'user'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + kwargs = {'filters': filters} + discover_hosts = gc.hosts.list_discover_host(**kwargs) + columns = ['Id', 'Ip','User', 'Passwd', 'Status', 'Message', 'Host_id'] + utils.print_list(discover_hosts, columns) + +@utils.arg('id', metavar='', + help='id of the host.') +@utils.arg('--ip', metavar='', + help='ip of the host.') +@utils.arg('--passwd', metavar='', + help='passwd of the host.') +@utils.arg('--user', metavar='', + help='user name of the host.') +def do_discover_host_update(gc, args): + """Add a discover host.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + host = fields.get('id', None) + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + host = gc.hosts.update_discover_host(host, **fields) + _daisy_show(host) + +@utils.arg('id', metavar='', + help='ID of discover host.') +def do_discover_host_detail(gc, args): + """get host detail infomation.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + host_id = fields.get('id', None) + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.hosts.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + discover_host = gc.hosts.get_discover_host_detail(host_id, **fields) + _daisy_show(discover_host) + +@utils.arg('params_file_path', metavar='', + help="""Template file path. + Run \"daisy params-helper params_file_path\" for the template content. + Then save the output to a template file.Just use this path.""") +def do_cluster_add(gc, args): + """Add a cluster.""" + fields = None + if not args.params_file_path: + if args.nodes: + for arg_node in args.nodes: + host = utils.find_resource(gc.hosts, arg_node) + if host and host.deleted: + msg = "No host with an ID of '%s' exists." % host.id + raise exc.CommandError(msg) + if args.networks: + for arg_network in args.networks: + network = utils.find_resource(gc.networks, arg_network) + if network and network.deleted: + msg = "No network with an ID of '%s' exists." 
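(Editorial sketch; not part of the patch.) The discover-host commands above wrap a small lifecycle: register an address to probe, poll the list for its status, then inspect or update the entry. A sketch against the client API, assuming `gc` is a constructed daisyclient v1 client, that `ip`, `passwd` and `user` are accepted create parameters, and with placeholder values throughout:

    # Register a host for discovery, then check its status.
    dh = gc.hosts.add_discover_host(ip='192.168.1.50', passwd='PASSWORD', user='root')
    for item in gc.hosts.list_discover_host(filters={'ip': '192.168.1.50'}):
        print(item.id, item.status, item.message)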
% network.id + raise exc.CommandError(msg) + range_list = [] + if args.floating_ranges: + for floating_ranges in args.floating_ranges: + float_ip_list = floating_ranges.split(",") + range_list.append(float_ip_list) + args.floating_ranges = range_list + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.clusters.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + else: + fields = param_helper._read_template_file(args) + + cluster = gc.clusters.add(**fields) + _daisy_show(cluster) + +@utils.arg('cluster', metavar='', help='ID of cluster to modify.') +# @utils.arg('--name', metavar='', +# help='Name of host.') +# @utils.arg('--description', metavar='', +# help='Description of host.') +# @utils.arg('--nodes', metavar='',nargs='+', +# help='id of cluster nodes to be updated.') +# @utils.arg('--networks', metavar='',nargs='+', +# help='id of update networks.') +# @utils.arg('--floating_ranges', metavar='',nargs='+', +# help='Cluster floating ranges:"172.16.0.130","172.16.0.254"') +# @utils.arg('--dns_nameservers', metavar='',nargs='+', +# help='Cluster dns nameservers:"8.8.4.4" "8.8.8.8" ') +# @utils.arg('--net_l23_provider', metavar='', +# help='Cluster net_l23_provider.') +# @utils.arg('--base_mac', metavar='', +# help='Cluster base_mac.') +# @utils.arg('--internal_gateway', metavar='', +# help='Cluster internal gateway.') +# @utils.arg('--internal_cidr', metavar='', +# help='Cluster internal_cidr.') +# @utils.arg('--external_cidr', metavar='', +# help='Cluster external cidr.') +# @utils.arg('--gre_id_range', metavar='',nargs='+', +# help='Cluster gre_id_range. 2 65535') +# @utils.arg('--vlan_range', metavar='',nargs='+', +# help='Cluster vlan_range:1000 1030') +# @utils.arg('--vni_range', metavar='',nargs='+', +# help='Cluster vNI range:1000 1030') +# @utils.arg('--segmentation_type', metavar='', +# help='Cluster segmentation_type.') +# @utils.arg('--public_vip', metavar='', +# help='Cluster public vip.') +@utils.arg('params_file_path', metavar='', + help="""Template file path. + Run \"daisy params-helper params_file_path\" for the template content. + Then save the output to a template file.Just use this path.""") +def do_cluster_update(gc, args): + """Update a specific cluster.""" + # Filter out None values + fields = None + cluster = None + if not args.params_file_path: + if args.nodes: + for arg_node in args.nodes: + host = utils.find_resource(gc.hosts, arg_node) + if host and host.deleted: + msg = "No host with an ID of '%s' exists." % host.id + raise exc.CommandError(msg) + if args.networks: + for arg_network in args.networks: + network = utils.find_resource(gc.networks, arg_network) + if network and network.deleted: + msg = "No network with an ID of '%s' exists." 
% network.id + raise exc.CommandError(msg) + range_list = [] + if args.floating_ranges: + for floating_ranges in args.floating_ranges: + float_ip_list = floating_ranges.split(",") + range_list.append(float_ip_list) + args.floating_ranges = range_list + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + cluster_arg = fields.pop('cluster') + + cluster = utils.find_resource(gc.clusters, cluster_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.clusters.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + else: + cluster_arg = args.cluster + cluster = utils.find_resource(gc.clusters, cluster_arg) + fields = param_helper._read_template_file(args) + + cluster = gc.clusters.update(cluster, **fields) + _daisy_show(cluster) + +@utils.arg('subcommand_param', nargs='+', + metavar='', + help='Subcommand param, [\'params_file_path\', \'test\'].') +def do_params_helper(gc, args): + """ Params helper for some subcommand. """ + PARAMS = ('params_file_path', 'test') + valid_params_list = \ + [param for param in args.subcommand_param if param in PARAMS] + + for valid_param in valid_params_list: + if 0 == cmp(valid_param, u"params_file_path"): + print("------------------------------------------") + print("Cluster \'name\' and \'description\' segment must be supportted.Template:") + pprint.pprint(param_helper.CLUSTER_ADD_PARAMS_FILE) + print("------------------------------------------") + elif 0 == cmp(valid_param, u"test"): + print("------------------------------------------") + print("test") + print("------------------------------------------") + +@utils.arg('clusters', metavar='', nargs='+', + help=' ID of cluster(s) to delete.') +def do_cluster_delete(gc, args): + """Delete specified cluster(s).""" + + for args_cluster in args.clusters: + cluster = utils.find_resource(gc.clusters, args_cluster) + if cluster and cluster.deleted: + msg = "No cluster with an ID of '%s' exists." % cluster.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting cluster delete for %s ...' 
% + encodeutils.safe_decode(args_cluster), end=' ') + gc.clusters.delete(cluster) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete cluster %s' % (e, args_cluster)) + +@utils.arg('--name', metavar='', + help='Filter clusters to those that have this name.') + +@utils.arg('--auto-scale', metavar='', + help='auto-scale:1 or 0.') +@utils.arg('--page-size', metavar='', default=None, type=int, + help='Number of clusters to request in each paginated request.') +@utils.arg('--sort-key', default='name', + choices=daisyclient.v1.clusters.SORT_KEY_VALUES, + help='Sort cluster list by specified field.') +@utils.arg('--sort-dir', default='asc', + choices=daisyclient.v1.clusters.SORT_DIR_VALUES, + help='Sort cluster list in specified direction.') +def do_cluster_list(gc, args): + """List clusters you can access.""" + filter_keys = ['name','auto_scale'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + + kwargs = {'filters': filters} + if args.page_size is not None: + kwargs['page_size'] = args.page_size + + kwargs['sort_key'] = args.sort_key + kwargs['sort_dir'] = args.sort_dir + + clusters = gc.clusters.list(**kwargs) + + columns = ['ID', 'Name', 'Description', 'Nodes', 'Networks', 'Auto_scale', 'Use_dns'] + utils.print_list(clusters, columns) + +@utils.arg('id', metavar='', + help='Filter cluster to those that have this id.') +def do_cluster_detail(gc, args): + """List cluster you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + cluster = utils.find_resource(gc.clusters, fields.pop('id')) + _daisy_show(cluster) + else: + cluster = gc.clusters.list(**kwargs) + columns = ['ID', 'Name','Description','Nodes', 'Networks','Auto_scale', 'Use_dns'] + utils.print_list(cluster, columns) + +#@utils.arg('cluster', metavar='', +# help='Filter results by an cluster ID.') +#def do_cluster_host_list(gc, args): +# """Show cluster host membership by cluster or host.""" + # if not args.cluster: + # utils.exit('Unable to list all members. 
Specify cluster-id') + # if args.cluster: + # kwargs = {'cluster': args.cluster} +# + # members = gc.cluster_hosts.list(**kwargs) + # columns = ['Cluster_ID', 'Host_ID'] + # utils.print_list(members, columns) + + +@utils.arg('cluster', metavar='', + help='Project from which to remove member.') +@utils.arg('node', metavar='', + help='id of host to remove as member.') +def do_cluster_host_del(gc, args): + """Remove a host from cluster.""" + #cluster_id = utils.find_resource(gc.clusters, args.cluster).id + #host_id = utils.find_resource(gc.hosts, args.node).id + cluster_id = args.cluster + host_id = args.node + gc.cluster_hosts.delete(cluster_id, host_id) + + + +@utils.arg('name', metavar='', + help='Component name to be added.') +@utils.arg('description', metavar='', + help='Component description to be added.') +def do_component_add(gc, args): + """Add a component.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.components.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + component = gc.components.add(**fields) + + _daisy_show(component) + +@utils.arg('components', metavar='', nargs='+', + help='ID of component(s) to delete.') +def do_component_delete(gc, args): + """Delete specified component(s).""" + + for args_component in args.components: + component = utils.find_resource(gc.components, args_component) + if component and component.deleted: + msg = "No component with an ID of '%s' exists." % component.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting component delete for %s ...' % + encodeutils.safe_decode(args_component), end=' ') + gc.components.delete(component) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete component %s' % (e, args_component)) + +@utils.arg('--id', metavar='', + help='Filter components to those that have this name.') +def do_component_list(gc, args): + """List components you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + component = utils.find_resource(gc.components, fields.pop('id')) + _daisy_show(component) + else: + components = gc.components.list(**kwargs) + columns = ['ID', 'Name','Description'] + utils.print_list(components, columns) + +@utils.arg('component', metavar='', help='ID of component to modify.') +@utils.arg('--name', metavar='', + help='Name of component.') +@utils.arg('--description', metavar='', + help='Description of component.') +def do_component_update(gc, args): + """Update a specific component.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + component_arg = fields.pop('component') + component = utils.find_resource(gc.components, component_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.components.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + component = gc.components.update(component, **fields) + _daisy_show(component) + +@utils.arg('name', metavar='', + help='Service name to be added.') +@utils.arg('description', metavar='', + help='Service description to be added.') +@utils.arg('--component-id', metavar='', + 
help='Services that belong to the component of the ID.') +@utils.arg('--backup-type', metavar='', + help='The backup-type mybe lb or ha.') +def do_service_add(gc, args): + """Add a service.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.services.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + service = gc.services.add(**fields) + + _daisy_show(service) + +@utils.arg('services', metavar='', nargs='+', + help='ID of service(s) to delete.') +def do_service_delete(gc, args): + """Delete specified service(s).""" + + for args_service in args.services: + service = utils.find_resource(gc.services, args_service) + if service and service.deleted: + msg = "No service with an ID of '%s' exists." % service.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting service delete for %s ...' % + encodeutils.safe_decode(args_service), end=' ') + gc.services.delete(service) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete service %s' % (e, args_service)) + +@utils.arg('--id', metavar='', + help='Filter services to those that have this name.') +def do_service_list(gc, args): + """List services you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + service = utils.find_resource(gc.services, fields.pop('id')) + _daisy_show(service) + else: + services = gc.services.list(**kwargs) + columns = ['ID', 'Name','Description', 'Component_ID', 'Backup_Type'] + utils.print_list(services, columns) + +@utils.arg('service', metavar='', help='ID of service to modify.') +@utils.arg('--name', metavar='', + help='Name of service.') +@utils.arg('--description', metavar='', + help='Description of service.') +@utils.arg('--component-id', metavar='', + help='Services that belong to the component of the ID.') +@utils.arg('--backup-type', metavar='', + help='The backup-type mybe lb or ha.') +def do_service_update(gc, args): + """Update a specific service.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + service_arg = fields.pop('service') + service = utils.find_resource(gc.services, service_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.services.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + service = gc.services.update(service, **fields) + _daisy_show(service) + +@utils.arg('name', metavar='', + help='Role name to be added.') +@utils.arg('description', metavar='', + help='Role description to be added.') +#@utils.arg('--progress', metavar='', +# help='The role of the progress.') +@utils.arg('--config-set-id', metavar='', + help='Roles that belong to the config-set of the ID.') +@utils.arg('--nodes', metavar='', nargs='+', + help='Roles that belong to the host of the id,host id can be more than one') +@utils.arg('--services', metavar='', nargs='+', + help='Roles that belong to the service of the id, service id can be more than one') +#@utils.arg('--status', metavar='', +# help='The role of the status.') +@utils.arg('--cluster-id', metavar='', + help='Roles that belong to cluster of id.') +@utils.arg('--type', 
metavar='', + help='The value should be template or custom.') +@utils.arg('--vip', metavar='', + help='float ip.') +@utils.arg('--db-vip', metavar='', + help='float ip of db.') +@utils.arg('--glance-vip', metavar='', + help='float ip of glance.') +@utils.arg('--public-vip', metavar='', + help='float ip of public.') +@utils.arg('--mongodb-vip', metavar='', + help='float ip of mongodb.') +@utils.arg('--glance-lv-size', metavar='', + help='the size of logic volume disk for storaging image, and the unit is M.') +@utils.arg('--deployment-backend', metavar='', + help="deployment backend, supported bacends are 'tecs' and 'zenic' now.") +@utils.arg('--db-lv-size', metavar='', + help='the size of database disk(M).') +@utils.arg('--nova-lv-size', metavar='', + help='the size of logic volume disk for nvoa, and the unit is MB.') +@utils.arg('--disk-location', metavar='', + help='where disks used by backends application from, default is "local". \ + "local" means disks come from local host, "share" means disks come from share storage devices') +@utils.arg('--role-type', metavar='', + help='type of role') +def do_role_add(gc, args): + """Add a role.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.roles.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + role = gc.roles.add(**fields) + + _daisy_show(role) + +@utils.arg('roles', metavar='', nargs='+', + help='ID of role(s) to delete.') +def do_role_delete(gc, args): + """Delete specified role(s).""" + + for args_role in args.roles: + role = utils.find_resource(gc.roles, args_role) + if role and role.deleted: + msg = "No role with an ID of '%s' exists." % role.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting role delete for %s ...' 
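(Editorial sketch; not part of the patch.) `do_role_add` above only filters the parsed arguments against `daisyclient.v1.roles.CREATE_PARAMS` and forwards them, so the equivalent call can be made directly on the client. A sketch, assuming `gc` is a constructed daisyclient v1 client, that the fields below are all in `CREATE_PARAMS` (as the shell options suggest), and with placeholder names and IDs:

    # Create a custom role bound to one host and one service in a cluster.
    role = gc.roles.add(name='CONTROLLER_HA',
                        description='HA controller role',
                        cluster_id=cluster_id,
                        nodes=[host_id],
                        services=[service_id],
                        vip='192.168.1.100')
    print(role.id)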
% + encodeutils.safe_decode(args_role), end=' ') + gc.roles.delete(role) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete role %s' % (e, args_role)) + +@utils.arg('--cluster-id', metavar='', + help='Roles that belong to cluster.') +def do_role_list(gc, args): + """List roles you can access.""" + filter_keys = ['cluster_id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + + roles = gc.roles.list(**kwargs) + columns = ['ID', 'Name','Description','Status','Progress','Config_Set_ID','CLUSTER_ID','TYPE','VIP','Deployment_Backend'] + utils.print_list(roles, columns) + +@utils.arg('id', metavar='', + help='Filter roles to those that have this name.') +def do_role_detail(gc, args): + """List roles you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + role = utils.find_resource(gc.roles, fields.pop('id')) + _daisy_show(role) + else: + roles = gc.roles.list(**kwargs) + columns = ['ID', 'Name','Description','Status','Progress','Config_Set_ID','CLUSTER_ID','TYPE','VIP'] + utils.print_list(roles, columns) + +@utils.arg('role', metavar='', help='ID of role to modify.') +@utils.arg('--name', metavar='', + help='Name of role.') +@utils.arg('--description', metavar='', + help='Description of role.') +@utils.arg('--config-set-id', metavar='', + help='Roles that belong to the config-set of the ID.') +@utils.arg('--nodes', metavar='', nargs='+', + help='Roles that belong to the host of the id,host id can be more than one') +@utils.arg('--services', metavar='', nargs='+', + help='Roles that belong to the service of the id, service id can be more than one') +#@utils.arg('--status', metavar='', +# help='The role of the status.') +#@utils.arg('--progress', metavar='', +# help='The role of the progress.') +@utils.arg('--cluster-id', metavar='', + help='Roles that belong to cluster of id.') +@utils.arg('--type', metavar='', + help='The value should be template or custom.') +@utils.arg('--vip', metavar='', + help='float ip.') +@utils.arg('--glance-lv-size', metavar='', + help='the size of logic volume disk for storaging image, and the unit is M.') +@utils.arg('--deployment-backend', metavar='', + help="deployment backend, supported bacends are 'tecs' and 'zenic' now.") +@utils.arg('--db-lv-size', metavar='', + help='the size of database disk(M).') +@utils.arg('--nova-lv-size', metavar='', + help='the size of logic volume disk for nvoa, and the unit is MB.') +@utils.arg('--disk-location', metavar='', + help='where disks used by backends application from, default is "local". 
\ + "local" means disks come from local host, "share" means disks come from share storage devices') +@utils.arg('--ntp-server', metavar='', + help='ip of ntp server') +@utils.arg('--role-type', metavar='', + help='type of role') +@utils.arg('--db-vip', metavar='', + help='float ip of db') +@utils.arg('--glance-vip', metavar='', + help='float ip of glance') +@utils.arg('--public-vip', metavar='', + help='float ip of public') +@utils.arg('--mongodb-vip', metavar='', + help='float ip of mongodb') +def do_role_update(gc, args): + """Update a specific role.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + role_arg = fields.pop('role') + role = utils.find_resource(gc.roles, role_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.roles.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + role = gc.roles.update(role, **fields) + _daisy_show(role) + + +@utils.arg('name', metavar='', + help='config_file name to be added.') +@utils.arg('description', metavar='', + help='config_file description to be added.') +def do_config_file_add(gc, args): + """Add a config_file.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.config_files.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + config_file = gc.config_files.add(**fields) + + _daisy_show(config_file) + +@utils.arg('config_files', metavar='', nargs='+', + help='ID of config_file(s) to delete.') +def do_config_file_delete(gc, args): + """Delete specified config_file(s).""" + + for args_config_file in args.config_files: + config_file = utils.find_resource(gc.config_files, args_config_file) + if config_file and config_file.deleted: + msg = "No config_file with an ID of '%s' exists." % config_file.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting config_file delete for %s ...' 
% + encodeutils.safe_decode(args_config_file), end=' ') + gc.config_files.delete(config_file) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete config_file %s' % (e, args_config_file)) + +@utils.arg('config_file', metavar='', help='ID of config_file to modify.') +@utils.arg('--name', metavar='', + help='Name of config_file.') +@utils.arg('--description', metavar='', + help='Description of config_file.') +def do_config_file_update(gc, args): + """Update a specific config_file.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + config_file_arg = fields.pop('config_file') + config_file = utils.find_resource(gc.config_files, config_file_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.config_files.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + config_file = gc.config_files.update(config_file, **fields) + _daisy_show(config_file) + + +def do_config_file_list(gc, args): + """List config_files you can access.""" + filter_keys = '' + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config_file = utils.find_resource(gc.config_files, fields.pop('id')) + _daisy_show(config_file) + else: + config_files = gc.config_files.list(**kwargs) + columns = ['ID', 'Name','Description'] + utils.print_list(config_files, columns) + +@utils.arg('id', metavar='', + help='Filter config_file to those that have this id.') +def do_config_file_detail(gc, args): + """List config_files you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config_file = utils.find_resource(gc.config_files, fields.pop('id')) + _daisy_show(config_file) + else: + config_files = gc.config_files.list(**kwargs) + columns = ['ID', 'Name','Description'] + utils.print_list(config_files, columns) + +@utils.arg('name', metavar='', + help='config_set name to be added.') +@utils.arg('description', metavar='', + help='config_set description to be added.') +def do_config_set_add(gc, args): + """Add a config_set.""" + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.config_sets.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + config_set = gc.config_sets.add(**fields) + + _daisy_show(config_set) + +@utils.arg('config_sets', metavar='', nargs='+', + help='ID of config_set(s) to delete.') +def do_config_set_delete(gc, args): + """Delete specified config_set(s).""" + + for args_config_set in args.config_sets: + config_set = utils.find_resource(gc.config_sets, args_config_set) + if config_set and config_set.deleted: + msg = "No config_set with an ID of '%s' exists." % config_set.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting config_set delete for %s ...' 
% + encodeutils.safe_decode(args_config_set), end=' ') + gc.config_sets.delete(config_set) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete config_set %s' % (e, args_config_set)) + +@utils.arg('config_set', metavar='', help=' ID of config_set to modify.') +@utils.arg('--name', metavar='', + help='Name of config_set.') +@utils.arg('--description', metavar='', + help='Description of config_set.') +def do_config_set_update(gc, args): + """Update a specific config_set.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + config_set_arg = fields.pop('config_set') + config_set = utils.find_resource(gc.config_sets, config_set_arg) + + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.config_sets.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + config_set = gc.config_sets.update(config_set, **fields) + _daisy_show(config_set) + + + +def do_config_set_list(gc, args): + """List config_sets you can access.""" + filter_keys = '' + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config_set = utils.find_resource(gc.config_sets, fields.pop('id')) + _daisy_show(config_set) + else: + config_sets = gc.config_sets.list(**kwargs) + columns = ['ID', 'Name','Description'] + utils.print_list(config_sets, columns) + +@utils.arg('id', metavar='', + help='Filter components to those that have this name.') +def do_config_set_detail(gc, args): + """List config_sets you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config_set = utils.find_resource(gc.config_sets, fields.pop('id')) + _daisy_show(config_set) + else: + config_sets = gc.config_sets.list(**kwargs) + columns = ['ID', 'Name','Description'] + utils.print_list(config_sets, columns) + +@utils.arg('config', metavar='', nargs='+', + help='ID of config(s) to delete.') +def do_config_delete(gc, args): + """Delete specified config(s).""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.configs.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + gc.configs.delete(**fields) + +def do_config_list(gc, args): + """List configs you can access.""" + filter_keys = '' + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config = utils.find_resource(gc.configs, fields.pop('id')) + _daisy_show(config) + else: + configs = gc.configs.list(**kwargs) + columns = ['ID','Section' ,'Key','Value','Description', 'Config_file_id','Config_version','Running_version'] + utils.print_list(configs, columns) + +@utils.arg('id', metavar='', + help='Filter configs to those that have this id.') +def do_config_detail(gc, args): + """List configs you can access.""" + filter_keys = ['id'] + filter_items = [(key, 
getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + config = utils.find_resource(gc.configs, fields.pop('id')) + _daisy_show(config) + else: + configs = gc.configs.list(**kwargs) + columns = ['ID','Section' ,'Key','Value','Description', 'Config_file_id','Config_version','Running_version'] + utils.print_list(configs, columns) + +@utils.arg('name', metavar='', help='NAME of network.') +@utils.arg('description', metavar='', + help='Description of network.') +@utils.arg('network_type', metavar='' , + help='type of network:PUBLIC,PRIVATE,STORAGE,MANAGEMENT,EXTERNAL,DEPLOYMENT') +@utils.arg('--cluster-id', metavar='', help='ID of cluster, must be given.') +@utils.arg('--vlan-start', metavar='', + help='vlan start of network.it should be a integer in "1~4096", and it must be appeared with vlan end') +@utils.arg('--vlan-end', metavar='', + help='vlan end of network.it should be a integer in "1~4096", and it must be appeared with vlan start') +@utils.arg('--cidr', metavar='', + help='specifying ip range of network. eg:192.168.1.1/24') +@utils.arg('--ip', metavar='', + help='network ip') +@utils.arg('--ip-ranges', metavar='' ,nargs='+', + help='ip ranges of network. for example:"start":"172.16.0.2","end":"172.16.0.126"') +@utils.arg('--gateway', metavar='' , + help='gate way of network') +@utils.arg('--type', metavar='' , + help='type of network:custom or template') +@utils.arg('--ml2-type', metavar='' , + help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ + when network-type is PRIVATE, ml2-type must be given') +@utils.arg('--physnet-name', metavar='' , + help='physnet name,eg:physnet_eth0') +@utils.arg('--capability', metavar='' , + help='CAPABILITY of network:high or low') +@utils.arg('--vlan-id', metavar='' , + help='Vlan Tag.') +@utils.arg('--mtu', metavar='' , + help='Private plane mtu.eg.:1600.') +def do_network_add(gc, args): + """Add a network.""" + ip_range_list = [] + if args.ip_ranges: + for ip_range in args.ip_ranges: + ip_range_ref={} + for range_value in ip_range.split(","): + try: + k, v = range_value.split(":", 1) + if str(k) == "start": + ip_range_ref['start'] = str(v) + if str(k) == "end": + ip_range_ref['end'] = str(v) + except ValueError: + raise exc.CommandError("ip_ranges error") + ip_range_list.append(ip_range_ref) + args.ip_ranges = ip_range_list + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.networks.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + network = gc.networks.add(**fields) + + _daisy_show(network) + +@utils.arg('network', metavar='', help='ID of network.') +@utils.arg('--network-type', metavar='' , + help='type of network:PUBLIC,PRIVATE,STORAGE,MANAGEMENT,EXTERNAL,DEPLOYMENT') +@utils.arg('--name', metavar='', + help='Name of network.') +@utils.arg('--description', metavar='', + help='Description of network.') +@utils.arg('--vlan-start', metavar='', + help='vlan start of network.it should be a integer in "1~4096", and it must be appeared with vlan end') +@utils.arg('--vlan-end', metavar='', + help='vlan end of network.it should be a integer in "1~4096", and it must be appeared with vlan start') +@utils.arg('--cidr', metavar='', + help='specifying ip range of network. 
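(Editorial note; not part of the patch.) The `--ip-ranges` values accepted by `do_network_add` above (and repeated by `do_network_update` below) are comma-separated `start:<ip>,end:<ip>` pairs; once the shell strips the surrounding quotes shown in the help text, each value is parsed into a start/end dict before the create or update call. A sketch of the transformation, reusing the addresses from the help text:

    # What one --ip-ranges token parses into; args.ip_ranges is a list of these.
    raw = 'start:172.16.0.2,end:172.16.0.126'
    ip_range = dict(part.split(':', 1) for part in raw.split(','))
    # -> {'start': '172.16.0.2', 'end': '172.16.0.126'}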
eg:192.168.1.1/24') +@utils.arg('--ip-ranges', metavar='' ,nargs='+', + help='ip ranges of network,for example:"start":"172.16.0.2","end":"172.16.0.126"') +@utils.arg('--gateway', metavar='' , + help='gate way of network') +@utils.arg('--type', metavar='' , + help='type of network:custom or template') +@utils.arg('--ml2-type', metavar='' , + help='ml2 type:"ovs", "sriov(direct)", "sriov(macvtap)", "ovs,sriov(direct)" or "ovs,sriov(macvtap)".\ + when network-type is PRIVATE, ml2-type must be given') +@utils.arg('--physnet-name', metavar='' , + help='physnet name,eg:physnet_eth0') +@utils.arg('--capability', metavar='' , + help='CAPABILITY of network:high or low') +@utils.arg('--vlan-id', metavar='' , + help='Vlan Tag.') +@utils.arg('--mtu', metavar='' , + help='Private plane mtu.eg.:1600.') +@utils.arg('--alias', metavar='' , + help='alias of network') +def do_network_update(gc, args): + """Update a specific network.""" + # Filter out None values + + ip_range_list = [] + + if args.ip_ranges: + for ip_range in args.ip_ranges: + ip_range_ref={} + for range_value in ip_range.split(","): + try: + k, v = range_value.split(":", 1) + if str(k) == "start": + ip_range_ref['start'] = str(v) + if str(k) == "end": + ip_range_ref['end'] = str(v) + except ValueError: + raise exc.CommandError("ip_ranges error") + ip_range_list.append(ip_range_ref) + args.ip_ranges = ip_range_list + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + network_arg = fields.pop('network') + + network = utils.find_resource(gc.networks, network_arg) + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.networks.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + + + network = gc.networks.update(network, **fields) + _daisy_show(network) + + +@utils.arg('networks', metavar='', nargs='+', help='ID of network.') +@utils.arg('--cluster-id', metavar='', help='ID of cluster .') +def do_network_delete(gc, args): + """Delete specified network(s).""" + + for args_network in args.networks: + network = utils.find_resource(gc.networks, args_network) + if network and network.deleted: + msg = "No network with an ID of '%s' exists." % network.id + raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting network delete for %s ...' 
% + encodeutils.safe_decode(args_network), end=' ') + gc.networks.delete(network) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete network %s' % (e, args_network)) + +@utils.arg('--cluster-id', metavar='', + help='Filter networks to those that have this name.') +@utils.arg('--page-size', metavar='', default=None, type=int, + help='Number of networks to request in each paginated request.') +@utils.arg('--sort-key', default='name', + choices=daisyclient.v1.networks.SORT_KEY_VALUES, + help='Sort networks list by specified field.') +@utils.arg('--sort-dir', default='asc', + choices=daisyclient.v1.networks.SORT_DIR_VALUES, + help='Sort networks list in specified direction.') +def do_network_list(gc, args): + """List networks you can access.""" + filter_keys = ['cluster_id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + kwargs = {'id': args.cluster_id, 'filters': filters} + if args.page_size is not None: + kwargs['page_size'] = args.page_size + + kwargs['sort_key'] = args.sort_key + kwargs['sort_dir'] = args.sort_dir + + networks = gc.networks.list(**kwargs) + + columns = ['ID', 'Name', 'Cluster_id', 'Description', 'Vlan_start','Vlan_end','Gateway','Cidr','Type', 'Ip_ranges'] + utils.print_list(networks, columns) + + +@utils.arg('id', metavar='', + help='Filter network to those that have this id.') +def do_network_detail(gc, args): + """List network you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + network = utils.find_resource(gc.networks, fields.pop('id')) + _daisy_show(network) + else: + network = gc.networks.list(**kwargs) + columns = ['ID', 'Name', 'Cluster_id', 'Description', 'Vlan_start','Vlan_end','Gateway','Cidr','Type', 'Ip_ranges'] + utils.print_list(network, columns) + + +@utils.arg('cluster_id', metavar='', + help='ID of cluster to install TECS.') +@utils.arg('--version-id', metavar='', + help='Version of TECS.') +@utils.arg('--deployment-interface', metavar='', + help='Network interface construction of PXE server(eg:eth0).') +def do_install(dc, args): + """Install TECS.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.install.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + install = dc.install.install(**fields) + + _daisy_show(install) + + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to uninstall TECS.') +def do_uninstall(gc, args): + """Uninstall TECS.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.uninstall.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + uninstall = gc.uninstall.uninstall(**fields) + _daisy_show(uninstall) + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to query progress of uninstall TECS .') +def do_query_uninstall_progress(gc, args): + """Query uninstall progress.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + CREATE_PARAMS = daisyclient.v1.uninstall.CREATE_PARAMS + fields = 
dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + query_progress = gc.uninstall.query_progress(**fields) + + _daisy_show(query_progress) + + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to update os and TECS.') +def do_update(gc, args): + """update TECS.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.update.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + update = gc.update.update(**fields) + _daisy_show(update) + +@utils.arg('cluster_id', metavar='', + help='The cluster ID to query progress of update os and TECS .') +def do_query_update_progress(gc, args): + """Query update progress.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + CREATE_PARAMS = daisyclient.v1.update.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + query_progress = gc.update.query_progress(**fields) + _daisy_show(query_progress) + +@utils.arg('cluster_id', metavar='', + help='The cluster ID on which to export tecs and HA config file from database.') +def do_export_db(gc, args): + """export database.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.install.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + + config_file = gc.install.export_db(**fields) + _daisy_show(config_file) + + +@utils.arg('--cluster', metavar='', + help='ID of cluster to config file.') +@utils.arg('--role', metavar='', + help=' role name.') +@utils.arg('--config-set', metavar='', + help='id of the config-set.') +@utils.arg('--config', metavar='', + nargs='+', + help='file-name must take full path.such as:file-name=/etc/nova/nova.conf,section=DEFAULT,key=port,value=5661,description=description') +def do_config_add(gc, args): + """add and update config interfaces.""" + config_interface_list = [] + if args.config: + for interfaces in args.config: + interface_info = {"file-name":"", "section":"", "key":"", "value": "","description": ""} + for kv_str in interfaces.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError: + raise exc.CommandError("config-interface error") + if k in interface_info: + interface_info[k] = v + config_interface_list.append(interface_info) + args.config = config_interface_list + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.configs.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + config_interface_info = gc.configs.add(**fields) + _daisy_show(config_interface_info) + +@utils.arg('cluster', metavar='', + help='ID of cluster to config file.') +@utils.arg('--role', metavar='', + nargs='+', + help=' role name.') +def do_cluster_config_set_update(gc, args): + """the cluster of config effect.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.configs.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + config_interface_info = gc.config_sets.cluster_config_set_update(**fields) + _daisy_show(config_interface_info) + +@utils.arg('cluster', metavar='', + help='ID of cluster to config file.') +@utils.arg('--role', metavar='', + nargs='+', + help=' role name.') +def do_cluster_config_set_progress(gc, args): + """query cluster of config progress.""" + fields = 
dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.configs.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + config_set_progress = gc.config_sets.cluster_config_set_progress(**fields) + _daisy_show(config_set_progress) + +def do_discover_host(gc, args): + filter_keys = '' + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + kwargs = {'filters': filters} + + discover_host = gc.hosts.discover_host(**kwargs) + _daisy_show(discover_host) + +@utils.arg('service', metavar='', + help='service name who will use disk storage, suport db, glance and dbbackup.') +@utils.arg('role_id', metavar='', + help='which role service come from.') +@utils.arg('--disk-location', metavar='', + help='where disks from, default is "local". \ + "local" means disks come from local host,\ + "share" means disks come from share storage devices') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ + when DISK_LOCATION is share, DATA_IPS cannot be empty') +@utils.arg('--size', metavar='', + help='unit is G, and default is -1, it means to use all of the disk.') +@utils.arg('--lun', metavar='', + help='mark which volume is used for glance sharing disk.') +def do_service_disk_add(dc, args): + """ config services share disk. """ + + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + # Filter out values we can't use + SERVICE_DISK_PARAMS = daisyclient.v1.disk_array.CREATE_SERVICE_DISK_PARAMS + fields = dict(filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) + #if fields.has_key('data_ips'): + # fields['data_ips'] = fields['data_ips'].split(",") + + service_disk_info = dc.disk_array.service_disk_add(**fields) + + _daisy_show(service_disk_info) + +@utils.arg('service_disks', metavar='', nargs='+', + help='ID(s) of service_disk to delete.') +def do_service_disk_delete(dc, args): + """Delete specified service_disk.""" + + for service_disk_id in args.service_disks: + # service_disk = utils.find_resource(dc.disk_array, service_disk_id) + # if service_disk and service_disk.deleted: + # msg = "No service_disk with ID '%s' exists." % service_disk_id + # raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting service_disk_id delete for %s ...' % + encodeutils.safe_decode(service_disk_id), end=' ') + dc.disk_array.service_disk_delete(service_disk_id) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete service_disk %s' % (e, service_disk_id)) + +@utils.arg('id', metavar='', + help='ID of service_disk.') +@utils.arg('--service', metavar='', + help='service name who will use Disk Array device, suport db, glance and dbbackup.') +@utils.arg('--role-id', metavar='', + help='which role service come from.') +@utils.arg('--disk-location', metavar='', + help='where disks from, default is "local". 
\ + "local" means disks come from local host,\ + "share" means disks come from Disk Array device') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ + when DISK_LOCATION is share, DATA_IPS cannot be empty') +@utils.arg('--size', metavar='', + help='unit is G, and default is -1, it means to use all of the disk.') +@utils.arg('--lun', metavar='', + help='mark which lun is used for Disk Array device,default is 0.') +def do_service_disk_update(dc, args): + """Update a specific service_disk.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + + id = fields.pop('id') + service_disk = utils.find_resource(dc.disk_array, id) + + # Filter out values we can't use + SERVICE_DISK_PARAMS = daisyclient.v1.disk_array.CREATE_SERVICE_DISK_PARAMS + fields = dict(filter(lambda x: x[0] in SERVICE_DISK_PARAMS, fields.items())) + + service_disk_info = dc.disk_array.service_disk_update(id, **fields) + _daisy_show(service_disk_info) + +@utils.arg('--role-id', metavar='', + help='filter service_disks by role id.') +def do_service_disk_list(dc, args): + """List service_disk you can access.""" + filter_keys = ['role_id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + + disk_array_list = dc.disk_array.service_disk_list(**kwargs) + columns = ['ID', 'SERVICE','ROLE_ID','DISK_LOCATION','DATA_IPS','SIZE', 'LUN'] + utils.print_list(disk_array_list, columns) + + +@utils.arg('id', metavar='', + help='get service_disk detail by its id.') +def do_service_disk_detail(dc, args): + """detail service_disk you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + #service_disk = utils.find_resource(dc.disk_array, fields.pop('id')) + service_disk_info = dc.disk_array.service_disk_detail(fields.pop('id'), **fields) + _daisy_show(service_disk_info) + else: + service_disk = dc.disk_array.service_disk_list(**kwargs) + columns = ['ID', 'SERVICE','ROLE_ID','DISK_LOCATION','DATA_IPS','SIZE', 'LUN'] + utils.print_list(service_disk, columns) + +def _paraser_disk_array(disk_array): + disk_arrays = [] + CINDER_VOLUME_BACKEND_PARAMS =\ + daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_INTER_PARAMS + if disk_array: + for array in disk_array: + disk_array_info = {} + for kv_str in array.split(","): + try: + k, v = kv_str.split("=", 1) + except ValueError: + raise exc.CommandError("disk_array error") + if k in CINDER_VOLUME_BACKEND_PARAMS: + if (k == 'pools' or + k == 'data_ips' or + k == 'management_ips'): + disk_array_info[k] = ','.join(v.split("_")) + else: + disk_array_info[k] = v + disk_arrays.append(disk_array_info) + return disk_arrays + +@utils.arg('disk_array', metavar='', + nargs='+', + help='management_ips: management interfaces ip of Disk Array\ + device, separate by "_";\ + data_ips:data interfaces ip of Disk Array device,\ + separate by ",", \ + when using FUJISTU Disk Array, DATA_IPS cannot be empty;\ + pools: pools name which are configed in Disk Array device;\ + user_name: user name to login Disk Array device;\ + user_pwd: user password to login Disk Array device;\ + volume_driver: 
supports "KS3200_FCSAN", "KS3200_IPSAN"\ + and "FUJISTU_ETERNUS" according by Disk Array device type,\ + separate by "_";\ + volume_type: maybe same in two backends.') +@utils.arg('role_id', metavar='', + help='filter cinder_volumes by role id.') +def do_cinder_volume_add(dc, args): + """config cinder volume backend.""" + args.disk_array = _paraser_disk_array(args.disk_array) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # Filter out values we can't use + CINDER_BACKEND_PARAMS = daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_PARAMS + fields = dict(filter(lambda x: x[0] in CINDER_BACKEND_PARAMS, fields.items())) + cinder_volume_info = dc.disk_array.cinder_volume_add(**fields) + + _daisy_show(cinder_volume_info) + +@utils.arg('cinder_volumes', metavar='', nargs='+', + help='ID(s) of cinder volumes to delete.') +def do_cinder_volume_delete(dc, args): + """delete specified cinder_volume backend.""" + for cinder_volume_id in args.cinder_volumes: + #cinder_volume = utils.find_resource(dc.disk_array, cinder_volume_id) + #if cinder_volume and cinder_volume.deleted: + # msg = "No cinder_volume with ID '%s' exists." % cinder_volume_id + # raise exc.CommandError(msg) + try: + if args.verbose: + print('Requesting cinder_volume_id delete for %s ...' % + encodeutils.safe_decode(cinder_volume_id), end=' ') + dc.disk_array.cinder_volume_delete(cinder_volume_id) + + if args.verbose: + print('[Done]') + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete cinder volume %s' % (e, cinder_volume_id)) + +@utils.arg('id', metavar='', + help='ID of cinder_volume.') +@utils.arg('--management-ips', metavar='', + help='management interfaces ip of Disk Array device, separate by ","') +@utils.arg('--data-ips', metavar='', + help='data interfaces ip of Disk Array device, separate by ",", \ + when using FUJISTU Disk Array, DATA_IPS cannot be empty') +@utils.arg('--pools', metavar='', + help='pools name which are configed in Disk Array device') +@utils.arg('--volume-driver', metavar='', + help='supports "KS3200_FCSAN", "KS3200_IPSAN" and "FUJISTU_ETERNUS"\ + according by Disk Array device type, separate by ","') +@utils.arg('--volume-type', metavar='', + help='it maybe same in two backends, supprot "" and ""') +@utils.arg('--role-id', metavar='', + help='which role cinder_volume come from.') +@utils.arg('--user-name', metavar='', + help='user name of disk array') +@utils.arg('--user-pwd', metavar='', + help='user password of disk arry') +def do_cinder_volume_update(dc, args): + """Update a specific cinder_volume.""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + id = fields.pop('id') + # Filter out values we can't use + CINDER_VOLUME_PARAMS =\ + daisyclient.v1.disk_array.CREATE_CINDER_BACKEND_INTER_PARAMS + fields = dict(filter(lambda x: x[0] in CINDER_VOLUME_PARAMS, fields.items())) + + if fields.has_key('management_ips'): + fields['management_ips'] = ','.join(fields['management_ips'].split("_")) + if fields.has_key('data_ips'): + fields['data_ips'] = ','.join(fields['data_ips'].split("_")) + if fields.has_key('pools'): + fields['pools'] = ','.join(fields['pools'].split("_")) + + cinder_volume_info = dc.disk_array.cinder_volume_update(id, **fields) + _daisy_show(cinder_volume_info) + + +@utils.arg('--role-id', metavar='', + help='filter cinder_volumes by role id.') +def do_cinder_volume_list(dc, args): + """List cinder_volume you can access.""" + filter_keys = ['role_id'] + filter_items = 
+ + +@utils.arg('--role-id', metavar='', + help='filter cinder_volumes by role id.') +def do_cinder_volume_list(dc, args): + """List cinder_volumes you can access.""" + filter_keys = ['role_id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + + disk_array_list = dc.disk_array.cinder_volume_list(**kwargs) + + columns = ['ID', 'MANAGEMENT_IPS','DATA_IPS','POOLS', + 'VOLUME_DRIVER','VOLUME_TYPE','BACKEND_INDEX', + 'USER_NAME','USER_PWD', 'ROLE_ID'] + utils.print_list(disk_array_list, columns) + +@utils.arg('id', metavar='', + help='get cinder_volume detail by its id.') +def do_cinder_volume_detail(dc, args): + """Show detail of a cinder_volume you can access.""" + filter_keys = ['id'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + kwargs = {'filters': filters} + if filters: + cinder_volume_info = dc.disk_array.cinder_volume_detail(fields.pop('id'), **fields) + _daisy_show(cinder_volume_info) + else: + cinder_volume = dc.disk_array.cinder_volume_list(**kwargs) + columns = ['ID', 'MANAGEMENT_IPS','DATA_IPS','POOLS', + 'VOLUME_DRIVER','VOLUME_TYPE','BACKEND_INDEX', + 'USER_NAME','USER_PWD', 'ROLE_ID'] + utils.print_list(cinder_volume, columns) + +@utils.arg('cluster', metavar='', + help='ID of cluster to update disk array.') +def do_disk_array_update(dc, args): + """Update cluster disk array configuration (tecs backend only).""" + # Filter out None values + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + cluster_id = fields.pop('cluster') + # Filter out values we can't use + DISK_ARRAY_PARAMS = [] + fields = dict(filter(lambda x: x[0] in DISK_ARRAY_PARAMS, fields.items())) + + update_result = dc.install.disk_array_update(cluster_id, **fields) + _daisy_show(update_result) + +@utils.arg('name', metavar='', + help='Template name of the cluster.') +@utils.arg('--description', metavar='', + help='Description of the template.') +@utils.arg('--type', metavar='', + help='Type of the cluster.') +@utils.arg('--hosts', metavar='', + help='Hosts information of the cluster.') +@utils.arg('--content', metavar='', + help='Contents of the cluster.') +def do_template_add(gc, args): + """Add a template.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + template = gc.template.add(**fields) + _daisy_show(template) + +@utils.arg('id', metavar='', + help='Id of the cluster template.') +@utils.arg('--name', metavar='', + help='Template name of the cluster.') +@utils.arg('--description', metavar='', + help='Description of the template.') +@utils.arg('--type', metavar='', + help='Type of the cluster.') +@utils.arg('--hosts', metavar='', + help='Hosts information of the cluster.') +@utils.arg('--content', metavar='', + help='Contents of the cluster.') +def do_template_update(gc, args): + """Update a template.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + template_id = fields.get('id', None) + # Filter out values we can't use + UPDATE_PARAMS = daisyclient.v1.template.UPDATE_PARAMS + fields = dict(filter(lambda x: x[0] in UPDATE_PARAMS, fields.items())) + template = gc.template.update(template_id, **fields) + _daisy_show(template)
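# Illustrative sketch of the argument-filtering pattern shared by the template
# handlers above and below: drop None values from the parsed args, then keep
# only keys allowed by the resource's parameter tuple. The tuple and values
# here are assumed examples, not the real daisyclient.v1.template.UPDATE_PARAMS.
EXAMPLE_UPDATE_PARAMS = ('name', 'description', 'type', 'hosts', 'content')
parsed_args = {'id': '42', 'name': 'tpl-1', 'description': None, 'verbose': True}
fields = dict(filter(lambda x: x[1] is not None, parsed_args.items()))
fields = dict(filter(lambda x: x[0] in EXAMPLE_UPDATE_PARAMS, fields.items()))
assert fields == {'name': 'tpl-1'}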
+ +@utils.arg('id', metavar='', nargs='+', + help='ID of templates.') +def do_template_delete(gc, args): + """Delete specified template(s).""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + templates = fields.get('id', None) + for template in templates: + try: + if args.verbose: + print('Requesting template delete for %s ...' % + encodeutils.safe_decode(template), end=' ') + gc.template.delete(template) + if args.verbose: + print('[Done]') + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to delete cluster template %s' % (e, template)) + +@utils.arg('--name', metavar='', + help='Filter cluster templates to those that have this name.') +@utils.arg('--type', metavar='', + help='Filter cluster template type.') +@utils.arg('--sort-key', default='name', + choices=daisyclient.v1.template.SORT_KEY_VALUES, + help='Sort cluster templates list by specified field.') +@utils.arg('--sort-dir', default='asc', + choices=daisyclient.v1.template.SORT_DIR_VALUES, + help='Sort cluster templates list in specified direction.') +def do_template_list(gc, args): + """List templates you can access.""" + filter_keys = ['name', 'type'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + kwargs = {'filters': filters} + kwargs['sort_key'] = args.sort_key + kwargs['sort_dir'] = args.sort_dir + templates = gc.template.list(**kwargs) + columns = ['ID', 'Name','Type', 'Hosts', 'Content'] + utils.print_list(templates, columns) + +@utils.arg('id', metavar='', + help='ID of template.') +def do_template_detail(gc, args): + """Get specified template.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + template_id = fields.get('id', None) + try: + if args.verbose: + print('Requesting template information for %s ...' % + encodeutils.safe_decode(template_id), end=' ') + template = gc.template.get(template_id) + if args.verbose: + print('[Done]') + _daisy_show(template) + + except exc.HTTPException as e: + if args.verbose: + print('[Fail]') + print('%s: Unable to get template information %s' % (e, template_id)) + +@utils.arg('cluster_name', metavar = '', + help = 'Name of cluster to create template.') +@utils.arg('template_name', metavar = '', + help = 'the name of the json template.') +@utils.arg('--description', metavar = '', + help = 'Description of the template.') +@utils.arg('--type', metavar = '', + help = 'Export backend database based on type, for example: tecs, zenic.') +def do_export_db_to_json(dc, args): + """Export db to json.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + export_db_to_json = dc.template.export_db_to_json(**fields) + _daisy_show(export_db_to_json) + +@utils.arg('json_file_path', metavar = '', + help = 'Path of the json file.') +def do_import_json_to_template(dc, args): + """Import json to template.""" + json_file = args.json_file_path + if not os.path.exists(json_file): + print("the json file does not exist or permission denied.") + return + with open(json_file) as tfp: + params_json = tfp.read() + dict_params = {'template':params_json} + import_json_to_template = dc.template.import_json_to_template(**dict_params) + _daisy_show(import_json_to_template) + +@utils.arg('template_name', metavar = '', + help = 'the name of the json template.') +@utils.arg('cluster', metavar = '', + help = 'The name of the cluster to create.') +def do_import_template_to_db(dc, args): + """Import template to db.""" + fields = dict(filter(lambda x: x[1] is not None, 
vars(args).items())) + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + import_template_to_db = dc.template.import_template_to_db(**fields) + _daisy_show(import_template_to_db) + +@utils.arg('cluster_name', metavar='', + help='name of template.') +@utils.arg('host_id', metavar='', + help='host id.') +@utils.arg('host_template_name', metavar='', + help='host template name.') +@utils.arg('--description', metavar='', + help='host template description.') +def do_host_to_template(dc, args): + """HOST TO TEMPLATE.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + host_to_template = dc.template.host_to_template(**fields) + _daisy_show(host_to_template) + +@utils.arg('cluster_name', metavar='', + help='name of cluster to config file.') +@utils.arg('host_template_name', metavar='', + help='host template name.') +@utils.arg('host_id', metavar='', + help='host id list') +def do_template_to_host(dc, args): + """TEMPLATE TO HOST.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + # Filter out values we can't use + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + template_to_host = dc.template.template_to_host(**fields) + _daisy_show(template_to_host) + +@utils.arg('cluster_name', metavar='', + help='name of cluster.') +def do_host_template_list(dc, args): + """GET ALL HOST TEMPLATE.""" + filter_keys = ['cluster_name'] + filter_items = [(key, getattr(args, key)) for key in filter_keys] + filters = dict([item for item in filter_items if item[1] is not None]) + kwargs = {'filters': filters} + get_all_host_template = dc.template.host_template_list(**kwargs) + columns = ['name','description','os_version_file','role','interfaces'] + utils.print_list(get_all_host_template, columns) + +@utils.arg('cluster_name', metavar='', + help='name of cluster to config file.') +@utils.arg('host_template_name', metavar='', + help='host template name.') +def do_delete_host_template(dc, args): + """DELETE HOST TEMPLATE.""" + fields = dict(filter(lambda x: x[1] is not None, vars(args).items())) + CREATE_PARAMS = daisyclient.v1.template.CREATE_PARAMS + fields = dict(filter(lambda x: x[0] in CREATE_PARAMS, fields.items())) + host_template = dc.template.delete_host_template(**fields) + _daisy_show(host_template) + diff --git a/code/daisyclient/daisyclient/v1/template.py b/code/daisyclient/daisyclient/v1/template.py new file mode 100755 index 00000000..5a105f9b --- /dev/null +++ b/code/daisyclient/daisyclient/v1/template.py @@ -0,0 +1,397 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import os + +from oslo_utils import encodeutils +from oslo_utils import strutils +import six +import six.moves.urllib.parse as urlparse + +from daisyclient.common import utils +from daisyclient.openstack.common.apiclient import base + +UPDATE_PARAMS = ('name','description', 'type', 'hosts', 'content','cluster_name','template_name', 'template') + +CREATE_PARAMS = ('id', 'cluster_id','name', 'description', 'cluster_name','host_id', 'host_template_name', 'type', 'hosts', 'content','cluster','template_name', 'template') + +DEFAULT_PAGE_SIZE = 20 + +SORT_DIR_VALUES = ('asc', 'desc') +SORT_KEY_VALUES = ('name', 'id', 'created_at', 'updated_at') + +OS_REQ_ID_HDR = 'x-openstack-request-id' + + +class Template(base.Resource): + def __repr__(self): + return "