Merge "Don't mix declaration and set of locals"
commit d111182f21
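Why the change matters, shown with a minimal standalone bash sketch of my own (it is not taken from the patch below): when a command substitution is assigned on the same line as the local builtin, the line's exit status is that of local, which succeeds, so a following $? test or a set -e trap never sees the command's failure; ShellCheck reports this pattern as SC2155. Splitting the declaration from the assignment keeps the substitution's exit status visible, which is exactly what code like the cleanup_cinder hunk below depends on when it checks $? right after assigning targets.

    #!/usr/bin/env bash
    # Minimal illustration of why declaration and assignment are split.

    combined() {
        local out=$(false)      # $? reflects `local`, not `false`
        echo "combined: \$? = $?"
    }

    split() {
        local out
        out=$(false)            # $? reflects the command substitution
        echo "split:    \$? = $?"
    }

    combined    # prints: combined: $? = 0
    split       # prints: split:    $? = 1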
@@ -122,41 +122,47 @@ function foreach_tenant_net {
 }

 function get_image_id {
-    local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+    local IMAGE_ID
+    IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
     die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
     echo "$IMAGE_ID"
 }

 function get_tenant_id {
     local TENANT_NAME=$1
-    local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+    local TENANT_ID
+    TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
     die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
     echo "$TENANT_ID"
 }

 function get_user_id {
     local USER_NAME=$1
-    local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
+    local USER_ID
+    USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
     die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
     echo "$USER_ID"
 }

 function get_role_id {
     local ROLE_NAME=$1
-    local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+    local ROLE_ID
+    ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
     die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
     echo "$ROLE_ID"
 }

 function get_network_id {
     local NETWORK_NAME="$1"
-    local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+    local NETWORK_ID
+    NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
     echo $NETWORK_ID
 }

 function get_flavor_id {
     local INSTANCE_TYPE=$1
-    local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+    local FLAVOR_ID
+    FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
     die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
     echo "$FLAVOR_ID"
 }
@@ -185,13 +191,15 @@ function add_tenant {

 function remove_tenant {
     local TENANT=$1
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     openstack project delete $TENANT_ID
 }

 function remove_user {
     local USER=$1
-    local USER_ID=$(get_user_id $USER)
+    local USER_ID
+    USER_ID=$(get_user_id $USER)
     openstack user delete $USER_ID
 }

@@ -221,9 +229,11 @@ function create_network {
     local NET_NAME="${TENANT}-net$NUM"
     local ROUTER_NAME="${TENANT}-router${NUM}"
     source $TOP_DIR/openrc admin admin
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     source $TOP_DIR/openrc $TENANT $TENANT
-    local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+    local NET_ID
+    NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
     neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
     neutron_debug_admin probe-create --device-owner compute $NET_ID
@@ -251,7 +261,8 @@ function create_vm {
     done
     #TODO (nati) Add multi-nic test
     #TODO (nati) Add public-net test
-    local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
+    local VM_UUID
+    VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
         --image $(get_image_id) \
         $NIC \
         $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
@@ -309,7 +320,8 @@ function delete_network {
     local NUM=$2
     local NET_NAME="${TENANT}-net$NUM"
     source $TOP_DIR/openrc admin admin
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     #TODO(nati) comment out until l3-agent merged
     #for res in port subnet net router;do
     for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do

@@ -264,7 +264,8 @@ function upload_image {
             ;;
         *.img)
             image_name=$(basename "$image" ".img")
-            local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
+            local format
+            format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
             if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
                 disk_format=$format
             else
@@ -405,7 +406,8 @@ function get_instance_ip {
     local vm_id=$1
     local network_name=$2
     local nova_result="$(nova show $vm_id)"
-    local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    local ip
+    ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
     if [[ $ip = "" ]];then
         echo "$nova_result"
         die $LINENO "[Fail] Coudn't get ipaddress of VM"
@@ -455,7 +457,8 @@ function check_path_perm_sanity {
     # homedir permissions on RHEL and common practice of making DEST in
     # the stack user's homedir.

-    local real_path=$(readlink -f $1)
+    local real_path
+    real_path=$(readlink -f $1)
     local rebuilt_path=""
     for i in $(echo ${real_path} | tr "/" " "); do
         rebuilt_path=$rebuilt_path"/"$i

@@ -140,7 +140,8 @@ function isset {
 # backtrace level
 function backtrace {
     local level=$1
-    local deep=$((${#BASH_SOURCE[@]} - 1))
+    local deep
+    deep=$((${#BASH_SOURCE[@]} - 1))
     echo "[Call Trace]"
     while [ $level -le $deep ]; do
         echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
@@ -483,7 +484,8 @@ function git_clone {
     local git_remote=$1
     local git_dest=$2
     local git_ref=$3
-    local orig_dir=$(pwd)
+    local orig_dir
+    orig_dir=$(pwd)
     local git_clone_flags=""

     RECLONE=$(trueorfalse False RECLONE)
@@ -647,7 +649,8 @@ function get_default_host_ip {
         host_ip=""
         # Find the interface used for the default route
         host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
-        local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
+        local host_ips
+        host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}')
         local ip
         for ip in $host_ips; do
             # Attempt to filter out IP addresses that are part of the fixed and
@@ -696,7 +699,8 @@ function get_field {
 # copy over a default policy.json and policy.d for projects
 function install_default_policy {
     local project=$1
-    local project_uc=$(echo $1|tr a-z A-Z)
+    local project_uc
+    project_uc=$(echo $1|tr a-z A-Z)
     local conf_dir="${project_uc}_CONF_DIR"
     # eval conf dir to get the variable
     conf_dir="${!conf_dir}"
@@ -729,7 +733,8 @@ function policy_add {

     # Add a terminating comma to policy lines without one
     # Remove the closing '}' and all lines following to the end-of-file
-    local tmpfile=$(mktemp)
+    local tmpfile
+    tmpfile=$(mktemp)
     uniq ${policy_file} | sed -e '
         s/]$/],/
         /^[}]/,$d
@@ -922,7 +927,8 @@ function get_or_create_endpoint {
     # scenarios currently that use the returned id. Ideally this behaviour
     # should be pushed out to the service setups and let them create the
     # endpoints they need.
-    local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+    local public_id
+    public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
     _get_or_create_endpoint_with_interface $1 admin $4 $2
     _get_or_create_endpoint_with_interface $1 internal $5 $2

@@ -1048,7 +1054,8 @@ function get_packages {
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local services=$@
-    local package_dir=$(_get_package_dir)
+    local package_dir
+    package_dir=$(_get_package_dir)
     local file_to_parse=""
     local service=""

@@ -1992,8 +1999,10 @@ function address_in_net {
     local ip=$1
     local range=$2
     local masklen=${range#*/}
-    local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
-    local subnet=$(maskip $ip $(cidr2netmask $masklen))
+    local network
+    network=$(maskip ${range%/*} $(cidr2netmask $masklen))
+    local subnet
+    subnet=$(maskip $ip $(cidr2netmask $masklen))
     [[ $network == $subnet ]]
 }

@@ -2045,7 +2054,8 @@ function export_proxy_variables {

 # Returns true if the directory is on a filesystem mounted via NFS.
 function is_nfs_directory {
-    local mount_type=`stat -f -L -c %T $1`
+    local mount_type
+    mount_type=`stat -f -L -c %T $1`
     test "$mount_type" == "nfs"
 }

@@ -2056,13 +2066,15 @@ function maskip {
     local ip=$1
     local mask=$2
     local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
-    local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
+    local subnet
+    subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
     echo $subnet
 }

 # Return the current python as "python<major>.<minor>"
 function python_version {
-    local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+    local python_version
+    python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
     echo "python${python_version}"
 }

@@ -196,7 +196,8 @@ function iniset {
 $option = $value
 " "$file"
     else
-        local sep=$(echo -ne "\x01")
+        local sep
+        sep=$(echo -ne "\x01")
         # Replace it
         $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
     fi
@@ -89,7 +89,8 @@ function merge_config_file {
     # note, configfile might be a variable (note the iniset, etc
     # created in the mega-awk below is "eval"ed too, so we just leave
     # it alone.
-    local real_configfile=$(eval echo $configfile)
+    local real_configfile
+    real_configfile=$(eval echo $configfile)
     if [ ! -f $real_configfile ]; then
         touch $real_configfile
     fi
inc/python | 18
@@ -61,7 +61,8 @@ function get_python_exec_prefix {
 # pip_install_gr packagename
 function pip_install_gr {
     local name=$1
-    local clean_name=$(get_from_global_requirements $name)
+    local clean_name
+    clean_name=$(get_from_global_requirements $name)
     pip_install $clean_name
 }

@@ -102,7 +103,8 @@ function pip_install {
             local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
             local sudo_pip="env"
         else
-            local cmd_pip=$(get_pip_command)
+            local cmd_pip
+            cmd_pip=$(get_pip_command)
             local sudo_pip="sudo -H"
         fi
     fi
@@ -111,7 +113,8 @@ function pip_install {
     # Always apply constraints
     cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"

-    local pip_version=$(python -c "import pip; \
+    local pip_version
+    pip_version=$(python -c "import pip; \
         print(pip.__version__.strip('.')[0])")
     if (( pip_version<6 )); then
         die $LINENO "Currently installed pip version ${pip_version} does not" \
@@ -147,7 +150,8 @@ function pip_install {
 # get_from_global_requirements <package>
 function get_from_global_requirements {
     local package=$1
-    local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    local required_pkg
+    required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
     if [[ $required_pkg == "" ]]; then
         die $LINENO "Can't find package $package in requirements"
     fi
@@ -226,7 +230,8 @@ function setup_develop {
 # practical ways.
 function is_in_projects_txt {
     local project_dir=$1
-    local project_name=$(basename $project_dir)
+    local project_name
+    project_name=$(basename $project_dir)
     grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
 }

@@ -245,7 +250,8 @@ function setup_package_with_constraints_edit {

     if [ -n "$REQUIREMENTS_DIR" ]; then
         # Constrain this package to this project directory from here on out.
-        local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        local name
+        name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
             $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
             "$flags file://$project_dir#egg=$name"

@@ -41,7 +41,8 @@ function add_sudo_secure_path {
 # configure_rootwrap project
 function configure_rootwrap {
     local project=$1
-    local project_uc=$(echo $1|tr a-z A-Z)
+    local project_uc
+    project_uc=$(echo $1|tr a-z A-Z)
     local bin_dir="${project_uc}_BIN_DIR"
     bin_dir="${!bin_dir}"
     local project_dir="${project_uc}_DIR"
@@ -60,7 +61,8 @@ function configure_rootwrap {
     sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf

     # Set up the rootwrap sudoers
-    local tempfile=$(mktemp)
+    local tempfile
+    tempfile=$(mktemp)
     # Specify rootwrap.conf as first parameter to rootwrap
     rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *"
     echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile
lib/apache | 12
@@ -72,11 +72,14 @@ function install_apache_wsgi {
 # various differences between Apache 2.2 and 2.4 that warrant special handling.
 function get_apache_version {
     if is_ubuntu; then
-        local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
+        local version_str
+        version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
     elif is_fedora; then
-        local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
+        local version_str
+        version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
     elif is_suse; then
-        local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
+        local version_str
+        version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
     else
         exit_distro_not_supported "cannot determine apache version"
     fi
@@ -115,7 +118,8 @@ function get_apache_version {
 function apache_site_config_for {
     local site=$@
     if is_ubuntu; then
-        local apache_version=$(get_apache_version)
+        local apache_version
+        apache_version=$(get_apache_version)
         if [[ "$apache_version" == "2.2" ]]; then
             # Ubuntu 12.04 - Apache 2.2
             echo $APACHE_CONF_DIR/${site}
lib/ceph | 9
@@ -83,7 +83,8 @@ ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
 # ------------

 function get_ceph_version {
-    local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
+    local ceph_version_str
+    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
     echo $ceph_version_str
 }

@@ -106,7 +107,8 @@ EOF
 # undefine_virsh_secret() - Undefine Cinder key secret from libvirt
 function undefine_virsh_secret {
     if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+        local virsh_uuid
+        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
         sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
     fi
 }
@@ -219,7 +221,8 @@ EOF
     done

     # pools data and metadata were removed in the Giant release so depending on the version we apply different commands
-    local ceph_version=$(get_ceph_version)
+    local ceph_version
+    ceph_version=$(get_ceph_version)
     # change pool replica size according to the CEPH_REPLICAS set by the user
     if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}

@@ -150,7 +150,8 @@ function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
     if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
-        local targets=$(sudo tgtadm --op show --mode target)
+        local targets
+        targets=$(sudo tgtadm --op show --mode target)
         if [ $? -ne 0 ]; then
             # If tgt driver isn't running this won't work obviously
             # So check the response and restart if need be
@@ -198,7 +199,8 @@ function cleanup_cinder {

 # _cinder_config_apache_wsgi() - Set WSGI config files
 function _cinder_config_apache_wsgi {
-    local cinder_apache_conf=$(apache_site_config_for osapi-volume)
+    local cinder_apache_conf
+    cinder_apache_conf=$(apache_site_config_for osapi-volume)
     local cinder_ssl=""
     local cinder_certfile=""
     local cinder_keyfile=""

@@ -106,7 +106,8 @@ function configure_glance {
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
     inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
-    local dburl=`database_connection_url glance`
+    local dburl
+    dburl=`database_connection_url glance`
     iniset $GLANCE_REGISTRY_CONF database connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
@@ -265,7 +266,8 @@ function create_glance_accounts {
         # required for swift access
         if is_service_enabled s-proxy; then

-            local glance_swift_user=$(get_or_create_user "glance-swift" \
+            local glance_swift_user
+            glance_swift_user=$(get_or_create_user "glance-swift" \
                 "$SERVICE_PASSWORD" "default" "glance-swift@example.com")
             get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
lib/heat | 3
@@ -321,7 +321,8 @@ function build_heat_pip_mirror {

     echo "</body></html>" >> $HEAT_PIP_REPO/index.html

-    local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+    local heat_pip_repo_apache_conf
+    heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)

     sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
     sudo sed -e "

@@ -49,7 +49,8 @@ function _horizon_config_set {
         sed -e "/^$option/d" -i $local_settings
         echo -e "\n$option=$value" >> $file
     elif grep -q "^$section" $file; then
-        local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+        local line
+        line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
         else
@@ -68,7 +69,8 @@ function _horizon_config_set {
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon {
-    local horizon_conf=$(apache_site_config_for horizon)
+    local horizon_conf
+    horizon_conf=$(apache_site_config_for horizon)
     sudo rm -f $horizon_conf
 }

@@ -112,7 +114,8 @@ function init_horizon {
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole

-    local horizon_conf=$(apache_site_config_for horizon)
+    local horizon_conf
+    horizon_conf=$(apache_site_config_for horizon)

     # Configure apache to run horizon
     sudo sh -c "sed -e \"
lib/ironic | 63
@@ -225,7 +225,8 @@ function _cleanup_ironic_apache_wsgi {

 # _config_ironic_apache_wsgi() - Set WSGI config files of Ironic
 function _config_ironic_apache_wsgi {
-    local ironic_apache_conf=$(apache_site_config_for ironic)
+    local ironic_apache_conf
+    ironic_apache_conf=$(apache_site_config_for ironic)
     sudo cp $FILES/apache-ironic.template $ironic_apache_conf
     sudo sed -e "
         s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
@@ -325,11 +326,13 @@ function configure_ironic_api {
 function configure_ironic_conductor {
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
-    local ironic_rootwrap=$(get_rootwrap_location ironic)
+    local ironic_rootwrap
+    ironic_rootwrap=$(get_rootwrap_location ironic)
     local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"

     # Set up the rootwrap sudoers for ironic
-    local tempfile=`mktemp`
+    local tempfile
+    tempfile=`mktemp`
     echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
     chmod 0440 $tempfile
     sudo chown root:root $tempfile
@@ -370,7 +373,8 @@ function configure_ironic_conductor {
         fi
         iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
         iniset $IRONIC_CONF_FILE glance swift_api_version v1
-        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
+        local tenant_id
+        tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
         iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
         iniset $IRONIC_CONF_FILE glance swift_container glance
         iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
@@ -379,7 +383,8 @@ function configure_ironic_conductor {
     fi

     if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
+        local pxebin
+        pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
         iniset $IRONIC_CONF_FILE pxe ipxe_enabled True
         iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template'
         iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
@@ -445,7 +450,8 @@ function init_ironic {
 # _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
 function _ironic_bm_vm_names {
     local idx
-    local num_vms=$(($IRONIC_VM_COUNT - 1))
+    local num_vms
+    num_vms=$(($IRONIC_VM_COUNT - 1))
     for idx in $(seq 0 $num_vms); do
         echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}"
     done
@@ -498,22 +504,27 @@ function stop_ironic {
 }

 function create_ovs_taps {
-    local ironic_net_id=$(neutron net-list | grep private | get_field 1)
+    local ironic_net_id
+    ironic_net_id=$(neutron net-list | grep private | get_field 1)

     # Work around: No netns exists on host until a Neutron port is created. We
     # need to create one in Neutron to know what netns to tap into prior to the
     # first node booting.
-    local port_id=$(neutron port-create private | grep " id " | get_field 2)
+    local port_id
+    port_id=$(neutron port-create private | grep " id " | get_field 2)

     # intentional sleep to make sure the tag has been set to port
     sleep 10

     if [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-        local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
+        local tapdev
+        tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
     else
-        local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
+        local tapdev
+        tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
     fi
-    local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+    local tag_id
+    tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)

     # make sure veth pair is not existing, otherwise delete its links
     sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
@@ -571,7 +582,8 @@ function wait_for_nova_resources {
 }

 function enroll_nodes {
-    local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
+    local chassis_id
+    chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)

     if ! is_ironic_hardware; then
         local ironic_node_cpu=$IRONIC_VM_SPECS_CPU
@@ -603,10 +615,14 @@ function enroll_nodes {
         if ! is_ironic_hardware; then
             local mac_address=$hardware_info
         elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then
-            local ipmi_address=$(echo $hardware_info |awk '{print $1}')
-            local mac_address=$(echo $hardware_info |awk '{print $2}')
-            local ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
-            local ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
+            local ipmi_address
+            ipmi_address=$(echo $hardware_info |awk '{print $1}')
+            local mac_address
+            mac_address=$(echo $hardware_info |awk '{print $2}')
+            local ironic_ipmi_username
+            ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
+            local ironic_ipmi_passwd
+            ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
             # Currently we require all hardware platform have same CPU/RAM/DISK info
             # in future, this can be enhanced to support different type, and then
             # we create the bare metal flavor with minimum value
@@ -618,9 +634,11 @@ function enroll_nodes {

         # First node created will be used for testing in ironic w/o glance
         # scenario, so we need to know its UUID.
-        local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")
+        local standalone_node_uuid
+        standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")

-        local node_id=$(ironic node-create $standalone_node_uuid\
+        local node_id
+        node_id=$(ironic node-create $standalone_node_uuid\
             --chassis_uuid $chassis_id \
             --driver $IRONIC_DEPLOY_DRIVER \
             --name node-$total_nodes \
@@ -641,7 +659,8 @@ function enroll_nodes {
     # NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered
     # bug (LP: #1333852) in Trove. This can be changed to use an auto flavor id when the
     # bug is fixed in Juno.
-    local adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
+    local adjusted_disk
+    adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
     nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu

     nova flavor-key baremetal set "cpu_arch"="x86_64"
@@ -772,7 +791,8 @@ function upload_baremetal_ironic_deploy {
         fi
     fi

-    local token=$(openstack token issue -c id -f value)
+    local token
+    token=$(openstack token issue -c id -f value)
     die_if_not_set $LINENO token "Keystone fail to get token"

     # load them into glance
@@ -810,7 +830,8 @@ function prepare_baremetal_basic_ops {
 function cleanup_baremetal_basic_ops {
     rm -f $IRONIC_VM_MACS_CSV_FILE
     if [ -f $IRONIC_KEY_FILE ]; then
-        local key=$(cat $IRONIC_KEY_FILE.pub)
+        local key
+        key=$(cat $IRONIC_KEY_FILE.pub)
         # remove public key from authorized_keys
         grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
         chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
lib/keystone | 36
@@ -132,7 +132,8 @@ function _cleanup_keystone_apache_wsgi {

 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
 function _config_keystone_apache_wsgi {
-    local keystone_apache_conf=$(apache_site_config_for keystone)
+    local keystone_apache_conf
+    keystone_apache_conf=$(apache_site_config_for keystone)
     local keystone_ssl=""
     local keystone_certfile=""
     local keystone_keyfile=""
@@ -347,9 +348,12 @@ function configure_keystone_extensions {
 function create_keystone_accounts {

     # admin
-    local admin_tenant=$(get_or_create_project "admin" default)
-    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
-    local admin_role=$(get_or_create_role "admin")
+    local admin_tenant
+    admin_tenant=$(get_or_create_project "admin" default)
+    local admin_user
+    admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
+    local admin_role
+    admin_role=$(get_or_create_role "admin")
     get_or_add_user_project_role $admin_role $admin_user $admin_tenant

     # Create service project/role
@@ -365,18 +369,23 @@ function create_keystone_accounts {
     get_or_create_role ResellerAdmin

     # The Member role is used by Horizon and Swift so we need to keep it:
-    local member_role=$(get_or_create_role "Member")
+    local member_role
+    member_role=$(get_or_create_role "Member")

     # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-    local another_role=$(get_or_create_role "anotherrole")
+    local another_role
+    another_role=$(get_or_create_role "anotherrole")

     # invisible tenant - admin can't see this one
-    local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
+    local invis_tenant
+    invis_tenant=$(get_or_create_project "invisible_to_admin" default)

     # demo
-    local demo_tenant=$(get_or_create_project "demo" default)
-    local demo_user=$(get_or_create_user "demo" \
+    local demo_tenant
+    demo_tenant=$(get_or_create_project "demo" default)
+    local demo_user
+    demo_user=$(get_or_create_user "demo" \
         "$ADMIN_PASSWORD" "default" "demo@example.com")

     get_or_add_user_project_role $member_role $demo_user $demo_tenant
@@ -384,9 +393,11 @@ function create_keystone_accounts {
     get_or_add_user_project_role $another_role $demo_user $demo_tenant
     get_or_add_user_project_role $member_role $demo_user $invis_tenant

-    local admin_group=$(get_or_create_group "admins" \
+    local admin_group
+    admin_group=$(get_or_create_group "admins" \
         "default" "openstack admin group")
-    local non_admin_group=$(get_or_create_group "nonadmins" \
+    local non_admin_group
+    non_admin_group=$(get_or_create_group "nonadmins" \
         "default" "non-admin group")

     get_or_add_group_project_role $member_role $non_admin_group $demo_tenant
@@ -415,7 +426,8 @@ function create_keystone_accounts {
 function create_service_user {
     local role=${2:-service}

-    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
+    local user
+    user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
     get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
 }

lib/ldap | 9
@@ -82,7 +82,8 @@ function cleanup_ldap {
 function init_ldap {
     local keystone_ldif

-    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir
+    tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)

     # Remove data but not schemas
     clear_ldap_state
@@ -113,7 +114,8 @@ function install_ldap {
     echo "Installing LDAP inside function"
     echo "os_VENDOR is $os_VENDOR"

-    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir
+    tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)

     printf "installing OpenLDAP"
     if is_ubuntu; then
@@ -129,7 +131,8 @@ function install_ldap {
     fi

     echo "LDAP_PASSWORD is $LDAP_PASSWORD"
-    local slappass=$(slappasswd -s $LDAP_PASSWORD)
+    local slappass
+    slappass=$(slappasswd -s $LDAP_PASSWORD)
     printf "LDAP secret is $slappass\n"

     # Create manager.ldif and add to olcdb
lib/lvm | 6
@@ -56,7 +56,8 @@ function _clean_lvm_backing_file {

     # If the backing physical device is a loop device, it was probably setup by DevStack
     if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
-        local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
+        local vg_dev
+        vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
         sudo losetup -d $vg_dev
         rm -f $backing_file
     fi
@@ -89,7 +90,8 @@ function _create_lvm_volume_group {
     if ! sudo vgs $vg; then
         # Only create if the file doesn't already exists
         [[ -f $backing_file ]] || truncate -s $size $backing_file
-        local vg_dev=`sudo losetup -f --show $backing_file`
+        local vg_dev
+        vg_dev=`sudo losetup -f --show $backing_file`

         # Only create volume group if it doesn't already exist
         if ! sudo vgs $vg; then
@ -806,7 +806,8 @@ function _move_neutron_addresses_route {
|
|||||||
|
|
||||||
local IP_ADD=""
|
local IP_ADD=""
|
||||||
local IP_DEL=""
|
local IP_DEL=""
|
||||||
local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
|
local DEFAULT_ROUTE_GW
|
||||||
|
DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
|
||||||
local ADD_OVS_PORT=""
|
local ADD_OVS_PORT=""
|
||||||
|
|
||||||
if [[ $af == "inet" ]]; then
|
if [[ $af == "inet" ]]; then
|
||||||
@ -1247,7 +1248,8 @@ function _neutron_create_private_subnet_v4 {
|
|||||||
subnet_params+="--gateway $NETWORK_GATEWAY "
|
subnet_params+="--gateway $NETWORK_GATEWAY "
|
||||||
subnet_params+="--name $PRIVATE_SUBNET_NAME "
|
subnet_params+="--name $PRIVATE_SUBNET_NAME "
|
||||||
subnet_params+="$NET_ID $FIXED_RANGE"
|
subnet_params+="$NET_ID $FIXED_RANGE"
|
||||||
local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
|
local subnet_id
|
||||||
|
subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
|
||||||
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID"
|
die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID"
|
||||||
echo $subnet_id
|
echo $subnet_id
|
||||||
}
|
}
|
||||||
@ -1262,7 +1264,8 @@ function _neutron_create_private_subnet_v6 {
|
|||||||
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
|
subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
|
||||||
subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
|
subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
|
||||||
subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
|
subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
|
||||||
local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
|
local ipv6_subnet_id
|
||||||
|
ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
|
||||||
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID"
|
die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID"
|
||||||
echo $ipv6_subnet_id
|
echo $ipv6_subnet_id
|
||||||
}
|
}
|
||||||
@ -1275,7 +1278,8 @@ function _neutron_create_public_subnet_v4 {
|
|||||||
subnet_params+="--name $PUBLIC_SUBNET_NAME "
|
subnet_params+="--name $PUBLIC_SUBNET_NAME "
|
||||||
subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
|
subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
|
||||||
subnet_params+="-- --enable_dhcp=False"
|
subnet_params+="-- --enable_dhcp=False"
|
||||||
local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
|
local id_and_ext_gw_ip
|
||||||
|
id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
|
||||||
die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
|
die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
|
||||||
echo $id_and_ext_gw_ip
|
echo $id_and_ext_gw_ip
|
||||||
}
|
}
|
||||||
@ -1287,7 +1291,8 @@ function _neutron_create_public_subnet_v6 {
|
|||||||
subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
|
subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
|
||||||
subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
|
subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
|
||||||
subnet_params+="-- --enable_dhcp=False"
|
subnet_params+="-- --enable_dhcp=False"
|
||||||
local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
|
local ipv6_id_and_ext_gw_ip
|
||||||
|
ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
|
||||||
die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
|
die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
|
||||||
echo $ipv6_id_and_ext_gw_ip
|
echo $ipv6_id_and_ext_gw_ip
|
||||||
}
|
}
|
||||||
@ -1296,8 +1301,10 @@ function _neutron_create_public_subnet_v6 {
|
|||||||
function _neutron_configure_router_v4 {
|
function _neutron_configure_router_v4 {
|
||||||
neutron router-interface-add $ROUTER_ID $SUBNET_ID
|
neutron router-interface-add $ROUTER_ID $SUBNET_ID
|
||||||
# Create a public subnet on the external network
|
# Create a public subnet on the external network
|
||||||
local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
|
local id_and_ext_gw_ip
|
||||||
local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
|
id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
|
||||||
|
local ext_gw_ip
|
||||||
|
ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2)
|
||||||
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
|
PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
|
||||||
# Configure the external network as the default router gateway
|
# Configure the external network as the default router gateway
|
||||||
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
|
neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
|
||||||
@ -1334,9 +1341,12 @@ function _neutron_configure_router_v4 {
|
|||||||
function _neutron_configure_router_v6 {
|
function _neutron_configure_router_v6 {
|
||||||
neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
|
neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
|
||||||
# Create a public subnet on the external network
|
# Create a public subnet on the external network
|
||||||
local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
|
local ipv6_id_and_ext_gw_ip
|
||||||
local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
|
ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
|
||||||
local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
|
local ipv6_ext_gw_ip
|
||||||
|
ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
|
||||||
|
local ipv6_pub_subnet_id
|
||||||
|
ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
|
||||||
|
|
||||||
# If the external network has not already been set as the default router
|
# If the external network has not already been set as the default router
|
||||||
# gateway when configuring an IPv4 public subnet, do so now
|
# gateway when configuring an IPv4 public subnet, do so now
|
||||||
@@ -1354,7 +1364,8 @@ function _neutron_configure_router_v6 {
 die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
 if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+local ext_gw_interface
+ext_gw_interface=$(_neutron_get_ext_gw_interface)
 local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
 
 # Configure interface for public bridge
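
Every hunk above makes the same two-line split, and the reason is a property of bash itself: "local" is a builtin with its own exit status, so "local var=$(command)" discards the exit status of the command substitution. A minimal sketch, not taken from the files touched here:

    # "local" masks the substituted command's failure; the split form does not.
    fails() { return 1; }

    combined() {
        local value=$(fails)     # $? is 0 here: the status of "local" itself
        echo "combined: \$? = $?"
    }

    split() {
        local value
        value=$(fails)           # $? is 1 here: the status of "fails"
        echo "split: \$? = $?"
    }

    combined
    split
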
@@ -10,7 +10,8 @@ set +o xtrace
 source $TOP_DIR/lib/neutron_plugins/openvswitch
 
 function save_function {
-local ORIG_FUNC=$(declare -f $1)
+local ORIG_FUNC
+ORIG_FUNC=$(declare -f $1)
 local NEW_FUNC="$2${ORIG_FUNC#$1}"
 eval "$NEW_FUNC"
 }
@@ -49,8 +49,10 @@ function neutron_ovs_base_cleanup {
 
 function _neutron_ovs_base_install_ubuntu_dkms {
 # install Dynamic Kernel Module Support packages if needed
-local kernel_version=$(uname -r)
-local kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
+local kernel_version
+kernel_version=$(uname -r)
+local kernel_major_minor
+kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
 # From kernel 3.13 on, openvswitch-datapath-dkms is not needed
 if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then
 install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version"
lib/nova (15 changed lines)
@@ -202,14 +202,16 @@ function cleanup_nova {
 clean_iptables
 
 # Destroy old instances
-local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+local instances
+instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
 if [ ! "$instances" = "" ]; then
 echo $instances | xargs -n1 sudo virsh destroy || true
 echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
 fi
 
 # Logout and delete iscsi sessions
-local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+local tgts
+tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
 local target
 for target in $tgts; do
 sudo iscsiadm --mode node -T $target --logout || true
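
Separating the assignment also changes how failures behave once errexit is in play: a failing command substitution on its own line stops the run, while the combined form is silently ignored. A short sketch, assuming the enclosing script enables errexit as DevStack's main scripts do:

    set -o errexit

    broken() { return 1; }

    demo() {
        local silent=$(broken)   # exit status of "local" is 0, so nothing happens
        echo "still running after the combined form"

        local loud
        loud=$(broken)           # status 1 is now visible; errexit aborts here
        echo "never reached"
    }

    demo
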
@@ -245,8 +247,10 @@ function _cleanup_nova_apache_wsgi {
 function _config_nova_apache_wsgi {
 sudo mkdir -p $NOVA_WSGI_DIR
 
-local nova_apache_conf=$(apache_site_config_for nova-api)
-local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+local nova_apache_conf
+nova_apache_conf=$(apache_site_config_for nova-api)
+local nova_ec2_apache_conf
+nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
 local nova_ssl=""
 local nova_certfile=""
 local nova_keyfile=""
@@ -784,7 +788,8 @@ function start_nova_api {
 export PATH=$NOVA_BIN_DIR:$PATH
 
 # If the site is not enabled then we are in a grenade scenario
-local enabled_site_file=$(apache_site_config_for nova-api)
+local enabled_site_file
+enabled_site_file=$(apache_site_config_for nova-api)
 if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
 enable_apache_site nova-api
 enable_apache_site nova-ec2-api
lib/swift (42 changed lines)
@@ -205,9 +205,12 @@ function _config_swift_apache_wsgi {
 # copy apache vhost file and set name and port
 local node_number
 for node_number in ${SWIFT_REPLICAS_SEQ}; do
-local object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
-local container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
-local account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
+local object_port
+object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
+local container_port
+container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
+local account_port
+account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
 
 sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
 sudo sed -e "
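
The per-node port arithmetic in this hunk spaces each storage node's servers ten ports apart. A worked example; the base value below is a placeholder, not necessarily DevStack's default:

    OBJECT_PORT_BASE=6613
    for node_number in 1 2 3; do
        object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
        echo "node ${node_number}: object server port ${object_port}"
    done
    # prints 6613, 6623 and 6633 for nodes 1, 2 and 3
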
@@ -504,7 +507,8 @@ EOF
 
 if is_service_enabled keystone; then
 iniuncomment ${testfile} func_test auth_version
-local auth_vers=$(iniget ${testfile} func_test auth_version)
+local auth_vers
+auth_vers=$(iniget ${testfile} func_test auth_version)
 iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
 iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT}
 if [[ $auth_vers == "3" ]]; then
@@ -514,7 +518,8 @@ EOF
 fi
 fi
 
-local user_group=$(id -g ${STACK_USER})
+local user_group
+user_group=$(id -g ${STACK_USER})
 sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}
 
 local swift_log_dir=${SWIFT_DATA_DIR}/logs
@@ -540,7 +545,8 @@ function create_swift_disk {
 # First do a bit of setup by creating the directories and
 # changing the permissions so we can run it as our user.
 
-local user_group=$(id -g ${STACK_USER})
+local user_group
+user_group=$(id -g ${STACK_USER})
 sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
 
 # Create a loopback disk and format it to XFS.
@@ -607,7 +613,8 @@ function create_swift_accounts {
 
 KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-local another_role=$(get_or_create_role "anotherrole")
+local another_role
+another_role=$(get_or_create_role "anotherrole")
 
 # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
 # temp urls, which break when uploaded by a non-admin role
@@ -623,33 +630,40 @@ function create_swift_accounts {
 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
 fi
 
-local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
+local swift_tenant_test1
+swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
 die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
 SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
 "default" "test@example.com")
 die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
 get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
 
-local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+local swift_user_test3
+swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
 "default" "test3@example.com")
 die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
 get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
 
-local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
+local swift_tenant_test2
+swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
 die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+local swift_user_test2
+swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
 "default" "test2@example.com")
 die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
 get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
 
-local swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
+local swift_domain
+swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
 die_if_not_set $LINENO swift_domain "Failure creating swift_test domain"
 
-local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
+local swift_tenant_test4
+swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
 die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
 
-local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+local swift_user_test4
+swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
 $swift_domain "test4@example.com")
 die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
 get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
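
Each of the assignments above is followed by a die_if_not_set guard that checks the variable by name. A simplified stand-in showing only that calling convention (the real helper lives in DevStack's functions-common; the names below are illustrative):

    die_if_not_set_demo() {
        local lineno=$1
        local varname=$2
        local message=$3
        # ${!varname} expands the variable whose name is stored in varname
        if [ -z "${!varname}" ]; then
            echo "ERROR on line ${lineno}: ${message}" >&2
            exit 1
        fi
    }

    swift_tenant_demo=$(echo "tenant-uuid-1234")
    die_if_not_set_demo $LINENO swift_tenant_demo "Failure creating swift_tenant_demo"
    echo "created ${swift_tenant_demo}"
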
@@ -363,7 +363,8 @@ function configure_tempest {
 # Compute Features
 # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
 # NOTE(mtreinish): This must be done after auth settings are added to the tempest config
-local tmp_cfg_file=$(mktemp)
+local tmp_cfg_file
+tmp_cfg_file=$(mktemp)
 cd $TEMPEST_DIR
 tox -revenv -- verify-tempest-config -uro $tmp_cfg_file
 
lib/tls (3 changed lines)
@@ -346,7 +346,8 @@ function make_root_CA {
 # we need to change it.
 function fix_system_ca_bundle_path {
 if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
-local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
+local capath
+capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
 
 if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
 if is_fedora; then
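
The capath lookup above leans on bash's $'...' quoting: the shell expands escape sequences such as \n before the string reaches the interpreter, so -c receives a genuine multi-line program. A small sketch of the same quoting (python3 and the throwaway snippet are stand-ins, not the requests lookup itself):

    snippet=$'import sys\nprint(sys.version_info[0])'
    python3 -c "$snippet"
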
@@ -20,8 +20,10 @@ FAILED_FUNCS=""
 # pass a test, printing out MSG
 # usage: passed message
 function passed {
-local lineno=$(caller 0 | awk '{print $1}')
-local function=$(caller 0 | awk '{print $2}')
+local lineno
+lineno=$(caller 0 | awk '{print $1}')
+local function
+function=$(caller 0 | awk '{print $2}')
 local msg="$1"
 if [ -z "$msg" ]; then
 msg="OK"
@@ -33,8 +35,10 @@ function passed {
 # fail a test, printing out MSG
 # usage: failed message
 function failed {
-local lineno=$(caller 0 | awk '{print $1}')
-local function=$(caller 0 | awk '{print $2}')
+local lineno
+lineno=$(caller 0 | awk '{print $1}')
+local function
+function=$(caller 0 | awk '{print $2}')
 local msg="$1"
 FAILED_FUNCS+="$function:L$lineno\n"
 echo "ERROR: $function:L$lineno!"
@@ -45,8 +49,10 @@ function failed {
 # assert string comparision of val1 equal val2, printing out msg
 # usage: assert_equal val1 val2 msg
 function assert_equal {
-local lineno=`caller 0 | awk '{print $1}'`
-local function=`caller 0 | awk '{print $2}'`
+local lineno
+lineno=`caller 0 | awk '{print $1}'`
+local function
+function=`caller 0 | awk '{print $2}'`
 local msg=$3
 
 if [ -z "$msg" ]; then
@@ -66,8 +72,10 @@ function assert_equal {
 # assert variable is empty/blank, printing out msg
 # usage: assert_empty VAR msg
 function assert_empty {
-local lineno=`caller 0 | awk '{print $1}'`
-local function=`caller 0 | awk '{print $2}'`
+local lineno
+lineno=`caller 0 | awk '{print $1}'`
+local function
+function=`caller 0 | awk '{print $2}'`
 local msg=$2
 
 if [ -z "$msg" ]; then
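
The passed, failed, and assert helpers above all derive their reporting location from "caller 0", which prints the line number, function name, and source file of the call site one frame up. A minimal sketch of that idiom outside the test harness:

    report() {
        local lineno
        lineno=$(caller 0 | awk '{print $1}')
        local funcname
        funcname=$(caller 0 | awk '{print $2}')
        echo "called from ${funcname}:L${lineno}"
    }

    outer() {
        report
    }

    outer    # prints something like "called from outer:L12"
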
@@ -190,7 +190,8 @@ function add_entry {
 local user_passwd=$5
 
 # The admin user can see all user's secret AWS keys, it does not looks good
-local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
+local line
+line=$(openstack ec2 credentials list --user $user_id | grep " $project_id " || true)
 if [ -z "$line" ]; then
 openstack ec2 credentials create --user $user_id --project $project_id 1>&2
 line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
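
The replacement line above also switches to $(...) and appends "|| true": grep exits non-zero when it finds no match, and here an empty result is expected rather than an error. A minimal sketch of that pattern under errexit:

    set -o errexit

    line=$(printf 'alpha\nbeta\n' | grep "gamma" || true)
    if [ -z "$line" ]; then
        echo "no existing credentials; would create them now"
    fi
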
@@ -41,10 +41,12 @@ function get_mem_available {
 # snapshot of current usage; i.e. checking the latest entry in the
 # file will give the peak-memory usage
 function tracker {
-local low_point=$(get_mem_available)
+local low_point
+low_point=$(get_mem_available)
 while [ 1 ]; do
 
-local mem_available=$(get_mem_available)
+local mem_available
+mem_available=$(get_mem_available)
 
 if [[ $mem_available -lt $low_point ]]; then
 low_point=$mem_available
@@ -100,7 +100,8 @@ create_vif()
 {
 local v="$1"
 echo "Installing VM interface on [$BRIDGE]"
-local out_network_uuid=$(find_network "$BRIDGE")
+local out_network_uuid
+out_network_uuid=$(find_network "$BRIDGE")
 xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
 }
 
@@ -35,9 +35,12 @@ xe_min()
 destroy_vdi()
 {
 local vbd_uuid="$1"
-local type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
-local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
-local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
+local type
+type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
+local dev
+dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
+local vdi_uuid
+vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
 
 if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
 xe vdi-destroy uuid=$vdi_uuid
@@ -47,7 +50,8 @@ destroy_vdi()
 uninstall()
 {
 local vm_uuid="$1"
-local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
+local power_state
+power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
 
 if [ "$power_state" != "halted" ]; then
 xe vm-shutdown vm=$vm_uuid force=true
@@ -165,7 +165,8 @@ EOF
 function test_get_local_sr {
 setup_xe_response "uuid123"
 
-local RESULT=$(. mocks && get_local_sr)
+local RESULT
+RESULT=$(. mocks && get_local_sr)
 
 [ "$RESULT" == "uuid123" ]
 
@@ -173,7 +174,8 @@ function test_get_local_sr {
 }
 
 function test_get_local_sr_path {
-local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
+local RESULT
+RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
 
 [ "/var/run/sr-mount/uuid1" == "$RESULT" ]
 }