Mostly docs cleanups

Fix documentation build errors and RST formatting

Change-Id: Id93153400c5b069dd9d772381558c7085f64c207
Author: Dean Troyer 2015-03-28 08:20:50 -05:00
parent 9720239618
commit dc97cb71e8
35 changed files with 284 additions and 282 deletions


@@ -2,7 +2,7 @@
 # **exercise.sh**
-# Keep track of the current devstack directory.
+# Keep track of the current DevStack directory.
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 # Import common functions
@@ -14,11 +14,11 @@ source $TOP_DIR/stackrc
 # Run everything in the exercises/ directory that isn't explicitly disabled
 # comma separated list of script basenames to skip
-# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
+# to refrain from exercising euca.sh use ``SKIP_EXERCISES=euca``
 SKIP_EXERCISES=${SKIP_EXERCISES:-""}
 # comma separated list of script basenames to run
-# to run only euca.sh use RUN_EXERCISES=euca
+# to run only euca.sh use ``RUN_EXERCISES=euca``
 basenames=${RUN_EXERCISES:-""}
 EXERCISE_DIR=$TOP_DIR/exercises
@@ -27,7 +27,7 @@ if [[ -z "${basenames}" ]]; then
 # Locate the scripts we should run
 basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
 else
-# If RUN_EXERCISES was specified, ignore SKIP_EXERCISES.
+# If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``.
 SKIP_EXERCISES=
 fi
@@ -56,7 +56,7 @@ for script in $basenames; do
 fi
 done
-# output status of exercise run
+# Output status of exercise run
 echo "====================================================================="
 for script in $skips; do
 echo SKIP $script
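For reference, the two variables documented in this hunk are normally passed on the command line. An illustrative invocation (``euca`` comes from the comment above; ``swift`` is only an assumed exercise name):

    # Skip selected exercises (comma-separated basenames, no .sh suffix)
    SKIP_EXERCISES=euca ./exercise.sh

    # Run only the named exercise; SKIP_EXERCISES is then ignored
    RUN_EXERCISES=swift ./exercise.sh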


@@ -439,7 +439,7 @@ function check_path_perm_sanity {
 echo "*** DEST path element"
 echo "*** ${rebuilt_path}"
 echo "*** appears to have 0700 permissions."
-echo "*** This is very likely to cause fatal issues for devstack daemons."
+echo "*** This is very likely to cause fatal issues for DevStack daemons."
 if [[ -n "$SKIP_PATH_SANITY" ]]; then
 return
@@ -526,8 +526,8 @@ function setup_colorized_logging {
 }
 # These functions are provided for basic fall-back functionality for
-# projects that include parts of devstack (grenade). stack.sh will
-# override these with more specific versions for devstack (with fancy
+# projects that include parts of DevStack (Grenade). stack.sh will
+# override these with more specific versions for DevStack (with fancy
 # spinners, etc). We never override an existing version
 if ! function_exists echo_summary; then
 function echo_summary {


@@ -971,7 +971,7 @@ function get_packages {
 #
 # Only packages required for enabled and collected plugins will included.
 #
-# The same metadata used in the main devstack prerequisite files may be used
+# The same metadata used in the main DevStack prerequisite files may be used
 # in these prerequisite files, see get_packages() for more info.
 function get_plugin_packages {
 local xtrace=$(set +o | grep xtrace)
@@ -1471,7 +1471,7 @@ function fetch_plugins {
 return
 fi
-echo "Fetching devstack plugins"
+echo "Fetching DevStack plugins"
 for plugin in ${plugins//,/ }; do
 git_clone_by_name $plugin
 done
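For context, the plugins fetched by ``fetch_plugins`` above are normally declared in the ``localrc`` section of ``local.conf``. A minimal sketch, assuming the ``enable_plugin`` helper of this DevStack era and a purely hypothetical plugin repository:

    [[local|localrc]]
    # enable_plugin <name> <git-url> [branch]
    enable_plugin myplugin https://example.org/myplugin.git master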


@@ -4,7 +4,7 @@
 #
 # Note: this is expected to start running as jenkins
-# Step 1: give back sudoers permissions to devstack
+# Step 1: give back sudoers permissions to DevStack
 TEMPFILE=`mktemp`
 echo "stack ALL=(root) NOPASSWD:ALL" >$TEMPFILE
 chmod 0440 $TEMPFILE


@@ -4,7 +4,7 @@
 # Install and start **Ceilometer** service
 # To enable a minimal set of Ceilometer services, add the following to the
-# localrc section of local.conf:
+# ``localrc`` section of ``local.conf``:
 #
 # enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
 #
@@ -17,14 +17,11 @@
 # of Ceilometer (see within for additional settings):
 #
 # CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi.
-# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing
-# runs. Default 600.
-# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es')
-# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided
-# by tooz.
+# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600.
+# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es')
+# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
 # CEILOMETER_EVENTS: Enable event collection
 # Dependencies:
 #
 # - functions
@@ -94,7 +91,7 @@ function is_ceilometer_enabled {
 return 1
 }
-# create_ceilometer_accounts() - Set up common required ceilometer accounts
+# create_ceilometer_accounts() - Set up common required Ceilometer accounts
 #
 # Project User Roles
 # ------------------------------------------------------------------
@@ -117,14 +114,14 @@ function create_ceilometer_accounts {
 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
 fi
 if is_service_enabled swift; then
-# Ceilometer needs ResellerAdmin role to access swift account stats.
+# Ceilometer needs ResellerAdmin role to access Swift account stats.
 get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_TENANT_NAME
 fi
 fi
 }
-# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+# _cleanup_keystone_apache_wsgi() - Remove WSGI files, disable and remove Apache vhost file
 function _cleanup_ceilometer_apache_wsgi {
 sudo rm -f $CEILOMETER_WSGI_DIR/*
 sudo rm -f $(apache_site_config_for ceilometer)
@@ -149,7 +146,7 @@ function _config_ceilometer_apache_wsgi {
 local ceilometer_apache_conf=$(apache_site_config_for ceilometer)
 local apache_version=$(get_apache_version)
-# copy proxy vhost and wsgi file
+# Copy proxy vhost and wsgi file
 sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
 sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf
@@ -189,9 +186,9 @@ function configure_ceilometer {
 sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
 fi
-# the compute and central agents need these credentials in order to
-# call out to other services' public APIs
-# the alarm evaluator needs these options to call ceilometer APIs
+# The compute and central agents need these credentials in order to
+# call out to other services' public APIs.
+# The alarm evaluator needs these options to call ceilometer APIs
 iniset $CEILOMETER_CONF service_credentials os_username ceilometer
 iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD
 iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME
@@ -237,7 +234,7 @@ function configure_ceilometer {
 }
 function configure_mongodb {
-# server package is the same on all
+# Server package is the same on all
 local packages=mongodb-server
 if is_fedora; then
@@ -250,13 +247,13 @@ function configure_mongodb {
 install_package ${packages}
 if is_fedora; then
-# ensure smallfiles selected to minimize freespace requirements
+# Ensure smallfiles is selected to minimize freespace requirements
 sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 restart_service mongod
 fi
-# give mongodb time to start-up
+# Give mongodb time to start-up
 sleep 5
 }
@@ -347,7 +344,7 @@ function start_ceilometer {
 run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF"
 fi
-# only die on API if it was actually intended to be turned on
+# Only die on API if it was actually intended to be turned on
 if is_service_enabled ceilometer-api; then
 echo "Waiting for ceilometer-api to start..."
 if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then
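Tying the settings documented in this file together, an illustrative ``local.conf`` fragment for a minimal Ceilometer deployment might look like the following (values are examples, not recommendations):

    [[local|localrc]]
    enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api
    CEILOMETER_BACKEND=mongodb
    CEILOMETER_PIPELINE_INTERVAL=60
    CEILOMETER_EVENTS=True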


@@ -41,7 +41,7 @@ function start_dstat {
 # stop_dstat() stop dstat process
 function stop_dstat {
 # dstat runs as a console, not as a service, and isn't trackable
-# via the normal mechanisms for devstack. So lets just do a
+# via the normal mechanisms for DevStack. So lets just do a
 # killall and move on.
 killall dstat || /bin/true
 }


@@ -129,7 +129,7 @@ function init_horizon {
 fi
 enable_apache_site horizon
-# Remove old log files that could mess with how devstack detects whether Horizon
+# Remove old log files that could mess with how DevStack detects whether Horizon
 # has been successfully started (see start_horizon() and functions::screen_it())
 # and run_process
 sudo rm -f /var/log/$APACHE_NAME/horizon_*


@@ -53,7 +53,7 @@ IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0}
 # The file is composed of multiple lines, each line includes four field
 # separated by white space: IPMI address, MAC address, IPMI username
 # and IPMI password.
-# An example:
+#
 # 192.168.110.107 00:1e:67:57:50:4c root otc123
 IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info}
@@ -99,10 +99,10 @@ IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org
 IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz}
 # Which deploy driver to use - valid choices right now
-# are 'pxe_ssh', 'pxe_ipmitool', 'agent_ssh' and 'agent_ipmitool'.
+# are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``.
 IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh}
-#TODO(agordeev): replace 'ubuntu' with host distro name getting
+# TODO(agordeev): replace 'ubuntu' with host distro name getting
 IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
 # Support entry points installation of console scripts
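The deploy driver documented above is likewise chosen from the ``localrc`` section; an illustrative setting using one of the listed choices:

    [[local|localrc]]
    # One of: pxe_ssh, pxe_ipmitool, agent_ssh, agent_ipmitool
    IRONIC_DEPLOY_DRIVER=agent_ipmitool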


@@ -1,3 +1,5 @@
+#!/bin/bash
+#
 # lib/lvm
 # Configure the default LVM volume group used by Cinder and Nova
@@ -32,8 +34,8 @@ DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default
 BACKING_FILE_SUFFIX=-backing-file
-# Entry Points
-# ------------
+# Functions
+# ---------
 # _clean_lvm_volume_group removes all default LVM volumes
 #
@@ -52,7 +54,7 @@ function _clean_lvm_volume_group {
 function _clean_lvm_backing_file {
 local backing_file=$1
-# if the backing physical device is a loop device, it was probably setup by devstack
+# If the backing physical device is a loop device, it was probably setup by DevStack
 if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
 local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
 sudo losetup -d $vg_dev


@@ -57,6 +57,7 @@ NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
 # NOVA_API_VERSION valid options
 # - default - setup API end points as nova does out of the box
 # - v21default - make v21 the default on /v2
+#
 # NOTE(sdague): this is for transitional testing of the Nova v21 API.
 # Expect to remove in L or M.
 NOVA_API_VERSION=${NOVA_API_VERSION-default}
@@ -77,7 +78,7 @@ EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
 EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
 # Option to enable/disable config drive
-# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive
+# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"}
 # Nova supports pluggable schedulers. The default ``FilterScheduler``
@@ -89,11 +90,11 @@ QEMU_CONF=/etc/libvirt/qemu.conf
 # Set default defaults here as some hypervisor drivers override these
 PUBLIC_INTERFACE_DEFAULT=br100
 FLAT_NETWORK_BRIDGE_DEFAULT=br100
-# set the GUEST_INTERFACE_DEFAULT to some interface on the box so that
-# the default isn't completely crazy. This will match eth*, em*, or
-# the new p* interfaces, then basically picks the first
+# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
+# the default isn't completely crazy. This will match ``eth*``, ``em*``, or
+# the new ``p*`` interfaces, then basically picks the first
 # alphabetically. It's probably wrong, however it's less wrong than
-# always using 'eth0' which doesn't exist on new Linux distros at all.
+# always using ``eth0`` which doesn't exist on new Linux distros at all.
 GUEST_INTERFACE_DEFAULT=$(ip link \
 | grep 'state UP' \
 | awk '{print $2}' \
@@ -101,8 +102,8 @@ GUEST_INTERFACE_DEFAULT=$(ip link \
 | grep ^[ep] \
 | head -1)
-# $NOVA_VNC_ENABLED can be used to forcibly enable vnc configuration.
-# In multi-node setups allows compute hosts to not run n-novnc.
+# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
+# In multi-node setups allows compute hosts to not run ``n-novnc``.
 NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
 # Get hypervisor configuration
@@ -144,7 +145,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
 # running the VM - removing a SPOF and bandwidth bottleneck.
 MULTI_HOST=$(trueorfalse False MULTI_HOST)
-# ``NOVA_ALLOW_MOVE_TO_SAME_HOST` can be set to False in multi node devstack,
+# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
 # where there are at least two nova-computes.
 NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
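The multi-node knobs described in these hunks are all plain ``localrc`` settings; an illustrative combination for a two-node lab (values are examples only):

    [[local|localrc]]
    MULTI_HOST=True
    NOVA_ALLOW_MOVE_TO_SAME_HOST=False
    NOVA_VNC_ENABLED=True
    FORCE_CONFIG_DRIVE=False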


@@ -2,7 +2,7 @@
 #
 # lib/oslo
 #
-# Functions to install oslo libraries from git
+# Functions to install **Oslo** libraries from git
 #
 # We need this to handle the fact that projects would like to use
 # pre-released versions of oslo libraries.
@@ -46,8 +46,9 @@ GITDIR["tooz"]=$DEST/tooz
 # Support entry points installation of console scripts
 OSLO_BIN_DIR=$(get_python_exec_prefix)
-# Entry Points
-# ------------
+# Functions
+# ---------
 function _do_install_oslo_lib {
 local name=$1
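As background, the pre-released Oslo libraries mentioned above are usually pulled from git by listing them in the ``localrc`` section; a sketch assuming the ``LIBS_FROM_GIT`` mechanism (not shown in this diff), with library names taken from the ``GITDIR`` entries:

    [[local|localrc]]
    LIBS_FROM_GIT=oslo.messaging,tooz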


@@ -1,8 +1,7 @@
 #!/bin/bash
 #
 # lib/rpc_backend
-# Interface for interactig with different rpc backend
-# rpc backend settings
+# Interface for interactig with different RPC backends
 # Dependencies:
 #
@@ -27,10 +26,10 @@ RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9}
 # messaging server as a service, which it really isn't for multi host
 QPID_HOST=${QPID_HOST:-}
 # Functions
 # ---------
 # Make sure we only have one rpc backend enabled.
 # Also check the specified rpc backend is available on your platform.
 function check_rpc_backend {


@@ -2,15 +2,18 @@
 #
 # lib/stack
 #
-# These functions are code snippets pulled out of stack.sh for easier
+# These functions are code snippets pulled out of ``stack.sh`` for easier
 # re-use by Grenade. They can assume the same environment is available
-# as in the lower part of stack.sh, namely a valid stackrc has been sourced
-# as well as all of the lib/* files for the services have been sourced.
+# as in the lower part of ``stack.sh``, namely a valid stackrc has been sourced
+# as well as all of the ``lib/*`` files for the services have been sourced.
 #
 # For clarity, all functions declared here that came from ``stack.sh``
 # shall be named with the prefix ``stack_``.
+# Functions
+# ---------
 # Generic service install handles venv creation if confgured for service
 # stack_install_service service
 function stack_install_service {


@@ -38,7 +38,6 @@ fi
 # Set up default directories
 GITDIR["python-swiftclient"]=$DEST/python-swiftclient
 SWIFT_DIR=$DEST/swift
 SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
 SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
@@ -59,7 +58,7 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img
 SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift}
 if is_service_enabled s-proxy && is_service_enabled swift3; then
-# If we are using swift3, we can default the s3 port to swift instead
+# If we are using ``swift3``, we can default the S3 port to swift instead
 # of nova-objectstore
 S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
 fi
@@ -137,11 +136,12 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
 SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-}
-# Toggle for deploying Swift under HTTPD + mod_wsgi
-SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False}
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,swift
+# Toggle for deploying Swift under HTTPD + mod_wsgi
+SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False}
 # Functions
 # ---------
@@ -303,7 +303,6 @@ function generate_swift_config_services {
 sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
 }
 # configure_swift() - Set config files, create data dirs and loop image
 function configure_swift {
 local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
@@ -374,12 +373,9 @@ function configure_swift {
 iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY"
 fi
-# Devstack is commonly run in a small slow environment, so bump the
-# timeouts up.
-# node_timeout is how long between read operations a node takes to
-# respond to the proxy server
-# conn_timeout is all about how long it takes a connect() system call to
-# return
+# DevStack is commonly run in a small slow environment, so bump the timeouts up.
+# ``node_timeout`` is the node read operation response time to the proxy server
+# ``conn_timeout`` is how long it takes a connect() system call to return
 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
@@ -394,10 +390,10 @@ function configure_swift {
 SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
 fi
-# Restrict the length of auth tokens in the swift proxy-server logs.
+# Restrict the length of auth tokens in the Swift ``proxy-server`` logs.
 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
-# By default Swift will be installed with keystone and tempauth middleware
+# By default Swift will be installed with Keystone and tempauth middleware
 # and add the swift3 middleware if its configured for it. The token for
 # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the
 # token for keystoneauth would have the standard reseller_prefix `AUTH_`
@@ -413,17 +409,13 @@ function configure_swift {
 sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
 sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER}
 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
 # Configure Crossdomain
 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain"
-# This causes the authtoken middleware to use the same python logging
-# adapter provided by the swift proxy-server, so that request transaction
+# Configure authtoken middleware to use the same Python logging
+# adapter provided by the Swift ``proxy-server``, so that request transaction
 # IDs will included in all of its log messages.
 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift
@@ -436,7 +428,7 @@ function configure_swift {
 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth"
 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
-# Configure Tempauth. In the sample config file, Keystoneauth is commented
+# Configure Tempauth. In the sample config file Keystoneauth is commented
 # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth
 # otherwise, this code also sets the reseller_prefix for Keystoneauth.
 iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate
@@ -579,7 +571,8 @@ function create_swift_disk {
 sudo chown -R ${STACK_USER}: ${node}
 done
 }
-# create_swift_accounts() - Set up standard swift accounts and extra
+# create_swift_accounts() - Set up standard Swift accounts and extra
 # one for tests we do this by attaching all words in the account name
 # since we want to make it compatible with tempauth which use
 # underscores for separators.
@@ -593,9 +586,9 @@ function create_swift_disk {
 # swifttenanttest4 swiftusertest4 admin swift_test
 function create_swift_accounts {
-# Defines specific passwords used by tools/create_userrc.sh
-# As these variables are used by create_userrc.sh, they must be exported
-# The _password suffix is expected by create_userrc.sh
+# Defines specific passwords used by ``tools/create_userrc.sh``
+# As these variables are used by ``create_userrc.sh,`` they must be exported
+# The _password suffix is expected by ``create_userrc.sh``.
 export swiftusertest1_password=testing
 export swiftusertest2_password=testing2
 export swiftusertest3_password=testing3
@@ -725,8 +718,8 @@ function start_swift {
 # By default with only one replica we are launching the proxy,
 # container, account and object server in screen in foreground and
-# other services in background. If we have SWIFT_REPLICAS set to something
-# greater than one we first spawn all the swift services then kill the proxy
+# other services in background. If we have ``SWIFT_REPLICAS`` set to something
+# greater than one we first spawn all the Swift services then kill the proxy
 # service so we can run it in foreground in screen. ``swift-init ...
 # {stop|restart}`` exits with '1' if no servers are running, ignore it just
 # in case
@@ -762,7 +755,7 @@ function stop_swift {
 swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
 fi
-# screen normally killed by unstack.sh
+# screen normally killed by ``unstack.sh``
 if type -p swift-init >/dev/null; then
 swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 fi
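As a worked example of the toggles documented in this file, a ``localrc`` section that runs Swift with a single replica under Apache might look like this (illustrative only; other required Swift settings are omitted):

    [[local|localrc]]
    enable_service s-proxy s-object s-container s-account
    SWIFT_REPLICAS=1
    SWIFT_USE_MOD_WSGI=True
    SWIFT_ENABLE_TEMPURLS=True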


@@ -62,13 +62,11 @@ BUILD_INTERVAL=1
 # The default is set to 196 seconds.
 BUILD_TIMEOUT=${BUILD_TIMEOUT:-196}
 # This must be False on stable branches, as master tempest
 # deps do not match stable branch deps. Set this to True to
-# have tempest installed in devstack by default.
+# have tempest installed in DevStack by default.
 INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"}
 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
 BOTO_CONF=/etc/boto.cfg
@@ -83,6 +81,7 @@ TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PR
 IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
 IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
 # Functions
 # ---------
@@ -168,8 +167,8 @@ function configure_tempest {
 esac
 fi
-# Create tempest.conf from tempest.conf.sample
-# copy every time, because the image UUIDS are going to change
+# Create ``tempest.conf`` from ``tempest.conf.sample``
+# Copy every time because the image UUIDS are going to change
 sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR
 install -m 644 $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
@@ -179,8 +178,8 @@ function configure_tempest {
 # the cloud. We don't always want to so that we can ensure Tempest
 # would work on a public cloud.
 TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN)
-# See files/keystone_data.sh and stack.sh where admin, demo and alt_demo
-# user and tenant are set up...
+# See ``lib/keystone`` where these users and tenants are set up
 ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
 ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
 ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-Default}
@@ -191,13 +190,13 @@ function configure_tempest {
 ADMIN_TENANT_ID=$(openstack project list | awk "/ admin / { print \$2 }")
 if is_service_enabled nova; then
-# If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior
-# Tempest creates instane types for himself
+# If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior
+# Tempest creates its own instance types
 if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
 available_flavors=$(nova flavor-list)
 if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
 if is_arch "ppc64"; then
-# qemu needs at least 128MB of memory to boot on ppc64
+# Qemu needs at least 128MB of memory to boot on ppc64
 nova flavor-create m1.nano 42 128 0 1
 else
 nova flavor-create m1.nano 42 64 0 1
@@ -214,8 +213,7 @@ function configure_tempest {
 fi
 flavor_ref_alt=84
 else
-# Check Nova for existing flavors and, if set, look for the
-# ``DEFAULT_INSTANCE_TYPE`` and use that.
+# Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it.
 boto_instance_type=$DEFAULT_INSTANCE_TYPE
 flavor_lines=`nova flavor-list`
 IFS=$'\r\n'
@@ -240,8 +238,8 @@ function configure_tempest {
 flavor_ref=${flavors[0]}
 flavor_ref_alt=$flavor_ref
-# ensure flavor_ref and flavor_ref_alt have different values
-# some resize instance in tempest tests depends on this.
+# Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values.
+# Some resize instance in tempest tests depends on this.
 for f in ${flavors[@]:1}; do
 if [[ $f -ne $flavor_ref ]]; then
 flavor_ref_alt=$f
@@ -266,7 +264,7 @@ function configure_tempest {
 public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
 awk '{print $2}')
 if [ "$Q_USE_NAMESPACE" == "False" ]; then
-# If namespaces are disabled, devstack will create a single
+# If namespaces are disabled, DevStack will create a single
 # public router that tempest should be configured to use.
 public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \
 { print \$2 }")
@@ -274,6 +272,7 @@ function configure_tempest {
 fi
 iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
 # Oslo
 iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH
 mkdir -p $TEMPEST_STATE_PATH
@@ -309,15 +308,13 @@ function configure_tempest {
 fi
 # Image
-# for the gate we want to be able to override this variable so we aren't
-# doing an HTTP fetch over the wide internet for this test
+# We want to be able to override this variable in the gate to avoid
+# doing an external HTTP fetch for this test.
 if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
 iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
 fi
 # Auth
-#
-#
 TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
 iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
 iniset $TEMPEST_CONFIG auth tempest_roles "Member"
@@ -336,7 +333,7 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 # Compute Features
-# Run verify_tempest_config -ur to retrieve enabled extensions on API endpoints
+# Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
 # NOTE(mtreinish): This must be done after auth settings are added to the tempest config
 local tmp_cfg_file=$(mktemp)
 cd $TEMPEST_DIR
@@ -417,11 +414,11 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
 # Telemetry
-# Ceilometer API optimization happened in juno that allows to run more tests in tempest.
+# Ceilometer API optimization happened in Juno that allows to run more tests in tempest.
 # Once Tempest retires support for icehouse this flag can be removed.
 iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False"
-# Object storage
+# Object Store
 local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
 if [[ ! -z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then
 # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
@@ -445,7 +442,7 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG volume-feature-enabled backup False
 fi
-# Using CINDER_ENABLED_BACKENDS
+# Using ``CINDER_ENABLED_BACKENDS``
 if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then
 iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True"
 local i=1
@@ -470,7 +467,7 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/"
 iniset $TEMPEST_CONFIG dashboard login_url "http://$SERVICE_HOST/auth/login/"
-# cli
+# CLI
 iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR
 # Baremetal
@@ -495,7 +492,7 @@ function configure_tempest {
 iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
 fi
-# service_available
+# ``service_available``
 for service in ${TEMPEST_SERVICES//,/ }; do
 if is_service_enabled $service ; then
 iniset $TEMPEST_CONFIG service_available $service "True"
@@ -505,7 +502,7 @@ function configure_tempest {
 done
 if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-# Use the BOTO_CONFIG environment variable to point to this file
+# Use the ``BOTO_CONFIG`` environment variable to point to this file
 iniset $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
 sudo chown $STACK_USER $BOTO_CONF
 fi
@@ -520,7 +517,6 @@ function configure_tempest {
 # ------------------------------------------------------------------
 # alt_demo alt_demo Member
-# Migrated from keystone_data.sh
 function create_tempest_accounts {
 if is_service_enabled tempest; then
 # Tempest has some tests that validate various authorization checks
@@ -531,13 +527,13 @@ function create_tempest_accounts {
 fi
 }
-# install_tempest_lib() - Collect source, prepare, and install tempest-lib
+# install_tempest_lib() - Collect source, prepare, and install ``tempest-lib``
 function install_tempest_lib {
 if use_library_from_git "tempest-lib"; then
 git_clone_by_name "tempest-lib"
 setup_dev_lib "tempest-lib"
-# NOTE(mtreinish) For testing tempest-lib from git with tempest we need
-# put the git version of tempest-lib in the tempest job's tox venv
+# NOTE(mtreinish) For testing ``tempest-lib`` from git with Tempest we need to
+# put the git version of ``tempest-lib`` in the Tempest job's tox venv
 export PIP_VIRTUAL_ENV=${PROJECT_VENV["tempest"]}
 setup_dev_lib "tempest-lib"
 unset PIP_VIRTUAL_ENV
@@ -555,7 +551,7 @@ function install_tempest {
 popd
 }
-# init_tempest() - Initialize ec2 images
+# init_tempest() - Initialize EC2 images
 function init_tempest {
 local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}
 # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec
@@ -564,7 +560,7 @@ function init_tempest {
 local ramdisk="$image_dir/${base_image_name}-initrd"
 local disk_image="$image_dir/${base_image_name}-blank.img"
 if is_service_enabled nova; then
-# if the cirros uec downloaded and the system is uec capable
+# If the CirrOS uec downloaded and the system is UEC capable
 if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \
 -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
 echo "Prepare aki/ari/ami Images"

lib/tls

@@ -32,6 +32,7 @@
 # - is_ssl_enabled_service
 # - enable_mod_ssl
 # Defaults
 # --------
@@ -92,7 +93,6 @@ function create_CA_base {
 cp /dev/null $ca_dir/index.txt
 }
 # Create a new CA configuration file
 # create_CA_config ca-dir common-name
 function create_CA_config {
@@ -248,7 +248,6 @@ function init_cert {
 fi
 }
 # make_cert creates and signs a new certificate with the given commonName and CA
 # make_cert ca-dir cert-name "common-name" ["alt-name" ...]
 function make_cert {
@@ -287,7 +286,6 @@ function make_cert {
 fi
 }
 # Make an intermediate CA to sign everything else
 # make_int_CA ca-dir signing-ca-dir
 function make_int_CA {
@@ -362,17 +360,16 @@ function is_ssl_enabled_service {
 return 1
 }
 # Ensure that the certificates for a service are in place. This function does
 # not check that a service is SSL enabled, this should already have been
 # completed.
 #
 # The function expects to find a certificate, key and CA certificate in the
-# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
-# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
-# KEYSTONE_SSL_CA.
+# variables ``{service}_SSL_CERT``, ``{service}_SSL_KEY`` and ``{service}_SSL_CA``. For
+# example for keystone this would be ``KEYSTONE_SSL_CERT``, ``KEYSTONE_SSL_KEY`` and
+# ``KEYSTONE_SSL_CA``.
 #
-# If it does not find these certificates then the devstack-issued server
+# If it does not find these certificates then the DevStack-issued server
 # certificate, key and CA certificate will be associated with the service.
 #
 # If only some of the variables are provided then the function will quit.
@@ -437,14 +434,12 @@ function start_tls_proxy {
 # Cleanup Functions
 # =================
 # Stops all stud processes. This should be done only after all services
 # using tls configuration are down.
 function stop_tls_proxy {
 killall stud
 }
 # Remove CA along with configuration, as well as the local server certificate
 function cleanup_CA {
 rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT"
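The per-service certificate variables described above follow the ``{service}_SSL_*`` pattern; for Keystone an illustrative set of assignments would be (paths are assumptions, not defaults):

    KEYSTONE_SSL_CERT=/etc/ssl/certs/keystone.pem
    KEYSTONE_SSL_KEY=/etc/ssl/private/keystone.key
    KEYSTONE_SSL_CA=/etc/ssl/certs/devstack-ca.pem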


@@ -21,6 +21,7 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 # Defaults
 # --------
 if is_service_enabled neutron; then
@@ -80,7 +81,7 @@ function setup_trove_logging {
 fi
 }
-# create_trove_accounts() - Set up common required trove accounts
+# create_trove_accounts() - Set up common required Trove accounts
 # Tenant User Roles
 # ------------------------------------------------------------------
@@ -115,7 +116,6 @@ function cleanup_trove {
 rm -fr $TROVE_CONF_DIR/*
 }
 # configure_trove() - Set config files, create data dirs, etc
 function configure_trove {
 setup_develop $TROVE_DIR


@@ -11,9 +11,8 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-#
-# this runs a series of unit tests for devstack to ensure it's functioning
+# This runs a series of unit tests for DevStack to ensure it's functioning
 PASSES=""
 FAILURES=""


@@ -1,7 +1,6 @@
 # Sample ``local.conf`` for user-configurable variables in ``stack.sh``
-# NOTE: Copy this file to the root ``devstack`` directory for it to
-# work properly.
+# NOTE: Copy this file to the root DevStack directory for it to work properly.
 # ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``.
 # This gives it the ability to override any variables set in ``stackrc``.
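Since the sample is meant to be copied to the DevStack root, a minimal ``local.conf`` usually starts with the ``localrc`` meta-section; an illustrative skeleton (passwords are placeholders):

    [[local|localrc]]
    ADMIN_PASSWORD=secret
    DATABASE_PASSWORD=$ADMIN_PASSWORD
    RABBIT_PASSWORD=$ADMIN_PASSWORD
    SERVICE_PASSWORD=$ADMIN_PASSWORD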


@@ -3,15 +3,14 @@
 # Sample ``local.sh`` for user-configurable tasks to run automatically
 # at the successful conclusion of ``stack.sh``.
-# NOTE: Copy this file to the root ``devstack`` directory for it to
-# work properly.
+# NOTE: Copy this file to the root DevStack directory for it to work properly.
 # This is a collection of some of the things we have found to be useful to run
 # after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces.
 # These should be considered as samples and are unsupported DevStack code.
-# Keep track of the devstack directory
+# Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 # Import common functions
@@ -50,7 +49,7 @@ if is_service_enabled nova; then
 source $TOP_DIR/openrc admin admin
 # Name of new flavor
-# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
+# set in ``local.conf`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
 MI_NAME=m1.micro
 # Create micro flavor if not present

stack.sh

@ -16,18 +16,11 @@
# (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL** # (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL**
# (7 or newer) machine. (It may work on other platforms but support for those # (7 or newer) machine. (It may work on other platforms but support for those
# platforms is left to those who added them to DevStack.) It should work in # platforms is left to those who added them to DevStack.) It should work in
# a VM or physical server. Additionally, we maintain a list of ``apt`` and # a VM or physical server. Additionally, we maintain a list of ``deb`` and
# ``rpm`` dependencies and other configuration files in this repo. # ``rpm`` dependencies and other configuration files in this repo.
# Learn more and get the most recent version at http://devstack.org # Learn more and get the most recent version at http://devstack.org
# check if someone has invoked with "sh"
if [[ "${POSIXLY_CORRECT}" == "y" ]]; then
echo "You appear to be running bash in POSIX compatibility mode."
echo "devstack uses bash features. \"./stack.sh\" should do the right thing"
exit 1
fi
# Make sure custom grep options don't get in the way # Make sure custom grep options don't get in the way
unset GREP_OPTIONS unset GREP_OPTIONS
@ -44,7 +37,7 @@ umask 022
# Not all distros have sbin in PATH for regular users. # Not all distros have sbin in PATH for regular users.
PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $(dirname "$0") && pwd)
# Check for uninitialized variables, a big cause of bugs # Check for uninitialized variables, a big cause of bugs
@ -53,6 +46,10 @@ if [[ -n "$NOUNSET" ]]; then
set -o nounset set -o nounset
fi fi
# Configuration
# =============
# Sanity Checks # Sanity Checks
# ------------- # -------------
@ -61,7 +58,7 @@ if [[ -r $TOP_DIR/.stackenv ]]; then
rm $TOP_DIR/.stackenv rm $TOP_DIR/.stackenv
fi fi
# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config # ``stack.sh`` keeps the list of ``deb`` and ``rpm`` dependencies, config
# templates and other useful files in the ``files`` subdirectory # templates and other useful files in the ``files`` subdirectory
FILES=$TOP_DIR/files FILES=$TOP_DIR/files
if [ ! -d $FILES ]; then if [ ! -d $FILES ]; then
@ -69,12 +66,23 @@ if [ ! -d $FILES ]; then
fi fi
# ``stack.sh`` keeps function libraries here # ``stack.sh`` keeps function libraries here
# Make sure ``$TOP_DIR/inc`` directory is present
if [ ! -d $TOP_DIR/inc ]; then
die $LINENO "missing devstack/inc"
fi
# ``stack.sh`` keeps project libraries here
# Make sure ``$TOP_DIR/lib`` directory is present # Make sure ``$TOP_DIR/lib`` directory is present
if [ ! -d $TOP_DIR/lib ]; then if [ ! -d $TOP_DIR/lib ]; then
die $LINENO "missing devstack/lib" die $LINENO "missing devstack/lib"
fi fi
# Check if run as root # Check if run in POSIX shell
if [[ "${POSIXLY_CORRECT}" == "y" ]]; then
echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer."
exit 1
fi
# OpenStack is designed to be run as a non-root user; Horizon will fail to run # OpenStack is designed to be run as a non-root user; Horizon will fail to run
# as **root** since Apache will not serve content from **root** user). # as **root** since Apache will not serve content from **root** user).
# ``stack.sh`` must not be run as **root**. It aborts and suggests one course of # ``stack.sh`` must not be run as **root**. It aborts and suggests one course of
@ -89,8 +97,6 @@ if [[ $EUID -eq 0 ]]; then
exit 1 exit 1
fi fi
# Print the kernel version
uname -a
# Prepare the environment # Prepare the environment
# ----------------------- # -----------------------
@ -112,6 +118,7 @@ source $TOP_DIR/lib/stack
# and ``DISTRO`` # and ``DISTRO``
GetDistro GetDistro
# Global Settings # Global Settings
# --------------- # ---------------
@ -134,7 +141,6 @@ if [[ -r $TOP_DIR/local.conf ]]; then
done done
fi fi
# ``stack.sh`` is customizable by setting environment variables. Override a # ``stack.sh`` is customizable by setting environment variables. Override a
# default setting via export:: # default setting via export::
# #
@ -145,18 +151,20 @@ fi
# #
# DATABASE_PASSWORD=simple ./stack.sh # DATABASE_PASSWORD=simple ./stack.sh
# #
# Persistent variables can be placed in a ``localrc`` file:: # Persistent variables can be placed in a ``local.conf`` file::
# #
# [[local|localrc]]
# DATABASE_PASSWORD=anothersecret # DATABASE_PASSWORD=anothersecret
# DATABASE_USER=hellaroot # DATABASE_USER=hellaroot
# #
# We try to have sensible defaults, so you should be able to run ``./stack.sh`` # We try to have sensible defaults, so you should be able to run ``./stack.sh``
# in most cases. ``localrc`` is not distributed with DevStack and will never # in most cases. ``local.conf`` is not distributed with DevStack and will never
# be overwritten by a DevStack update. # be overwritten by a DevStack update.
# #
# DevStack distributes ``stackrc`` which contains locations for the OpenStack # DevStack distributes ``stackrc`` which contains locations for the OpenStack
# repositories, branches to configure, and other configuration defaults. # repositories, branches to configure, and other configuration defaults.
# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. # ``stackrc`` sources the ``localrc`` section of ``local.conf`` to allow you to
# safely override those settings.
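As an illustrative sketch (not part of this change), a minimal ``local.conf`` could combine a ``localrc`` section with a ``post-config`` meta-section; the target file and option values below are assumptions for the example:

    [[local|localrc]]
    ADMIN_PASSWORD=nomoresecret
    DATABASE_PASSWORD=$ADMIN_PASSWORD

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    verbose = True

The ``[[local|localrc]]`` section is the part ``stackrc`` pulls in, while ``[[post-config|...]]`` sections are merged by ``merge_config_group`` during the post-config phase shown later in this file.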
if [[ ! -r $TOP_DIR/stackrc ]]; then if [[ ! -r $TOP_DIR/stackrc ]]; then
die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
@ -188,34 +196,27 @@ fi
# Make sure the proxy config is visible to sub-processes # Make sure the proxy config is visible to sub-processes
export_proxy_variables export_proxy_variables
# Remove services which were negated in ENABLED_SERVICES # Remove services which were negated in ``ENABLED_SERVICES``
# using the "-" prefix (e.g., "-rabbit") instead of # using the "-" prefix (e.g., "-rabbit") instead of
# calling disable_service(). # calling disable_service().
disable_negated_services disable_negated_services
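A small sketch of the negation syntax (assuming it is set from the ``localrc`` section of ``local.conf``):

    # equivalent to calling "disable_service rabbit"
    ENABLED_SERVICES+=,-rabbit

``disable_negated_services`` then strips the ``-rabbit`` entry and drops ``rabbit`` from the resulting service list.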
# Look for obsolete stuff
# if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then
# echo "FATAL: 'swift' is not supported as a service name"
# echo "FATAL: Use the actual swift service names to enable them as required:"
# echo "FATAL: s-proxy s-object s-container s-account"
# exit 1
# fi
# Configure sudo # Configure sudo
# -------------- # --------------
# We're not **root**, make sure ``sudo`` is available # We're not running as **root**, so make sure ``sudo`` is available
is_package_installed sudo || install_package sudo is_package_installed sudo || install_package sudo
# UEC images' ``/etc/sudoers`` does not have a ``#includedir``; add one # UEC images' ``/etc/sudoers`` does not have a ``#includedir``; add one
sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers
# Set up devstack sudoers # Set up DevStack sudoers
TEMPFILE=`mktemp` TEMPFILE=`mktemp`
echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE
# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # Some binaries might be under ``/sbin`` or ``/usr/sbin``, so make sure sudo will
# see them by forcing PATH # see them by forcing ``PATH``
echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE
echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE
chmod 0440 $TEMPFILE chmod 0440 $TEMPFILE
@ -226,7 +227,7 @@ sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
# Configure Distro Repositories # Configure Distro Repositories
# ----------------------------- # -----------------------------
# For debian/ubuntu make apt attempt to retry network ops on it's own # For Debian/Ubuntu make apt attempt to retry network ops on its own
if is_ubuntu; then if is_ubuntu; then
echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null
fi fi
@ -237,7 +238,7 @@ fi
if is_fedora && [[ $DISTRO == "rhel7" ]]; then if is_fedora && [[ $DISTRO == "rhel7" ]]; then
# RHEL requires EPEL for many OpenStack dependencies # RHEL requires EPEL for many OpenStack dependencies
# note we always remove and install latest -- some environments # NOTE: We always remove and install latest -- some environments
# use snapshot images, and if EPEL version updates they break # use snapshot images, and if EPEL version updates they break
# unless we update them to latest version. # unless we update them to latest version.
if sudo yum repolist enabled epel | grep -q 'epel'; then if sudo yum repolist enabled epel | grep -q 'epel'; then
@ -248,7 +249,7 @@ if is_fedora && [[ $DISTRO == "rhel7" ]]; then
# repo, then removes itself (as epel-release installed the # repo, then removes itself (as epel-release installed the
# "real" repo). # "real" repo).
# #
# you would think that rather than this, you could use # You would think that rather than this, you could use
# $releasever directly in .repo file we create below. However # $releasever directly in .repo file we create below. However
# RHEL gives a $releasever of "6Server" which breaks the path; # RHEL gives a $releasever of "6Server" which breaks the path;
# see https://bugzilla.redhat.com/show_bug.cgi?id=1150759 # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759
@ -265,7 +266,7 @@ EOF
sudo yum-config-manager --enable epel-bootstrap sudo yum-config-manager --enable epel-bootstrap
yum_install epel-release || \ yum_install epel-release || \
die $LINENO "Error installing EPEL repo, cannot continue" die $LINENO "Error installing EPEL repo, cannot continue"
# epel rpm has installed it's version # EPEL rpm has installed its version
sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
# ... and also optional to be enabled # ... and also optional to be enabled
@ -300,7 +301,7 @@ sudo mkdir -p $DEST
safe_chown -R $STACK_USER $DEST safe_chown -R $STACK_USER $DEST
safe_chmod 0755 $DEST safe_chmod 0755 $DEST
# a basic test for $DEST path permissions (fatal on error unless skipped) # Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
check_path_perm_sanity ${DEST} check_path_perm_sanity ${DEST}
# Destination path for service data # Destination path for service data
@ -488,6 +489,9 @@ set -o errexit
# an error. It is also useful for following along as the install occurs. # an error. It is also useful for following along as the install occurs.
set -o xtrace set -o xtrace
# Print the kernel version
uname -a
# Reset the bundle of CA certificates # Reset the bundle of CA certificates
SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
rm -f $SSL_BUNDLE_FILE rm -f $SSL_BUNDLE_FILE
@ -500,7 +504,7 @@ source $TOP_DIR/lib/rpc_backend
# and the specified rpc backend is available on your platform. # and the specified rpc backend is available on your platform.
check_rpc_backend check_rpc_backend
# Service to enable with SSL if USE_SSL is True # Service to enable with SSL if ``USE_SSL`` is True
SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron"
if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then
@ -514,7 +518,7 @@ fi
# defaults before other services are run # defaults before other services are run
run_phase override_defaults run_phase override_defaults
# Import apache functions # Import Apache functions
source $TOP_DIR/lib/apache source $TOP_DIR/lib/apache
# Import TLS functions # Import TLS functions
@ -598,8 +602,9 @@ function read_password {
# Database Configuration # Database Configuration
# ----------------------
# To select between database backends, add the following to ``localrc``: # To select between database backends, add the following to ``local.conf``:
# #
# disable_service mysql # disable_service mysql
# enable_service postgresql # enable_service postgresql
@ -611,9 +616,10 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" ||
# Queue Configuration # Queue Configuration
# -------------------
# Rabbit connection info # Rabbit connection info
# In multi node devstack, second node needs RABBIT_USERID, but rabbit # In multi-node DevStack, the second node needs ``RABBIT_USERID``, but rabbit
# isn't enabled. # isn't enabled.
RABBIT_USERID=${RABBIT_USERID:-stackrabbit} RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
if is_service_enabled rabbit; then if is_service_enabled rabbit; then
@ -623,6 +629,7 @@ fi
# Keystone # Keystone
# --------
if is_service_enabled keystone; then if is_service_enabled keystone; then
# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is
@ -634,14 +641,14 @@ if is_service_enabled keystone; then
read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)."
# Keystone can now optionally install OpenLDAP by enabling the ``ldap`` # Keystone can now optionally install OpenLDAP by enabling the ``ldap``
# service in ``localrc`` (e.g. ``enable_service ldap``). # service in ``local.conf`` (e.g. ``enable_service ldap``).
# To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP``
# to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``local.conf``. To enable the
# Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``)
# set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g.
# ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``. # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``local.conf``.
# only request ldap password if the service is enabled # Only request LDAP password if the service is enabled
if is_service_enabled ldap; then if is_service_enabled ldap; then
read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP"
fi fi
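For illustration, the LDAP-related settings named above could be collected in the ``localrc`` section of ``local.conf`` (values are placeholders):

    enable_service ldap
    KEYSTONE_IDENTITY_BACKEND=ldap
    KEYSTONE_CLEAR_LDAP=yes
    LDAP_PASSWORD=<a-password>

Pre-setting ``LDAP_PASSWORD`` this way should avoid the interactive ``read_password`` prompt.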
@ -649,6 +656,7 @@ fi
# Swift # Swift
# -----
if is_service_enabled s-proxy; then if is_service_enabled s-proxy; then
# We only ask for the Swift hash if the swift service is enabled. # We only ask for the Swift hash if the swift service is enabled.
@ -672,14 +680,14 @@ fi
echo_summary "Installing package prerequisites" echo_summary "Installing package prerequisites"
source $TOP_DIR/tools/install_prereqs.sh source $TOP_DIR/tools/install_prereqs.sh
# Configure an appropriate python environment # Configure an appropriate Python environment
if [[ "$OFFLINE" != "True" ]]; then if [[ "$OFFLINE" != "True" ]]; then
PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
fi fi
TRACK_DEPENDS=${TRACK_DEPENDS:-False} TRACK_DEPENDS=${TRACK_DEPENDS:-False}
# Install python packages into a virtualenv so that we can track them # Install Python packages into a virtualenv so that we can track them
if [[ $TRACK_DEPENDS = True ]]; then if [[ $TRACK_DEPENDS = True ]]; then
echo_summary "Installing Python packages into a virtualenv $DEST/.venv" echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
pip_install -U virtualenv pip_install -U virtualenv
@ -728,10 +736,10 @@ echo_summary "Installing OpenStack project source"
# Install required infra support libraries # Install required infra support libraries
install_infra install_infra
# Install oslo libraries that have graduated # Install Oslo libraries
install_oslo install_oslo
# Install clients libraries # Install client libraries
install_keystoneclient install_keystoneclient
install_glanceclient install_glanceclient
install_cinderclient install_cinderclient
@ -749,7 +757,6 @@ fi
# Install middleware # Install middleware
install_keystonemiddleware install_keystonemiddleware
if is_service_enabled keystone; then if is_service_enabled keystone; then
if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
stack_install_service keystone stack_install_service keystone
@ -766,7 +773,7 @@ if is_service_enabled s-proxy; then
# swift3 middleware to provide S3 emulation to Swift # swift3 middleware to provide S3 emulation to Swift
if is_service_enabled swift3; then if is_service_enabled swift3; then
# replace the nova-objectstore port by the swift port # Replace the nova-objectstore port with the swift port
S3_SERVICE_PORT=8080 S3_SERVICE_PORT=8080
git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
setup_develop $SWIFT3_DIR setup_develop $SWIFT3_DIR
@ -774,23 +781,25 @@ if is_service_enabled s-proxy; then
fi fi
if is_service_enabled g-api n-api; then if is_service_enabled g-api n-api; then
# image catalog service # Image catalog service
stack_install_service glance stack_install_service glance
configure_glance configure_glance
fi fi
if is_service_enabled cinder; then if is_service_enabled cinder; then
# Block volume service
stack_install_service cinder stack_install_service cinder
configure_cinder configure_cinder
fi fi
if is_service_enabled neutron; then if is_service_enabled neutron; then
# Network service
stack_install_service neutron stack_install_service neutron
install_neutron_third_party install_neutron_third_party
fi fi
if is_service_enabled nova; then if is_service_enabled nova; then
# compute service # Compute service
stack_install_service nova stack_install_service nova
cleanup_nova cleanup_nova
configure_nova configure_nova
@ -822,18 +831,18 @@ if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
configure_CA configure_CA
init_CA init_CA
init_cert init_cert
# Add name to /etc/hosts # Add name to ``/etc/hosts``.
# don't be naive and add to existing line! # Don't be naive and add to an existing line!
fi fi
# Extras Install # Extras Install
# -------------- # --------------
# Phase: install # Phase: install
run_phase stack install run_phase stack install
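As a rough sketch of what ``run_phase`` dispatches to (a hypothetical ``extras.d`` hook, not part of this change), an extras script is called with a mode and a phase and reacts only to the combinations it cares about:

    # extras.d/80-myservice.sh (hypothetical)
    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing myservice"
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring myservice"
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        echo_summary "Starting myservice"
    fi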
# Install the OpenStack client, needed for most setup commands
# install the OpenStack client, needed for most setup commands
if use_library_from_git "python-openstackclient"; then if use_library_from_git "python-openstackclient"; then
git_clone_by_name "python-openstackclient" git_clone_by_name "python-openstackclient"
setup_dev_lib "python-openstackclient" setup_dev_lib "python-openstackclient"
@ -841,7 +850,6 @@ else
pip_install 'python-openstackclient>=1.0.2' pip_install 'python-openstackclient>=1.0.2'
fi fi
if [[ $TRACK_DEPENDS = True ]]; then if [[ $TRACK_DEPENDS = True ]]; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
@ -934,7 +942,7 @@ if [[ "$USE_SCREEN" == "True" ]]; then
screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
fi fi
# Clear screen rc file # Clear ``screenrc`` file
SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
if [[ -e $SCREENRC ]]; then if [[ -e $SCREENRC ]]; then
rm -f $SCREENRC rm -f $SCREENRC
@ -943,14 +951,16 @@ fi
# Initialize the directory for service status check # Initialize the directory for service status check
init_service_check init_service_check
# Start Services
# ==============
# Dstat # Dstat
# ------- # -----
# A better kind of sysstat, with the top process per time slice # A better kind of sysstat, with the top process per time slice
start_dstat start_dstat
# Start Services
# ==============
# Keystone # Keystone
# -------- # --------
@ -972,7 +982,7 @@ if is_service_enabled keystone; then
SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
fi fi
# Setup OpenStackclient token-flow auth # Setup OpenStackClient token-endpoint auth
export OS_TOKEN=$SERVICE_TOKEN export OS_TOKEN=$SERVICE_TOKEN
export OS_URL=$SERVICE_ENDPOINT export OS_URL=$SERVICE_ENDPOINT
@ -994,10 +1004,10 @@ if is_service_enabled keystone; then
create_heat_accounts create_heat_accounts
fi fi
# Begone token-flow auth # Begone token auth
unset OS_TOKEN OS_URL unset OS_TOKEN OS_URL
# Set up password-flow auth creds now that keystone is bootstrapped # Set up password auth credentials now that Keystone is bootstrapped
export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_AUTH_URL=$SERVICE_ENDPOINT
export OS_TENANT_NAME=admin export OS_TENANT_NAME=admin
export OS_USERNAME=admin export OS_USERNAME=admin
@ -1042,7 +1052,7 @@ if is_service_enabled neutron; then
echo_summary "Configuring Neutron" echo_summary "Configuring Neutron"
configure_neutron configure_neutron
# Run init_neutron only on the node hosting the neutron API server # Run init_neutron only on the node hosting the Neutron API server
if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then
init_neutron init_neutron
fi fi
@ -1118,6 +1128,7 @@ if is_service_enabled nova; then
init_nova_cells init_nova_cells
fi fi
# Extras Configuration # Extras Configuration
# ==================== # ====================
@ -1128,7 +1139,7 @@ run_phase stack post-config
# Local Configuration # Local Configuration
# =================== # ===================
# Apply configuration from local.conf if it exists for layer 2 services # Apply configuration from ``local.conf`` if it exists for layer 2 services
# Phase: post-config # Phase: post-config
merge_config_group $TOP_DIR/local.conf post-config merge_config_group $TOP_DIR/local.conf post-config
@ -1150,18 +1161,16 @@ if is_service_enabled glance; then
start_glance start_glance
fi fi
# Install Images # Install Images
# ============== # ==============
# Upload an image to glance. # Upload an image to Glance.
# #
# The default image is cirros, a small testing image which lets you login as **root** # The default image is CirrOS, a small testing image that lets you log in as **root**
# cirros has a ``cloud-init`` analog supporting login via keypair and sending # CirrOS has a ``cloud-init`` analog supporting login via keypair and sending
# scripts as userdata. # scripts as userdata.
# See https://help.ubuntu.com/community/CloudInit for more on cloud-init # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init``
#
# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
if is_service_enabled g-reg; then if is_service_enabled g-reg; then
TOKEN=$(keystone token-get | grep ' id ' | get_field 2) TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
@ -1179,7 +1188,7 @@ if is_service_enabled g-reg; then
done done
fi fi
# Create an access key and secret key for nova ec2 register image # Create an access key and secret key for Nova EC2 image registration
if is_service_enabled keystone && is_service_enabled swift3 && is_service_enabled nova; then if is_service_enabled keystone && is_service_enabled swift3 && is_service_enabled nova; then
eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret) eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret)
iniset $NOVA_CONF DEFAULT s3_access_key "$access" iniset $NOVA_CONF DEFAULT s3_access_key "$access"
@ -1242,7 +1251,7 @@ if is_service_enabled ceilometer; then
start_ceilometer start_ceilometer
fi fi
# Configure and launch heat engine, api and metadata # Configure and launch the Heat engine, API and metadata services
if is_service_enabled heat; then if is_service_enabled heat; then
# Initialize heat # Initialize heat
echo_summary "Configuring Heat" echo_summary "Configuring Heat"
@ -1287,30 +1296,34 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
done done
# Local Configuration # Wrapup configuration
# =================== # ====================
# Apply configuration from local.conf if it exists for layer 2 services # local.conf extra
# ----------------
# Apply configuration from ``local.conf`` if it exists for layer 2 services
# Phase: extra # Phase: extra
merge_config_group $TOP_DIR/local.conf extra merge_config_group $TOP_DIR/local.conf extra
# Run extras # Run extras
# ========== # ----------
# Phase: extra # Phase: extra
run_phase stack extra run_phase stack extra
# Local Configuration
# ===================
# Apply configuration from local.conf if it exists for layer 2 services # local.conf post-extra
# ---------------------
# Apply late configuration from ``local.conf`` if it exists for layer 2 services
# Phase: post-extra # Phase: post-extra
merge_config_group $TOP_DIR/local.conf post-extra merge_config_group $TOP_DIR/local.conf post-extra
# Run local script # Run local script
# ================ # ----------------
# Run ``local.sh`` if it exists to perform user-managed tasks # Run ``local.sh`` if it exists to perform user-managed tasks
if [[ -x $TOP_DIR/local.sh ]]; then if [[ -x $TOP_DIR/local.sh ]]; then
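A minimal hypothetical ``local.sh`` (the commands are illustrative only, not prescribed by DevStack):

    #!/usr/bin/env bash
    # Runs once at the end of stack.sh for user-managed tasks
    TOP_DIR=$(cd $(dirname "$0") && pwd)
    source $TOP_DIR/openrc admin admin
    # Example task: pre-create a keypair for later use
    nova keypair-add default > $TOP_DIR/id_rsa_default || true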
@ -1338,6 +1351,7 @@ if is_service_enabled cinder; then
fi fi
fi fi
# Fin # Fin
# === # ===
@ -1354,11 +1368,12 @@ fi
# Using the cloud # Using the cloud
# --------------- # ===============
echo "" echo ""
echo "" echo ""
echo "" echo ""
echo "This is your host ip: $HOST_IP"
# If you installed Horizon on this server you should be able # If you installed Horizon on this server you should be able
# to access the site using your browser. # to access the site using your browser.
@ -1368,15 +1383,11 @@ fi
# If Keystone is present you can point the ``nova`` CLI at this server # If Keystone is present you can point the ``nova`` CLI at this server
if is_service_enabled keystone; then if is_service_enabled keystone; then
echo "Keystone is serving at $KEYSTONE_SERVICE_URI/v2.0/" echo "Keystone is serving at $KEYSTONE_SERVICE_URI/"
echo "Examples on using novaclient command line is in exercise.sh"
echo "The default users are: admin and demo" echo "The default users are: admin and demo"
echo "The password: $ADMIN_PASSWORD" echo "The password: $ADMIN_PASSWORD"
fi fi
# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address
echo "This is your host ip: $HOST_IP"
# Warn that a deprecated feature was used # Warn that a deprecated feature was used
if [[ -n "$DEPRECATED_TEXT" ]]; then if [[ -n "$DEPRECATED_TEXT" ]]; then
echo_summary "WARNING: $DEPRECATED_TEXT" echo_summary "WARNING: $DEPRECATED_TEXT"

stackrc

@ -5,7 +5,7 @@
# Find the other rc files # Find the other rc files
RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
# Source required devstack functions and globals # Source required DevStack functions and globals
source $RC_DIR/functions source $RC_DIR/functions
# Destination path for installation # Destination path for installation
@ -41,20 +41,20 @@ REGION_NAME=${REGION_NAME:-RegionOne}
# enable_service q-dhcp # enable_service q-dhcp
# enable_service q-l3 # enable_service q-l3
# enable_service q-meta # enable_service q-meta
# # Optional, to enable tempest configuration as part of devstack # # Optional, to enable tempest configuration as part of DevStack
# enable_service tempest # enable_service tempest
# this allows us to pass ENABLED_SERVICES # This allows us to pass ``ENABLED_SERVICES``
if ! isset ENABLED_SERVICES ; then if ! isset ENABLED_SERVICES ; then
# core compute (glance / keystone / nova (+ nova-network)) # Compute (Glance / Keystone / Nova (+ nova-network))
ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth
# cinder # Cinder
ENABLED_SERVICES+=,c-sch,c-api,c-vol ENABLED_SERVICES+=,c-sch,c-api,c-vol
# heat # Heat
ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw
# dashboard # Dashboard
ENABLED_SERVICES+=,horizon ENABLED_SERVICES+=,horizon
# additional services # Additional services
ENABLED_SERVICES+=,rabbit,tempest,mysql ENABLED_SERVICES+=,rabbit,tempest,mysql
fi fi
@ -79,7 +79,7 @@ ENABLE_HTTPD_MOD_WSGI_SERVICES=True
# Tell Tempest which services are available. The default is set here as # Tell Tempest which services are available. The default is set here as
# Tempest falls late in the configuration sequence. This differs from # Tempest falls late in the configuration sequence. This differs from
# ``ENABLED_SERVICES`` in that the project names are used here rather than # ``ENABLED_SERVICES`` in that the project names are used here rather than
# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" # the service names, i.e.: ``TEMPEST_SERVICES="key,glance,nova"``
TEMPEST_SERVICES="" TEMPEST_SERVICES=""
# Set the default Nova APIs to enable # Set the default Nova APIs to enable
@ -145,6 +145,7 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0}
# but pass through any extras) # but pass through any extras)
REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict} REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict}
# Repositories # Repositories
# ------------ # ------------
@ -155,16 +156,17 @@ GIT_BASE=${GIT_BASE:-git://git.openstack.org}
# Which libraries should we install from git instead of using released # Which libraries should we install from git instead of using released
# versions on pypi? # versions on pypi?
# #
# By default devstack is now installing libraries from pypi instead of # By default DevStack is now installing libraries from pypi instead of
# from git repositories. This works great if you are # from git repositories. This works great if you are
# developing server components, but if you want to develop libraries # developing server components, but if you want to develop libraries
# and see them live in devstack you need to tell devstack it should # and see them live in DevStack you need to tell DevStack it should
# install them from git. # install them from git.
# #
# ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config # ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config
# #
# Will install those 2 libraries from git, the rest from pypi. # Will install those 2 libraries from git, the rest from pypi.
############## ##############
# #
# OpenStack Server Components # OpenStack Server Components
@ -231,6 +233,7 @@ SWIFT_BRANCH=${SWIFT_BRANCH:-master}
TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git}
TROVE_BRANCH=${TROVE_BRANCH:-master} TROVE_BRANCH=${TROVE_BRANCH:-master}
############## ##############
# #
# Testing Components # Testing Components
@ -306,6 +309,7 @@ GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master}
# this doesn't exist in a lib file, so set it here # this doesn't exist in a lib file, so set it here
GITDIR["python-openstackclient"]=$DEST/python-openstackclient GITDIR["python-openstackclient"]=$DEST/python-openstackclient
################### ###################
# #
# Oslo Libraries # Oslo Libraries
@ -396,6 +400,7 @@ GITBRANCH["tooz"]=${TOOZ_BRANCH:-master}
GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
GITBRANCH["pbr"]=${PBR_BRANCH:-master} GITBRANCH["pbr"]=${PBR_BRANCH:-master}
################## ##################
# #
# Libraries managed by OpenStack programs (non oslo) # Libraries managed by OpenStack programs (non oslo)
@ -453,6 +458,7 @@ OCC_BRANCH=${OCC_BRANCH:-master}
ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
ORC_BRANCH=${ORC_BRANCH:-master} ORC_BRANCH=${ORC_BRANCH:-master}
################# #################
# #
# 3rd Party Components (non pip installable) # 3rd Party Components (non pip installable)
@ -474,7 +480,6 @@ SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.gi
SPICE_BRANCH=${SPICE_BRANCH:-master} SPICE_BRANCH=${SPICE_BRANCH:-master}
# Nova hypervisor configuration. We default to libvirt with **kvm** but will # Nova hypervisor configuration. We default to libvirt with **kvm** but will
# drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can
# also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core # also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core
@ -641,7 +646,7 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL)
# Set fixed and floating range here so we can make sure not to use addresses # Set fixed and floating range here so we can make sure not to use addresses
# from either range when attempting to guess the IP to use for the host. # from either range when attempting to guess the IP to use for the host.
# Note that setting FIXED_RANGE may be necessary when running DevStack # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
# in an OpenStack cloud that uses either of these address ranges internally. # in an OpenStack cloud that uses either of these address ranges internally.
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
@ -669,9 +674,10 @@ LOG_COLOR=$(trueorfalse True LOG_COLOR)
# Set to 0 to disable shallow cloning # Set to 0 to disable shallow cloning
GIT_DEPTH=${GIT_DEPTH:-0} GIT_DEPTH=${GIT_DEPTH:-0}
# Use native SSL for servers in SSL_ENABLED_SERVICES # Use native SSL for servers in ``SSL_ENABLED_SERVICES``
USE_SSL=$(trueorfalse False USE_SSL) USE_SSL=$(trueorfalse False USE_SSL)
# The following entries need to be the last items in the file # The following entries need to be the last items in the file
# Compatibility bits required by other callers like Grenade # Compatibility bits required by other callers like Grenade
@ -693,7 +699,6 @@ USE_SSL=$(trueorfalse False USE_SSL)
# For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR # For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR
# symlinks to SCREEN_LOGDIR (compat) # symlinks to SCREEN_LOGDIR (compat)
# Set up new logging defaults # Set up new logging defaults
if [[ -z "${LOGDIR:-}" ]]; then if [[ -z "${LOGDIR:-}" ]]; then
default_logdir=$DEST/logs default_logdir=$DEST/logs
@ -718,8 +723,8 @@ if [[ -z "${LOGDIR:-}" ]]; then
unset default_logdir logfile unset default_logdir logfile
fi fi
# LOGDIR is always set at this point so it is not useful as a 'enable' for service logs # ``LOGDIR`` is always set at this point so it is not useful as an 'enable' for service logs
# SCREEN_LOGDIR may be set, it is useful to enable the compat symlinks # ``SCREEN_LOGDIR`` may be set; it is useful to enable the compat symlinks
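For reference, an assumed example of these logging knobs set from the ``localrc`` section of ``local.conf`` (paths are illustrative):

    LOGFILE=$DEST/logs/stack.sh.log
    LOGDIR=$DEST/logs
    # Optional: enables the back-compat per-service symlinks
    SCREEN_LOGDIR=$DEST/logs/screen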
# Local variables: # Local variables:
# mode: shell-script # mode: shell-script


@ -2,8 +2,8 @@
# **build_docs.sh** - Build the docs for DevStack # **build_docs.sh** - Build the docs for DevStack
# #
# - Install shocco if not found on PATH and INSTALL_SHOCCO is set # - Install shocco if not found on ``PATH`` and ``INSTALL_SHOCCO`` is set
# - Clone MASTER_REPO branch MASTER_BRANCH # - Clone ``MASTER_REPO`` branch ``MASTER_BRANCH``
# - Re-creates ``doc/build/html`` directory from existing repo + new generated script docs # - Re-creates ``doc/build/html`` directory from existing repo + new generated script docs
# Usage: # Usage:
@ -16,7 +16,7 @@
HTML_BUILD=doc/build/html HTML_BUILD=doc/build/html
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
# Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support # Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support


@ -4,11 +4,12 @@
# #
# build_venv.sh venv-path [package [...]] # build_venv.sh venv-path [package [...]]
# #
# Installs basic common prereq packages that require compilation
# to allow quick copying of resulting venv as a baseline
#
# Assumes: # Assumes:
# - a useful pip is installed # - a useful pip is installed
# - virtualenv will be installed by pip # - virtualenv will be installed by pip
# - installs basic common prereq packages that require compilation
# to allow quick copying of resulting venv as a baseline
VENV_DEST=${1:-.venv} VENV_DEST=${1:-.venv}
@ -16,14 +17,14 @@ shift
MORE_PACKAGES="$@" MORE_PACKAGES="$@"
# If TOP_DIR is set we're being sourced rather than running stand-alone # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
# or in a sub-shell # or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then if [[ -z "$TOP_DIR" ]]; then
set -o errexit set -o errexit
set -o nounset set -o nounset
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
FILES=$TOP_DIR/files FILES=$TOP_DIR/files


@ -4,21 +4,22 @@
# #
# build_wheels.sh [package [...]] # build_wheels.sh [package [...]]
# #
# System package prerequisites listed in files/*/devlibs will be installed # System package prerequisites listed in ``files/*/devlibs`` will be installed
# #
# Builds wheels for all virtual env requirements listed in # Builds wheels for all virtual env requirements listed in
# ``venv-requirements.txt`` plus any supplied on the command line. # ``venv-requirements.txt`` plus any supplied on the command line.
# #
# Assumes ``tools/install_pip.sh`` has been run and a suitable pip/setuptools is available. # Assumes:
# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available.
# If TOP_DIR is set we're being sourced rather than running stand-alone # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
# or in a sub-shell # or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then if [[ -z "$TOP_DIR" ]]; then
set -o errexit set -o errexit
set -o nounset set -o nounset
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
FILES=$TOP_DIR/files FILES=$TOP_DIR/files
@ -59,7 +60,7 @@ virtualenv $TMP_VENV_PATH
# Install modern pip and wheel # Install modern pip and wheel
PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
# VENV_PACKAGES is a list of packages we want to pre-install # ``VENV_PACKAGES`` is a list of packages we want to pre-install
VENV_PACKAGE_FILE=$FILES/venv-requirements.txt VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
if [[ -r $VENV_PACKAGE_FILE ]]; then if [[ -r $VENV_PACKAGE_FILE ]]; then
VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE) VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)


@ -17,7 +17,7 @@
set -o errexit set -o errexit
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
# Import common functions # Import common functions


@ -17,7 +17,7 @@
# - uninstall firewalld (f20 only) # - uninstall firewalld (f20 only)
# If TOP_DIR is set we're being sourced rather than running stand-alone # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
# or in a sub-shell # or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then if [[ -z "$TOP_DIR" ]]; then
set -o errexit set -o errexit
@ -27,7 +27,7 @@ if [[ -z "$TOP_DIR" ]]; then
TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $TOOLS_DIR/..; pwd) TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
# Change dir to top of devstack # Change dir to top of DevStack
cd $TOP_DIR cd $TOP_DIR
# Import common functions # Import common functions
@ -38,7 +38,7 @@ fi
# Keystone Port Reservation # Keystone Port Reservation
# ------------------------- # -------------------------
# Reserve and prevent $KEYSTONE_AUTH_PORT and $KEYSTONE_AUTH_PORT_INT from # Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from
# being used as ephemeral ports by the system. The default(s) are 35357 and # being used as ephemeral ports by the system. The default(s) are 35357 and
# 35358 which are in the Linux defined ephemeral port range (in disagreement # 35358 which are in the Linux defined ephemeral port range (in disagreement
# with the IANA ephemeral port range). This is a workaround for bug #1253482 # with the IANA ephemeral port range). This is a workaround for bug #1253482
@ -47,9 +47,9 @@ fi
# exception into the Kernel for the Keystone AUTH ports. # exception into the Kernel for the Keystone AUTH ports.
keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358}
# only do the reserved ports when available, on some system (like containers) # Only do the reserved ports when available; on some systems (like containers)
# where it's not exposed, we are fairly sure these ports would be # where it's not exposed, we are fairly sure these ports would be
# exclusive for our devstack. # exclusive to our DevStack.
if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
# Get any currently reserved ports, strip off leading whitespace # Get any currently reserved ports, strip off leading whitespace
reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //')
@ -59,7 +59,7 @@ if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then
sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports}
else else
# If there are currently reserved ports, keep those and also reserve the # If there are currently reserved ports, keep those and also reserve the
# keystone specific ports. Duplicate reservations are merged into a single # Keystone specific ports. Duplicate reservations are merged into a single
# reservation (or range) automatically by the kernel. # reservation (or range) automatically by the kernel.
sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports}
fi fi


@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
source $TOP_DIR/functions source $TOP_DIR/functions


@ -2,7 +2,7 @@
# **info.sh** # **info.sh**
# Produce a report on the state of devstack installs # Produce a report on the state of DevStack installs
# #
# Output fields are separated with '|' chars # Output fields are separated with '|' chars
# Output types are git,localrc,os,pip,pkg: # Output types are git,localrc,os,pip,pkg:
@ -14,7 +14,7 @@
# pkg|<package>|<version> # pkg|<package>|<version>
function usage { function usage {
echo "$0 - Report on the devstack configuration" echo "$0 - Report on the DevStack configuration"
echo "" echo ""
echo "Usage: $0" echo "Usage: $0"
exit 1 exit 1


@ -16,7 +16,7 @@ set -o xtrace
TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOOLS_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=`cd $TOOLS_DIR/..; pwd` TOP_DIR=`cd $TOOLS_DIR/..; pwd`
# Change dir to top of devstack # Change dir to top of DevStack
cd $TOP_DIR cd $TOP_DIR
# Import common functions # Import common functions
@ -42,11 +42,11 @@ function get_versions {
function install_get_pip { function install_get_pip {
# the openstack gate and others put a cached version of get-pip.py # The OpenStack gate and others put a cached version of get-pip.py
# for this to find, explicitly to avoid download issues. # for this to find, explicitly to avoid download issues.
# #
# However, if devstack *did* download the file, we want to check # However, if DevStack *did* download the file, we want to check
# for updates; people can leave thier stacks around for a long # for updates; people can leave their stacks around for a long
# time and in the meantime pip might get upgraded. # time and in the meantime pip might get upgraded.
# #
# Thus we use curl's "-z" feature to always check the modified # Thus we use curl's "-z" feature to always check the modified
@ -74,7 +74,7 @@ function configure_pypi_alternative_url {
touch $PIP_CONFIG_FILE touch $PIP_CONFIG_FILE
fi fi
if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then if ! ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then
#it means that the index-url does not exist # It means that the index-url does not exist
iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE" iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE"
fi fi


@ -18,10 +18,10 @@ while getopts ":f" opt; do
esac esac
done done
# If TOP_DIR is set we're being sourced rather than running stand-alone # If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
# or in a sub-shell # or in a sub-shell
if [[ -z "$TOP_DIR" ]]; then if [[ -z "$TOP_DIR" ]]; then
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
# Import common functions # Import common functions
@ -65,7 +65,7 @@ PACKAGES=$(get_packages general $ENABLED_SERVICES)
PACKAGES="$PACKAGES $(get_plugin_packages)" PACKAGES="$PACKAGES $(get_plugin_packages)"
if is_ubuntu && echo $PACKAGES | grep -q dkms ; then if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
# ensure headers for the running kernel are installed for any DKMS builds # Ensure headers for the running kernel are installed for any DKMS builds
PACKAGES="$PACKAGES linux-headers-$(uname -r)" PACKAGES="$PACKAGES linux-headers-$(uname -r)"
fi fi


@ -6,13 +6,13 @@
set -ex set -ex
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
NAME=$1 NAME=$1
CPU=$2 CPU=$2
MEM=$(( 1024 * $3 )) MEM=$(( 1024 * $3 ))
# extra G to allow fuzz for partition table : flavor size and registered size # Extra G to allow fuzz for the partition table: flavor size and registered size
# need to be different from the actual size. # need to be different from the actual size.
DISK=$(( $4 + 1)) DISK=$(( $4 + 1))


@ -9,7 +9,7 @@ set -exu
LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
# Keep track of the devstack directory # Keep track of the DevStack directory
TOP_DIR=$(cd $(dirname "$0")/.. && pwd) TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
BRIDGE_SUFFIX=${1:-''} BRIDGE_SUFFIX=${1:-''}
BRIDGE_NAME=brbm$BRIDGE_SUFFIX BRIDGE_NAME=brbm$BRIDGE_SUFFIX
@ -19,7 +19,7 @@ export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
# Only add bridge if missing # Only add bridge if missing
(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
# remove bridge before replacing it. # Remove bridge before replacing it.
(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} (virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME}
(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} (virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME}


@ -14,8 +14,8 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
# This is an output filter to filter and timestamp the logs from grenade and # This is an output filter to filter and timestamp the logs from Grenade and
# devstack. Largely our awk filters got beyond the complexity level which were # DevStack. Largely our awk filters got beyond the complexity level which were
# sustainable, so this provides us much more control in a single place. # sustainable, so this provides us much more control in a single place.
# #
# The overhead of running python should be less than execing `date` a million # The overhead of running python should be less than execing `date` a million
@ -32,7 +32,7 @@ HAS_DATE = re.compile('^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3} \|')
def get_options(): def get_options():
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
description='Filter output by devstack and friends') description='Filter output by DevStack and friends')
parser.add_argument('-o', '--outfile', parser.add_argument('-o', '--outfile',
help='Output file for content', help='Output file for content',
default=None) default=None)
@ -52,7 +52,7 @@ def main():
if opts.outfile: if opts.outfile:
outfile = open(opts.outfile, 'a', 0) outfile = open(opts.outfile, 'a', 0)
# otherwise fileinput reprocess args as files # Otherwise fileinput would reprocess args as files
sys.argv = [] sys.argv = []
while True: while True:
line = sys.stdin.readline() line = sys.stdin.readline()
@ -63,9 +63,9 @@ def main():
if skip_line(line): if skip_line(line):
continue continue
# this prevents us from nesting date lines, because # This prevents us from nesting date lines, because
# we'd like to pull this in directly in grenade and not double # we'd like to pull this in directly in Grenade and not double
# up on devstack lines # up on DevStack lines
if HAS_DATE.search(line) is None: if HAS_DATE.search(line) is None:
now = datetime.datetime.utcnow() now = datetime.datetime.utcnow()
line = ("%s | %s" % ( line = ("%s | %s" % (


@ -19,7 +19,7 @@ while getopts ":a" opt; do
esac esac
done done
# Keep track of the current devstack directory. # Keep track of the current DevStack directory.
TOP_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $(dirname "$0") && pwd)
FILES=$TOP_DIR/files FILES=$TOP_DIR/files