# lib/swift
# Functions to control the configuration and operation of the **Swift** service

# Dependencies:
#
# - ``functions`` file
# - ``apache`` file
# - ``DEST``, ``SCREEN_NAME``, ``SWIFT_HASH`` must be defined
# - ``STACK_USER`` must be defined
# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
# - ``lib/keystone`` file
#
# ``stack.sh`` calls the entry points in this order:
#
# - install_swift
# - _config_swift_apache_wsgi
# - configure_swift
# - init_swift
# - start_swift
# - stop_swift
# - cleanup_swift
# - _cleanup_swift_apache_wsgi

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace

# Defaults
# --------

# Set up default directories
SWIFT_DIR=$DEST/swift
SWIFTCLIENT_DIR=$DEST/python-swiftclient
SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
SWIFT3_DIR=$DEST/swift3

# TODO: add logging to a different location.

# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
# Default is the common DevStack data directory.
SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img

# Set ``SWIFT_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/swift``.
# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}

if is_service_enabled s-proxy && is_service_enabled swift3; then
    # If we are using swift3, we can default the s3 port to swift instead
    # of nova-objectstore
    S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
fi

# DevStack will create a loop-back disk formatted as XFS to store the
# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size
# (e.g. ``1G``). Default is 1 gigabyte.
SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G
# If tempest is enabled, the default size is 6 gigabytes.
if is_service_enabled tempest; then
    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G}
fi

SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}

# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to the extra middlewares to enable.
# Default is ``formpost staticweb``.
SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-formpost staticweb}

# Set ``SWIFT_EXTRAS_MIDDLEWARE_LAST`` to the extra middlewares that need to be
# at the end of the pipeline.
SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST}

# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to the extra middlewares that need to
# be at the beginning of the pipeline, before the authentication middlewares.
SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain}

# The ring uses a configurable number of bits from a path's MD5 hash as
# a partition index that designates a device. The number of bits kept
# from the hash is known as the partition power, and 2 to the partition
# power indicates the partition count. Partitioning the full MD5 hash
# ring allows other parts of the cluster to work in batches of items at
# once, which ends up either more efficient or at least less complex than
# working with each item separately or the entire cluster all at once.
# By default we use a partition power of 9 (which means 512 partitions).
SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
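# For example, with the default partition power of 9 the rings created in
# init_swift() are built with 2**9 = 512 partitions; raising the power to 10
# would double that to 1024 partitions.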

# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
# configured for your Swift cluster. By default we configure only one
# replica, since this is far less CPU and memory intensive. If you are
# planning to test swift replication you may want to set this to 3.
SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
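# A minimal illustration: with SWIFT_REPLICAS=3, SWIFT_REPLICAS_SEQ holds the
# sequence "1 2 3", and the loops below create one set of
# object/container/account servers (and ring entries) per node number.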

# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth
# token should be placed in the logs. When keystone is used with PKI tokens,
# the token values can be huge, seemingly larger than 2K at the least. We
# restrict it here to a default of 12 characters, which should be enough to
# trace through the logs when looking for its use.
SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12}

# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in
# the Swift API
SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384}

# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
# Port bases used in port number calculation for the service "nodes".
# The base port number is used for node 1; the additional ports are
# calculated as base_port + (node_num - 1) * 10
OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013}
CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
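# Worked example with the defaults above: node 1 binds its
# object/container/account servers to 6013/6011/6012, node 2 to
# 6023/6021/6022, and so on.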

# Tell Tempest this project is present
TEMPEST_SERVICES+=,swift


# Functions
# ---------

# Test if any Swift services are enabled
# is_swift_enabled
function is_swift_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0
    return 1
}
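# For example (a usage sketch, not called from this file), this returns
# success when any "s-" service such as s-proxy or s-object is enabled:
#   if is_swift_enabled; then
#       echo "Swift services are enabled"
#   fi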

# cleanup_swift() - Remove residual data files
function cleanup_swift {
    rm -f ${SWIFT_CONF_DIR}/{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
    fi
    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
        rm ${SWIFT_DISK_IMAGE}
    fi
    rm -rf ${SWIFT_DATA_DIR}/run/
    if is_apache_enabled_service swift; then
        _cleanup_swift_apache_wsgi
    fi
}

# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function _cleanup_swift_apache_wsgi {
    sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
    disable_apache_site proxy-server
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        for type in object container account; do
            site_name=${type}-server-${node_number}
            disable_apache_site ${site_name}
            sudo rm -f $(apache_site_config_for ${site_name})
        done
    done
}
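# Note: apache_site_config_for comes from lib/apache and hides the
# distribution-specific site file naming. As a sketch of what it typically
# returns for the "proxy-server" site: something like
# /etc/apache2/sites-available/proxy-server.conf on Ubuntu 14.04,
# /etc/apache2/sites-available/proxy-server on Ubuntu 12.04, and
# /etc/httpd/conf.d/proxy-server.conf on Fedora/RHEL/CentOS (renamed to
# .conf.disabled when the site is disabled).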

# _config_swift_apache_wsgi() - Set WSGI config files of Swift
function _config_swift_apache_wsgi {
    sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
    local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}

    # copy proxy vhost and wsgi file
    sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server)
    sudo sed -e "
        /^#/d;/^$/d;
        s/%PORT%/$proxy_port/g;
        s/%SERVICENAME%/proxy-server/g;
        s/%APACHE_NAME%/${APACHE_NAME}/g;
        s/%USER%/${STACK_USER}/g;
    " -i $(apache_site_config_for proxy-server)
    enable_apache_site proxy-server

    sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
    sudo sed -e "
        /^#/d;/^$/d;
        s/%SERVICECONF%/proxy-server.conf/g;
    " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi

    # copy apache vhost file and set name and port
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
        container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
        account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]

        sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
        sudo sed -e "
            s/%PORT%/$object_port/g;
            s/%SERVICENAME%/object-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for object-server-${node_number})
        enable_apache_site object-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/object-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi

        sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template $(apache_site_config_for container-server-${node_number})
        sudo sed -e "
            /^#/d;/^$/d;
            s/%PORT%/$container_port/g;
            s/%SERVICENAME%/container-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for container-server-${node_number})
        enable_apache_site container-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/container-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi

        sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template $(apache_site_config_for account-server-${node_number})
        sudo sed -e "
            /^#/d;/^$/d;
            s/%PORT%/$account_port/g;
            s/%SERVICENAME%/account-server-${node_number}/g;
            s/%APACHE_NAME%/${APACHE_NAME}/g;
            s/%USER%/${STACK_USER}/g;
        " -i $(apache_site_config_for account-server-${node_number})
        enable_apache_site account-server-${node_number}

        sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
        sudo sed -e "
            /^#/d;/^$/d;
            s/%SERVICECONF%/account-server\/${node_number}.conf/g;
        " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
    done
}

# This function generates an object/container/account server configuration
# for one of the emulated nodes, each bound to a different port
function generate_swift_config {
    local swift_node_config=$1
    local node_id=$2
    local bind_port=$3
    local server_type=$4

    log_facility=$[ node_id - 1 ]
    node_path=${SWIFT_DATA_DIR}/${node_id}

    iniuncomment ${swift_node_config} DEFAULT user
    iniset ${swift_node_config} DEFAULT user ${STACK_USER}

    iniuncomment ${swift_node_config} DEFAULT bind_port
    iniset ${swift_node_config} DEFAULT bind_port ${bind_port}

    iniuncomment ${swift_node_config} DEFAULT swift_dir
    iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}

    iniuncomment ${swift_node_config} DEFAULT devices
    iniset ${swift_node_config} DEFAULT devices ${node_path}

    iniuncomment ${swift_node_config} DEFAULT log_facility
    iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}

    iniuncomment ${swift_node_config} DEFAULT workers
    iniset ${swift_node_config} DEFAULT workers 1

    iniuncomment ${swift_node_config} DEFAULT disable_fallocate
    iniset ${swift_node_config} DEFAULT disable_fallocate true

    iniuncomment ${swift_node_config} DEFAULT mount_check
    iniset ${swift_node_config} DEFAULT mount_check false

    iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
    iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
}
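# For reference, configure_swift() below calls this helper once per node and
# server type; with the default port base for node 1's object server that is:
#   generate_swift_config ${SWIFT_CONF_DIR}/object-server/1.conf 1 6013 object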


# configure_swift() - Set config files, create data dirs and loop image
function configure_swift {
    local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
    local node_number
    local swift_node_config
    local swift_log_dir

    # Make sure to kill all swift processes first
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
    sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR}

    if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
        # Create a symlink if the config dir is moved
        sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift
    fi

    # Swift uses rsync to synchronize between all the different
    # partitions (which makes more sense when you have a multi-node
    # setup), so we configure it with our own rsyncd.conf.
    sed -e "
        s/%GROUP%/${USER_GROUP}/;
        s/%USER%/${STACK_USER}/;
        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
    # rsyncd.conf is only prepared for 4 nodes
    if is_ubuntu; then
        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
    elif [ -e /etc/xinetd.d/rsync ]; then
        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
    fi

    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}

    cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${SWIFT_CONF_DIR}/container-sync-realms.conf

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}

    # DevStack is commonly run in a small slow environment, so bump the
    # timeouts up.
    # node_timeout is how long the proxy server waits, between read
    # operations, for a storage node to respond.
    # conn_timeout is how long it takes a connect() system call to return.
    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20

    # Skipped due to bug 1294789
    ## Configure Ceilometer
    #if is_service_enabled ceilometer; then
    #    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
    #    SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
    #fi

    # Restrict the length of auth tokens in the swift proxy-server logs.
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}

    # By default Swift is installed with the keystone and tempauth middlewares,
    # and the swift3 middleware is added if it is configured. Tokens for
    # tempauth are prefixed with the reseller_prefix setting ``TEMPAUTH_``,
    # while tokens for keystoneauth use the standard reseller_prefix ``AUTH_``.
    if is_service_enabled swift3; then
        swift_pipeline+=" swift3 s3token "
    fi
    swift_pipeline+=" authtoken keystoneauth tempauth "
    sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
    sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER}
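    # Sketch of the effect of the two sed calls above, assuming the sample
    # pipeline line contains "... tempauth ... proxy-server" and swift3 is
    # enabled: the "tempauth" entry is replaced with
    # "crossdomain swift3 s3token authtoken keystoneauth tempauth formpost staticweb",
    # and ${SWIFT_EXTRAS_MIDDLEWARE_LAST} (if set) is inserted just before
    # "proxy-server".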

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate
    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH"
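    # With this prefix, accounts created through tempauth are addressed as
    # /v1/TEMPAUTH_<account>, while keystoneauth projects keep the standard
    # /v1/AUTH_<tenant_id> form used in the endpoint URLs registered in
    # create_swift_accounts() below.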

    # Configure Crossdomain
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain"

    # Configure Keystone
    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
    # This causes the authtoken middleware to use the same python logging
    # adapter provided by the swift proxy-server, so that request transaction
    # IDs will be included in all of its log messages.
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift

    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"

    if is_service_enabled swift3; then
        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
# NOTE(chmou): s3token middleware is not updated yet to use only
# username and password.
[filter:s3token]
paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
auth_port = ${KEYSTONE_AUTH_PORT}
auth_host = ${KEYSTONE_AUTH_HOST}
auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
cafile = ${KEYSTONE_SSL_CA}
auth_token = ${SERVICE_TOKEN}
admin_token = ${SERVICE_TOKEN}

[filter:swift3]
use = egg:swift3#swift3
EOF
    fi

    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
    iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
    iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}

    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
        iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
        # Using sed and not iniset/iniuncomment because we want a global
        # modification and need to make sure it works for new sections.
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}

        swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] container
        iniuncomment ${swift_node_config} app:container-server allow_versions
        iniset ${swift_node_config} app:container-server allow_versions "true"
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}

        swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
        cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] account
        sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
    done

    # Set new accounts in tempauth to match keystone tenant/user (to make testing easier)
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest1 "testing .admin"
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest2_swiftusertest2 "testing2 .admin"
    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth user_swifttenanttest1_swiftusertest3 "testing3 .admin"
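    # For illustration, the first entry above defines tempauth account
    # "swifttenanttest1", user "swiftusertest1", key "testing"; once the proxy
    # is running, a request like the following should work (a sketch, assuming
    # tempauth's default /auth/v1.0 endpoint and the default bind port):
    #   swift -A http://localhost:8080/auth/v1.0 -U swifttenanttest1:swiftusertest1 -K testing stat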

    testfile=${SWIFT_CONF_DIR}/test.conf
    cp ${SWIFT_DIR}/test/sample.conf ${testfile}

    # Set accounts for functional tests
    iniset ${testfile} func_test account swifttenanttest1
    iniset ${testfile} func_test username swiftusertest1
    iniset ${testfile} func_test username3 swiftusertest3
    iniset ${testfile} func_test account2 swifttenanttest2
    iniset ${testfile} func_test username2 swiftusertest2

    if is_service_enabled key; then
        iniuncomment ${testfile} func_test auth_version
        iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
        iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT}
        iniset ${testfile} func_test auth_prefix /v2.0/
    fi

    swift_log_dir=${SWIFT_DATA_DIR}/logs
    rm -rf ${swift_log_dir}
    mkdir -p ${swift_log_dir}/hourly
    sudo chown -R ${STACK_USER}:adm ${swift_log_dir}

    if [[ $SYSLOG != "False" ]]; then
        sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
            tee /etc/rsyslog.d/10-swift.conf
        # restart syslog to pick up the changes
        sudo killall -HUP rsyslogd
    fi

    if is_apache_enabled_service swift; then
        _config_swift_apache_wsgi
    fi
}

# create_swift_disk() - Create Swift backing disk
function create_swift_disk {
    local node_number

    # First do a bit of setup by creating the directories and
    # changing the permissions so we can run it as our user.

    USER_GROUP=$(id -g ${STACK_USER})
    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
    sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR}

    # Create a loopback disk and format it to XFS.
    if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
            sudo rm -f ${SWIFT_DISK_IMAGE}
        fi
    fi

    mkdir -p ${SWIFT_DATA_DIR}/drives/images
    sudo touch ${SWIFT_DISK_IMAGE}
    sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE}

    truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}

    # Make a fresh XFS filesystem
    /sbin/mkfs.xfs -f -i size=1024 ${SWIFT_DISK_IMAGE}

    # Mount the disk with mount options to make it as efficient as possible
    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
            ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
    fi

    # Create a link to the above mount and
    # create all of the directories needed to emulate a few different servers
    for node_number in ${SWIFT_REPLICAS_SEQ}; do
        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
        node=${SWIFT_DATA_DIR}/${node_number}/node
        node_device=${node}/sdb1
        [[ -d $node ]] && continue
        [[ -d $drive ]] && continue
        sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive
        sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device
        sudo chown -R ${STACK_USER}: ${node}
    done
}

# create_swift_accounts() - Set up standard Swift accounts and an extra one
# for tests. The tenant and user names are run together as single words so
# that they stay compatible with tempauth, which uses underscores as
# separators.

# Tenant             User             Roles
# ------------------------------------------------------------------
# service            swift            service
# swifttenanttest1   swiftusertest1   admin
# swifttenanttest1   swiftusertest3   anotherrole
# swifttenanttest2   swiftusertest2   admin

function create_swift_accounts {
    # Defines specific passwords used by tools/create_userrc.sh
    SWIFTUSERTEST1_PASSWORD=testing
    SWIFTUSERTEST2_PASSWORD=testing2
    SWIFTUSERTEST3_PASSWORD=testing3

    KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}

    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")

    SWIFT_USER=$(openstack user create \
        swift \
        --password "$SERVICE_PASSWORD" \
        --project $SERVICE_TENANT \
        --email=swift@example.com \
        | grep " id " | get_field 2)
    openstack role add \
        $ADMIN_ROLE \
        --project $SERVICE_TENANT \
        --user $SWIFT_USER

    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
        SWIFT_SERVICE=$(openstack service create \
            swift \
            --type="object-store" \
            --description="Swift Service" \
            | grep " id " | get_field 2)
        openstack endpoint create \
            $SWIFT_SERVICE \
            --region RegionOne \
            --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
            --adminurl "http://$SERVICE_HOST:8080" \
            --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
    fi

    SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2)
    die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
    SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \
        --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2)
    die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
    openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE

    SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \
        --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2)
    die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
    openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE

    SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2)
    die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"

    SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \
        --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2)
    die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
    openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE
}

# init_swift() - Initialize rings
function init_swift {
    local node_number
    # Make sure to kill all swift processes first
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true

    # Forcibly re-create the backing filesystem
    create_swift_disk

    # This is where we create the three rings for swift, with the
    # object/container/account servers of each node binding to different ports.
    pushd ${SWIFT_CONF_DIR} >/dev/null && {

        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz

        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1

        for node_number in ${SWIFT_REPLICAS_SEQ}; do
            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
        done
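        # With the defaults (SWIFT_REPLICAS=1, OBJECT_PORT_BASE=6013), the
        # first add above expands to, for instance:
        #   swift-ring-builder object.builder add z1-127.0.0.1:6013/sdb1 1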
        swift-ring-builder object.builder rebalance
        swift-ring-builder container.builder rebalance
        swift-ring-builder account.builder rebalance
    } && popd >/dev/null

    # Create cache dir
    sudo mkdir -p $SWIFT_AUTH_CACHE_DIR
    sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR
    rm -f $SWIFT_AUTH_CACHE_DIR/*
}

# install_swift() - Collect source and prepare
function install_swift {
    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
    setup_develop $SWIFT_DIR
    if is_apache_enabled_service swift; then
        install_apache_wsgi
    fi
}

# install_swiftclient() - Collect source and prepare
function install_swiftclient {
    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
    setup_develop $SWIFTCLIENT_DIR
}

# start_swift() - Start running processes, including screen
function start_swift {
    # (re)start memcached to make sure we have a clean memcache.
    restart_service memcached

    # Start rsync
    if is_ubuntu; then
        sudo /etc/init.d/rsync restart || :
    elif [ -e /etc/xinetd.d/rsync ]; then
        start_service xinetd
    else
        start_service rsyncd
    fi

    if is_apache_enabled_service swift; then
        restart_apache_server
        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
        screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
        if [[ ${SWIFT_REPLICAS} == 1 ]]; then
            for type in object container account; do
                screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
            done
        fi
        return 0
    fi

    # By default, with only one replica, we launch the proxy, container,
    # account and object servers in screen in the foreground and the other
    # services in the background. If SWIFT_REPLICAS is set to something
    # greater than one, we first spawn all the swift services and then kill
    # the proxy service so we can run it in the foreground in screen.
    # ``swift-init ... {stop|restart}`` exits with '1' if no servers are
    # running; ignore it just in case.
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        todo="object container account"
    fi
    for type in proxy ${todo}; do
        swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
    done
    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
        for type in object container account; do
            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
        done
    fi
}

# stop_swift() - Stop running processes (non-screen)
function stop_swift {

    if is_apache_enabled_service swift; then
        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
    fi

    # screen normally killed by unstack.sh
    if type -p swift-init >/dev/null; then
        swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
    fi
    # Dump all of the servers
    # Maintain the iteration as screen_stop() has some desirable side-effects
    for type in proxy object container account; do
        screen_stop s-${type}
    done
    # Blast out any stragglers
    pkill -f swift-
}

# Restore xtrace
$XTRACE

# Tell emacs to use shell-script-mode
## Local variables:
## mode: shell-script
## End: