Automate functional tests
By storing the functional tests configuration files in the repo, we can now run the functional_tests.sh to setup, run the functional tests, and teardown. Most likely this will be able to be run as a user from the same directory as the repo, but at the moment, the configuration files are copied to /etc/swift. The only requirements are: 1. /etc/swift does not exist. That way the tests will not interfere with an existing deployment. 2. /mnt/gluster-object/test and /mnt/gluster-object/test2 must have been created and setup correctly on an XFS or GlusterFS volume 3. sudo rights without password prompt 4. glusterfs-openstack-swift-* rpm must not be installed on the system Once the requirements are met, you can execute the tests as follows: $ bash tools/functional_tests.sh Change-Id: Icdbcd420355b02e64f294df7298a3e473b343655 Signed-off-by: Luis Pabon <lpabon@redhat.com> Reviewed-on: http://review.gluster.org/5281 Reviewed-by: Peter Portante <pportant@redhat.com>
This commit is contained in:
parent
ceb18f16cc
commit
c52b889657
19
test/functional/conf/account-server.conf
Normal file
19
test/functional/conf/account-server.conf
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
devices = /mnt/gluster-object
|
||||||
|
mount_check = false
|
||||||
|
bind_port = 6012
|
||||||
|
user = root
|
||||||
|
log_facility = LOG_LOCAL2
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = account-server
|
||||||
|
|
||||||
|
[app:account-server]
|
||||||
|
use = egg:gluster_swift#account
|
||||||
|
|
||||||
|
[account-replicator]
|
||||||
|
vm_test_mode = yes
|
||||||
|
|
||||||
|
[account-auditor]
|
||||||
|
|
||||||
|
[account-reaper]
|
21
test/functional/conf/container-server.conf
Normal file
21
test/functional/conf/container-server.conf
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
devices = /mnt/gluster-object
|
||||||
|
mount_check = false
|
||||||
|
bind_port = 6011
|
||||||
|
user = root
|
||||||
|
log_facility = LOG_LOCAL2
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = container-server
|
||||||
|
|
||||||
|
[app:container-server]
|
||||||
|
use = egg:gluster_swift#container
|
||||||
|
|
||||||
|
[container-replicator]
|
||||||
|
vm_test_mode = yes
|
||||||
|
|
||||||
|
[container-updater]
|
||||||
|
|
||||||
|
[container-auditor]
|
||||||
|
|
||||||
|
[container-sync]
|
17
test/functional/conf/fs.conf
Normal file
17
test/functional/conf/fs.conf
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
# IP address of a GlusterFS volume server member. By default, we assume the
|
||||||
|
# local host.
|
||||||
|
mount_ip = localhost
|
||||||
|
|
||||||
|
# By default it is assumed the Gluster volumes can be accessed using other
|
||||||
|
# methods besides UFO (not object only), which disables caching
|
||||||
|
# optimizations in order to keep in sync with file system changes.
|
||||||
|
object_only = yes
|
||||||
|
|
||||||
|
# Performance optimization parameter. When turned off, the filesystem will
|
||||||
|
# see a reduced number of stat calls, resulting in substantially faster
|
||||||
|
# response time for GET and HEAD container requests on containers with large
|
||||||
|
# numbers of objects, at the expense of an accurate count of combined bytes
|
||||||
|
# used by all objects in the container. For most installations "off" works
|
||||||
|
# fine.
|
||||||
|
accurate_size_in_listing = on
|
17
test/functional/conf/object-expirer.conf
Normal file
17
test/functional/conf/object-expirer.conf
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
|
||||||
|
[object-expirer]
|
||||||
|
# auto_create_account_prefix = .
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = catch_errors cache proxy-server
|
||||||
|
|
||||||
|
[app:proxy-server]
|
||||||
|
use = egg:swift#proxy
|
||||||
|
|
||||||
|
[filter:cache]
|
||||||
|
use = egg:swift#memcache
|
||||||
|
memcache_servers = 127.0.0.1:11211
|
||||||
|
|
||||||
|
[filter:catch_errors]
|
||||||
|
use = egg:swift#catch_errors
|
36
test/functional/conf/object-server.conf
Normal file
36
test/functional/conf/object-server.conf
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
devices = /mnt/gluster-object
|
||||||
|
mount_check = false
|
||||||
|
bind_port = 6010
|
||||||
|
# If not doing the above, setting this value initially to match the number of
|
||||||
|
# CPUs is a good starting point for determining the right value.
|
||||||
|
workers = 1
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = object-server
|
||||||
|
|
||||||
|
[app:object-server]
|
||||||
|
use = egg:gluster_swift#object
|
||||||
|
user = root
|
||||||
|
log_facility = LOG_LOCAL2
|
||||||
|
# Timeout clients that don't read or write to the proxy server after 5
|
||||||
|
# seconds.
|
||||||
|
conn_timeout = 5
|
||||||
|
# For high load situations, once connected to a container server, allow for
|
||||||
|
# delays communicating with it.
|
||||||
|
node_timeout = 60
|
||||||
|
# Adjust this value to match the stripe width of the underlying storage array
|
||||||
|
# (not the stripe element size). This will provide a reasonable starting point
|
||||||
|
# for tuning this value.
|
||||||
|
disk_chunk_size = 65536
|
||||||
|
# Adjust this value to match whatever is set for the disk_chunk_size
|
||||||
|
# initially. This will provide a reasonable starting point for tuning this
|
||||||
|
# value.
|
||||||
|
network_chunk_size = 65536
|
||||||
|
|
||||||
|
[object-replicator]
|
||||||
|
vm_test_mode = yes
|
||||||
|
|
||||||
|
[object-updater]
|
||||||
|
|
||||||
|
[object-auditor]
|
61
test/functional/conf/proxy-server.conf
Normal file
61
test/functional/conf/proxy-server.conf
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
bind_port = 8080
|
||||||
|
user = root
|
||||||
|
log_facility = LOG_LOCAL1
|
||||||
|
# Consider using 1 worker per CPU
|
||||||
|
workers = 1
|
||||||
|
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = healthcheck cache tempauth proxy-server
|
||||||
|
|
||||||
|
[app:proxy-server]
|
||||||
|
use = egg:gluster_swift#proxy
|
||||||
|
log_facility = LOG_LOCAL1
|
||||||
|
# The API allows for account creation and deletion, but since Gluster/Swift
|
||||||
|
# automounts a Gluster volume for a given account, there is no way to create
|
||||||
|
# or delete an account. So leave this off.
|
||||||
|
allow_account_management = false
|
||||||
|
account_autocreate = true
|
||||||
|
# Only need to recheck the account exists once a day
|
||||||
|
recheck_account_existence = 86400
|
||||||
|
# May want to consider bumping this up if containers are created and destroyed
|
||||||
|
# infrequently.
|
||||||
|
recheck_container_existence = 60
|
||||||
|
# Timeout clients that don't read or write to the proxy server after 5
|
||||||
|
# seconds.
|
||||||
|
client_timeout = 5
|
||||||
|
# Give more time to connect to the object, container or account servers in
|
||||||
|
# cases of high load.
|
||||||
|
conn_timeout = 5
|
||||||
|
# For high load situations, once connected to an object, container or account
|
||||||
|
# server, allow for delays communicating with them.
|
||||||
|
node_timeout = 60
|
||||||
|
# May want to consider bumping up this value to 1 - 4 MB depending on how much
|
||||||
|
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
|
||||||
|
# stripe width (not stripe element size) of your storage volume is a good
|
||||||
|
# starting point. See below for sizing information.
|
||||||
|
object_chunk_size = 65536
|
||||||
|
# If you do decide to increase the object_chunk_size, then consider lowering
|
||||||
|
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
|
||||||
|
# be queued to the object server for processing. Given one proxy server worker
|
||||||
|
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
|
||||||
|
# * 1,024 bytes of memory in the worst case (default values). Be sure the
|
||||||
|
# amount of memory available on the system can accommodate increased values
|
||||||
|
# for object_chunk_size.
|
||||||
|
put_queue_depth = 10
|
||||||
|
|
||||||
|
[filter:healthcheck]
|
||||||
|
use = egg:swift#healthcheck
|
||||||
|
|
||||||
|
[filter:tempauth]
|
||||||
|
use = egg:swift#tempauth
|
||||||
|
user_admin_admin = admin .admin .reseller_admin
|
||||||
|
user_test_tester = testing .admin
|
||||||
|
user_test2_tester2 = testing2 .admin
|
||||||
|
user_test_tester3 = testing3
|
||||||
|
|
||||||
|
[filter:cache]
|
||||||
|
use = egg:swift#memcache
|
||||||
|
# Update this line to contain a comma separated list of memcache servers
|
||||||
|
# shared by all nodes running the proxy-server service.
|
||||||
|
memcache_servers = localhost:11211
|
91
test/functional/conf/swift.conf
Normal file
91
test/functional/conf/swift.conf
Normal file
@ -0,0 +1,91 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
|
||||||
|
|
||||||
|
[swift-hash]
|
||||||
|
# random unique string that can never change (DO NOT LOSE)
|
||||||
|
swift_hash_path_suffix = gluster
|
||||||
|
|
||||||
|
|
||||||
|
# The swift-constraints section sets the basic constraints on data
|
||||||
|
# saved in the swift cluster.
|
||||||
|
|
||||||
|
[swift-constraints]
|
||||||
|
|
||||||
|
# max_file_size is the largest "normal" object that can be saved in
|
||||||
|
# the cluster. This is also the limit on the size of each segment of
|
||||||
|
# a "large" object when using the large object manifest support.
|
||||||
|
# This value is set in bytes. Setting it to lower than 1MiB will cause
|
||||||
|
# some tests to fail. It is STRONGLY recommended to leave this value at
|
||||||
|
# the default (5 * 2**30 + 2).
|
||||||
|
|
||||||
|
# FIXME: Really? Gluster can handle a 2^64 sized file? And can the fronting
|
||||||
|
# web service handle such a size? I think with UFO, we need to keep with the
|
||||||
|
# default size from Swift and encourage users to research what size their web
|
||||||
|
# services infrastructure can handle.
|
||||||
|
|
||||||
|
max_file_size = 18446744073709551616
|
||||||
|
|
||||||
|
|
||||||
|
# max_meta_name_length is the max number of bytes in the utf8 encoding
|
||||||
|
# of the name portion of a metadata header.
|
||||||
|
|
||||||
|
#max_meta_name_length = 128
|
||||||
|
|
||||||
|
|
||||||
|
# max_meta_value_length is the max number of bytes in the utf8 encoding
|
||||||
|
# of a metadata value
|
||||||
|
|
||||||
|
#max_meta_value_length = 256
|
||||||
|
|
||||||
|
|
||||||
|
# max_meta_count is the max number of metadata keys that can be stored
|
||||||
|
# on a single account, container, or object
|
||||||
|
|
||||||
|
#max_meta_count = 90
|
||||||
|
|
||||||
|
|
||||||
|
# max_meta_overall_size is the max number of bytes in the utf8 encoding
|
||||||
|
# of the metadata (keys + values)
|
||||||
|
|
||||||
|
#max_meta_overall_size = 4096
|
||||||
|
|
||||||
|
|
||||||
|
# max_object_name_length is the max number of bytes in the utf8 encoding of an
|
||||||
|
# object name: Gluster FS can handle much longer file names, but the length
|
||||||
|
# between the slashes of the URL is handled below. Remember that most web
|
||||||
|
# clients can't handle anything greater than 2048, and those that do are
|
||||||
|
# rather clumsy.
|
||||||
|
|
||||||
|
max_object_name_length = 2048
|
||||||
|
|
||||||
|
# max_object_name_component_length (GlusterFS) is the max number of bytes in
|
||||||
|
# the utf8 encoding of an object name component (the part between the
|
||||||
|
# slashes); this is a limit imposed by the underlying file system (for XFS it
|
||||||
|
# is 255 bytes).
|
||||||
|
|
||||||
|
max_object_name_component_length = 255
|
||||||
|
|
||||||
|
# container_listing_limit is the default (and max) number of items
|
||||||
|
# returned for a container listing request
|
||||||
|
|
||||||
|
#container_listing_limit = 10000
|
||||||
|
|
||||||
|
|
||||||
|
# account_listing_limit is the default (and max) number of items returned
|
||||||
|
# for an account listing request
|
||||||
|
|
||||||
|
#account_listing_limit = 10000
|
||||||
|
|
||||||
|
|
||||||
|
# max_account_name_length is the max number of bytes in the utf8 encoding of
|
||||||
|
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||||
|
# size as max_object_name_component_length above.
|
||||||
|
|
||||||
|
max_account_name_length = 255
|
||||||
|
|
||||||
|
|
||||||
|
# max_container_name_length is the max number of bytes in the utf8 encoding
|
||||||
|
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||||
|
# size as max_object_name_component_length above.
|
||||||
|
|
||||||
|
max_container_name_length = 255
|
50
test/functional/conf/test.conf
Normal file
50
test/functional/conf/test.conf
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
[func_test]
|
||||||
|
# sample config
|
||||||
|
auth_host = 127.0.0.1
|
||||||
|
auth_port = 8080
|
||||||
|
auth_ssl = no
|
||||||
|
auth_prefix = /auth/
|
||||||
|
## sample config for Swift with Keystone
|
||||||
|
#auth_version = 2
|
||||||
|
#auth_host = localhost
|
||||||
|
#auth_port = 5000
|
||||||
|
#auth_ssl = no
|
||||||
|
#auth_prefix = /v2.0/
|
||||||
|
|
||||||
|
# Primary functional test account (needs admin access to the account)
|
||||||
|
account = test
|
||||||
|
username = tester
|
||||||
|
password = testing
|
||||||
|
|
||||||
|
# User on a second account (needs admin access to the account)
|
||||||
|
account2 = test2
|
||||||
|
username2 = tester2
|
||||||
|
password2 = testing2
|
||||||
|
|
||||||
|
# User on same account as first, but without admin access
|
||||||
|
username3 = tester3
|
||||||
|
password3 = testing3
|
||||||
|
|
||||||
|
# Default constraints if not defined here, the test runner will try
|
||||||
|
# to set them from /etc/swift/swift.conf. If that file isn't found,
|
||||||
|
# the test runner will skip tests that depend on these values.
|
||||||
|
# Note that the cluster must have "sane" values for the test suite to pass.
|
||||||
|
#max_file_size = 5368709122
|
||||||
|
#max_meta_name_length = 128
|
||||||
|
#max_meta_value_length = 256
|
||||||
|
#max_meta_count = 90
|
||||||
|
#max_meta_overall_size = 4096
|
||||||
|
#max_object_name_length = 1024
|
||||||
|
#container_listing_limit = 10000
|
||||||
|
#account_listing_limit = 10000
|
||||||
|
#max_account_name_length = 256
|
||||||
|
#max_container_name_length = 256
|
||||||
|
normalized_urls = True
|
||||||
|
|
||||||
|
collate = C
|
||||||
|
|
||||||
|
[unit_test]
|
||||||
|
fake_syslog = False
|
||||||
|
|
||||||
|
[probe_test]
|
||||||
|
# check_server_timeout = 30
|
63
tools/functional_tests.sh
Normal file
63
tools/functional_tests.sh
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
cleanup()
|
||||||
|
{
|
||||||
|
sudo service memcached stop
|
||||||
|
sudo swift-init main stop
|
||||||
|
sudo yum -y remove glusterfs-openstack-swift
|
||||||
|
sudo rm -rf /etc/swift > /dev/null 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
quit()
|
||||||
|
{
|
||||||
|
echo "$1"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
fail()
|
||||||
|
{
|
||||||
|
cleanup
|
||||||
|
quit "$1"
|
||||||
|
}
|
||||||
|
|
||||||
|
### MAIN ###
|
||||||
|
|
||||||
|
# Only run if there is no configuration in the system
|
||||||
|
if [ -x /etc/swift ] ; then
|
||||||
|
quit "/etc/swift exists, cannot run functional tests."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Check the directories exist
|
||||||
|
DIRS="/mnt/gluster-object /mnt/gluster-object/test /mnt/gluster-object/test2"
|
||||||
|
for d in $DIRS ; do
|
||||||
|
if [ ! -x $d ] ; then
|
||||||
|
quit "$d must exist on an XFS or GlusterFS volume"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf
|
||||||
|
|
||||||
|
# Create and install the rpm
|
||||||
|
PKG_RELEASE=functest bash makerpm.sh
|
||||||
|
sudo yum -y install build/glusterfs-openstack-swift-1.8.0-functest.noarch.rpm || fail "Unable to install rpm"
|
||||||
|
|
||||||
|
# Install the configuration files
|
||||||
|
mkdir /etc/swift > /dev/null 2>&1
|
||||||
|
sudo cp -r test/functional/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
|
||||||
|
( cd /etc/swift ; sudo gluster-swift-gen-builders test test2 ) || fail "Unable to create ring files"
|
||||||
|
|
||||||
|
# Start the services
|
||||||
|
sudo service memcached start || fail "Unable to start memcached"
|
||||||
|
sudo swift-init main start || fail "Unable to start swift"
|
||||||
|
|
||||||
|
mkdir functional_tests > /dev/null 2>&1
|
||||||
|
nosetests -v --exe \
|
||||||
|
--with-xunit \
|
||||||
|
--xunit-file functional_tests/gluster-swift-functional-TC-report.xml test/functional || fail "Functional tests failed"
|
||||||
|
nosetests -v --exe \
|
||||||
|
--with-xunit \
|
||||||
|
--xunit-file functional_tests/gluster-swift-functionalnosetests-TC-report.xml test/functionalnosetests || fail "Functional-nose tests failed"
|
||||||
|
|
||||||
|
cleanup
|
||||||
|
exit 0
|
Loading…
x
Reference in New Issue
Block a user