first gswauth functional tests

Committing the first gswauth functional tests. Currently there are two
tests: one to create an account and one to create a user. Each test is
self-contained, in that it goes through the process of creating and
deleting accounts and users as needed.

More tests will be added shortly.

Change-Id: I26d577790aed8c79c9de11f224516423e9769962
Signed-off-by: Thiago da Silva <thiago@redhat.com>
Reviewed-on: http://review.gluster.org/6188
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
Author: Thiago da Silva, 2013-10-29 17:08:03 -04:00 (committed by Luis Pabon)
parent 100d6b01bd
commit 9f8d2e61a7
17 changed files with 612 additions and 15 deletions

File: gluster/swift/common/middleware/gswauth/swauth/middleware.py

@@ -92,7 +92,7 @@ class Swauth(object):
                 pass
             raise ValueError(msg)
         self.swauth_remote_timeout = int(conf.get('swauth_remote_timeout', 10))
-        self.auth_account = '%s.auth' % self.reseller_prefix
+        self.auth_account = '%sgsmetadata' % self.reseller_prefix
         self.default_swift_cluster = conf.get(
             'default_swift_cluster',
             'local#http://127.0.0.1:8080/v1')
@@ -398,7 +398,7 @@ class Swauth(object):
         user_groups = (req.remote_user or '').split(',')
         if '.reseller_admin' in user_groups and \
                 account != self.reseller_prefix and \
-                account[len(self.reseller_prefix)] != '.':
+                account[len(self.reseller_prefix):] != 'gsmetadata':
             req.environ['swift_owner'] = True
             return None
         if account in user_groups and \
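These two hunks rename gswauth's internal metadata account from
'<reseller_prefix>.auth' to '<reseller_prefix>gsmetadata' and adjust the
ownership check so a reseller admin still cannot own that internal account.
A minimal sketch of the resulting behavior, assuming the default reseller
prefix 'AUTH_' (the helper name below is illustrative, not from the patch):

    reseller_prefix = 'AUTH_'
    auth_account = '%sgsmetadata' % reseller_prefix  # 'AUTH_gsmetadata'

    def reseller_admin_may_own(account):
        # Mirrors the new condition: every account is owned except the
        # bare reseller prefix and the internal gsmetadata account.
        return (account != reseller_prefix and
                account[len(reseller_prefix):] != 'gsmetadata')

    assert reseller_admin_may_own('AUTH_test')
    assert not reseller_admin_may_own('AUTH_gsmetadata')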

File: RPM spec file

@@ -89,6 +89,15 @@ rm -rf %{buildroot}
 %{python_sitelib}/gluster_swift-%{version}-*.egg-info
 %{_bindir}/gluster-swift-gen-builders
 %{_bindir}/gluster-swift-print-metadata
+%{_bindir}/swauth-add-account
+%{_bindir}/swauth-add-user
+%{_bindir}/swauth-cleanup-tokens
+%{_bindir}/swauth-delete-account
+%{_bindir}/swauth-delete-user
+%{_bindir}/swauth-list
+%{_bindir}/swauth-prep
+%{_bindir}/swauth-set-account-service
 %dir %{_confdir}
 %config %{_confdir}/account-server.conf-gluster
 %config %{_confdir}/container-server.conf-gluster

File: setup.py

@@ -48,6 +48,16 @@ setup(
     scripts=[
         'bin/gluster-swift-gen-builders',
         'bin/gluster-swift-print-metadata',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-add-account',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-add-user',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-cleanup-tokens',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-delete-account',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-delete-user',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-list',
+        'gluster/swift/common/middleware/gswauth/bin/swauth-prep',
+        'gluster/swift/common/middleware/gswauth/bin/'
+        'swauth-set-account-service',
     ],
     entry_points={
         'paste.app_factory': [
@@ -56,5 +66,9 @@ setup(
             'container=gluster.swift.container.server:app_factory',
             'account=gluster.swift.account.server:app_factory',
         ],
+        'paste.filter_factory': [
+            'swauth=gluster.swift.common.middleware.gswauth.swauth.middleware:'
+            'filter_factory',
+        ],
     },
 )
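The new paste.filter_factory entry point is what lets a proxy pipeline load
the middleware as 'egg:gluster_swift#swauth' (see the [filter:swauth] section
in the proxy config below). A minimal sketch of the factory contract such an
entry point must satisfy, following standard paste.deploy conventions
(illustrative, not the actual middleware source):

    def filter_factory(global_conf, **local_conf):
        # paste.deploy passes in [DEFAULT] plus the [filter:swauth] options.
        conf = dict(global_conf)
        conf.update(local_conf)

        def auth_filter(app):
            # Wrap the next app in the pipeline with the auth middleware;
            # Swauth stands for the class the entry point resolves to.
            return Swauth(app, conf)
        return auth_filter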

File: account-server config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,32 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

File: container-server config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,35 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off
# Enable object versioning for the functional tests
allow_versions = on

File: GlusterFS filesystem config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,19 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost
# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
#
# *** Keep on for Functional Tests ***
accurate_size_in_listing = on
# *** Keep on for Functional Tests ***
container_update_object_count = on
account_update_container_count = on

File: object-expirer config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,17 @@
[DEFAULT]
[object-expirer]
# auto_create_account_prefix = .
[pipeline:main]
pipeline = catch_errors cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
[filter:cache]
use = egg:swift#memcache
memcache_servers = 127.0.0.1:11211
[filter:catch_errors]
use = egg:swift#catch_errors

File: object-server config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,48 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value to match whatever is set for the disk_chunk_size
# initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65536
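To make the chunk-size knobs above concrete, here is a rough sketch of how a
disk_chunk_size typically drives an object read loop, which is why aligning
it with the storage stripe width matters (illustrative Python, not
gluster-swift's actual implementation):

    disk_chunk_size = 65536  # bytes per read, as configured above

    def iter_object_chunks(fileobj):
        # Stream the object in disk_chunk_size pieces; each read ideally
        # covers a whole stripe on the backing storage array.
        while True:
            chunk = fileobj.read(disk_chunk_size)
            if not chunk:
                break
            yield chunk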

File: proxy-server config (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,72 @@
[DEFAULT]
bind_port = 8080
user = root
# Consider using 1 worker per CPU
workers = 1
[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache swauth proxy-logging proxy-server
[app:proxy-server]
use = egg:gluster_swift#proxy
log_facility = LOG_LOCAL1
log_level = WARN
# The Swift API allows for account creation and deletion, but since
# Gluster/Swift automounts a Gluster volume for a given account, accounts are
# not really created or destroyed on disk. This is enabled here because the
# gswauth functional tests create and delete accounts through the API.
allow_account_management = true
account_autocreate = true
# Only need to recheck the account exists once a day
recheck_account_existence = 86400
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Timeout clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
# stripe width (not stripe element size) of your storage volume is a good
# starting point. See below for sizing information.
object_chunk_size = 65536
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
# be queued to the object server for processing. Given one proxy server worker
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worst case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
[filter:swauth]
use = egg:gluster_swift#swauth
set log_name = swauth
super_admin_key = swauthkey
[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211
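The put_queue_depth comment above implies a worst-case buffering bound per
proxy worker; checking the arithmetic with the default values (plain Python,
not part of the patch):

    put_queue_depth = 10
    object_chunk_size = 65536      # bytes
    connections_per_worker = 1024  # default per-worker connection limit

    worst_case = put_queue_depth * object_chunk_size * connections_per_worker
    print(worst_case)  # 671088640 bytes, i.e. 640 MiB per worker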

File: swift.conf (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,85 @@
[DEFAULT]
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster
# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.
[swift-constraints]
# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail.
# Default is 1 TiB = 2**40 bytes
max_file_size = 1099511627776
# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.
#max_meta_name_length = 128
# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value
#max_meta_value_length = 256
# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object
#max_meta_count = 90
# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)
#max_meta_overall_size = 4096
# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.
max_object_name_length = 2048
# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).
max_object_name_component_length = 255
# container_listing_limit is the default (and max) number of items
# returned for a container listing request
#container_listing_limit = 10000
# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000
# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.
max_account_name_length = 255
# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.
max_container_name_length = 255
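As a quick sanity check on the max_file_size constraint above (plain Python
arithmetic):

    # 1 TiB expressed three equivalent ways
    assert 2**40 == 2**30 * 1024 == 1099511627776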

File: test.conf (under test/functional_auth/gswauth/conf/)

@@ -0,0 +1,54 @@
[func_test]
# sample config
auth_host = 127.0.0.1
auth_port = 8080
auth_ssl = no
auth_prefix = /auth/
## sample config for Swift with Keystone
#auth_version = 2
#auth_host = localhost
#auth_port = 5000
#auth_ssl = no
#auth_prefix = /v2.0/
# GSWauth internal admin user configuration information
admin_key = swauthkey
admin_user = .super_admin
# Primary functional test account (needs admin access to the account)
account = test
username = tester
password = testing
# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2
# User on same account as first, but without admin access
username3 = tester3
password3 = testing3
# Default constraints: if not defined here, the test runner will try
# to set them from /etc/swift/swift.conf. If that file isn't found,
# the test runner will skip tests that depend on these values.
# Note that the cluster must have "sane" values for the test suite to pass.
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_object_name_length = 1024
#container_listing_limit = 10000
#account_listing_limit = 10000
#max_account_name_length = 256
#max_container_name_length = 256
normalized_urls = True
collate = C
[unit_test]
fake_syslog = False
[probe_test]
# check_server_timeout = 30

File: gswauth functional tests (under test/functional_auth/gswauth/)

@@ -0,0 +1,95 @@
#!/usr/bin/python
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from swift.common.bufferedhttp import http_connect_raw as http_connect

from test import get_config

config = get_config('func_test')


class TestGSWauth(unittest.TestCase):

    def setUp(self):
        # TODO
        pass

    def tearDown(self):
        # TODO
        pass

    def _get_admin_headers(self):
        return {'X-Auth-Admin-User': config['admin_user'],
                'X-Auth-Admin-Key': config['admin_key']}

    def _check_test_account_does_not_exist(self):
        # make sure the test account does not exist yet
        path = '%sv2/%s' % (config['auth_prefix'], config['account'])
        headers = self._get_admin_headers()
        headers.update({'Content-Length': '0'})
        conn = http_connect(config['auth_host'], config['auth_port'], 'GET',
                            path, headers)
        resp = conn.getresponse()
        self.assertEqual(resp.status, 404)

    def _create_test_account(self):
        # create account in swauth (not a swift account)
        # The current version only supports one account per volume, and the
        # account name is the same as the volume name. Still, an account must
        # be created with swauth to map swauth accounts to swift accounts.
        path = '%sv2/%s' % (config['auth_prefix'], config['account'])
        headers = self._get_admin_headers()
        headers.update({'Content-Length': '0'})
        conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
                            path, headers)
        resp = conn.getresponse()
        self.assertEqual(resp.status, 201)

    def _delete_test_account(self):
        # delete account in swauth (not a swift account)
        # @see _create_test_account
        path = '%sv2/%s' % (config['auth_prefix'], config['account'])
        headers = self._get_admin_headers()
        headers.update({'Content-Length': '0'})
        conn = http_connect(config['auth_host'], config['auth_port'],
                            'DELETE', path, headers)
        resp = conn.getresponse()
        self.assertEqual(resp.status, 204)

    def test_add_account(self):
        self._check_test_account_does_not_exist()
        self._create_test_account()
        self._delete_test_account()

    def test_add_user(self):
        # check and create account
        self._check_test_account_does_not_exist()
        self._create_test_account()

        # create user
        path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
                               config['username'])
        headers = self._get_admin_headers()
        headers.update({'X-Auth-User-Key': config['password'],
                        'Content-Length': '0',
                        'X-Auth-User-Admin': 'true'})
        conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
                            path, headers)
        resp = conn.getresponse()
        self.assertEqual(resp.status, 201)

File: gswauth middleware unit tests

@@ -1180,7 +1180,7 @@ class TestAuth(unittest.TestCase):
     def test_prep_success(self):
         list_to_iter = [
-            # PUT of .auth account
+            # PUT of gsmetadata account
             ('201 Created', {}, ''),
             # PUT of .account_id container
             ('201 Created', {}, '')]
@@ -1266,7 +1266,7 @@ class TestAuth(unittest.TestCase):
     def test_prep_fail_account_create(self):
         self.test_auth.app = FakeApp(iter([
-            # PUT of .auth account
+            # PUT of gsmetadata account
             ('503 Service Unavailable', {}, '')]))
         resp = Request.blank('/auth/v2/.prep',
                              environ={
@@ -1281,7 +1281,7 @@ class TestAuth(unittest.TestCase):
     def test_prep_fail_token_container_create(self):
         self.test_auth.app = FakeApp(iter([
-            # PUT of .auth account
+            # PUT of gsmetadata account
             ('201 Created', {}, ''),
             # PUT of .token container
             ('503 Service Unavailable', {}, '')]))
@@ -1298,7 +1298,7 @@ class TestAuth(unittest.TestCase):
     def test_prep_fail_account_id_container_create(self):
         self.test_auth.app = FakeApp(iter([
-            # PUT of .auth account
+            # PUT of gsmetadata account
             ('201 Created', {}, ''),
             # PUT of .token container
             ('201 Created', {}, ''),
@@ -1317,13 +1317,13 @@ class TestAuth(unittest.TestCase):
     def test_get_reseller_success(self):
         self.test_auth.app = FakeApp(iter([
-            # GET of .auth account (list containers)
+            # GET of gsmetadata account (list containers)
             ('200 Ok', {}, json.dumps([
                 {"name": ".token", "count": 0, "bytes": 0},
                 {"name": ".account_id",
                  "count": 0, "bytes": 0},
                 {"name": "act", "count": 0, "bytes": 0}])),
-            # GET of .auth account (list containers
+            # GET of gsmetadata account (list containers
             # continuation)
             ('200 Ok', {}, '[]')]))
         resp = Request.blank('/auth/v2',
@@ -1342,13 +1342,13 @@ class TestAuth(unittest.TestCase):
             ('200 Ok', {}, json.dumps({"groups": [{"name": "act:adm"},
                 {"name": "test"}, {"name": ".admin"},
                 {"name": ".reseller_admin"}], "auth": "plaintext:key"})),
-            # GET of .auth account (list containers)
+            # GET of gsmetadata account (list containers)
             ('200 Ok', {}, json.dumps([
                 {"name": ".token", "count": 0, "bytes": 0},
                 {"name": ".account_id",
                  "count": 0, "bytes": 0},
                 {"name": "act", "count": 0, "bytes": 0}])),
-            # GET of .auth account (list containers
+            # GET of gsmetadata account (list containers
            # continuation)
             ('200 Ok', {}, '[]')]))
         resp = Request.blank('/auth/v2',
@@ -1405,7 +1405,7 @@ class TestAuth(unittest.TestCase):
     def test_get_reseller_fail_listing(self):
         self.test_auth.app = FakeApp(iter([
-            # GET of .auth account (list containers)
+            # GET of gsmetadata account (list containers)
             ('503 Service Unavailable', {}, '')]))
         resp = Request.blank('/auth/v2',
                              headers={
@@ -1417,13 +1417,13 @@ class TestAuth(unittest.TestCase):
         self.assertEquals(self.test_auth.app.calls, 1)

         self.test_auth.app = FakeApp(iter([
-            # GET of .auth account (list containers)
+            # GET of gsmetadata account (list containers)
             ('200 Ok', {}, json.dumps([
                 {"name": ".token", "count": 0, "bytes": 0},
                 {"name": ".account_id",
                  "count": 0, "bytes": 0},
                 {"name": "act", "count": 0, "bytes": 0}])),
-            # GET of .auth account (list containers
+            # GET of gsmetadata account (list containers
             # continuation)
             ('503 Service Unavailable', {}, '')]))
         resp = Request.blank('/auth/v2',
@@ -3858,7 +3858,7 @@ class TestAuth(unittest.TestCase):
         except Exception as err:
             exc = err
         self.assertEquals(str(exc), 'Could not get admin user object: '
-                          '/v1/AUTH_.auth/act/usr 503 Service Unavailable')
+                          '/v1/AUTH_gsmetadata/act/usr 503 Service Unavailable')
         self.assertEquals(self.test_auth.app.calls, 1)

     def test_get_admin_detail_success(self):
@@ -4079,7 +4079,7 @@ class TestAuth(unittest.TestCase):
     def test_reseller_admin_but_account_is_internal_use_only(
             self):
-        req = Request.blank('/v1/AUTH_.auth',
+        req = Request.blank('/v1/AUTH_gsmetadata',
                             environ={'REQUEST_METHOD': 'GET'})
         req.remote_user = 'act:usr,act,.reseller_admin'
         resp = self.test_auth.authorize(req)

File: tools/gswauth_functional_tests.sh (new executable file)

@@ -0,0 +1,116 @@
#!/bin/bash
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This program expects to be run by tox in a virtual python environment
# so that it does not pollute the host development system
sudo_env()
{
sudo bash -c "PATH=$PATH $*"
}
cleanup()
{
sudo service memcached stop
sudo_env swift-init main stop
sudo rm -rf /etc/swift > /dev/null 2>&1
sudo rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
sudo setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
gswauth_cleanup
}
gswauth_cleanup()
{
sudo rm -rf /mnt/gluster-object/gsmetadata/.* > /dev/null 2>&1
sudo rm -rf /mnt/gluster-object/gsmetadata/* > /dev/null 2>&1
sudo setfattr -x user.swift.metadata /mnt/gluster-object/gsmetadata > /dev/null 2>&1
}
quit()
{
echo "$1"
exit 1
}
fail()
{
cleanup
quit "$1"
}
### MAIN ###
# Only run if there is no configuration in the system
if [ -x /etc/swift ] ; then
quit "/etc/swift exists, cannot run functional tests."
fi
# Check the directories exist
DIRS="/mnt/gluster-object /mnt/gluster-object/test /mnt/gluster-object/test2 /mnt/gluster-object/gsmetadata"
for d in $DIRS ; do
if [ ! -x $d ] ; then
quit "$d must exist on an XFS or GlusterFS volume"
fi
done
export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf
# Install the configuration files
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/gswauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
sudo_env gluster-swift-gen-builders test test2 gsmetadata || fail "Unable to create ring files"
# Start the services
sudo service memcached start || fail "Unable to start memcached"
sudo_env swift-init main start || fail "Unable to start swift"
#swauth-prep
sudo_env swauth-prep -K swauthkey || fail "Unable to prep gswauth"
mkdir functional_tests > /dev/null 2>&1
nosetests -v --exe \
--with-xunit \
--xunit-file functional_tests/gluster-swift-gswauth-functional-TC-report.xml \
--with-html-output \
--html-out-file functional_tests/gluster-swift-gswauth-functional-result.html \
test/functional_auth/gswauth || fail "Functional gswauth test failed"
# clean up gsmetadata dir
gswauth_cleanup
#swauth-prep
sudo_env swauth-prep -K swauthkey || fail "Unable to prep gswauth"
sudo_env swauth-add-user -K swauthkey -a test tester testing || fail "Unable to add user tester"
sudo_env swauth-add-user -K swauthkey -a test2 tester2 testing2 || fail "Unable to add user tester2"
sudo_env swauth-add-user -K swauthkey test tester3 testing3 || fail "Unable to add user tester3"
nosetests -v --exe \
--with-xunit \
--xunit-file functional_tests/gluster-swift-gswauth-generic-functional-TC-report.xml \
--with-html-output \
--html-out-file functional_tests/gluster-swift-gswauth-generic-functional-result.html \
test/functional || fail "Functional tests failed"
nosetests -v --exe \
--with-xunit \
--xunit-file functional_tests/gluster-swift-gswauth-functionalnosetests-TC-report.xml \
--with-html-output \
--html-out-file functional_tests/gluster-swift-gswauth-functionalnosetests-result.html \
test/functionalnosetests || fail "Functional-nose tests failed"
cleanup
exit 0

File: tox.ini

@@ -23,6 +23,7 @@ downloadcache = ~/cache/pip
 changedir = {toxinidir}
 whitelist_externals=bash
 commands = bash tools/functional_tests.sh
+           bash tools/gswauth_functional_tests.sh

 [testenv:pep8]
 deps =