Functional tests for SwiftKerbAuth filter.

This provides an infrastructure for swiftkerbauth
related functional test cases.
More test cases will be added later.
Added a section in swiftkerbauth guide about how to run
functional tests.

test/functional_auth/swiftkerbauth
----------------------------------

A new authentication filter related functional
tests and configuration to reside here.
The configuration would help setup the
environment. All the generic functional tests
should run fine with PASSIVE mode of swiftkerbauth.
Please refer to the swiftkerbauth documentation for
ACTIVE/PASSIVE mode of working.

swiftkerbauth/test_swkrbath_active.py
-------------------------------------
This file has all the testcases of active mode of
swiftkerbauth. More test cases to be added later.

SwiftKerbAuth related test cases are meant to run
on the setup where SwiftKerbAuth is setup and
installed.

Change-Id: Ibc2a3945f5c9b6714475fcec0ee9d153debb48e3
Signed-off-by: Chetan Risbud <crisbud@redhat.com>
Reviewed-on: http://review.gluster.org/6925
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
This commit is contained in:
Chetan Risbud 2014-02-06 16:04:40 +05:30 committed by Luis Pabon
parent 73aa6e7883
commit 2505d82815
13 changed files with 568 additions and 5 deletions

View File

@ -1,10 +1,11 @@
#swiftkerbauth
* [Installing Kerberos module for Apache on IPA client] (#httpd-kerb-install)
* [Creating HTTP Service Principal on IPA server] (#http-principal)
* [Installing and configuring swiftkerbauth on IPA client] (#install-swiftkerbauth)
* [Installing Kerberos module for Apache] (#httpd-kerb-install)
* [Creating HTTP Service Principal] (#http-principal)
* [Installing and configuring swiftkerbauth] (#install-swiftkerbauth)
* [Using swiftkerbauth] (#use-swiftkerbauth)
* [Configurable Parameters] (#config-swiftkerbauth)
* [Functional tests] (#swfunctest)
<a name="httpd-kerb-install" />
## Installing Kerberos module for Apache on IPA client
@ -487,5 +488,30 @@ Default value: passive
#### realm_name
This is applicable only when the auth_method=passive. This option specifies
realm name if RHS server belongs to more than one realm and realm name is not
realm name if storage server belongs to more than one realm and realm name is not
part of the username specified in X-Auth-User header.
<a name="swfunctest" />
## Functional tests for SwiftkerbAuth
Functional tests to be run on the storage node after SwiftKerbAuth is setup using
either IPA server or Windows AD. The gluster-swift/doc/markdown/swiftkerbauth
directory contains the SwiftkerbAuth setup documents. There are two modes of
working with SwiftKerbAuth. 'PASSIVE' mode indicates the client is outside the
domain configured using SwiftKerbAuth. Client provides the 'Username' and
'Password' while invoking a command. SwiftKerbAuth auth filter code then
would get the ticket granting ticket from AD server or IPA server.
In 'ACTIVE' mode of SwiftKerbAuth, User is already logged into storage node using
its kerberos credentials. That user is authenticated across AD/IPA server.
In PASSIVE mode all the generic functional tests are run. ACTIVE mode has a
different way of acquiring Ticket Granting Ticket. And hence the different
framework of functional tests there.
The accounts, users, passwords must be prepared on AD/IPA server as per
mentioned in test/functional_auth/swiftkerbauth/conf/test.conf
Command to invoke SwiftKerbAuth functional tests is
> $tox -e swfunctest
This would run both ACTIVE and PASSIVE mode functional test cases.

View File

@ -144,7 +144,6 @@ class Connection(object):
auth_scheme = 'https://' if self.auth_ssl else 'http://'
auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
auth_url = auth_scheme + auth_netloc + auth_path
(storage_url, storage_token) = get_auth(
auth_url, auth_user, self.password, snet=False,
tenant_name=self.account, auth_version=self.auth_version,

View File

@ -0,0 +1,36 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store, can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

View File

@ -0,0 +1,36 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store,can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
mount_check = true
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

View File

@ -0,0 +1,13 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost
# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
accurate_size_in_listing = off

View File

@ -0,0 +1,51 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store,can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value same for all the three services but can
# be kept different if environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value to match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65536

View File

@ -0,0 +1,70 @@
[DEFAULT]
bind_port = 8080
user = root
# Consider using 1 worker per CPU
workers = 1
[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache proxy-logging kerbauth proxy-server
[app:proxy-server]
use = egg:gluster_swift#proxy
log_facility = LOG_LOCAL1
log_level = WARN
# The API allows for account creation and deletion, but since Gluster/Swift
# automounts a Gluster volume for a given account, there is no way to create
# or delete an account. So leave this off.
allow_account_management = false
account_autocreate = true
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
# of the entire object given that all metadata is stored in the object
# extended attributes (no .meta file used after creation) and no container
# sync feature to present.
object_post_as_copy = false
# Only need to recheck the account exists once a day
recheck_account_existence = 86400
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Timeout clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
# stripe width (not stripe element size) of your storage volume is a good
# starting point. See below for sizing information.
object_chunk_size = 65536
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
# be queued to the object server for processing. Given one proxy server worker
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worse case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:proxy-logging]
use = egg:swift#proxy_logging
access_log_level = WARN
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:kerbauth]
use = egg:gluster_swift#kerbauth
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211

View File

@ -0,0 +1,84 @@
[DEFAULT]
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster
# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.
[swift-constraints]
# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail.
# Default is 1 TiB = 2**30*1024
max_file_size = 1099511627776
# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.
#max_meta_name_length = 128
# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value
#max_meta_value_length = 256
# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object
#max_meta_count = 90
# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)
#max_meta_overall_size = 4096
# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.
max_object_name_length = 2048
# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).
max_object_name_component_length = 255
# container_listing_limit is the default (and max) number of items
# returned for a container listing request
#container_listing_limit = 10000
# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000
# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.
max_account_name_length = 255
# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.
max_container_name_length = 255

View File

@ -0,0 +1,49 @@
[func_test]
# Swiftkerbauth configuration
auth_host = 127.0.0.1
auth_port = 8080
auth_prefix = /auth/
auth_scheme = http://
auth_mode = passive
auth_version = 1
domain_name = RHELBOX.COM
#All the accounts, users & passwords to be prepared on kerberos server.
# Primary functional test account (needs admin access to the account)
# Note: Account name to be prepared on kerberos server 'AUTH_account'
account = test
username = tester
password = testing
# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2
# User on same account as first, but without admin access
username3 = tester3
password3 = testing3
# Default constraints if not defined here, the test runner will try
# to set them from /etc/swift/swift.conf. If that file isn't found,
# the test runner will skip tests that depend on these values.
# Note that the cluster must have "sane" values for the test suite to pass.
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_object_name_length = 1024
#container_listing_limit = 10000
#account_listing_limit = 10000
#max_account_name_length = 256
#max_container_name_length = 256
normalized_urls = True
collate = C
[unit_test]
fake_syslog = False
[probe_test]
# check_server_timeout = 30

View File

@ -0,0 +1,93 @@
#!/usr/bin/python
# Copyright (c) 2010-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from nose import SkipTest
import commands
import os
from test import get_config
from swift.common.bufferedhttp import http_connect_raw as http_connect
config = get_config('func_test')
class Utils:
    """Kerberos credential helpers for the swiftkerbauth functional tests.

    Thin wrappers around the ``kinit``/``kdestroy`` command-line tools;
    they require a reachable KDC (AD or IPA server) prepared as described
    in test/functional_auth/swiftkerbauth/conf/test.conf.
    """

    @classmethod
    def SwiftKerbAuthPrep(cls,
                          user=config['username'], domain=config['domain_name'],
                          passwd=config['password']):
        """Acquire a ticket-granting ticket for ``user@domain`` via kinit.

        Defaults are read from the [func_test] config at import time.
        Returns the (status, output) pair from commands.getstatusoutput;
        status is 0 on success.

        NOTE(review): '<<<' is a bash-ism, but getstatusoutput runs the
        command through /bin/sh -- this only works where /bin/sh is bash.
        Passing the password on the command line also exposes it in the
        process listing; consider feeding it via stdin instead.
        """
        username = '%s@%s' % (user, domain)
        return commands.getstatusoutput('kinit %s <<< %s' % (username, passwd))

    @classmethod
    def SwiftKerbAuthCleanAll(cls):
        """Destroy all cached Kerberos credentials (kdestroy)."""
        return commands.getstatusoutput('kdestroy')
class TestSwKrbAthActive(unittest.TestCase):
    """Functional tests for swiftkerbauth in ACTIVE mode.

    In ACTIVE mode the user is already logged into the storage node with
    Kerberos credentials: setUp() runs kinit (via Utils) and then
    negotiates an X-Auth-Token from the auth URL with curl's --negotiate
    support. Requires a live AD/IPA server prepared per conf/test.conf.
    """

    def setUp(self):
        # Perform kinit in active mode.
        (status, output) = Utils.SwiftKerbAuthPrep()
        self.assertEqual(status, 0, \
            'swkrbauth prep failed with valid credentials'+output)
        # Connection/auth parameters come from the [func_test] section
        # loaded by get_config('func_test') at module level.
        self.auth_host = config['auth_host']
        self.auth_port = int(config['auth_port'])
        self.auth_prefix = config.get('auth_prefix', '/auth/')
        self.auth_version = str(config.get('auth_version', '1'))
        self.account_name = config['account']
        self.username = config['username']
        self.password = config['password']
        self.auth_scheme = config['auth_scheme']
        # Prepare auth_url. e.g. http://client.rhelbox.com:8080/auth/v1.0
        if self.auth_version == "1":
            self.auth_path = '%sv1.0' % (self.auth_prefix)
        else:
            self.auth_path = self.auth_prefix
        self.auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
        auth_url = self.auth_scheme + self.auth_netloc + self.auth_path
        # Obtain the X-Auth-Token from kerberos server to use it in further
        # testing
        self.auth_token = None
        (status, output) = commands.getstatusoutput('curl -v -u : --negotiate\
            --location-trusted %s' % (auth_url))
        self.assertEqual(status, 0, 'Token negotiation failed:' +output)
        # curl -v echoes the response headers; extract the token from the
        # "X-Auth-Token: AUTH..." header line.
        match = re.search('X-Auth-Token: AUTH.*', output)
        if match:
            self.auth_token = match.group(0).split(':')[1].strip()
        else:
            self.fail('No X-Auth-Token found, failed')

    def tearDown(self):
        # Drop the Kerberos ticket cache acquired in setUp().
        Utils.SwiftKerbAuthCleanAll()

    def _get_auth_token(self):
        # Header dict carrying the token negotiated in setUp().
        return {'X-Auth-Token' : self.auth_token}

    def testGetAccounts(self):
        # TODO: The test case is to perform GET on the account mentioned via
        # configuration file. This is a sample test case. The whole test
        # suite can be enhanced further to have further complicated test cases.
        path = '/v1/AUTH_%s' % (config['account'])
        headers = self._get_auth_token()
        conn = http_connect(config['auth_host'], config['auth_port'], 'GET',
            path, headers)
        resp = conn.getresponse()
        # 204 No Content: the account GET (empty listing) succeeded.
        self.assertTrue(resp.status == 204)

View File

@ -0,0 +1,102 @@
#!/bin/bash
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This program expects to be run by tox in a virtual python environment
# so that it does not pollute the host development system
# Run the given command line under sudo while preserving the caller's
# PATH (tox's virtualenv bin dir), which a plain `sudo` would reset.
sudo_env()
{
    sudo bash -c "PATH=$PATH $*"
}
# Stop the services, remove the installed Swift configuration, and scrub
# every account directory under the GlusterFS mount (its contents plus the
# swift metadata xattr) so the next run starts from a clean slate.
cleanup()
{
    sudo service memcached stop
    sudo_env swift-init main stop
    sudo rm -rf /etc/swift > /dev/null 2>&1
    for acct in /mnt/gluster-object/* ; do
        # The glob already yields absolute paths; the original prepended
        # /mnt/gluster-object/ a second time, so nothing was ever removed
        # (and the redirects hid the errors).
        sudo rm -rf "${acct}"/* > /dev/null 2>&1
        sudo setfattr -x user.swift.metadata "${acct}" > /dev/null 2>&1
    done
}
# Print the supplied message and terminate the script with exit status 1.
quit()
{
    echo "$1"
    exit 1
}
# Abort after a failed step: tear down services/config via cleanup(),
# then print the supplied message and exit non-zero via quit().
fail()
{
    cleanup
    quit "$1"
}
### MAIN ###

# Only run if there is no configuration in the system: this script installs
# its own configs into /etc/swift and removes them in cleanup().
# Use -e (exists) rather than -x (executable/searchable) -- presence is
# what matters, not permissions.
if [ -e /etc/swift ] ; then
    quit "/etc/swift exists, cannot run functional tests."
fi

# Check the required account directories exist on the storage mount.
DIRS="/mnt/gluster-object /mnt/gluster-object/test /mnt/gluster-object/test2 /mnt/gluster-object/gsmetadata"
for d in $DIRS ; do
    # -d: each entry must be an existing directory, not merely searchable.
    if [ ! -d "$d" ] ; then
        quit "$d must exist on an XFS or GlusterFS volume"
    fi
done

export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

# Install the configuration files
sudo mkdir /etc/swift > /dev/null 2>&1
sudo cp -r test/functional_auth/swiftkerbauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"

# Create the ring files: one entry per account directory on the mount.
accounts=""
for acct in /mnt/gluster-object/* ; do
    acct=`basename $acct`
    accounts="$acct $accounts"
done
sudo_env gluster-swift-gen-builders $accounts || fail "Unable to create ring files"

# Start the services
sudo service memcached start || fail "Unable to start memcached"
sudo_env swift-init main start || fail "Unable to start swift"

mkdir functional_tests > /dev/null 2>&1

echo "== SwiftKerbAuth: Functional Tests =="

nosetests -v --exe \
    --with-xunit \
    --xunit-file functional_tests/gluster-swift-swiftkerbauth-generic-functional-TC-report.xml \
    --with-html-output \
    --html-out-file functional_tests/gluster-swift-swiftkerbauth-generic-functional-result.html \
    test/functional_auth/swiftkerbauth || fail "Functional tests failed"

#nosetests -v --exe \
#    --with-xunit \
#    --xunit-file functional_tests/gluster-swift-swiftkerbauth-functionalnosetests-TC-report.xml \
#    --with-html-output \
#    --html-out-file functional_tests/gluster-swift-swiftkerbauth-functionalnosetests-result.html \
#    test/functional || fail "Functional-nose tests failed"

cleanup
exit 0

View File

@ -34,6 +34,10 @@ commands = bash tools/functional_tests.sh
changedir = {toxinidir}
commands = bash tools/keystone_functional_tests.sh
[testenv:swfunctest]
changedir = {toxinidir}
commands = bash tools/swkrbath_functional_tests.sh
[testenv:pep8]
deps =
--download-cache={homedir}/.pipcache