Merge pull request #19 from prashanthpai/sp-cleanup
Support for Storage Policies

commit 89c8ec0ef7

.functests (modified)
@@ -18,6 +18,15 @@
 # This program expects to be run by tox in a virtual python environment
 # so that it does not pollute the host development system
 
+GREEN='\e[0;32m'
+RED='\e[0;31m'
+NC='\e[0m' # No Color
+
+print()
+{
+    echo -e "\n${GREEN}$*${NC}"
+}
+
 sudo_env()
 {
     sudo bash -c "PATH=$PATH $*"
@@ -25,54 +34,61 @@ sudo_env()
 
 cleanup()
 {
-    sudo service memcached stop
-    sudo_env swift-init main stop
-    sudo rm -rf /etc/swift > /dev/null 2>&1
-    sudo rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
-    sudo setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
+    print "Cleaning SoF mount point"
+    sudo rm -rf /mnt/swiftonfile/test/* > /dev/null 2>&1
+    sudo setfattr -x user.swift.metadata /mnt/swiftonfile/test > /dev/null 2>&1
 }
 
-quit()
-{
-    echo "$1"
-    exit 1
-}
-
-
 fail()
 {
     cleanup
-    quit "$1"
+    echo -e "\n${RED}$1${NC}"
+    exit 1
 }
 
 ### MAIN ###
-# This script runs functional tests only with tempauth
 
-# Only run if there is no configuration in the system
-if [ -x /etc/swift ] ; then
-    quit "/etc/swift exists, cannot run functional tests."
-fi
+print """
+Before proceeding forward, please make sure you already have:
+1. SAIO deployment.
+2. XFS/GlusterFS mounted at /mnt/swiftonfile/test
+3. Added swiftonfile policy section to swift.conf file.
+   Example:
+
+   [storage-policy:2]
+   name = swiftonfile
+   default = yes
+
+4. Copied etc/object-server.conf-gluster to /etc/swift/object-server/5.conf
+
+5. Generated ring files for swiftonfile policy.
+   Example: for policy with index 2
+
+   swift-ring-builder object-2.builder create 1 1 1
+   swift-ring-builder object-2.builder add r1z1-127.0.0.1:6050/test 1
+   swift-ring-builder object-2.builder rebalance
+
+6. Started memcached and swift services.
+"""
+
+prompt=true
+if [ "$1" == "-q" ]; then
+    prompt=false
+fi
 
-# Check the directories exist
-DIRS="/mnt/gluster-object /mnt/gluster-object/test /mnt/gluster-object/test2"
-for d in $DIRS ; do
-    if [ ! -x $d ] ; then
-        quit "$d must exist on an XFS or GlusterFS volume"
-    fi
-done
+if $prompt; then
+    read -p "Continue ? " -r
+    if [[ $REPLY =~ ^[Nn]$ ]]
+    then
+        exit 1
+    fi
+fi
 
 export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf
 
-# Install the configuration files
-sudo mkdir /etc/swift > /dev/null 2>&1
-sudo cp -r test/functional_auth/tempauth/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
-sudo_env gluster-swift-gen-builders test test2 || fail "Unable to create ring files"
-
-# Start the services
-sudo service memcached start || fail "Unable to start memcached"
-sudo_env swift-init main start || fail "Unable to start swift"
-
 mkdir functional_tests_result > /dev/null 2>&1
 
+print "Running functional tests"
 nosetests -v --exe \
     --with-xunit \
     --xunit-file functional_tests_result/gluster-swift-generic-functional-TC-report.xml \

.functests-ci (new executable file, 111 lines)

#!/bin/bash

# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This program expects to be run by tox in a virtual python environment
# so that it does not pollute the host development system

GREEN='\e[0;32m'
RED='\e[0;31m'
NC='\e[0m' # No Color

print()
{
    echo -e "\n${GREEN}$*${NC}"
}

sudo_env()
{
    sudo bash -c "PATH=$PATH $*"
}

cleanup()
{
    print "Stopping memcached"
    sudo service memcached stop
    print "Stopping swift services"
    sudo_env swift-init main stop
    print "Cleaning SoF mount point"
    sudo rm -rf /mnt/swiftonfile/test/* > /dev/null 2>&1
    sudo setfattr -x user.swift.metadata /mnt/swiftonfile/test > /dev/null 2>&1
    print "Invoking SAIO's resetswift script"
    resetswift
}

fail()
{
    cleanup
    echo -e "\n${RED}$1${NC}"
    exit 1
}

### MAIN ###

print """
Before proceeding forward, please make sure you already have:
1. SAIO deployment. (with resetswift and remakerings script)
2. XFS/GlusterFS mounted at /mnt/swiftonfile/test
"""

prompt=true
if [ "$1" == "-q" ]; then
    prompt=false
fi

if $prompt; then
    read -p "Continue ? " -r
    if [[ $REPLY =~ ^[Nn]$ ]]
    then
        exit 1
    fi
fi

print "Invoking SAIO's resetswift script"
resetswift

print "Invoking SAIO's remakerings script"
remakerings

print "Copying conf files into /etc/swift. This will replace swift.conf and test.conf"
\cp etc/object-server.conf-gluster /etc/swift/object-server/5.conf
\cp test/functional/conf/swift.conf /etc/swift/swift.conf
\cp test/functional/conf/test.conf /etc/swift/test.conf

print "Generating additional object-rings for swiftonfile SP"
cd /etc/swift
swift-ring-builder object-2.builder create 1 1 1
swift-ring-builder object-2.builder add r1z1-127.0.0.1:6050/test 1
swift-ring-builder object-2.builder rebalance
cd -

export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

print "Starting memcached"
sudo service memcached start || fail "Unable to start memcached"
print "Starting swift services"
sudo_env swift-init main start || fail "Unable to start swift"

mkdir functional_tests_result > /dev/null 2>&1

print "Running functional tests"
nosetests -v --exe \
    --with-xunit \
    --xunit-file functional_tests_result/gluster-swift-generic-functional-TC-report.xml \
    --with-html-output \
    --html-out-file functional_tests_result/gluster-swift-generic-functional-result.html \
    test/functional || fail "Functional tests failed"

cleanup
exit 0

@@ -1,89 +0,0 @@ (deleted file: the gluster-swift-gen-builders helper script)

#!/bin/bash

# Note that these port numbers must match the configured values for the
# various servers in their configuration files.
declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \
                 ["object.builder"]=6010)

print_usage() {
    echo "
    NAME
        gluster-swift-gen-builders - Registers GlusterFS volumes to be accessed by
        object storage.
    SYNOPSIS
        gluster-swift-gen-builders [-v] [-h] volumes...
    DESCRIPTION
        Register GlusterFS volumes to be accessed over OpenStack Swift object
        storage.
    OPTIONS
        -v or --verbose
            Verbose
        -h or --help
            Prints help screen
    EXAMPLES
        gluster-swift-gen-builders myvol1 myvol2
            - Creates new ring files with myvol1 and myvol2

        gluster-swift-gen-builders myvol2
            - Creates new ring files by removing myvol1
    "
}

builder_files="account.builder container.builder object.builder"

function create {
    swift-ring-builder $1 create 1 1 1 >> /tmp/out
}

function add {
    swift-ring-builder $1 add z$2-127.0.0.1:$3/$4_ 100.0
}

function rebalance {
    swift-ring-builder $1 rebalance
}

function build {
    swift-ring-builder $1
}

verbose=0
outdev="/dev/null"

if [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then
    verbose=1
    outdev="/dev/stdout"
    shift
fi

if [ "x$1" = "x" ]; then
    echo "Please specify the gluster volume name to use."
    print_usage
    exit 1
fi

if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    print_usage
    exit 0
fi

cd /etc/swift

for builder_file in $builder_files
do
    create $builder_file

    zone=1
    for volname in $@
    do
        add $builder_file $zone ${port[$builder_file]} $volname >& $outdev
        zone=$(expr $zone + 1)
    done

    rebalance $builder_file >& $outdev
    build $builder_file >& $outdev

done

echo "Ring files are prepared in /etc/swift. Please restart object store services"

@@ -1,69 +0,0 @@ (deleted file: the gluster-swift-gen-builders(8) man page)

.TH gluster-swift-gen-builders 8 "gluster-swift helper utility" "18 November 2013" "Red Hat Inc."
.SH NAME
\fBgluster-swift-gen-builders\fP - Registers GlusterFS volumes to be accessed by
OpenStack Swift.
.SH SYNOPSIS
.nf
.fam C
\fBgluster-swift-gen-builders\fP [\fB-v\fP] [\fB-h\fP] volumes\.\.\.
.fam T
.fi
.SH DESCRIPTION
Register GlusterFS volumes to be accessed over OpenStack Swift.
.SH OPTIONS
\fB-v\fP or \fB--verbose\fP
.PP
.nf
.fam C
Verbose
.fam T
.fi
\fB-h\fP or \fB--help\fP
.PP
.nf
.fam C
Prints help screen
.fam T
.fi
.SH EXAMPLES
\fBgluster-swift-gen-builders\fP vol1 vol2
.PP
.nf
.fam C
Creates new ring files with vol1 and vol2
.fam T
.fi
\fBgluster-swift-gen-builders\fP vol2
.PP
.nf
.fam C
Creates new ring files by removing vol1
.fam T
.fi
\fBgluster-swift-gen-builders\fP \fB-v\fP vol1
.PP
.nf
.fam C
Creates new ring files with vol1 (verbose).
.fam T
.fi
\fBgluster-swift-gen-builders\fP \fB-h\fP
.PP
.nf
.fam C
Displays help screen
.fam T
.fi
.SH COPYRIGHT
\fBCopyright\fP (c) 2013 Red Hat, Inc. <http://www.redhat.com>

@@ -1,479 +0,0 @@ (deleted documentation file)

# Authentication Services Start Guide

## Contents
* [Keystone](#keystone)
    * [Overview](#keystone_overview)
    * [Creation of swift accounts](#keystone_swift_accounts)
    * [Configuration](#keystone_configuration)
    * [Configuring keystone endpoint](#keystone_endpoint)
* [GSwauth](#gswauth)
    * [Overview](#gswauth_overview)
    * [Installing GSwauth](#gswauth_install)
    * [User roles](#gswauth_user_roles)
    * [GSwauth Tools](#gswauth_tools)
    * [Authenticating a user](#gswauth_authenticate)
* [Swiftkerbauth](#swiftkerbauth)
    * [Architecture](swiftkerbauth/architecture.md)
    * [RHEL IPA Server Guide](swiftkerbauth/ipa_server.md)
    * [RHEL IPA Client Guide](swiftkerbauth/ipa_client.md)
    * [Windows AD Server Guide](swiftkerbauth/AD_server.md)
    * [Windows AD Client Guide](swiftkerbauth/AD_client.md)
    * [Swiftkerbauth Guide](swiftkerbauth/swiftkerbauth_guide.md)

## <a name="keystone" />Keystone ##
The standard OpenStack authentication service.

### <a name="keystone_overview" />Overview ###
[Keystone](https://wiki.openstack.org/wiki/Keystone) is the identity
service for OpenStack, used for authentication and authorization when
interacting with OpenStack services.

Configuring gluster-swift to authenticate against keystone is thus
very useful because it allows users to access a gluster-swift storage
using the same credentials used for all other OpenStack services.

Currently, gluster-swift has a strict mapping of one account to a
GlusterFS volume, and this volume has to be named after the **tenant
id** (aka **project id**) of the user accessing it.

### <a name="keystone_installation" />Installation ###

Keystone authentication is performed using
[swift.common.middleware.keystone](http://docs.openstack.org/developer/swift/middleware.html#module-swift.common.middleware.keystoneauth),
which is part of swift itself. It depends on the keystone python APIs,
contained in the package `python-keystoneclient`.

You can install `python-keystoneclient` from the packages of your
distribution by running:

* on Ubuntu:

        sudo apt-get install python-keystoneclient

* on Fedora:

        sudo yum install python-keystoneclient

otherwise you can install it via pip:

    sudo pip install python-keystoneclient

### <a name="keystone_swift_accounts" />Creation of swift accounts ###

Due to current limitations of gluster-swift, you *must* create one
volume for each Keystone tenant (project), and its name *must* match
the *tenant id* of the tenant.

You can get the tenant id from the output of the command `keystone
tenant-get`, for example:

    # keystone tenant-get demo
    +-------------+----------------------------------+
    |   Property  |              Value               |
    +-------------+----------------------------------+
    | description |                                  |
    |   enabled   |               True               |
    |      id     | a9b091f85e04499eb2282733ff7d183e |
    |     name    |               demo               |
    +-------------+----------------------------------+

will get the tenant id of the tenant `demo`.

Create the volume as usual:

    gluster volume create <tenant_id> <hostname>:<brick> ...
    gluster volume start <tenant_id>

Once you have created all the volumes you need, you must re-generate
the swift ring:

    gluster-swift-gen-builders <tenant_id> [<tenant_id> ...]

After generation of swift rings you always have to restart the object,
account and container servers.

### <a name="keystone_configuration" />Configuration of the proxy-server ###

You only need to configure the proxy-server in order to enable
keystone authentication. The configuration is no different from what
is done for a standard swift installation (cf. for instance the
related
[swift documentation](http://docs.openstack.org/developer/swift/overview_auth.html#keystone-auth)),
however we report it for completeness.

In the configuration file of the proxy server (usually
`/etc/swift/proxy-server.conf`) you must modify the main pipeline and
add `authtoken` and `keystoneauth`:

Was:
~~~
[pipeline:main]
pipeline = catch_errors healthcheck cache ratelimit tempauth proxy-server
~~~
Change To:
~~~
[pipeline:main]
pipeline = catch_errors healthcheck cache ratelimit authtoken keystoneauth proxy-server
~~~

(note that we also removed `tempauth`, although this is not necessary)

Add configuration for the `authtoken` middleware by adding the following section:

    [filter:authtoken]
    paste.filter_factory = keystone.middleware.auth_token:filter_factory
    auth_host = KEYSTONE_HOSTNAME
    auth_port = 35357
    auth_protocol = http
    auth_uri = http://KEYSTONE_HOSTNAME:5000/
    admin_tenant_name = TENANT_NAME
    admin_user = SWIFT_USERNAME
    admin_password = SWIFT_PASSWORD
    include_service_catalog = False

`SWIFT_USERNAME`, `SWIFT_PASSWORD` and `TENANT_NAME` will be used by
swift to get an admin token from `KEYSTONE_HOSTNAME`, used to
authorize user tokens, so they must match a user in keystone with
administrative privileges.

Add configuration for the `keystoneauth` middleware:

    [filter:keystoneauth]
    use = egg:swift#keystoneauth
    # Operator roles is the role which user would be allowed to manage a
    # tenant and be able to create container or give ACL to others.
    operator_roles = Member, admin

Restart the `proxy-server` service.

### <a name="keystone_endpoint" />Configuring keystone endpoint ###

In order to be able to use the `swift` command line you also need to
configure keystone by adding a service and its relative endpoint. Up
to date documentation can be found in the OpenStack documentation, but
we report it here for completeness:

First of all create the swift service of type `object-store`:

    $ keystone service-create --name=swift \
        --type=object-store --description="Swift Service"
    +-------------+----------------------------------+
    |   Property  |              Value               |
    +-------------+----------------------------------+
    | description |          Swift Service           |
    |      id     | 272efad2d1234376cbb911c1e5a5a6ed |
    |     name    |              swift               |
    |     type    |           object-store           |
    +-------------+----------------------------------+

and use the `id` of the service you just created to create the
corresponding endpoint:

    $ keystone endpoint-create \
        --region RegionOne \
        --service-id=<service_id> \
        --publicurl 'http://<swift-host>:8080/v1/AUTH_$(tenant_id)s' \
        --internalurl 'http://<swift-host>:8080/v1/AUTH_$(tenant_id)s' \
        --adminurl 'http://<swift-host>:8080/v1'

Now you should be able to use the swift command line to list the containers of your account with:

    $ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
        -U <tenant-name>:<username> -K <password> list

to create a container:

    $ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
        -U <tenant-name>:<username> -K <password> post mycontainer

and upload a file:

    $ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
        -U <tenant-name>:<username> -K <password> upload <filename>

## <a name="gswauth" />GSwauth ##

### <a name="gswauth_overview" />Overview ###
An easily deployable GlusterFS-aware authentication service based on [Swauth](http://gholt.github.com/swauth/).
GSwauth is a WSGI middleware that uses Swift itself as a backing store to
maintain its metadata.

This model has the benefit of having the metadata available to all proxy servers
and saving the data to a GlusterFS volume. To protect the metadata, the GlusterFS
volume should only be able to be mounted by the systems running the proxy servers.

Currently, gluster-swift has a strict mapping of one account to a GlusterFS volume.
In future releases, this will be enhanced to support multiple accounts per GlusterFS
volume.

See <http://gholt.github.com/swauth/> for more information on Swauth.

### <a name="gswauth_install" />Installing GSwauth ###

1. GSwauth is installed by default with Gluster-Swift.

1. Create and start the `gsmetadata` gluster volume:
~~~
gluster volume create gsmetadata <hostname>:<brick>
gluster volume start gsmetadata
~~~

1. Run `gluster-swift-gen-builders` with all volumes that should be
accessible by gluster-swift, including `gsmetadata`:
~~~
gluster-swift-gen-builders gsmetadata <other volumes>
~~~

1. Change your proxy-server.conf pipeline to have gswauth instead of tempauth:

    Was:
~~~
[pipeline:main]
pipeline = catch_errors cache tempauth proxy-server
~~~
    Change To:
~~~
[pipeline:main]
pipeline = catch_errors cache gswauth proxy-server
~~~

1. Add to your proxy-server.conf the section for the GSwauth WSGI filter:
~~~
[filter:gswauth]
use = egg:gluster_swift#gswauth
set log_name = gswauth
super_admin_key = gswauthkey
metadata_volume = gsmetadata
auth_type = sha1
auth_type_salt = swauthsalt
token_life = 86400
max_token_life = 86400
~~~

1. Restart your proxy server: ``swift-init proxy reload``

##### Advanced options for GSwauth WSGI filter:

* `default-swift-cluster` - default storage-URL for newly created accounts. When attempting to authenticate with a user for the first time, the return information is the access token and the storage-URL where data for the given account is stored.

* `token_life` - set default token life. The default value is 86400 (24 hrs).

* `max_token_life` - the maximum token life. Users can set a token lifetime when requesting a new token with the header `x-auth-token-lifetime`. If the passed-in value is bigger than `max_token_life`, then `max_token_life` will be used, as in the sketch below.
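
A minimal sketch of that clamping rule (hypothetical helper name; the actual gswauth code differs in detail):

~~~
def effective_token_life(requested, token_life=86400, max_token_life=86400):
    """Pick the lifetime for a new token.

    `requested` is the value of the x-auth-token-lifetime header,
    or None if the client did not send one.
    """
    if requested is None:
        return token_life                       # configured default
    return min(int(requested), max_token_life)  # never exceed the cap

# effective_token_life(3600)   -> 3600
# effective_token_life(999999) -> 86400
~~~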

### <a name="gswauth_user_roles" />User Roles
There are only three user roles in GSwauth:

* A regular user has basically no rights and needs to be given both read and write privileges to any container.
* The `admin` user is a super-user at the account level. This user can create and delete users for the account they are members of, and has both read and write privileges to all stored objects in that account.
* The `reseller admin` user is a super-user at the cluster level. This user can create and delete accounts and users, and has read/write privileges to all accounts under that cluster.

| Role/Group | get list of accounts | get Account Details (users, etc.) | Create Account | Delete Account | Get User Details | Create admin user | Create reseller-admin user | Create regular user | Delete admin user | Delete reseller-admin user | Delete regular user | Set Service Endpoints | Get Account Groups | Modify User |
| ----------------------- |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
| .super_admin (username) |x|x|x|x|x|x|x|x|x|x|x|x|x|x|
| .reseller_admin (group) |x|x|x|x|x|x| |x|x| |x|x|x|x|
| .admin (group)          | |x| | |x|x| |x|x| |x| |x|x|
| regular user (type)     | | | | | | | | | | | | | | |

### <a name="gswauth_tools" />GSwauth Tools
GSwauth provides CLI tools to facilitate managing accounts and users. All tools have some options in common:

#### Common Options:
* -A, --admin-url: The URL to the auth
    * Default: `http://127.0.0.1:8080/auth/`
* -U, --admin-user: The user with admin rights to perform action
    * Default: `.super_admin`
* -K, --admin-key: The key for the user with admin rights to perform action
    * no default value

#### gswauth-prep:
Prepare the gluster volume where gswauth will save its metadata.

~~~
gswauth-prep [option]
~~~

Example:

~~~
gswauth-prep -A http://10.20.30.40:8080/auth/ -K gswauthkey
~~~

#### gswauth-add-account:
Create an account. Currently there is a requirement that an account must map to a gluster volume. The gluster volume must not exist at the time the account is being created.

~~~
gswauth-add-account [option] <account_name>
~~~

Example:

~~~
gswauth-add-account -K gswauthkey <account_name>
~~~

#### gswauth-add-user:
Create a user. If the provided account does not exist, it will be automatically created before creating the user.
Use the `-r` flag to create a reseller admin user and the `-a` flag to create an admin user. To change the password or make the user an admin, just run the same command with the new information.

~~~
gswauth-add-user [option] <account_name> <user> <password>
~~~

Example:

~~~
gswauth-add-user -K gswauthkey -a test ana anapwd
~~~

**Change password examples**

Command to update password/key of regular user:

~~~
gswauth-add-user -U account1:user1 -K old_pass account1 user1 new_pass
~~~

Command to update password/key of account admin:

~~~
gswauth-add-user -U account1:admin -K old_pass -a account1 admin new_pass
~~~

Command to update password/key of reseller_admin:

~~~
gswauth-add-user -U account1:radmin -K old_pass -r account1 radmin new_pass
~~~

#### gswauth-delete-account:
Delete an account. An account cannot be deleted if it still contains users; an error will be returned.

~~~
gswauth-delete-account [option] <account_name>
~~~

Example:

~~~
gswauth-delete-account -K gswauthkey test
~~~

#### gswauth-delete-user:
Delete a user.

~~~
gswauth-delete-user [option] <account_name> <user>
~~~

Example:

~~~
gswauth-delete-user -K gswauthkey test ana
~~~

#### gswauth-set-account-service:
Sets a service URL for an account. This can only be set by a reseller admin.
The command can be used to change the default storage URL for a given account.
All accounts have the same storage-URL default value, which comes from the `default-swift-cluster`
option.

~~~
gswauth-set-account-service [options] <account> <service> <name> <value>
~~~

Example:

~~~
gswauth-set-account-service -K gswauthkey test storage local http://newhost:8080/v1/AUTH_test
~~~

#### gswauth-list:
List information about accounts and users.

* If `[account]` and `[user]` are omitted, a list of accounts will be output.
* If `[account]` is included but not `[user]`, a list of users within the account will be output.
* If `[account]` and `[user]` are included, a list of groups the user belongs to will be output.
* If the `[user]` is `.groups`, the active groups for the account will be listed.

The default output format is tabular. `-p` changes the output to plain text. `-j` changes the
output to JSON format. This will print all information about the given account or user, including
the stored password.

~~~
gswauth-list [options] [account] [user]
~~~

Example:

~~~
gswauth-list -K gswauthkey test ana
+----------+
|  Groups  |
+----------+
| test:ana |
|   test   |
|  .admin  |
+----------+
~~~

#### gswauth-cleanup-tokens:
Delete expired tokens. Users also have the option to provide the expected life of tokens, delete all tokens, or delete all tokens for a given account.

Options:

* `-t`, `--token-life`: The expected life of tokens; token objects modified more than this number of
seconds ago will be checked for expiration (default: 86400).
* `--purge`: Purge all tokens for a given account whether the tokens have expired or not.
* `--purge-all`: Purge all tokens for all accounts and users whether the tokens have expired or not.

~~~
gswauth-cleanup-tokens [options]
~~~

Example:

~~~
gswauth-cleanup-tokens -K gswauthkey --purge test
~~~

### <a name="gswauth_authenticate" />Authenticating a user with swift client
There are two methods of accessing data using the swift client. The first (and simplest) is by providing the user name and password every time. The swift client takes care of acquiring the token from gswauth. See the example below:

~~~
swift -A http://127.0.0.1:8080/auth/v1.0 -U test:ana -K anapwd upload container1 README.md
~~~

The second method is a two-step process, but it allows users to only provide their username and password once. First, users must authenticate with a username and password to get a token and the storage URL. Then, users can make object requests to the storage URL with the given token.

It is important to remember that tokens expire, so the authentication process needs to be repeated every so often.

Authenticate a user with the curl command:

~~~
curl -v -H 'X-Storage-User: test:ana' -H 'X-Storage-Pass: anapwd' -k http://localhost:8080/auth/v1.0
...
< X-Auth-Token: AUTH_tk7e68ef4698f14c7f95af07ab7b298610
< X-Storage-Url: http://127.0.0.1:8080/v1/AUTH_test
...
~~~
Now, the user can access the object storage using the swift client with the given token and storage URL:

~~~
bash-4.2$ swift --os-auth-token=AUTH_tk7e68ef4698f14c7f95af07ab7b298610 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 README.md
README.md
bash-4.2$
bash-4.2$ swift --os-auth-token=AUTH_tk7e68ef4698f14c7f95af07ab7b298610 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test list container1
README.md
~~~
**Note:** Reseller admins must always use the second method to acquire a token in order to be given access to accounts other than their own. The first method of using the username and password will give them access only to their own accounts.

## <a name="swiftkerbauth" />Swiftkerbauth ##
Kerberos authentication filter.

Carsten Clasohm implemented a new authentication filter for swift
that uses Kerberos tickets for single sign-on authentication, and
grants administrator permissions based on the user's group membership
in a directory service like Red Hat Enterprise Linux Identity Management
or Microsoft Active Directory.

@@ -1,2 +0,0 @@ (deleted documentation file)

# Overview and Concepts
TBD

@@ -1,75 +0,0 @@ (deleted documentation file)

# Object Expiration

## Contents
* [Overview](#overview)
* [Setup](#setup)
* [Using object expiration](#using)
* [Running object-expirer daemon](#running-daemon)

<a name="overview" />
## Overview
The object expiration feature offers **scheduled deletion of objects**. The client would use the *X-Delete-At* or *X-Delete-After* headers during an object PUT or POST, and the cluster would automatically quit serving that object at the specified time and would shortly thereafter remove the object from the GlusterFS volume.

Expired objects, however, do appear in container listings until they are deleted by the object-expirer daemon. This behaviour is expected: https://bugs.launchpad.net/swift/+bug/1069849

<a name="setup" />
## Setup
The object expirer uses a separate account (a GlusterFS volume, for now, until multiple accounts per volume are implemented) named *gsexpiring*. You will have to [create a GlusterFS volume](quick_start_guide.md#gluster-volume-setup) by that name.

The object-expirer uses the */etc/swift/object-expirer.conf* configuration file. Make sure that it exists. If not, you can copy it from the */etc* directory of the gluster-swift source repo.

<a name="using" />
## Using object expiration

**PUT an object with X-Delete-At header using curl**

~~~
curl -v -X PUT -H 'X-Delete-At: 1392013619' http://127.0.0.1:8080/v1/AUTH_test/container1/object1 -T ./localfile
~~~

**PUT an object with X-Delete-At header using swift client**

~~~
swift --os-auth-token=AUTH_tk99a39aecc3dd4f80b2b1e801d00df846 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 ./localfile --header 'X-Delete-At: 1392013619'
~~~

where the *X-Delete-At* header takes a Unix epoch timestamp as an integer. For example, the current time in epoch notation can be found by running this command:

~~~
date +%s
~~~

**PUT an object with X-Delete-After header using curl**

~~~
curl -v -X PUT -H 'X-Delete-After: 3600' http://127.0.0.1:8080/v1/AUTH_test/container1/object1 -T ./localfile
~~~

**PUT an object with X-Delete-After header using swift client**

~~~
swift --os-auth-token=AUTH_tk99a39aecc3dd4f80b2b1e801d00df846 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 ./localfile --header 'X-Delete-After: 3600'
~~~

where the *X-Delete-After* header takes an integer number of seconds, after which the object expires. The proxy server that receives the request will convert this header into an *X-Delete-At* header using its current time plus the value given.
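
The conversion is simple arithmetic; a minimal sketch of what the proxy does (illustrative only, not the actual Swift code):

~~~
import time

def delete_after_to_delete_at(headers):
    """Rewrite X-Delete-After into X-Delete-At on a PUT/POST."""
    if 'X-Delete-After' in headers:
        delete_after = int(headers.pop('X-Delete-After'))
        headers['X-Delete-At'] = str(int(time.time()) + delete_after)
    return headers

# At epoch 1392010019, {'X-Delete-After': '3600'}
# becomes {'X-Delete-At': '1392013619'}
~~~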

<a name="running-daemon" />
## Running object-expirer daemon
The object-expirer daemon runs a pass once every X seconds (configurable using the *interval* option in the config file). For every pass it makes, it queries the *gsexpiring* account for "tracker objects". Based on the (timestamp, path) present in the name of each tracker object, the object-expirer then deletes the actual object and the corresponding tracker object.
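
As a sketch, assuming a tracker object is named `<timestamp>-<account>/<container>/<object>` (the exact naming scheme is an implementation detail of gluster-swift):

~~~
def parse_tracker_object(name):
    """Split a tracker object name into scheduled time and target path."""
    timestamp, path = name.split('-', 1)
    return int(timestamp), path

delete_at, path = parse_tracker_object('1392013619-AUTH_test/container1/object1')
# Once time.time() >= delete_at, the expirer deletes `path`
# and then removes the tracker object itself.
~~~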

To run the object-expirer forever as a daemon:
~~~
swift-init object-expirer start
~~~

To run it just once:
~~~
swift-object-expirer -o -v /etc/swift/object-expirer.conf
~~~

**For more information, visit:**
http://docs.openstack.org/developer/swift/overview_expiring_objects.html

@@ -1,67 +0,0 @@ (deleted documentation file)

# Syncing Gluster-Swift with Swift

## Create a release
Create a release in launchpad.net so that we can place the latest swift source for download. We'll place the source here, and it will allow tox in gluster-swift to download the latest code.

## Upload swift release

* Clone the swift git repo
* Go to the release tag or just use the latest
* Type the following to package the swift code:

```
$ python setup.py sdist
$ ls dist
```

* Take the file in the `dist` directory and upload it to the new release we created on launchpad.net.
* Alternatively, if we are syncing with a Swift version which has already been released, we can get the tar.gz file from the Swift launchpad page and upload the same to the gluster-swift launchpad.

## Setup Tox
Now that the swift source is available on launchpad.net, copy its link location and update tox.ini in gluster-swift with the new link.

## Update tests
This part is a little more complicated: now we need to *merge* the latest tests with ours.

[meld](http://meldmerge.org/) is a great tool to make this work easier. The 3-way comparison feature of meld comes in handy to compare three versions of the same file from:

* Latest swift (say v1.13)
* Previous swift (say v1.12)
* gluster-swift (v1.12)

Files that need to be merged:

* Update unit tests

```
$ export SWIFTDIR=../swift
$ meld $SWIFTDIR/tox.ini tox.ini
$ meld $SWIFTDIR/test-requirements.txt tools/test-requires
$ meld $SWIFTDIR/requirements.txt tools/requirements.txt
$ meld $SWIFTDIR/test/unit/proxy/test_servers.py test/unit/proxy/test_server.py
$ cp $SWIFTDIR/test/unit/proxy/controllers/*.py test/unit/proxy/controllers
$ meld $SWIFTDIR/test/unit/__init__.py test/unit/__init__.py
```

* Update all the functional tests.
First check if there are any new files in the swift functional test directory. If there are, copy them over.

* Remember to `git add` any new files.

* Now merge the existing ones:

```
for i in $SWIFTDIR/test/functional/*.py ; do
    meld $i test/functional/`basename $i`
done
```

## Update the version
If needed, update the version now in `gluster/swift/__init__.py`.

## Upload the patch
Upload the patch to Gerrit.

## Update the release in launchpad.net
Upload the gluster-swift*.tar.gz built by Jenkins to launchpad.net once the fix has been committed to the main branch.

@@ -1,206 +0,0 @@ (deleted documentation file)

# AD client setup guide

### Contents
* [Setup Overview](#setup)
* [Configure Network](#network)
* [Installing AD Client](#AD-client)

<a name="setup" />
### Setup Overview

This guide talks about adding a Fedora Linux client to a Windows domain.
The test setup included a client machine with Fedora 19 installed
on it with all the latest packages updated. The crux is to add this Linux
machine to the Windows domain. This Linux box is expected to act as an RHS node on which the swiftkerbauth
and apachekerbauth code would run.

Set hostname (FQDN) to fcclient.winad.com

    # hostnamectl set-hostname "fcclient.winad.com"

    # hostname "fcclient.winad.com"

<a name="network" />
### Configure client

* Deploy Fedora Linux 19.
* Update the system with the latest packages.
* Configure SELinux security parameters.
* Install & configure samba.
* Configure DNS.
* Synchronize the time services.
* Join Domain.
* Install / configure Kerberos client.

The document assumes that installing Fedora Linux and configuring the SELinux
parameters to 'permissive' is known already.

### Install & Configure Samba:
    # yum -y install samba samba-client samba-common samba-winbind
        samba-winbind-clients

    # service smb start
    # ps -aef | grep smb
    # chkconfig smb on

### Synchronize time services
Kerberos authentication and most of the DNS functionality can fail with
clock skew if times are not synchronized.

    # cat /etc/ntp.conf
    server ns1.bos.redhat.com
    server 10.5.26.10

    # service ntpd stop
    # ntpdate 10.16.255.2
    # service ntpd start
    # chkconfig ntpd on

Check if the Windows server in the whole environment is also time-synchronized with the
same source.

    # C:\Users\Administrator>w32tm /query /status | find "Source"
    Source: ns1.xxx.xxx.com

### Configure DNS on client
An improperly resolved hostname is the leading cause of authentication failures.
Best practice is to configure the Fedora client to use the Windows DNS.
'nameserver' below is the IP address of the Windows server.

    # cat /etc/resolv.conf
    domain server.winad.com
    search server.winad.com
    nameserver 10.nn.nnn.3

### Set the hostname of the client properly (FQDN)
    # cat /etc/sysconfig/network
    HOSTNAME=fcclient.winad.com

### Install & Configure kerberos client

    # yum -y install krb5-workstation

Edit /etc/krb5.conf as follows:

    # cat /etc/krb5.conf
    [logging]
    default = FILE:/var/log/krb5libs.log
    kdc = FILE:/var/log/krb5kdc.log
    admin_server = FILE:/var/log/kadmind.log

    [libdefaults]
    default_realm = WINAD.COM
    dns_lookup_realm = false
    dns_lookup_kdc = false
    ticket_lifetime = 24h
    renew_lifetime = 7d
    forwardable = true

    [realms]
    WINAD.COM = {
        kdc = server.winad.com
        admin_server = server.winad.com
    }

    [domain_realm]
    .demo = server.winad.com
    demo = server.winad.com

### Join Domain
Run the command 'system-config-authentication' on the client. This should display a
graphical wizard. The inputs below will help configure this wizard.

- User account database = winbind
- winbind domain = winad
- security model = ads
- winbind ads realm = winad.com
- winbind controller = server.winad.com
- template shell = /bin/bash
- Leave the other options at their defaults.
- Perform Join domain, apply the settings, and quit. Please note this join should
not see any errors. This makes the client Fedora box join the Windows
domain.

### Configure the kerberos client
This brings the users/groups from Windows Active Directory to this
Fedora client.

Edit the /etc/samba/smb.conf file to have the below parameters in the global section:

    # cat /etc/samba/smb.conf
    [global]
    workgroup = winad
    realm = winad.com
    server string = Samba Server Version %v
    security = ADS
    allow trusted domains = No
    password server = server.winad.com
    log file = /var/log/samba/log.%m
    max log size = 50
    idmap uid = 10000-19999
    idmap gid = 10000-19999
    template shell = /bin/bash
    winbind separator = +
    winbind use default domain = Yes
    idmap config REFARCHAD:range = 10000000-19999999
    idmap config REFARCHAD:backend = rid
    cups options = raw

    # service smb stop
    # service winbind stop
    # tar -cvf /var/tmp/samba-cache-backup.tar /var/lib/samba
    # ls -la /var/tmp/samba-cache-backup.tar
    # rm -f /var/lib/samba/*

Verify that no kerberos ticket is available and cached:

    # kdestroy
    # klist

Rejoin the domain:

    # net join -S server -U Administrator

Test that the client rejoined the domain:

    # net ads info

Restart the smb and winbind services, then verify domain users are visible:

    # wbinfo --domain-users

Perform kinit for the domain users prepared on Active Directory. This obtains
the kerberos ticket for user 'auth_admin':

    # kinit auth_admin
    # id -Gn auth_admin

### Notes
Obtaining the HTTP service principal & keytab file and installing it with
swiftkerbauth is covered in the swiftkerbauth guide.

### References
Reference document for adding a Linux box to a Windows domain:
Integrating Red Hat Enterprise Linux 6
with Active Directory

@@ -1,119 +0,0 @@ (deleted documentation file)

# Windows Active Directory & Domain Controller Server Guide

### Contents
* [Setup Overview](#Setup)
* [Installing Active Directory Services](#AD-server)
* [Configuring DNS](#DNS)
* [Adding Users and Groups](#users-groups)

<a name="Setup" />
### Setup Overview

The setup includes a server machine installed with Windows 2008 R2 Server, with
Domain Controller, Active Directory services & DNS server installed along with it.
The steps to install the Windows operating system and the above servers can be found
in the Microsoft documentation. This Windows Active Directory server acts as the
authentication server in the whole setup, providing access control
and permissions for users on certain data objects.

Windows 2008 R2 deployment:

http://technet.microsoft.com/en-us/library/dd283085.aspx

Configuring Active Directory, Domain Services, DNS server:

http://technet.microsoft.com/en-us/library/cc770946.aspx

<a name="AD-server" />
### Installing AD Server

Administrators need to follow the simple instructions in Server Manager on Windows
2008, and should add Active Directory Domain Services & DNS server. It is
recommended to use a static IP for the DNS server. The preferred hostname (FQDN) for
the Windows server could be of the format 'server.winad.com', where
'winad.com' is a domain name.

The following tips will help prepare a test setup neatly.

- Select the Active Directory Domain Services wizard in Server Manager.
- Move on to install it with all the prerequisites, e.g. .NET framework etc.
- Configure Active Directory after installation via expanding the 'Roles'
section in the Server Manager.
- Create a new Domain in the New Forest.
- Type the FQDN, winad.com
- Set the Forest functional level to Windows 2008 R2.
- Select the additional option for this domain controller as DNS server.
- Leave the log locations at the defaults provided by the wizard.
- Set the Administrator password carefully.
- That's it. You are done configuring Active Directory.

<a name="DNS" />
### Configuring DNS

This section explains configuring the DNS server installed on the Windows 2008 R2
server. You must know about:

- Forward lookup zone
- Reverse lookup zone
- Zone type

A forward lookup zone is simply a way to resolve hostnames to IP addresses.
A reverse lookup zone is to look up the DNS hostname of a host IP.

The following tips will help configure the zones on the DNS server.

- Create a forward lookup zone.
- Create it as a primary zone.
- Add the clients using their IP addresses and FQDNs to this forward lookup
zone.
- This adds a type 'A' record for that host on the DNS server.
- Similarly create a reverse lookup zone.
- Add clients' 'PTR' records to this zone via browsing through the forward
zone's clients.

The above setup can be tested on the client once it joins the domain using the 'dig'
command as mentioned below.

On client:

    # dig fcclient.winad.com

This should yield an Answer section mentioning its IP address.

Reverse lookup can be tested using:

    # dig -t ptr 101.56.168.192.in-addr.arpa.

The answer section should state the FQDN of the client.

Repeat the above steps on the client for the Windows AD server as well.

<a name="users-groups" />
### Adding users and groups

The following convention is to be followed in creating group names:

    <reseller-prefix>\_<volume-name>

    <reseller-prefix>\_<account-name>

As of now, account=volume=group

For example:

    AUTH\_test

Adding groups and users to the Windows domain is an easy task.

- Start -> Administrative Tools -> Active Directory Users & Computers
- Expand the domain name which was prepared earlier, e.g. winad.com
- Add groups with appropriate access rights.
- Add users to the groups with appropriate permissions.
- Make sure you set passwords for the users prepared on the AD server.

@@ -1,105 +0,0 @@ (deleted documentation file)

# Architecture

The Swift API is HTTP-based. As described in the Swift documentation
[1], clients first make a request to an authentication URL, providing
a username and password. The reply contains a token which is used in
all subsequent requests.

Swift has a chain of filters through which all client requests go. The
filters to use are configured with the pipeline parameter in
/etc/swift/proxy-server.conf:

    [pipeline:main]
    pipeline = healthcheck cache tempauth proxy-server

For the single sign-on authentication, we added a new filter called
"kerbauth" and put it into the filter pipeline in place of tempauth.

The filter checks the URL for each client request. If it matches the
authentication URL, the client is redirected to a URL on a different
server (on the same machine). The URL is handled by a CGI script, which
is set up to authenticate the client with Kerberos negotiation, retrieve
the user's system groups [2], store them in a memcache ring shared with
the Swift server, and return the authentication token to the client.

When the client provides the token as part of a resource request, the
kerbauth filter checks it against its memcache, grants administrator
rights based on the group membership retrieved from memcache, and
either grants or denies the resource access.

[1] http://docs.openstack.org/api/openstack-object-storage/1.0/content/authentication-object-dev-guide.html

[2] The user data and system groups are usually provided by Red Hat
Enterprise Linux Identity Management or Microsoft Active
Directory. The script relies on the system configuration to be set
accordingly (/etc/nsswitch.conf).

*****

## kerbauth.py

The script kerbauth.py began as a copy of the tempauth.py script from
the tempauth middleware. It contains the following modifications, among
others:

In the __init__ method, we read the ext_authentication_url parameter
from /etc/swift/proxy-server.conf. This is the URL that clients are
redirected to when they access either the Swift authentication URL, or
when they request a resource without a valid authentication token.

The configuration in proxy-server.conf looks like this:

    [filter:kerbauth]
    use = egg:swiftkerbauth#kerbauth
    ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth

The authorize method was changed so that global administrator rights
are granted if the user is a member of the auth_reseller_admin
group. Administrator rights for a specific account like vol1 are
granted if the user is a member of the auth_vol1 group. [3]
|
|
||||||
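In outline, that group check amounts to the following (a simplified
sketch; the real authorize method also evaluates container ACLs and
other cases):

    # Simplified sketch of the group-based decision described above.
    RESELLER_PREFIX = 'AUTH_'

    def is_authorized(account, user_groups):
        # account is e.g. 'AUTH_vol1'; user_groups comes from memcache.
        if 'auth_reseller_admin' in user_groups:
            return True                      # global administrator rights
        account_group = 'auth_' + account[len(RESELLER_PREFIX):]
        return account_group in user_groups  # per-account admin rights

    # A member of auth_vol1 administers the AUTH_vol1 account.
    assert is_authorized('AUTH_vol1', ['jsmith', 'auth_vol1'])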
The denied_response method was changed to return an HTTP redirect to
the external authentication URL if no valid token was provided by the
client.

Most of the handle_get_token method was moved to the external
authentication script. This method now returns an HTTP redirect.

In the __call__ and get_groups methods, we removed support for the
HTTP_AUTHORIZATION header, which is only needed when Amazon S3 is
used.

Like tempauth.py, kerbauth.py uses a Swift wrapper to access
memcache. This wrapper converts the key to an MD5 hash and uses the
hash value to determine on which of a pre-defined list of servers to
store the data.
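The effect of that hashing can be shown in a few lines. The server list
below is an assumption for the example; Swift's actual MemcacheRing also
handles replication and connection pooling:

    # Sketch of MD5-based server selection as described above.
    from hashlib import md5

    MEMCACHE_SERVERS = ['192.168.56.101:11211', '192.168.56.110:11211']

    def server_for_key(key):
        h = int(md5(key.encode('utf8')).hexdigest(), 16)
        return MEMCACHE_SERVERS[h % len(MEMCACHE_SERVERS)]

    print(server_for_key('AUTH_/auth/auth_admin'))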
[3] "auth" is the default reseller prefix, and would be different if
    the reseller_prefix parameter in proxy-server.conf was set.

## swift-auth CGI script

swift-auth resides on an Apache server and assumes that Apache is
configured to authenticate the user before this script is
executed. The script retrieves the username from the REMOTE_USER
environment variable, and checks if there already is a token for this
user in the memcache ring. If not, it generates a new one, retrieves
the user's system groups with "id -Gn USERNAME", stores this
information in the memcache ring, and returns the token to the client.

To allow the CGI script to connect to memcache, the SELinux booleans
httpd_can_network_connect and httpd_can_network_memcache had to be
set.

The tempauth filter uses the uuid module to generate token
strings. This module creates and runs temporary files, which leads to
AVC denial messages in /var/log/audit/audit.log when used from an
Apache CGI script. While the module still works, the audit log would
grow quickly. Instead of writing an SELinux policy module to allow or
to silently ignore these accesses, the swift-auth script uses the
"random" module for generating token strings.

Red Hat Enterprise Linux 6 comes with Python 2.6, which only provides
a method to list the locally defined user groups. To include groups from
Red Hat Enterprise Linux Identity Management and, in the future, from
Active Directory, the "id" command is run in a subprocess.
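Putting the pieces described above together, a skeleton of such a CGI
script could look like the following. This is a sketch under the stated
assumptions (token format, the random module, "id -Gn" in a subprocess),
not the shipped swift-auth script, and it omits the memcache
interaction:

    #!/usr/bin/env python
    # Sketch of a swift-auth style CGI script: Apache (mod_auth_kerb)
    # has already authenticated the user before this runs.
    import os
    import random
    import string
    import subprocess

    def generate_token():
        # random instead of uuid, to avoid the AVC denials noted above.
        return 'AUTH_tk' + ''.join(
            random.choice(string.hexdigits.lower()) for _ in range(32))

    def system_groups(username):
        # "id -Gn" also sees IdM/AD groups via /etc/nsswitch.conf.
        p = subprocess.Popen(['id', '-Gn', username],
                             stdout=subprocess.PIPE)
        out, _ = p.communicate()
        return out.decode('utf8').split()

    username = os.environ['REMOTE_USER']    # set by mod_auth_kerb
    token = generate_token()
    groups = system_groups(username)
    # A real script would first look for an unexpired token for this
    # user in the shared memcache ring and store the new one there.
    print('Status: 200 OK')
    print('X-Auth-Token: %s' % token)
    print('X-Debug-Groups: %s' % ','.join(groups))
    print('')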
@ -1,80 +0,0 @@
# IPA Client Guide

## Contents
* [Setup Overview](#setup)
* [Configure Network](#network)
* [Installing IPA Client](#ipa-client)

<a name="setup" />
## Setup Overview
We have used an F18 box as the IPA client machine and used the FreeIPA
client. This document borrows instructions from the following more
detailed guide: [RHEL 6 Identity Management Guide][]

<a name="network" />
## Configure network

Set the hostname (FQDN) to client.rhelbox.com:
> hostnamectl set-hostname "client.rhelbox.com"
>
> hostname "client.rhelbox.com"

Add the following to /etc/sysconfig/network:

    HOSTNAME=client.rhelbox.com

Add the following to /etc/hostname:

    client.rhelbox.com

Add the following to /etc/hosts:

    192.168.56.110 server.rhelbox.com server
    192.168.56.101 client.rhelbox.com client

Log out and log in again, then verify the hostname:
> hostname --fqdn

Edit */etc/resolv.conf* to add this at the beginning of the file:

    nameserver 192.168.56.110

Warning: NetworkManager changes resolv.conf on restart.

Turn off the firewall:
> service iptables stop
>
> chkconfig iptables off

<a name="ipa-client" />
## Installing IPA Client

Install the IPA client packages:

For RHEL:
> yum install ipa-client ipa-admintools

For Fedora:
> yum install freeipa-client freeipa-admintools

Install the IPA client and add it to the domain:
> ipa-client-install --enable-dns-updates

    Discovery was successful!
    Hostname: client.rhelbox.com
    Realm: RHELBOX.COM
    DNS Domain: rhelbox.com
    IPA Server: server.rhelbox.com
    BaseDN: dc=rhelbox,dc=com

    Continue to configure the system with these values? [no]: yes
    User authorized to enroll computers: admin

Check if the client is configured correctly:
> kinit admin
>
> getent passwd admin

[RHEL 6 Identity Management Guide]: https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Identity_Management_Guide/
@ -1,146 +0,0 @@
# IPA Server Guide

## Contents
* [Setup Overview](#setup)
* [Configure Network](#network)
* [Installing IPA Server](#ipa-server)
* [Configuring DNS](#dns)
* [Adding Users and Groups](#users-groups)

<a name="setup" />
## Setup Overview
We have used a RHEL 6.4 box as the IPA and DNS server. This document
borrows instructions from the following more detailed guide:
[RHEL 6 Identity Management Guide][]

<a name="network" />
## Configure network

Change the hostname (FQDN) to server.rhelbox.com:
> hostname "server.rhelbox.com"

Add the following to the */etc/sysconfig/network* file:

    HOSTNAME=server.rhelbox.com

Add the following to the */etc/hosts* file:

    192.168.56.110 server.rhelbox.com server
    192.168.56.101 client.rhelbox.com client

Log out and log in again, then verify the new hostname:
> hostname --fqdn

Turn off the firewall:
> service iptables stop
>
> chkconfig iptables off

<a name="ipa-server" />
## Installing IPA Server

Install the IPA server packages and DNS dependencies:
> yum install ipa-server bind bind-dyndb-ldap

Run the following interactive setup to install the IPA server with DNS:
> ipa-server-install --setup-dns

    The IPA Master Server will be configured with:
    Hostname: server.rhelbox.com
    IP address: 192.168.56.110
    Domain name: rhelbox.com
    Realm name: RHELBOX.COM

    BIND DNS server will be configured to serve IPA domain with:
    Forwarders: No forwarders
    Reverse zone: 56.168.192.in-addr.arpa.

The installation may take some time.

Check if IPA is installed correctly:
> kinit admin
>
> ipa user-find admin

<a name="dns" />
## Configuring DNS

Edit */etc/resolv.conf* to add this at the beginning of the file:

    nameserver 192.168.56.110

Warning: NetworkManager changes resolv.conf on restart.

Add a DNS A record and PTR record for the client under the rhelbox.com zone:
> ipa dnsrecord-add rhelbox.com client --a-rec=192.168.56.101 --a-create-reverse

Check if DNS resolution is working by running:

> dig server.rhelbox.com

    ;; ANSWER SECTION:
    server.rhelbox.com. 1200 IN A 192.168.56.110

> dig client.rhelbox.com

    ;; ANSWER SECTION:
    client.rhelbox.com. 86400 IN A 192.168.56.101

Check if reverse resolution works:

> dig -t ptr 101.56.168.192.in-addr.arpa.

    ;; ANSWER SECTION:
    101.56.168.192.in-addr.arpa. 86400 IN PTR client.rhelbox.com.

> dig -t ptr 110.56.168.192.in-addr.arpa.

    ;; ANSWER SECTION:
    110.56.168.192.in-addr.arpa. 86400 IN PTR server.rhelbox.com.

<a name="users-groups" />
## Adding users and groups

The following convention is to be followed in creating group names:

    <reseller-prefix>\_<volume-name>

    <reseller-prefix>\_<account-name>

As of now, account = volume = group. For example:

    AUTH\_test

Create the *auth_reseller_admin* user group:
> ipa group-add auth_reseller_admin --desc="Full access to all Swift accounts"

Create the *auth_rhs_test* user group:
> ipa group-add auth_rhs_test --desc="Full access to rhs_test account"

Create user *auth_admin* as a member of the *auth_reseller_admin* user group:
> ipa user-add auth_admin --first=Auth --last=Admin --password
>
> ipa group-add-member auth_reseller_admin --users=auth_admin

Create user *rhs_test_admin* as a member of the *auth_rhs_test* user group:
> ipa user-add rhs_test_admin --first=RHS --last=Admin --password
>
> ipa group-add-member auth_rhs_test --users=rhs_test_admin

Create user *jsmith* with no relevant group membership:
> ipa user-add jsmith --first=John --last=Smith --password

You can verify that the users have been added by running:
> ipa user-find admin

NOTE: Every user has to change their password on first login.

[RHEL 6 Identity Management Guide]: https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Identity_Management_Guide/
@ -1,517 +0,0 @@
# swiftkerbauth

* [Installing Kerberos module for Apache](#httpd-kerb-install)
* [Creating HTTP Service Principal](#http-principal)
* [Installing and configuring swiftkerbauth](#install-swiftkerbauth)
* [Using swiftkerbauth](#use-swiftkerbauth)
* [Configurable Parameters](#config-swiftkerbauth)
* [Functional tests](#swfunctest)

<a name="httpd-kerb-install" />
## Installing Kerberos module for Apache on IPA client

Install the httpd server with the Kerberos module:
> yum install httpd mod_auth_kerb
>
> service httpd restart

Check if auth_kerb_module is loaded:
> httpd -M | grep kerb

Change the httpd log level to debug by adding/changing the following in the
*/etc/httpd/conf/httpd.conf* file:

    LogLevel debug

httpd logs are at */var/log/httpd/error_log* for troubleshooting.

If SELinux is enabled, allow Apache to connect to memcache and
activate the changes by running:
> setsebool -P httpd_can_network_connect 1
>
> setsebool -P httpd_can_network_memcache 1

*****

<a name="http-principal" />
## Creating HTTP Service Principal on IPA server

Add an HTTP Kerberos service principal:
> ipa service-add HTTP/client.rhelbox.com@RHELBOX.COM

Retrieve the HTTP service principal to a keytab file:
> ipa-getkeytab -s server.rhelbox.com -p HTTP/client.rhelbox.com@RHELBOX.COM -k /tmp/http.keytab

Copy the keytab file to the client:
> scp /tmp/http.keytab root@192.168.56.101:/etc/httpd/conf/http.keytab

## Creating HTTP Service Principal on Windows AD server

Add an HTTP Kerberos service principal:
> c:\>ktpass.exe -princ HTTP/fcclient.winad.com@WINAD.COM -mapuser
> auth_admin@WINAD.COM -pass Redhat*123 -out c:\HTTP.keytab -crypto DES-CBC-CRC
> -kvno 0

Use WinSCP to copy the HTTP.keytab file to /etc/httpd/conf/http.keytab.

*****

<a name="install-swiftkerbauth" />
## Installing and configuring swiftkerbauth on IPA client

Prerequisites for installing swiftkerbauth:
* swift (havana)
* gluster-swift (optional)

You can install swiftkerbauth in one of these three ways:

Installing swiftkerbauth from source:
> python setup.py install

Installing swiftkerbauth using pip:
> pip install swiftkerbauth

Installing swiftkerbauth from RPMs:
> ./makerpm.sh
>
> rpm -ivh dist/swiftkerbauth-1.0.0-1.noarch.rpm

Edit */etc/httpd/conf.d/swift-auth.conf* and change the KrbServiceName,
KrbAuthRealms and Krb5KeyTab parameters accordingly.
More detail on configuring Kerberos for Apache can be found at:
[auth_kerb_module Configuration][]
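For reference, the relevant part of *swift-auth.conf* might look like the
sketch below. The values are examples matching the rhelbox.com setup used
in this guide, not mandatory settings; consult the mod_auth_kerb
documentation for your environment:

    <Location /cgi-bin/swift-auth>
        AuthType Kerberos
        AuthName "Swift Authentication"
        KrbServiceName HTTP/client.rhelbox.com
        KrbAuthRealms RHELBOX.COM
        Krb5KeyTab /etc/httpd/conf/http.keytab
        KrbMethodNegotiate On
        KrbMethodK5Passwd Off
        Require valid-user
    </Location>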
Make /etc/httpd/conf/http.keytab readable by any user:
> chmod 644 /etc/httpd/conf/http.keytab

And preferably change the owner of the keytab file to apache:
> chown apache:apache /etc/httpd/conf/http.keytab

Reload httpd:
> service httpd reload

Make the authentication script executable:
> chmod +x /var/www/cgi-bin/swift-auth

*****

<a name="use-swiftkerbauth" />
## Using swiftkerbauth

### Adding the kerbauth filter to the Swift pipeline

Edit */etc/swift/proxy-server.conf* and add a new filter section as follows:

    [filter:kerbauth]
    use = egg:swiftkerbauth#kerbauth
    ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
    auth_mode=passive

Add kerbauth to the pipeline:

    [pipeline:main]
    pipeline = catch_errors healthcheck proxy-logging cache proxy-logging kerbauth proxy-server

If the Swift server is not one of your Gluster nodes, edit
*/etc/swift/fs.conf* and change the following lines in the DEFAULT
section:

    mount_ip = RHS_NODE_HOSTNAME
    remote_cluster = yes

Restart Swift to activate the kerbauth filter:
> swift-init main restart

### Examples

#### Authenticate user and get Kerberos ticket

> kinit auth_admin

NOTE: curl ignores the user specified in the -u option. All further curl
commands will use the currently authenticated auth_admin user.

#### Get an authentication token
> curl -v -u : --negotiate --location-trusted http://client.rhelbox.com:8080/auth/v1.0

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > GET /auth/v1.0 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    >
    < HTTP/1.1 303 See Other
    < Content-Type: text/html; charset=UTF-8
    < Location: http://client.rhelbox.com/cgi-bin/swift-auth
    < Content-Length: 0
    < X-Trans-Id: txecd415aae89b4320b6145-0052417ea5
    < Date: Tue, 24 Sep 2013 11:59:33 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    * Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
    * About to connect() to client.rhelbox.com port 80 (#1)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 80 (#1)
    > GET /cgi-bin/swift-auth HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com
    > Accept: */*
    >
    < HTTP/1.1 401 Unauthorized
    < Date: Tue, 24 Sep 2013 11:59:33 GMT
    < Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
    < WWW-Authenticate: Negotiate
    < WWW-Authenticate: Basic realm="Swift Authentication"
    < Content-Length: 381
    < Content-Type: text/html; charset=iso-8859-1
    <
    * Ignoring the response-body
    * Connection #1 to host client.rhelbox.com left intact
    * Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
    * Re-using existing connection! (#1) with host (nil)
    * Connected to (nil) (192.168.56.101) port 80 (#1)
    * Server auth using GSS-Negotiate with user ''
    > GET /cgi-bin/swift-auth HTTP/1.1
    > Authorization: Negotiate YIICYgYJKoZIhvcSAQICAQBuggJRMIICTaADAgEFoQMCAQ6iBwMFACAAAACjggFgYYIBXDCCAVigAwIBBaENGwtSSEVMQk9YLkNPTaIlMCOgAwIBA6EcMBobBEhUVFAbEmNsaWVudC5yaGVsYm94LmNvbaOCARkwggEVoAMCARKhAwIBAaKCAQcEggEDx9SH2R90RO4eAkhsNKow/DYfjv1rWhgxNRqj/My3yslASSgefls48VdDNHVVWqr1Kd6mB/9BIoumpA+of+KSAg2QfPtcWiVFj5n5Fa8fyCHyQPvV8c92KzUdrBPc8OVn0aldFp0I4P1MsYZbnddDRSH3kjVA5oSucHF59DhZWiGJV/F6sVimBSeoTBHQD38Cs5RhyDHNyUad9v3gZERVGCJXC76i7+yyaoIDA+N9s0hasHajhTnjs3XQBYfZFwp8lWl3Ub+sOtPO1Ng7mFlSAYXCM6ljlKTEaxRwaYoXUC1EoIqEOG/8pC9SJThS2M1G7MW1c5xm4lksNss72OH4gtPns6SB0zCB0KADAgESooHIBIHFrLtai5U8ajEWo1J9B26PnIUqLd+uA0KPd2Y2FjrH6rx4xT8qG2p8i36SVGubvwBVmfQ7lSJcXt6wUvb43qyPs/fMiSY7QxHxt7/btMgxQl6JWMagvXMhCNXnhEHNNaTdBcG5KFERDGeo0txaAD1bzZ4mnxCQmoqusGzZ6wdDw6+5wq1tK/hQTQUgk2NwxfXAg2J5K02/3fKjFR2h7zewI1pEyhhpeONRkkRETcyojkK2EbVzZ8kc3RsuwzFYsJ+9u5Qj3E4=
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com
    > Accept: */*
    >
    < HTTP/1.1 200 OK
    < Date: Tue, 24 Sep 2013 11:59:33 GMT
    < Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
    < WWW-Authenticate: Negotiate YIGZBgkqhkiG9xIBAgICAG+BiTCBhqADAgEFoQMCAQ+iejB4oAMCARKicQRveeZTV/QRJSIOoOWPbZkEmtdug9V5ZcMGXWqAJvCAnrvw9gHbklMyLl8f8jU2e0wU3ehtchLEL4dVeAYgKsnUgw4wGhHu59AZBwSbHRKSpv3I6gWEZqC4NAEuZJFW9ipdUHOiclBQniVXXCsRF/5Y
    < X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
    < X-Debug-Remote-User: auth_admin
    < X-Debug-Groups: auth_admin,auth_reseller_admin
    < X-Debug-Token-Life: 86400s
    < X-Debug-Token-Expires: Wed Sep 25 17:29:33 2013
    < Content-Length: 0
    < Content-Type: text/html; charset=UTF-8
    <
    * Connection #1 to host (nil) left intact
    * Closing connection #0
    * Closing connection #1

The header *X-Auth-Token* in the response contains the token
*AUTH_tk083b8abc92f4a514f34224a181ed568a*.

#### PUT a container
> curl -v -X PUT -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > PUT /v1/AUTH_myvolume/c1 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
    >
    < HTTP/1.1 201 Created
    < Content-Length: 0
    < Content-Type: text/html; charset=UTF-8
    < X-Trans-Id: txc420b0ebf9714445900e8-0052418863
    < Date: Tue, 24 Sep 2013 12:41:07 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    * Closing connection #0

#### GET a container listing
> curl -v -X GET -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > GET /v1/AUTH_myvolume HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
    >
    < HTTP/1.1 200 OK
    < Content-Length: 3
    < X-Account-Container-Count: 0
    < Accept-Ranges: bytes
    < X-Account-Object-Count: 0
    < X-Bytes-Used: 0
    < X-Timestamp: 1379997117.09468
    < X-Object-Count: 0
    < X-Account-Bytes-Used: 0
    < X-Type: Account
    < Content-Type: text/plain; charset=utf-8
    < X-Container-Count: 0
    < X-Trans-Id: tx89826736a1ab4d6aae6e3-00524188dc
    < Date: Tue, 24 Sep 2013 12:43:08 GMT
    <
    c1
    * Connection #0 to host client.rhelbox.com left intact
    * Closing connection #0

#### PUT an object in the container
> curl -v -X PUT -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1 -d'Hello world'

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > PUT /v1/AUTH_myvolume/c1/object1 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
    > Content-Length: 11
    > Content-Type: application/x-www-form-urlencoded
    >
    * upload completely sent off: 11 out of 11 bytes
    < HTTP/1.1 201 Created
    < Last-Modified: Wed, 25 Sep 2013 06:08:00 GMT
    < Content-Length: 0
    < Etag: 3e25960a79dbc69b674cd4ec67a72c62
    < Content-Type: text/html; charset=UTF-8
    < X-Trans-Id: tx01f1b5a430cf4af3897be-0052427dc0
    < Date: Wed, 25 Sep 2013 06:08:01 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    * Closing connection #0

#### Give permission to jsmith to list and download objects from the c1 container
> curl -v -X POST -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' -H 'X-Container-Read: jsmith' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > POST /v1/AUTH_myvolume/c1 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
    > X-Container-Read: jsmith
    >
    < HTTP/1.1 204 No Content
    < Content-Length: 0
    < Content-Type: text/html; charset=UTF-8
    < X-Trans-Id: txcedea3e2557d463eb591d-0052427f60
    < Date: Wed, 25 Sep 2013 06:14:56 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    * Closing connection #0

#### Access container as jsmith

> kinit jsmith

Get a token for jsmith:
> curl -v -u : --negotiate --location-trusted http://client.rhelbox.com:8080/auth/v1.0

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > GET /auth/v1.0 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    >
    < HTTP/1.1 303 See Other
    < Content-Type: text/html; charset=UTF-8
    < Location: http://client.rhelbox.com/cgi-bin/swift-auth
    < Content-Length: 0
    < X-Trans-Id: txf51e1bf7f8c5496f8cc93-005242800b
    < Date: Wed, 25 Sep 2013 06:17:47 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    * Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
    * About to connect() to client.rhelbox.com port 80 (#1)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 80 (#1)
    > GET /cgi-bin/swift-auth HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com
    > Accept: */*
    >
    < HTTP/1.1 401 Unauthorized
    < Date: Wed, 25 Sep 2013 06:17:47 GMT
    < Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
    < WWW-Authenticate: Negotiate
    < WWW-Authenticate: Basic realm="Swift Authentication"
    < Content-Length: 381
    < Content-Type: text/html; charset=iso-8859-1
    <
    * Ignoring the response-body
    * Connection #1 to host client.rhelbox.com left intact
    * Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
    * Re-using existing connection! (#1) with host (nil)
    * Connected to (nil) (192.168.56.101) port 80 (#1)
    * Server auth using GSS-Negotiate with user ''
    > GET /cgi-bin/swift-auth HTTP/1.1
    > Authorization: Negotiate YIICWAYJKoZIhvcSAQICAQBuggJHMIICQ6ADAgEFoQMCAQ6iBwMFACAAAACjggFbYYIBVzCCAVOgAwIBBaENGwtSSEVMQk9YLkNPTaIlMCOgAwIBA6EcMBobBEhUVFAbEmNsaWVudC5yaGVsYm94LmNvbaOCARQwggEQoAMCARKhAwIBAaKCAQIEgf/+3OaXYCSEjcsjU3t3lOLcYG84GBP9Kj9YTHc7yVMlcam4ivCwMqCkzxgvNo2E3a5KSWyFwngeX4b/QFbCKPXA4sfBibZRkeMk5gr2f0MLI3gWEAIYq7bJLre04bnkD2F0MzijPJrOLIx1KmFe08UGWCEmnG2uj07lvIR1RwV/7dMM4J1B+KKvDVKA0LxahwPIpx8oOON2yMGcstrBAHBBk5pmpt1Gg9Lh7xdNPsjP0IfI5Q0zkGCRBKpvpXymP1lQpQXlHbqkdBYOmG4+p/R+vIosO4ui1G6GWE9t71h3AqW61CcCj3/oOjZsG56k8HMSNk/+3mfUTP86nzLRGkekgc4wgcugAwIBEqKBwwSBwPsG9nGloEnOsA1abP4R1/yUDcikjjwKiacvZ+cu7bWEzu3L376k08U8C2YIClyUJy3Grt68LxhnfZ65VCZ5J5IOLiXOJnHBIoJ1L4GMYp4EgZzHvI7R3U3DApMzNWZwc1MsSF5UGhmLwxSevDLetJHjgKzKNteRyVN/8CFgjSBEjGSN1Qgy1RZHuQR9d3JHPczONZ4+ZgStfy+I1m2IUIgW3+4JGFVafHiBQVwSWRNfdXFgI3wBz7slntd7r3qMWA==
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com
    > Accept: */*
    >
    < HTTP/1.1 200 OK
    < Date: Wed, 25 Sep 2013 06:17:47 GMT
    < Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
    < WWW-Authenticate: Negotiate YIGYBgkqhkiG9xIBAgICAG+BiDCBhaADAgEFoQMCAQ+ieTB3oAMCARKicARuH2YpjFrtgIhGr5nO7gh/21EvGH9tayRo5A3pw5pxD1B1036ePLG/x98OdMrSflse5s8ttz8FmvRphCFJa8kfYtnWULgoFLF2F2a1zBdSo2oCA0R05YFwArNhkg6ou5o7wWZkERHK33CKlhudSj8=
    < X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
    < X-Debug-Remote-User: jsmith
    < X-Debug-Groups: jsmith
    < X-Debug-Token-Life: 86400s
    < X-Debug-Token-Expires: Thu Sep 26 11:47:47 2013
    < Content-Length: 0
    < Content-Type: text/html; charset=UTF-8
    <
    * Connection #1 to host (nil) left intact
    * Closing connection #0
    * Closing connection #1

List the container using the authentication token for jsmith:
> curl -v -X GET -H 'X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > GET /v1/AUTH_myvolume/c1 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
    >
    < HTTP/1.1 200 OK
    < Content-Length: 8
    < X-Container-Object-Count: 0
    < Accept-Ranges: bytes
    < X-Timestamp: 1
    < X-Container-Bytes-Used: 0
    < Content-Type: text/plain; charset=utf-8
    < X-Trans-Id: tx575215929c654d9f9f284-00524280a4
    < Date: Wed, 25 Sep 2013 06:20:20 GMT
    <
    object1
    * Connection #0 to host client.rhelbox.com left intact
    * Closing connection #0

Downloading the object as jsmith:
> curl -v -X GET -H 'X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1

    * About to connect() to client.rhelbox.com port 8080 (#0)
    * Trying 192.168.56.101...
    * connected
    * Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
    > GET /v1/AUTH_myvolume/c1/object1 HTTP/1.1
    > User-Agent: curl/7.27.0
    > Host: client.rhelbox.com:8080
    > Accept: */*
    > X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
    >
    < HTTP/1.1 200 OK
    < Content-Length: 11
    < Accept-Ranges: bytes
    < Last-Modified: Wed, 25 Sep 2013 06:08:00 GMT
    < Etag: 3e25960a79dbc69b674cd4ec67a72c62
    < X-Timestamp: 1380089280.98829
    < Content-Type: application/x-www-form-urlencoded
    < X-Trans-Id: tx19b5cc3847854f40a6ca8-00524281aa
    < Date: Wed, 25 Sep 2013 06:24:42 GMT
    <
    * Connection #0 to host client.rhelbox.com left intact
    Hello world* Closing connection #0

For curl to follow the redirect, you need to specify additional
options. With these, and with a current Kerberos ticket, you should
get the Kerberos user's cached authentication token, or a new one if
the previous token has expired.

> curl -v -u : --negotiate --location-trusted -X GET http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1

The --negotiate option is for curl to perform Kerberos authentication and
--location-trusted is for curl to follow the redirect.

[auth_kerb_module Configuration]: http://modauthkerb.sourceforge.net/configure.html

#### Get an authentication token when auth_mode=passive
> curl -v -H 'X-Auth-User: test:auth_admin' -H 'X-Auth-Key: Redhat*123' http://127.0.0.1:8080/auth/v1.0

**NOTE**: The X-Storage-Url response header can be returned only in passive mode.
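Once a token and storage URL have been obtained this way, they can also be
used programmatically. The following is a short sketch assuming the
python-swiftclient package is installed (it is not part of swiftkerbauth
itself); the user and key values match the curl example above:

    # Obtain a token in passive mode and list the account's containers.
    from swiftclient import client as swift_client

    storage_url, token = swift_client.get_auth(
        'http://127.0.0.1:8080/auth/v1.0',
        'test:auth_admin', 'Redhat*123', auth_version='1.0')

    headers, containers = swift_client.get_account(storage_url, token)
    for container in containers:
        print(container['name'])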
<a name="config-swiftkerbauth" />
## Configurable Parameters

The kerbauth filter section in **/etc/swift/proxy-server.conf** looks something
like this:

    [filter:kerbauth]
    use = egg:swiftkerbauth#kerbauth
    ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
    auth_method = active
    token_life = 86400
    debug_headers = yes
    realm_name = RHELBOX.COM

Of all the options listed above, specifying **ext\_authentication\_url** is
mandatory. The rest of the options are optional and have default values.

#### ext\_authentication\_url
A URL specifying the location of the swift-auth CGI script. Avoid using an
IP address.
Default value: None

#### token_life
The number of seconds after which the cached information about an
authentication token is discarded.
Default value: 86400

#### debug_headers
When turned on, the response headers sent to the user will contain additional
debug information apart from the auth token.
Default value: yes

#### auth_method
Set this to **"active"** when you want to allow access **only to clients
residing inside the domain**. In this mode, authentication is performed by
mod\_auth\_kerb using the Kerberos ticket bundled with the client request.
No username and password have to be specified to get a token.
Set this to **"passive"** when you want to allow access to clients residing
outside the domain. In this mode, authentication is performed by gleaning
the username and password from the request headers (X-Auth-User and
X-Auth-Key) and running the kinit command against them.
Default value: passive
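Mechanically, the passive-mode credential check amounts to something like
the sketch below (illustrative only; the real filter handles realms,
errors and credential caches more carefully):

    # Sketch: validate an X-Auth-User/X-Auth-Key pair by driving kinit.
    import subprocess

    def kinit_check(username, password, realm=None):
        principal = '%s@%s' % (username, realm) if realm else username
        p = subprocess.Popen(['kinit', principal],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p.communicate(input=(password + '\n').encode('utf8'))
        return p.returncode == 0   # 0: the KDC accepted the credentials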
#### realm_name
This is applicable only when auth_method=passive. This option specifies the
realm name to use if the storage server belongs to more than one realm and
the realm name is not part of the username specified in the X-Auth-User
header.

<a name="swfunctest" />
## Functional tests for SwiftKerbAuth

The functional tests are to be run on the storage node after SwiftKerbAuth
is set up using either an IPA server or Windows AD. The
gluster-swift/doc/markdown/swiftkerbauth directory contains the
SwiftKerbAuth setup documents. There are two modes of working with
SwiftKerbAuth. 'PASSIVE' mode indicates that the client is outside the
domain configured using SwiftKerbAuth. The client provides the 'Username'
and 'Password' while invoking a command, and the SwiftKerbAuth auth filter
code then obtains the ticket granting ticket from the AD or IPA server.
In 'ACTIVE' mode of SwiftKerbAuth, the user is already logged into the
storage node using its Kerberos credentials. That user is authenticated
against the AD/IPA server.

In PASSIVE mode, all the generic functional tests are run. ACTIVE mode has
a different way of acquiring the Ticket Granting Ticket, and hence a
different functional test framework.

The accounts, users and passwords must be prepared on the AD/IPA server as
mentioned in test/functional_auth/swiftkerbauth/conf/test.conf.

The command to invoke the SwiftKerbAuth functional tests is:
> $tox -e swfunctest

This runs both the ACTIVE and PASSIVE mode functional test cases.
@ -1,66 +0,0 @@
# User Guide

## Installation

### GlusterFS Installation
First, we need to install GlusterFS on the system by following the
instructions in the [GlusterFS QuickStart Guide][].

### Fedora/RHEL/CentOS
Gluster for Swift depends on OpenStack Swift Grizzly, which can be
obtained by using [RedHat's RDO][] packages as follows:

~~~
yum install -y http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly.rpm
~~~

### Download
Gluster for Swift uses [Jenkins][] for continuous integration and
creation of distribution builds. Download the latest RPM builds
from one of the links below:

* RHEL/CentOS 6: [Download](http://build.gluster.org/job/gluster-swift-builds-cent6/lastSuccessfulBuild/artifact/build/)
* Fedora 18+: [Download](http://build.gluster.org/job/gluster-swift-builds-f18/lastSuccessfulBuild/artifact/build/)

Install the downloaded RPM using the following command:

~~~
yum install -y RPMFILE
~~~

where *RPMFILE* is the RPM file downloaded from Jenkins.

## Configuration
TBD

## Server Control
Command to start the servers (TBD):

~~~
swift-init main start
~~~

Command to stop the servers (TBD):

~~~
swift-init main stop
~~~

Command to gracefully reload the servers:

~~~
swift-init main reload
~~~

### Mounting your volumes
TBD

Once this is done, you can access GlusterFS volumes via the Swift API where
accounts are mounted volumes, containers are top-level directories,
and objects are files and sub-directories of container directories.
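For example, after a client PUTs an object, the same data is visible as a
regular file on the mounted volume. The paths below are illustrative and
assume a volume named *myvolume* mounted under the default
/mnt/gluster-object, with $TOKEN holding a valid auth token:

~~~
# Swift view: PUT an object into container c1 of account AUTH_myvolume
curl -X PUT -H "X-Auth-Token: $TOKEN" \
    http://localhost:8080/v1/AUTH_myvolume/c1/hello.txt -d 'Hello'

# Filesystem view: the container is a directory, the object is a file
cat /mnt/gluster-object/myvolume/c1/hello.txt
~~~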
[GlusterFS QuickStart Guide]: http://www.gluster.org/community/documentation/index.php/QuickStart
[RedHat's RDO]: http://openstack.redhat.com/Quickstart
[Jenkins]: http://jenkins-ci.org
@ -1,39 +0,0 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store, can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value the same for all the three services,
# but it can be kept different if the environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
# The following parameter is used by object-expirer and needs to be the same
# across all conf files!
auto_create_account_prefix = gs
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off
@ -1,39 +0,0 @@
[DEFAULT]
#
# Default gluster mount point to be used for object store, can be changed by
# setting the following value in {account,container,object}-server.conf files.
# It is recommended to keep this value the same for all the three services,
# but it can be kept different if the environment demands.
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
mount_check = true
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
# The following parameter is used by object-expirer and needs to be the same
# across all conf files!
auto_create_account_prefix = gs
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off
@ -1,27 +0,0 @@
# TODO: Add documentation to explain the various options.
# For now, refer: https://github.com/openstack/swift/blob/master/etc/object-expirer.conf-sample

[DEFAULT]

[object-expirer]
user = root
log_facility = LOG_LOCAL2
log_level = DEBUG
# The following parameters are used by object-expirer and need to be the same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring

interval = 30

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:gluster_swift#proxy

[filter:cache]
use = egg:swift#memcache

[filter:catch_errors]
use = egg:swift#catch_errors
@ -4,14 +4,14 @@
 # setting the following value in {account,container,object}-server.conf files.
 # It is recommended to keep this value same for all the three services but can
 # be kept different if environment demands.
-devices = /mnt/gluster-object
+devices = /mnt/swiftonfile
 #
 # Once you are confident that your startup processes will always have your
 # gluster volumes properly mounted *before* the object-server workers start,
 # you can *consider* setting this value to "false" to reduce the per-request
 # overhead it can incur.
 mount_check = true
-bind_port = 6010
+bind_port = 6050
 #
 # Maximum number of clients one worker can process simultaneously (it will
 # actually accept N + 1). Setting this to one (1) will only handle one request
@ -34,11 +34,6 @@ use = egg:gluster_swift#object
 user = root
 log_facility = LOG_LOCAL2
 log_level = WARN
-# The following parameters are used by object-expirer and needs to be same
-# across all conf files!
-auto_create_account_prefix = gs
-expiring_objects_account_name = expiring
-#
 # For performance, after ensuring things are running in a stable manner, you
 # can turn off normal request logging for the object server to reduce the
 # per-request overhead and unclutter the log files. Warnings and errors will
@ -1,70 +0,0 @@
[DEFAULT]
bind_port = 8080
user = root
# Consider using 1 worker per CPU
workers = 1

[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache proxy-logging proxy-server

[app:proxy-server]
use = egg:gluster_swift#proxy
log_facility = LOG_LOCAL1
log_level = WARN
# The API allows for account creation and deletion, but since Gluster/Swift
# automounts a Gluster volume for a given account, there is no way to create
# or delete an account. So leave this off.
allow_account_management = false
account_autocreate = true
# The following parameters are used by object-expirer and need to be the same
# across all conf files!
auto_create_account_prefix = gs
expiring_objects_account_name = expiring
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
# of the entire object given that all metadata is stored in the object
# extended attributes (no .meta file used after creation) and no container
# sync feature to present.
object_post_as_copy = false
# Only need to recheck the account exists once a day
recheck_account_existence = 86400
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Timeout clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
# stripe width (not stripe element size) of your storage volume is a good
# starting point. See below for sizing information.
object_chunk_size = 65536
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
# be queued to the object server for processing. Given one proxy server worker
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worst case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10

[filter:catch_errors]
use = egg:swift#catch_errors

[filter:proxy-logging]
use = egg:swift#proxy_logging
access_log_level = WARN

[filter:healthcheck]
use = egg:swift#healthcheck

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211
@ -1,13 +1,53 @@
 [DEFAULT]


 [swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = gluster
-
+# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
+# hashing algorithm when determining data placement in the cluster.
+# These values should remain secret and MUST NOT change
+# once a cluster has been deployed.
+
+swift_hash_path_suffix = changeme
+swift_hash_path_prefix = changeme
+
+# storage policies are defined here and determine various characteristics
+# about how objects are stored and treated. Policies are specified by name on
+# a per container basis. Names are case-insensitive. The policy index is
+# specified in the section header and is used internally. The policy with
+# index 0 is always used for legacy containers and can be given a name for use
+# in metadata however the ring file name will always be 'object.ring.gz' for
+# backwards compatibility. If no policies are defined a policy with index 0
+# will be automatically created for backwards compatibility and given the name
+# Policy-0. A default policy is used when creating new containers when no
+# policy is specified in the request. If no other policies are defined the
+# policy with index 0 will be declared the default. If multiple policies are
+# defined you must define a policy with index 0 and you must specify a
+# default. It is recommended you always define a section for
+# storage-policy:0.
+[storage-policy:0]
+name = Policy-0
+default = yes
+
+# the following section would declare a policy called 'silver', the number of
+# replicas will be determined by how the ring is built. In this example the
+# 'silver' policy could have a lower or higher # of replicas than the
+# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You
+# may only specify one storage policy section as the default. If you changed
+# this section to specify 'silver' as the default, when a client created a new
+# container w/o a policy specified, it will get the 'silver' policy because
+# this config has specified it as the default. However if a legacy container
+# (one created with a pre-policy version of swift) is accessed, it is known
+# implicitly to be assigned to the policy with index 0 as opposed to the
+# current default.
+#[storage-policy:1]
+#name = silver
+
+# The following section defines a policy called 'swiftonfile' to be used by
+# the swiftonfile object-server implementation.
+[storage-policy:2]
+name = swiftonfile
+
 # The swift-constraints section sets the basic constraints on data
-# saved in the swift cluster.
+# saved in the swift cluster. These constraints are automatically
+# published by the proxy server in responses to /info requests.
+
 [swift-constraints]

@ -15,9 +55,10 @@ swift_hash_path_suffix = gluster
 # the cluster. This is also the limit on the size of each segment of
 # a "large" object when using the large object manifest support.
 # This value is set in bytes. Setting it to lower than 1MiB will cause
-# some tests to fail.
-# Default is 1 TiB = 2**30*1024
-max_file_size = 1099511627776
+# some tests to fail. It is STRONGLY recommended to leave this value at
+# the default (5 * 2**30 + 2).
+#max_file_size = 5368709122


 # max_meta_name_length is the max number of bytes in the utf8 encoding
@ -43,43 +84,50 @@ max_file_size = 1099511627776

 #max_meta_overall_size = 4096

+# max_header_size is the max number of bytes in the utf8 encoding of each
+# header. Using 8192 as default because eventlet uses 8192 as max size of
+# header line. This value may need to be increased when using identity
+# v3 API tokens including more than 7 catalog entries.
+# See also include_service_catalog in proxy-server.conf-sample
+# (documented in overview_auth.rst)
+#max_header_size = 8192
+
-# max_object_name_length is the max number of bytes in the utf8 encoding of an
-# object name: Gluster FS can handle much longer file names, but the length
-# between the slashes of the URL is handled below. Remember that most web
-# clients can't handle anything greater than 2048, and those that do are
-# rather clumsy.
-max_object_name_length = 2048
-
-# max_object_name_component_length (GlusterFS) is the max number of bytes in
-# the utf8 encoding of an object name component (the part between the
-# slashes); this is a limit imposed by the underlying file system (for XFS it
-# is 255 bytes).
-max_object_name_component_length = 255
-
 # container_listing_limit is the default (and max) number of items
 # returned for a container listing request
 #container_listing_limit = 10000

 # account_listing_limit is the default (and max) number of items returned
 # for an account listing request
 #account_listing_limit = 10000

+# SwiftOnFile constraints - do not exceed the maximum values which are
+# set here as default
+
-# max_account_name_length is the max number of bytes in the utf8 encoding of
-# an account name: Gluster FS Filename limit (XFS limit?), must be the same
-# size as max_object_name_component_length above.
+# max_object_name_length is the max number of bytes in the utf8 encoding
+# of an object name
+max_object_name_length = 221
+# Why 221 ?
+# The longest filename supported by XFS is 255.
+# http://lxr.free-electrons.com/source/fs/xfs/xfs_types.h#L125
+# SoF creates a temp file with the following naming convention:
+# .OBJECT_NAME.<random-string>
+# The random string is 32 characters long and the file name has two dots.
+# Hence 255 - 32 - 2 = 221
+# NOTE: This limitation can be safely raised by having slashes in a really
+# long object name. Each segment between slashes ('/') should not exceed 221.
+
+# max_account_name_length is the max number of bytes in the utf8 encoding
+# of an account name
 max_account_name_length = 255

 # max_container_name_length is the max number of bytes in the utf8 encoding
-# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
-# size as max_object_name_component_length above.
+# of a container name
 max_container_name_length = 255

+# Why 255 ?
+# The longest filename supported by XFS is 255.
+# http://lxr.free-electrons.com/source/fs/xfs/xfs_types.h#L125
+# SoF creates the following directory hierarchy on the mount point: account/container
@@ -1,46 +0,0 @@
# Copyright (c) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Account Server for Gluster Swift UFO """

# Simply importing this monkey patches the constraint handling to fit our
# needs
import gluster.swift.common.constraints  # noqa

from swift.account import server
from gluster.swift.common.DiskDir import DiskAccount


class AccountController(server.AccountController):

    def _get_account_broker(self, drive, part, account, **kwargs):
        """
        Overriden to provide the GlusterFS specific broker that talks to
        Gluster for the information related to servicing a given request
        instead of talking to a database.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :returns: DiskDir object
        """
        return DiskAccount(self.root, drive, account, self.logger, **kwargs)


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI account server apps."""
    conf = global_conf.copy()
    conf.update(local_conf)
    return AccountController(conf)
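The `app_factory` in the deleted file follows the standard paste.deploy contract: merge the global config with the filter-local config and return a WSGI app. A self-contained sketch of that pattern, with `DummyApp` standing in for `AccountController` (illustrative names, not from the patch):

    class DummyApp(object):
        def __init__(self, conf):
            self.conf = conf


    def app_factory(global_conf, **local_conf):
        # paste.deploy contract: local options override the global ones.
        conf = global_conf.copy()
        conf.update(local_conf)
        return DummyApp(conf)


    app = app_factory({'bind_port': '6012'}, devices='/mnt/swiftonfile')
    assert app.conf == {'bind_port': '6012', 'devices': '/mnt/swiftonfile'}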
@@ -1,705 +0,0 @@
# Copyright (c) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import errno

from gluster.swift.common.fs_utils import dir_empty, mkdirs, do_chown, \
    do_exists, do_touch
from gluster.swift.common.utils import validate_account, validate_container, \
    get_container_details, get_account_details, create_container_metadata, \
    create_account_metadata, DEFAULT_GID, get_container_metadata, \
    get_account_metadata, DEFAULT_UID, validate_object, \
    create_object_metadata, read_metadata, write_metadata, X_CONTENT_TYPE, \
    X_CONTENT_LENGTH, X_TIMESTAMP, X_PUT_TIMESTAMP, X_ETAG, X_OBJECTS_COUNT, \
    X_BYTES_USED, X_CONTAINER_COUNT, DIR_TYPE, rmobjdir, dir_is_object
from gluster.swift.common import Glusterfs
from gluster.swift.common.exceptions import FileOrDirNotFoundError


DATADIR = 'containers'

# Create a dummy db_file in Glusterfs.RUN_DIR
_db_file = ""


def _read_metadata(dd):
    """ Filter read metadata so that it always returns a tuple that includes
        some kind of timestamp. With 1.4.8 of the Swift integration the
        timestamps were not stored. Here we fabricate timestamps for volumes
        where the existing data has no timestamp (that is, stored data is not
        a tuple), allowing us a measure of backward compatibility.

        FIXME: At this time it does not appear that the timestamps on each
        metadata are used for much, so this should not hurt anything.
    """
    metadata_i = read_metadata(dd)
    metadata = {}
    timestamp = 0
    for key, value in metadata_i.iteritems():
        if not isinstance(value, tuple):
            value = (value, timestamp)
        metadata[key] = value
    return metadata


def filter_prefix(objects, prefix):
    """
    Accept a sorted list of strings, returning all strings starting with the
    given prefix.
    """
    found = False
    for object_name in objects:
        if object_name.startswith(prefix):
            yield object_name
            found = True
        else:
            # Since the list is assumed to be sorted, once we find an object
            # name that does not start with the prefix we know we won't find
            # any others, so we exit early.
            if found:
                break


def filter_delimiter(objects, delimiter, prefix, marker, path=None):
    """
    Accept a sorted list of strings, returning strings that:
      1. begin with "prefix" (empty string matches all)
      2. does not match the "path" argument
      3. does not contain the delimiter in the given prefix length
    """
    assert delimiter
    assert prefix is not None
    skip_name = None
    for object_name in objects:
        if prefix and not object_name.startswith(prefix):
            break
        if path is not None:
            if object_name == path:
                continue
            if skip_name:
                if object_name < skip_name:
                    continue
                else:
                    skip_name = None
            end = object_name.find(delimiter, len(prefix))
            if end >= 0 and (len(object_name) > (end + 1)):
                skip_name = object_name[:end] + chr(ord(delimiter) + 1)
                continue
        else:
            if skip_name:
                if object_name < skip_name:
                    continue
                else:
                    skip_name = None
            end = object_name.find(delimiter, len(prefix))
            if end > 0:
                dir_name = object_name[:end + 1]
                if dir_name != marker:
                    yield dir_name
                skip_name = object_name[:end] + chr(ord(delimiter) + 1)
                continue
        yield object_name


def filter_marker(objects, marker):
    """
    Accept sorted list of strings, return all strings whose value is strictly
    greater than the given marker value.
    """
    for object_name in objects:
        if object_name > marker:
            yield object_name


def filter_prefix_as_marker(objects, prefix):
    """
    Accept sorted list of strings, return all strings whose value is greater
    than or equal to the given prefix value.
    """
    for object_name in objects:
        if object_name >= prefix:
            yield object_name


def filter_end_marker(objects, end_marker):
    """
    Accept a list of strings, sorted, and return all the strings that are
    strictly less than the given end_marker string. We perform this as a
    generator to avoid creating potentially large intermediate object lists.
    """
    for object_name in objects:
        if object_name < end_marker:
            yield object_name
        else:
            break


class DiskCommon(object):
    """
    Common fields and methods shared between DiskDir and DiskAccount classes.
    """
    def __init__(self, root, drive, account, logger, pending_timeout=None,
                 stale_reads_ok=False):
        # WARNING: The following four fields are referenced as fields by our
        # callers outside of this module, do not remove.
        # Create a dummy db_file in Glusterfs.RUN_DIR
        global _db_file
        if not _db_file:
            _db_file = os.path.join(Glusterfs.RUN_DIR, 'db_file.db')
            if not do_exists(_db_file):
                do_touch(_db_file)
        self.db_file = _db_file
        self.metadata = {}
        self.pending_timeout = pending_timeout or 10
        self.stale_reads_ok = stale_reads_ok
        # The following fields are common
        self.root = root
        assert logger is not None
        self.logger = logger
        self.account = account
        self.datadir = os.path.join(root, drive)
        self._dir_exists = None

    def _dir_exists_read_metadata(self):
        self._dir_exists = do_exists(self.datadir)
        if self._dir_exists:
            self.metadata = _read_metadata(self.datadir)
        return self._dir_exists

    def is_deleted(self):
        # The intention of this method is to check the file system to see if
        # the directory actually exists.
        return not do_exists(self.datadir)

    def empty(self):
        # If it does not exist, then it is empty. A value of True is
        # what is expected by OpenStack Swift when the directory does
        # not exist. Check swift/common/db.py:ContainerBroker.empty()
        # and swift/container/server.py:ContainerController.DELETE()
        # for more information
        try:
            return dir_empty(self.datadir)
        except FileOrDirNotFoundError:
            return True

    def update_metadata(self, metadata):
        assert self.metadata, "Valid container/account metadata should have " \
            "been created by now"
        if metadata:
            new_metadata = self.metadata.copy()
            new_metadata.update(metadata)
            if new_metadata != self.metadata:
                write_metadata(self.datadir, new_metadata)
                self.metadata = new_metadata


class DiskDir(DiskCommon):
    """
    Manage object files on disk.

    :param path: path to devices on the node
    :param drive: gluster volume drive name
    :param account: account name for the object
    :param container: container name for the object
    :param logger: account or container server logging object
    :param uid: user ID container object should assume
    :param gid: group ID container object should assume

    Usage pattern from container/server.py (Havana, 1.8.0+):
        DELETE:
            if auto-create and obj and not .db_file:
                # Creates container
                .initialize()
            if not .db_file:
                # Container does not exist
                return 404
            if obj:
                # Should be a NOOP
                .delete_object()
            else:
                if not .empty()
                    # Gluster's definition of empty should mean only
                    # sub-directories exist in Object-Only mode
                    return conflict
                .get_info()['put_timestamp'] and not .is_deleted()
                # Deletes container
                .delete_db()
                if not .is_deleted():
                    return conflict
                account_update():
                    .get_info()
        PUT:
            if obj:
                if auto-create cont and not .db_file
                    # Creates container
                    .initialize()
                if not .db_file
                    return 404
                .put_object()
            else:
                if not .db_file:
                    # Creates container
                    .initialize()
                else:
                    # Update container timestamp
                    .is_deleted()
                    .update_put_timestamp()
                    if .is_deleted()
                        return conflict
                if metadata:
                    if .metadata
                        .set_x_container_sync_points()
                    .update_metadata()
                account_update():
                    .get_info()
        HEAD:
            .pending_timeout
            .stale_reads_ok
            if .is_deleted():
                return 404
            .get_info()
            .metadata
        GET:
            .pending_timeout
            .stale_reads_ok
            if .is_deleted():
                return 404
            .get_info()
            .metadata
            .list_objects_iter()
        POST:
            if .is_deleted():
                return 404
            .metadata
            .set_x_container_sync_points()
            .update_metadata()
    """

    def __init__(self, path, drive, account, container, logger,
                 uid=DEFAULT_UID, gid=DEFAULT_GID, **kwargs):
        super(DiskDir, self).__init__(path, drive, account, logger, **kwargs)

        self.uid = int(uid)
        self.gid = int(gid)

        self.container = container
        self.datadir = os.path.join(self.datadir, self.container)

        if not self._dir_exists_read_metadata():
            return

        if not self.metadata:
            create_container_metadata(self.datadir)
            self.metadata = _read_metadata(self.datadir)
        else:
            if not validate_container(self.metadata):
                create_container_metadata(self.datadir)
                self.metadata = _read_metadata(self.datadir)

    def list_objects_iter(self, limit, marker, end_marker,
                          prefix, delimiter, path=None):
        """
        Returns tuple of name, created_at, size, content_type, etag.
        """
        assert limit >= 0
        assert not delimiter or (len(delimiter) == 1 and ord(delimiter) <= 254)

        if path is not None:
            if path:
                prefix = path = path.rstrip('/') + '/'
            else:
                prefix = path
            delimiter = '/'
        elif delimiter and not prefix:
            prefix = ''

        container_list = []

        objects = self._update_object_count()
        if objects:
            objects.sort()
        else:
            return container_list

        if end_marker:
            objects = filter_end_marker(objects, end_marker)

        if marker and marker >= prefix:
            objects = filter_marker(objects, marker)
        elif prefix:
            objects = filter_prefix_as_marker(objects, prefix)

        if prefix is None:
            # No prefix, we don't need to apply the other arguments, we just
            # return what we have.
            pass
        else:
            # We have a non-None (for all intents and purposes it is a string)
            # prefix.
            if not delimiter:
                if not prefix:
                    # We have nothing more to do
                    pass
                else:
                    objects = filter_prefix(objects, prefix)
            else:
                objects = filter_delimiter(objects, delimiter, prefix, marker,
                                           path)

        count = 0
        for obj in objects:
            obj_path = os.path.join(self.datadir, obj)
            metadata = read_metadata(obj_path)
            if not metadata or not validate_object(metadata):
                if delimiter == '/' and obj_path[-1] == delimiter:
                    clean_obj_path = obj_path[:-1]
                else:
                    clean_obj_path = obj_path
                try:
                    metadata = create_object_metadata(clean_obj_path)
                except OSError as e:
                    # FIXME - total hack to get upstream swift ported unit
                    # test cases working for now.
                    if e.errno != errno.ENOENT:
                        raise
            if not Glusterfs._implicit_dir_objects and metadata \
                    and metadata[X_CONTENT_TYPE] == DIR_TYPE \
                    and not dir_is_object(metadata):
                continue
            list_item = []
            list_item.append(obj)
            if metadata:
                list_item.append(metadata[X_TIMESTAMP])
                list_item.append(int(metadata[X_CONTENT_LENGTH]))
                list_item.append(metadata[X_CONTENT_TYPE])
                list_item.append(metadata[X_ETAG])
            container_list.append(list_item)
            count += 1
            if count >= limit:
                break

        return container_list

    def _update_object_count(self):
        objects, object_count, bytes_used = get_container_details(self.datadir)

        if X_OBJECTS_COUNT not in self.metadata \
                or int(self.metadata[X_OBJECTS_COUNT][0]) != object_count \
                or X_BYTES_USED not in self.metadata \
                or int(self.metadata[X_BYTES_USED][0]) != bytes_used:
            self.metadata[X_OBJECTS_COUNT] = (object_count, 0)
            self.metadata[X_BYTES_USED] = (bytes_used, 0)
            write_metadata(self.datadir, self.metadata)

        return objects

    def get_info(self):
        """
        Get global data for the container.
        :returns: dict with keys: account, container, object_count,
                  bytes_used, hash, id, created_at, put_timestamp,
                  delete_timestamp, reported_put_timestamp,
                  reported_delete_timestamp, reported_object_count, and
                  reported_bytes_used.
        """
        if self._dir_exists and Glusterfs._container_update_object_count:
            self._update_object_count()

        data = {'account': self.account, 'container': self.container,
                'object_count': self.metadata.get(
                    X_OBJECTS_COUNT, ('0', 0))[0],
                'bytes_used': self.metadata.get(X_BYTES_USED, ('0', 0))[0],
                'hash': '', 'id': '', 'created_at': '1',
                'put_timestamp': self.metadata.get(
                    X_PUT_TIMESTAMP, ('0', 0))[0],
                'delete_timestamp': '1',
                'reported_put_timestamp': '1',
                'reported_delete_timestamp': '1',
                'reported_object_count': '1', 'reported_bytes_used': '1',
                'x_container_sync_point1': self.metadata.get(
                    'x_container_sync_point1', -1),
                'x_container_sync_point2': self.metadata.get(
                    'x_container_sync_point2', -1),
                }
        return data

    def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
        # NOOP - should never be called since object file creation occurs
        # within a directory implicitly.
        pass

    def initialize(self, timestamp):
        """
        Create and write metatdata to directory/container.
        :param metadata: Metadata to write.
        """
        if not self._dir_exists:
            mkdirs(self.datadir)
            # If we create it, ensure we own it.
            do_chown(self.datadir, self.uid, self.gid)
        metadata = get_container_metadata(self.datadir)
        metadata[X_TIMESTAMP] = timestamp
        write_metadata(self.datadir, metadata)
        self.metadata = metadata
        self._dir_exists = True

    def update_put_timestamp(self, timestamp):
        """
        Update the PUT timestamp for the container.

        If the container does not exist, create it using a PUT timestamp of
        the given value.

        If the container does exist, update the PUT timestamp only if it is
        later than the existing value.
        """
        if not do_exists(self.datadir):
            self.initialize(timestamp)
        else:
            if timestamp > self.metadata[X_PUT_TIMESTAMP]:
                self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
                write_metadata(self.datadir, self.metadata)

    def delete_object(self, name, timestamp):
        # NOOP - should never be called since object file removal occurs
        # within a directory implicitly.
        return

    def delete_db(self, timestamp):
        """
        Delete the container (directory) if empty.

        :param timestamp: delete timestamp
        """
        # Let's check and see if it has directories that
        # where created by the code, but not by the
        # caller as objects
        rmobjdir(self.datadir)

    def set_x_container_sync_points(self, sync_point1, sync_point2):
        self.metadata['x_container_sync_point1'] = sync_point1
        self.metadata['x_container_sync_point2'] = sync_point2


class DiskAccount(DiskCommon):
    """
    Usage pattern from account/server.py (Havana, 1.8.0+):
        DELETE:
            .is_deleted()
            .delete_db()
        PUT:
            container:
                .pending_timeout
                .db_file
                .initialize()
                .is_deleted()
                .put_container()
            account:
                .db_file
                .initialize()
                .is_status_deleted()
                .is_deleted()
                .update_put_timestamp()
                .is_deleted() ???
                .update_metadata()
        HEAD:
            .pending_timeout
            .stale_reads_ok
            .is_deleted()
            .get_info()
            .metadata
        GET:
            .pending_timeout
            .stale_reads_ok
            .is_deleted()
            .get_info()
            .metadata
            .list_containers_iter()
        POST:
            .is_deleted()
            .update_metadata()
    """

    def __init__(self, root, drive, account, logger, **kwargs):
        super(DiskAccount, self).__init__(root, drive, account, logger,
                                          **kwargs)

        # Since accounts should always exist (given an account maps to a
        # gluster volume directly, and the mount has already been checked at
        # the beginning of the REST API handling), just assert that that
        # assumption still holds.
        assert self._dir_exists_read_metadata()
        assert self._dir_exists

        if not self.metadata or not validate_account(self.metadata):
            create_account_metadata(self.datadir)
            self.metadata = _read_metadata(self.datadir)

    def is_status_deleted(self):
        """
        Only returns true if the status field is set to DELETED.
        """
        # This function should always return False. Accounts are not created
        # and deleted, they exist if a Gluster volume can be mounted. There is
        # no way to delete accounts, so this could never return True.
        return False

    def initialize(self, timestamp):
        """
        Create and write metatdata to directory/account.
        :param metadata: Metadata to write.
        """
        metadata = get_account_metadata(self.datadir)
        metadata[X_TIMESTAMP] = timestamp
        write_metadata(self.datadir, metadata)
        self.metadata = metadata

    def update_put_timestamp(self, timestamp):
        # Since accounts always exists at this point, just update the account
        # PUT timestamp if this given timestamp is later than what we already
        # know.
        assert self._dir_exists

        if timestamp > self.metadata[X_PUT_TIMESTAMP][0]:
            self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
            write_metadata(self.datadir, self.metadata)

    def delete_db(self, timestamp):
        """
        Mark the account as deleted

        :param timestamp: delete timestamp
        """
        # Deleting an account is a no-op, since accounts are one-to-one
        # mappings to gluster volumes.
        #
        # FIXME: This means the caller will end up returning a success status
        # code for an operation that really should not be allowed. Instead, we
        # should modify the account server to not allow the DELETE method, and
        # should probably modify the proxy account controller to not allow the
        # DELETE method as well.
        return

    def put_container(self, container, put_timestamp, del_timestamp,
                      object_count, bytes_used):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        """
        # NOOP - should never be called since container directory creation
        # occurs from within the account directory implicitly.
        return

    def _update_container_count(self):
        containers, container_count = get_account_details(self.datadir)

        if X_CONTAINER_COUNT not in self.metadata \
                or int(self.metadata[X_CONTAINER_COUNT][0]) != container_count:
            self.metadata[X_CONTAINER_COUNT] = (container_count, 0)
            write_metadata(self.datadir, self.metadata)

        return containers

    def list_containers_iter(self, limit, marker, end_marker,
                             prefix, delimiter):
        """
        Return tuple of name, object_count, bytes_used, 0(is_subdir).
        Used by account server.
        """
        if delimiter and not prefix:
            prefix = ''

        account_list = []
        containers = self._update_container_count()
        if containers:
            containers.sort()
        else:
            return account_list

        if containers and end_marker:
            containers = filter_end_marker(containers, end_marker)

        if containers:
            if marker and marker >= prefix:
                containers = filter_marker(containers, marker)
            elif prefix:
                containers = filter_prefix_as_marker(containers, prefix)

            if prefix is None:
                # No prefix, we don't need to apply the other arguments, we
                # just return what we have.
                pass
            else:
                # We have a non-None (for all intents and purposes it is a
                # string) prefix.
                if not delimiter:
                    if not prefix:
                        # We have nothing more to do
                        pass
                    else:
                        containers = filter_prefix(containers, prefix)
                else:
                    containers = filter_delimiter(containers, delimiter,
                                                  prefix, marker)

        count = 0
        for cont in containers:
            list_item = []
            metadata = None
            list_item.append(cont)
            cont_path = os.path.join(self.datadir, cont)
            metadata = _read_metadata(cont_path)
            if not metadata or not validate_container(metadata):
                try:
                    metadata = create_container_metadata(cont_path)
                except OSError as e:
                    # FIXME - total hack to get upstream swift ported unit
                    # test cases working for now.
                    if e.errno != errno.ENOENT:
                        raise
            if metadata:
                list_item.append(metadata[X_OBJECTS_COUNT][0])
                list_item.append(metadata[X_BYTES_USED][0])
                list_item.append(0)
            account_list.append(list_item)
            count += 1
            if count >= limit:
                break

        return account_list

    def get_info(self):
        """
        Get global data for the account.
        :returns: dict with keys: account, created_at, put_timestamp,
                  delete_timestamp, container_count, object_count,
                  bytes_used, hash, id
        """
        if Glusterfs._account_update_container_count:
            self._update_container_count()

        data = {'account': self.account, 'created_at': '1',
                'put_timestamp': '1', 'delete_timestamp': '1',
                'container_count': self.metadata.get(
                    X_CONTAINER_COUNT, (0, 0))[0],
                'object_count': self.metadata.get(X_OBJECTS_COUNT, (0, 0))[0],
                'bytes_used': self.metadata.get(X_BYTES_USED, (0, 0))[0],
                'hash': '', 'id': ''}
        return data
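The listing path in the deleted module chains the `filter_*` generators over a sorted name list. A runnable sketch of that pipeline, with the generators re-declared here so the example does not depend on gluster-swift being installed:

    def filter_marker(objects, marker):
        # Strictly greater than the marker.
        for name in objects:
            if name > marker:
                yield name

    def filter_end_marker(objects, end_marker):
        # Strictly less than the end marker; stop early on sorted input.
        for name in objects:
            if name < end_marker:
                yield name
            else:
                break

    def filter_prefix(objects, prefix):
        # Matching names are contiguous in sorted input, so exit early
        # once we have seen the prefix and it stops matching.
        found = False
        for name in objects:
            if name.startswith(prefix):
                yield name
                found = True
            elif found:
                break

    objects = sorted(['a/1', 'a/2', 'b/1', 'b/2', 'c/1'])
    listing = filter_end_marker(filter_marker(objects, 'a/1'), 'c/1')
    print(list(filter_prefix(listing, 'b/')))  # -> ['b/1', 'b/2']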
@@ -32,12 +32,7 @@ _fs_conf = ConfigParser()
 MOUNT_IP = 'localhost'
 RUN_DIR = '/var/run/swift'
 SWIFT_DIR = '/etc/swift'
-_do_getsize = False
 _allow_mount_per_server = False
-_implicit_dir_objects = False
-_container_update_object_count = False
-_account_update_container_count = False
-_ignore_unsupported_headers = False

 if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
     try:
@@ -49,13 +44,6 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
     except (NoSectionError, NoOptionError):
         pass

-    try:
-        _do_getsize = _fs_conf.get('DEFAULT',
-                                   'accurate_size_in_listing',
-                                   "no") in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
-
     try:
         _allow_mount_per_server = _fs_conf.get('DEFAULT',
                                                'allow_mount_per_server',
@@ -64,55 +52,6 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
     except (NoSectionError, NoOptionError):
         pass

-    # -- Hidden configuration option --
-    # Report gratuitously created directories as objects
-    # Directories can be gratuitously created on the path to a given
-    # object. This option turn on or off the reporting of those directories.
-    # It defaults to False so that only those directories explicitly
-    # created by the object server PUT REST API are reported
-    try:
-        _implicit_dir_objects = \
-            _fs_conf.get('DEFAULT',
-                         'implicit_dir_objects',
-                         "no") in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
-
-    # -- Hidden configuration option --
-    # Due to the impact on performance, this option is disabled by default
-    try:
-        _container_update_object_count = \
-            _fs_conf.get('DEFAULT',
-                         'container_update_object_count',
-                         "no") in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
-
-    # -- Hidden configuration option --
-    # Due to the impact on performance, this option is disabled by default
-    try:
-        _account_update_container_count = \
-            _fs_conf.get('DEFAULT',
-                         'account_update_container_count',
-                         "no") in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
-
-    # -- Hidden configuration option --
-    # Ignore unsupported headers and allow them in a request without
-    # returning a 400-BadRequest. This setting can be set to
-    # allow unsupported headers such as X-Delete-At and
-    # X-Delete-After even though they will not be used.
-    try:
-        _ignore_unsupported_headers = \
-            _fs_conf.get('DEFAULT',
-                         'ignore_unsupported_headers',
-                         "no") in TRUE_VALUES
-    except (NoSectionError, NoOptionError):
-        pass
-
-NAME = 'glusterfs'
-

 def _busy_wait(full_mount_path):
     # Iterate for definite number of time over a given
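For reference, the fs.conf options removed above all follow the same parse pattern: read a string option, compare it against Swift's TRUE_VALUES, and swallow missing sections or options. A minimal Python 2 sketch of that pattern, with TRUE_VALUES inlined here rather than imported from swift.common.utils:

    from ConfigParser import ConfigParser, NoSectionError, NoOptionError

    # Inlined stand-in for swift.common.utils.TRUE_VALUES.
    TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))

    _fs_conf = ConfigParser()
    _allow_mount_per_server = False  # default when unset or unreadable

    if _fs_conf.read('/etc/swift/fs.conf'):
        try:
            _allow_mount_per_server = _fs_conf.get(
                'DEFAULT', 'allow_mount_per_server') in TRUE_VALUES
        except (NoSectionError, NoOptionError):
            pass  # keep the default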
@@ -14,86 +14,36 @@
 # limitations under the License.

 import os
-try:
-    from webob.exc import HTTPBadRequest
-except ImportError:
-    from swift.common.swob import HTTPBadRequest
+from swift.common.swob import HTTPBadRequest
 import swift.common.constraints
-import swift.common.ring as _ring
-from gluster.swift.common import Glusterfs, ring

-MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
-UNSUPPORTED_HEADERS = []
-
-
-def set_object_name_component_length(len=None):
-    global MAX_OBJECT_NAME_COMPONENT_LENGTH
-
-    if len:
-        MAX_OBJECT_NAME_COMPONENT_LENGTH = len
-    elif hasattr(swift.common.constraints, 'constraints_conf_int'):
-        MAX_OBJECT_NAME_COMPONENT_LENGTH = \
-            swift.common.constraints.constraints_conf_int(
-                'max_object_name_component_length', 255)
-    else:
-        MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
-    return
-
-set_object_name_component_length()
-
-
-def get_object_name_component_length():
-    return MAX_OBJECT_NAME_COMPONENT_LENGTH
+SOF_MAX_OBJECT_NAME_LENGTH = 221
+# Why 221?
+# The longest filename supported by XFS is 255.
+# http://lxr.free-electrons.com/source/fs/xfs/xfs_types.h#L125
+# SoF creates a temp file with the following naming convention:
+# .OBJECT_NAME.<random-string>
+# The random string is 32 characters long and the file name has two dots.
+# Hence 255 - 32 - 2 = 221.
+# NOTE: This limitation can be safely raised by having slashes in a really
+# long object name. Each segment between slashes ('/') should not exceed 221.


 def validate_obj_name_component(obj):
     if not obj:
         return 'cannot begin, end, or have contiguous %s\'s' % os.path.sep
-    if len(obj) > MAX_OBJECT_NAME_COMPONENT_LENGTH:
+    if len(obj) > SOF_MAX_OBJECT_NAME_LENGTH:
         return 'too long (%d)' % len(obj)
     if obj == '.' or obj == '..':
         return 'cannot be . or ..'
     return ''

-
-def validate_headers(req):
-    """
-    Validate client header requests
-    :param req: Http request
-    """
-    if not Glusterfs._ignore_unsupported_headers:
-        for unsupported_header in UNSUPPORTED_HEADERS:
-            if unsupported_header in req.headers:
-                return '%s headers are not supported' \
-                    % ','.join(UNSUPPORTED_HEADERS)
-    return ''
-
-
-# Save the original check object creation
-__check_object_creation = swift.common.constraints.check_object_creation
-__check_metadata = swift.common.constraints.check_metadata
-
-
-def gluster_check_metadata(req, target_type, POST=True):
-    """
-    :param req: HTTP request object
-    :param target_type: Value from POST passed to __check_metadata
-    :param POST: Only call __check_metadata on POST since Swift only
-                 calls check_metadata on POSTs.
-    """
-    ret = None
-    if POST:
-        ret = __check_metadata(req, target_type)
-    if ret is None:
-        bdy = validate_headers(req)
-        if bdy:
-            ret = HTTPBadRequest(body=bdy,
-                                 request=req,
-                                 content_type='text/plain')
-    return ret
+# Store Swift's check_object_creation method to be invoked later
+swift_check_object_creation = swift.common.constraints.check_object_creation


 # Define our new one which invokes the original
-def gluster_check_object_creation(req, object_name):
+def sof_check_object_creation(req, object_name):
     """
     Check to ensure that everything is alright about an object to be created.
     Monkey patches swift.common.constraints.check_object_creation, invoking
@@ -108,8 +58,10 @@ def gluster_check_object_creation(req, object_name):
     :raises HTTPBadRequest: missing or bad content-type header, or
             bad metadata
     """
-    ret = __check_object_creation(req, object_name)
+    # Invoke Swift's method
+    ret = swift_check_object_creation(req, object_name)

+    # SoF's additional checks
     if ret is None:
         for obj in object_name.split(os.path.sep):
             reason = validate_obj_name_component(obj)
@@ -119,20 +71,4 @@ def gluster_check_object_creation(req, object_name):
             ret = HTTPBadRequest(body=bdy,
                                  request=req,
                                  content_type='text/plain')
-    if ret is None:
-        ret = gluster_check_metadata(req, 'object', POST=False)
-
     return ret
-
-# Replace the original checks with ours
-swift.common.constraints.check_object_creation = gluster_check_object_creation
-swift.common.constraints.check_metadata = gluster_check_metadata
-
-# Replace the original check mount with ours
-swift.common.constraints.check_mount = Glusterfs.mount
-
-# Save the original Ring class
-__Ring = _ring.Ring
-
-# Replace the original Ring class
-_ring.Ring = ring.Ring
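The tail of this file shows the idiom the whole module relies on: keep a reference to the upstream check, define a wrapper that calls it first and then layers on the extra validation, and assign the wrapper back over the upstream name. A self-contained sketch of that save-and-wrap idiom (the module and the 221 limit below are illustrative stand-ins, not Swift's real API):

    import types

    # Stand-in for swift.common.constraints.
    constraints = types.ModuleType('constraints')

    def check_object_creation(req, object_name):
        return None  # upstream behaviour: None means "no error"

    constraints.check_object_creation = check_object_creation

    # Save the original, then install a wrapper that adds an extra check.
    _orig_check_object_creation = constraints.check_object_creation

    def sof_check_object_creation(req, object_name):
        ret = _orig_check_object_creation(req, object_name)
        if ret is None and len(object_name) > 221:
            ret = 'too long (%d)' % len(object_name)  # the additional check
        return ret

    constraints.check_object_creation = sof_check_object_creation

    print(constraints.check_object_creation(None, 'x' * 300))  # too long (300)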
@@ -1,3 +0,0 @@
*.egg-info
*.py[co]
.DS_Store
@@ -1,4 +0,0 @@
#!/bin/bash

nosetests test_swauth/unit --exe --with-coverage --cover-package swauth --cover-erase
rm -f .coverage
@@ -1,39 +0,0 @@
Maintainer
----------
Greg Holt

Original Authors
----------------
Chuck Thier
Greg Holt
Greg Lange
Jay Payne
John Dickinson
Michael Barton
Will Reese

Contributors
------------
Andrew Clay Shafer
Anne Gentle
Brian K. Jones
Caleb Tennis
Chmouel Boudjnah
Christian Schwede
Chris Wedgwood
Clay Gerrard
Colin Nicholson
Conrad Weidenkeller
Cory Wright
David Goetz
Ed Leafe
Fujita Tomonori
Kapil Thangavelu
Monty Taylor
Pablo Llopis
Paul Jimenez
Pete Zaitcev
Russ Nelson
Scott Simpson
Soren Hansen
Stephen Milton
@@ -1,62 +0,0 @@
swauth (1.0.8)

    Added request.environ[reseller_request] = True if request is coming from an
    user in .reseller_admin group

    Fixed to work with newer Swift versions whose memcache clients require a
    time keyword argument when the older versions required a timeout keyword
    argument.

swauth (1.0.7)

    New X-Auth-Token-Lifetime header a user can set to how long they'd like
    their token to be good for.

    New max_token_life config value for capping the above.

    New X-Auth-Token-Expires header returned with the get token request.

    Switchover to swift.common.swob instead of WebOb; requires Swift >= 1.7.6
    now.

swauth (1.0.6)

    Apparently I haven't been keeping up with this CHANGELOG. I'll try to be
    better onward.

    This release added passing OPTIONS requests through untouched, needed for
    CORS support in Swift.

    Also, Swauth is a bit more restrictive in deciding when it's the definitive
    auth for a request.

swauth (1.0.3-dev)

    This release is still under development. A full change log will be made at
    release. Until then, you can see what has changed with:

        git log 1.0.2..HEAD

swauth (1.0.2)

    Fixed bug rejecting requests when using multiple instances of Swauth or
    Swauth with other auth services.

    Fixed bug interpreting URL-encoded user names and keys.

    Added support for the Swift container sync feature.

    Allowed /not/ setting super_admin_key to disable Swauth administration
    features.

    Added swauth_remote mode so the Swauth middleware for one Swift cluster
    could be pointing to the Swauth service on another Swift cluster, sharing
    account/user data sets.

    Added ability to purge stored tokens.

    Added API documentation for internal Swauth API.

swauth (1.0.1)

    Initial release after separation from Swift.
@@ -1,202 +0,0 @@
(The deleted file was the standard Apache License, Version 2.0, January 2004
boilerplate text, <http://www.apache.org/licenses/>, removed verbatim.)
@@ -1,4 +0,0 @@
include AUTHORS LICENSE README.md .unittests test_swauth/__init__.py
include CHANGELOG
graft doc
graft etc
@@ -1,71 +0,0 @@
Swauth
------

An Auth Service for Swift as WSGI Middleware that uses Swift itself as a
backing store. Sphinx-built docs at: <http://gholt.github.com/swauth/>

See also <https://github.com/openstack/keystone> for the standard OpenStack
auth service.


NOTE
----

**Be sure to review the Sphinx-built docs at:
<http://gholt.github.com/swauth/>**


Quick Install
-------------

1) Install Swauth with ``sudo python setup.py install`` or ``sudo python
   setup.py develop`` or via whatever packaging system you may be using.

2) Alter your proxy-server.conf pipeline to have swauth instead of tempauth:

    Was:

        [pipeline:main]
        pipeline = catch_errors cache tempauth proxy-server

    Change To:

        [pipeline:main]
        pipeline = catch_errors cache swauth proxy-server

3) Add to your proxy-server.conf the section for the Swauth WSGI filter:

    [filter:swauth]
    use = egg:swauth#swauth
    set log_name = swauth
    super_admin_key = swauthkey

4) Be sure your proxy server allows account management:

    [app:proxy-server]
    ...
    allow_account_management = true

5) Restart your proxy server ``swift-init proxy reload``

6) Initialize the Swauth backing store in Swift ``swauth-prep -K swauthkey``

7) Add an account/user ``swauth-add-user -A http://127.0.0.1:8080/auth/ -K
   swauthkey -a test tester testing``

8) Ensure it works ``swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K
   testing stat -v``


Web Admin Install
-----------------

1) If you installed from packages, you'll need to cd to the webadmin directory
   the package installed. This is ``/usr/share/doc/python-swauth/webadmin``
   with the Lucid packages. If you installed from source, you'll need to cd to
   the webadmin directory in the source directory.

2) Upload the Web Admin files with ``swift -A http://127.0.0.1:8080/auth/v1.0
   -U .super_admin:.super_admin -K swauthkey upload .webadmin .``

3) Open ``http://127.0.0.1:8080/auth/`` in your browser.
@ -1,2 +0,0 @@
|
|||||||
[python: **.py]
|
|
||||||
|
|
@ -1,80 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin', help='The user with admin rights to add '
|
|
||||||
'accounts (default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 1:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
account = args[0]
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/%s' % (parsed_path, account)
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key,
|
|
||||||
'Content-Length': '0'}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('Account creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('Account creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('Account creation failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('Account creation failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
else:
|
|
||||||
exit('Account creation failed: %s %s' %
|
|
||||||
(resp.status, resp.reason))
|
|
@ -1,127 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(
|
|
||||||
usage='Usage: %prog [options] <account> <user> <password>')
|
|
||||||
parser.add_option('-a', '--admin', dest='admin', action='store_true',
|
|
||||||
default=False, help='Give the user administrator access; otherwise '
|
|
||||||
'the user will only have access to containers specifically allowed '
|
|
||||||
'with ACLs.')
|
|
||||||
parser.add_option('-r', '--reseller-admin', dest='reseller_admin',
|
|
||||||
action='store_true', default=False, help='Give the user full reseller '
|
|
||||||
'administrator access, giving them full access to all accounts within '
|
|
||||||
'the reseller, including the ability to create new accounts. Creating '
|
|
||||||
'a new reseller admin requires super_admin rights.')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin', help='The user with admin rights to add users '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights to add users is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 3:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
account, user, password = args
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
|
|
||||||
# Check if user is changing his own password. This is carried out by
|
|
||||||
# making sure that the user changing the password and the user whose
|
|
||||||
# password is being changed are the same.
|
|
||||||
# If not, ensure that the account exists before creating new user.
|
|
||||||
if not options.admin_user == (account + ':' + user):
|
|
||||||
# GET the account
|
|
||||||
path = '%sv2/%s' % (parsed_path, account)
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'GET', path,
|
|
||||||
headers, ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
# If the GET operation fails, it means the account does not
|
|
||||||
# exist. Now we create the account by sending a PUT request.
|
|
||||||
headers['Content-Length'] = '0'
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path,
|
|
||||||
headers, ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
print 'Account creation failed: %s %s' % \
|
|
||||||
(resp.status, resp.reason)
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('User creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('User creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
# Add the user
|
|
||||||
path = '%sv2/%s/%s' % (parsed_path, account, user)
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key,
|
|
||||||
'X-Auth-User-Key': password,
|
|
||||||
'Content-Length': '0'}
|
|
||||||
if options.admin:
|
|
||||||
headers['X-Auth-User-Admin'] = 'true'
|
|
||||||
if options.reseller_admin:
|
|
||||||
headers['X-Auth-User-Reseller-Admin'] = 'true'
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('User creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('User creation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('User creation failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('User creation failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
else:
|
|
||||||
exit('User creation failed: %s %s' %
|
|
||||||
(resp.status, resp.reason))
|
|
@ -1,202 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
try:
|
|
||||||
import simplejson as json
|
|
||||||
except ImportError:
|
|
||||||
import json
|
|
||||||
import gettext
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from datetime import datetime, timedelta
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
from time import sleep, time
|
|
||||||
|
|
||||||
from swiftclient.client import Connection, ClientException
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='Usage: %prog [options]')
|
|
||||||
parser.add_option('-t', '--token-life', dest='token_life',
|
|
||||||
default='86400', help='The expected life of tokens; token objects '
|
|
||||||
'modified more than this number of seconds ago will be checked for '
|
|
||||||
'expiration (default: 86400).')
|
|
||||||
parser.add_option('-s', '--sleep', dest='sleep',
|
|
||||||
default='0.1', help='The number of seconds to sleep between token '
|
|
||||||
'checks (default: 0.1)')
|
|
||||||
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
|
|
||||||
default=False, help='Outputs everything done instead of just the '
|
|
||||||
'deletions.')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for .super_admin is required.')
|
|
||||||
parser.add_option('', '--purge', dest='purge_account', help='Purges all '
|
|
||||||
'tokens for a given account whether the tokens have expired or not.'
|
|
||||||
' Memcached restart is recommended. Old tokens may still persist in'
|
|
||||||
' memcached.')
|
|
||||||
parser.add_option('', '--purge-all', dest='purge_all', action='store_true',
|
|
||||||
default=False, help='Purges all tokens for all accounts and users '
|
|
||||||
'whether the tokens have expired or not.'
|
|
||||||
' Memcached restart is recommended. Old tokens may still persist in'
|
|
||||||
' memcached.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 0:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
|
|
||||||
options.admin_url = options.admin_url.rstrip('/')
|
|
||||||
if not options.admin_url.endswith('/v1.0'):
|
|
||||||
options.admin_url += '/v1.0'
|
|
||||||
options.admin_user = '.super_admin:.super_admin'
|
|
||||||
|
|
||||||
try:
|
|
||||||
options.token_life = timedelta(0, float(options.token_life))
|
|
||||||
options.sleep = float(options.sleep)
|
|
||||||
except ValueError:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
|
|
||||||
conn = Connection(options.admin_url, options.admin_user, options.admin_key)
|
|
||||||
if options.purge_account:
|
|
||||||
marker = None
|
|
||||||
while True:
|
|
||||||
if options.verbose:
|
|
||||||
print 'GET %s?marker=%s' % (options.purge_account, marker)
|
|
||||||
try:
|
|
||||||
objs = conn.get_container(options.purge_account,
|
|
||||||
marker=marker)[1]
|
|
||||||
except ClientException, e:
|
|
||||||
if e.http_status == 404:
|
|
||||||
exit('Account %s not found.' % (options.purge_account))
|
|
||||||
elif e.http_status == 401:
|
|
||||||
exit('Cleanup tokens failed: 401 Unauthorized: ' \
|
|
||||||
'Invalid user/key provided')
|
|
||||||
else:
|
|
||||||
exit('Purging %s failed with status '
|
|
||||||
'code %d' % (options.purge_account, e.http_status))
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('Token clean-up failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
if objs:
|
|
||||||
marker = objs[-1]['name']
|
|
||||||
else:
|
|
||||||
if options.verbose:
|
|
||||||
print 'No more objects in %s' % options.purge_account
|
|
||||||
break
|
|
||||||
for obj in objs:
|
|
||||||
if options.verbose:
|
|
||||||
print 'HEAD %s/%s' % (options.purge_account, obj['name'])
|
|
||||||
headers = conn.head_object(options.purge_account, obj['name'])
|
|
||||||
if 'x-object-meta-auth-token' in headers:
|
|
||||||
token = headers['x-object-meta-auth-token']
|
|
||||||
container = '.token_%s' % token[-1]
|
|
||||||
if options.verbose:
|
|
||||||
print '%s/%s purge account %r; deleting' % \
|
|
||||||
(container, token, options.purge_account)
|
|
||||||
print 'DELETE %s/%s' % (container, token)
|
|
||||||
try:
|
|
||||||
conn.delete_object(container, token)
|
|
||||||
except ClientException, err:
|
|
||||||
if err.http_status != 404:
|
|
||||||
raise
|
|
||||||
continue
|
|
||||||
if options.verbose:
|
|
||||||
print 'Done.'
|
|
||||||
exit(0)
|
|
||||||
for x in xrange(16):
|
|
||||||
container = '.token_%x' % x
|
|
||||||
marker = None
|
|
||||||
while True:
|
|
||||||
if options.verbose:
|
|
||||||
print 'GET %s?marker=%s' % (container, marker)
|
|
||||||
try:
|
|
||||||
objs = conn.get_container(container, marker=marker)[1]
|
|
||||||
except ClientException, e:
|
|
||||||
if e.http_status == 404:
|
|
||||||
exit('Container %s not found. gswauth-prep needs to be '
|
|
||||||
'rerun' % (container))
|
|
||||||
elif e.http_status == 401:
|
|
||||||
exit('Cleanup tokens failed: 401 Unauthorized: ' \
|
|
||||||
'Invalid user/key provided')
|
|
||||||
else:
|
|
||||||
exit('Object listing on container %s failed with status '
|
|
||||||
'code %d' % (container, e.http_status))
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('Token clean-up failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if objs:
|
|
||||||
marker = objs[-1]['name']
|
|
||||||
else:
|
|
||||||
if options.verbose:
|
|
||||||
print 'No more objects in %s' % container
|
|
||||||
break
|
|
||||||
for obj in objs:
|
|
||||||
if options.purge_all:
|
|
||||||
if options.verbose:
|
|
||||||
print '%s/%s purge all; deleting' % \
|
|
||||||
(container, obj['name'])
|
|
||||||
print 'DELETE %s/%s' % (container, obj['name'])
|
|
||||||
try:
|
|
||||||
conn.delete_object(container, obj['name'])
|
|
||||||
except ClientException, err:
|
|
||||||
if err.http_status != 404:
|
|
||||||
raise
|
|
||||||
continue
|
|
||||||
last_modified = datetime(*map(int, re.split('[^\d]',
|
|
||||||
obj['last_modified'])[:-1]))
|
|
||||||
ago = datetime.utcnow() - last_modified
|
|
||||||
if ago > options.token_life:
|
|
||||||
if options.verbose:
|
|
||||||
print '%s/%s last modified %ss ago; investigating' % \
|
|
||||||
(container, obj['name'],
|
|
||||||
ago.days * 86400 + ago.seconds)
|
|
||||||
print 'GET %s/%s' % (container, obj['name'])
|
|
||||||
detail = conn.get_object(container, obj['name'])[1]
|
|
||||||
detail = json.loads(detail)
|
|
||||||
if detail['expires'] < time():
|
|
||||||
if options.verbose:
|
|
||||||
print '%s/%s expired %ds ago; deleting' % \
|
|
||||||
(container, obj['name'],
|
|
||||||
time() - detail['expires'])
|
|
||||||
print 'DELETE %s/%s' % (container, obj['name'])
|
|
||||||
try:
|
|
||||||
conn.delete_object(container, obj['name'])
|
|
||||||
except ClientException, e:
|
|
||||||
if e.http_status != 404:
|
|
||||||
print 'DELETE of %s/%s failed with status ' \
|
|
||||||
'code %d' % (container, obj['name'],
|
|
||||||
e.http_status)
|
|
||||||
elif options.verbose:
|
|
||||||
print "%s/%s won't expire for %ds; skipping" % \
|
|
||||||
(container, obj['name'],
|
|
||||||
detail['expires'] - time())
|
|
||||||
elif options.verbose:
|
|
||||||
print '%s/%s last modified %ss ago; skipping' % \
|
|
||||||
(container, obj['name'],
|
|
||||||
ago.days * 86400 + ago.seconds)
|
|
||||||
sleep(options.sleep)
|
|
||||||
if options.verbose:
|
|
||||||
print 'Done.'
|
|
||||||
print 'Recommended to restart memcached as old invalid tokens may' \
|
|
||||||
' still persist in memcached.'
|
|
@ -1,86 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin',
|
|
||||||
help='The user with admin rights to delete accounts '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights to delete accounts '
|
|
||||||
'is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 1:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
account = args[0]
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/%s' % (parsed_path, account)
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('Account deletion failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('Account deletion failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('Delete account failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('Delete account failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 404:
|
|
||||||
exit('Delete account failed: %s %s: Account %s does not exist' %
|
|
||||||
(resp.status, resp.reason, account))
|
|
||||||
elif resp.status == 409:
|
|
||||||
exit('Delete account failed: %s %s: Account %s contains active users. '
|
|
||||||
'Delete all users first.' % (resp.status, resp.reason, account))
|
|
||||||
else:
|
|
||||||
exit('Delete account failed: %s %s' % (resp.status, resp.reason))
|
|
@ -1,83 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='Usage: %prog [options] <account> <user>')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin',
|
|
||||||
help='The user with admin rights to delete users '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights to delete '
|
|
||||||
'users is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 2:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
account, user = args
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/%s/%s' % (parsed_path, account, user)
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('User deletion failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('User deletion failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('Delete user failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('Delete user failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 404:
|
|
||||||
exit('Delete user failed: %s %s: User %s does not exist' %
|
|
||||||
(resp.status, resp.reason, user))
|
|
||||||
else:
|
|
||||||
exit('Delete user failed: %s %s' % (resp.status, resp.reason))
|
|
@ -1,117 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
try:
|
|
||||||
import simplejson as json
|
|
||||||
except ImportError:
|
|
||||||
import json
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
import types
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
from prettytable import PrettyTable
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='''
|
|
||||||
Usage: %prog [options] [account] [user]
|
|
||||||
|
|
||||||
If [account] and [user] are omitted, a list of accounts will be output.
|
|
||||||
|
|
||||||
If [account] is included but not [user], a list of users within the account
|
|
||||||
will be output.
|
|
||||||
|
|
||||||
If [account] and [user] are included, a list of groups the user belongs to
|
|
||||||
will be ouptput.
|
|
||||||
|
|
||||||
If the [user] is '.groups', the active groups for the account will be listed.
|
|
||||||
'''.strip())
|
|
||||||
parser.add_option('-p', '--plain-text', dest='plain_text',
|
|
||||||
action='store_true', default=False, help='Changes the output from '
|
|
||||||
'JSON to plain text. This will cause an account to list only the '
|
|
||||||
'users and a user to list only the groups.')
|
|
||||||
parser.add_option('-j', '--json', dest='json_format',
|
|
||||||
action='store_true', default=False, help='Output in JSON format. '
|
|
||||||
'This will print all information about given account or user, '
|
|
||||||
'including stored password.')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin', help='The user with admin rights '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) > 2:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/%s' % (parsed_path, '/'.join(args))
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'GET', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('List failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('List failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
body = resp.read()
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('List failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('List failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
else:
|
|
||||||
exit('List failed: %s %s' % (resp.status, resp.reason))
|
|
||||||
if options.plain_text:
|
|
||||||
info = json.loads(body)
|
|
||||||
for group in info[['accounts', 'users', 'groups'][len(args)]]:
|
|
||||||
print group['name']
|
|
||||||
elif options.json_format:
|
|
||||||
print body
|
|
||||||
else:
|
|
||||||
info = json.loads(body)
|
|
||||||
h = ['accounts', 'users', 'groups'][len(args)]
|
|
||||||
table = PrettyTable([h.title()])
|
|
||||||
for group in info[h]:
|
|
||||||
table.add_row([group['name']])
|
|
||||||
print table
|
|
@ -1,75 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='Usage: %prog [options]')
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin', help='The user with admin rights '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if args:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/.prep' % parsed_path
|
|
||||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('gswauth preparation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('gswauth preparation failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('gswauth preparation failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
else:
|
|
||||||
exit('gswauth preparation failed: %s %s' %
|
|
||||||
(resp.status, resp.reason))
|
|
@ -1,89 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
try:
|
|
||||||
import simplejson as json
|
|
||||||
except ImportError:
|
|
||||||
import json
|
|
||||||
import gettext
|
|
||||||
import socket
|
|
||||||
from optparse import OptionParser
|
|
||||||
from sys import argv, exit
|
|
||||||
|
|
||||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
|
||||||
from swift.common.utils import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
gettext.install('gswauth', unicode=1)
|
|
||||||
parser = OptionParser(usage='''
|
|
||||||
Usage: %prog [options] <account> <service> <name> <value>
|
|
||||||
|
|
||||||
Sets a service URL for an account. Can only be set by a reseller admin.
|
|
||||||
|
|
||||||
Example: %prog -K gswauthkey test storage local http://127.0.0.1:8080/v1/AUTH_018c3946-23f8-4efb-a8fb-b67aae8e4162
|
|
||||||
'''.strip())
|
|
||||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
|
||||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
|
||||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
|
||||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
|
||||||
default='.super_admin', help='The user with admin rights '
|
|
||||||
'(default: .super_admin).')
|
|
||||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
|
||||||
help='The key for the user with admin rights is required.')
|
|
||||||
args = argv[1:]
|
|
||||||
if not args:
|
|
||||||
args.append('-h')
|
|
||||||
(options, args) = parser.parse_args(args)
|
|
||||||
if len(args) != 4:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
if options.admin_key is None:
|
|
||||||
parser.parse_args(['-h'])
|
|
||||||
account, service, name, url = args
|
|
||||||
parsed = urlparse(options.admin_url)
|
|
||||||
if parsed.scheme not in ('http', 'https'):
|
|
||||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
|
||||||
(parsed.scheme, repr(options.admin_url)))
|
|
||||||
parsed_path = parsed.path
|
|
||||||
if not parsed_path:
|
|
||||||
parsed_path = '/'
|
|
||||||
elif parsed_path[-1] != '/':
|
|
||||||
parsed_path += '/'
|
|
||||||
path = '%sv2/%s/.services' % (parsed_path, account)
|
|
||||||
body = json.dumps({service: {name: url}})
|
|
||||||
headers = {'Content-Length': str(len(body)),
|
|
||||||
'X-Auth-Admin-User': options.admin_user,
|
|
||||||
'X-Auth-Admin-Key': options.admin_key}
|
|
||||||
try:
|
|
||||||
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
|
||||||
ssl=(parsed.scheme == 'https'))
|
|
||||||
conn.send(body)
|
|
||||||
resp = conn.getresponse()
|
|
||||||
except socket.gaierror, err:
|
|
||||||
exit('Service set failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % err)
|
|
||||||
except socket.error, (errno, msg):
|
|
||||||
exit('Service set failed: %s. ' \
|
|
||||||
'Check that the admin_url is valid' % msg)
|
|
||||||
if resp.status // 100 != 2:
|
|
||||||
if resp.status == 401:
|
|
||||||
exit('Service set failed: %s %s: Invalid user/key provided' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
elif resp.status == 403:
|
|
||||||
exit('Service set failed: %s %s: Insufficient privileges' %
|
|
||||||
(resp.status, resp.reason))
|
|
||||||
else:
|
|
||||||
exit('Service set failed: %s %s' % (resp.status, resp.reason))
|
|
@ -1,466 +0,0 @@
|
|||||||
.. _api_top:
|
|
||||||
|
|
||||||
----------
|
|
||||||
Swauth API
|
|
||||||
----------
|
|
||||||
|
|
||||||
Overview
|
|
||||||
========
|
|
||||||
|
|
||||||
Swauth has its own internal versioned REST API for adding, removing,
|
|
||||||
and editing accounts. This document explains the v2 API.
|
|
||||||
|
|
||||||
Authentication
|
|
||||||
--------------
|
|
||||||
|
|
||||||
Each REST request against the swauth API requires the inclusion of a
|
|
||||||
specific authorization user and key to be passed in a specific HTTP
|
|
||||||
header. These headers are defined as ``X-Auth-Admin-User`` and
|
|
||||||
``X-Auth-Admin-Key``.
|
|
||||||
|
|
||||||
Typically, these values are ``.super_admin`` (the site super admin
|
|
||||||
user) with the key being specified in the swauth middleware
|
|
||||||
configuration as ``super_admin_key``.
|
|
||||||
|
|
||||||
This could also be a reseller admin with the appropriate rights to
|
|
||||||
perform actions on reseller accounts.
|
|
||||||
|
|
||||||
Endpoints
|
|
||||||
---------
|
|
||||||
|
|
||||||
The swauth API endpoint is presented on the proxy servers, in the
|
|
||||||
"/auth" namespace. In addition, the API is versioned, and the version
|
|
||||||
documented is version 2. API versions subdivide the auth namespace by
|
|
||||||
version, specified as a version identifier like "v2".
|
|
||||||
|
|
||||||
The auth endpoint described herein is therefore located at "/auth/v2/"
|
|
||||||
as presented by the proxy servers.
|
|
||||||
|
|
||||||
Bear in mind that in order for the auth management API to be
|
|
||||||
presented, it must be enabled in the proxy server config by setting
|
|
||||||
``allow_account_managment`` to ``true`` in the ``[app:proxy-server]``
|
|
||||||
stanza of your proxy-server.conf.
|
|
||||||
|
|
||||||
Responses
|
|
||||||
---------
|
|
||||||
|
|
||||||
Responses from the auth APIs are returned as a JSON structure.
|
|
||||||
Example return values in this document are edited for readability.
|
|
||||||
|
|
||||||
|
|
||||||
Reseller/Admin Services
|
|
||||||
=======================
|
|
||||||
|
|
||||||
Operations can be performed against the endpoint itself to perform
|
|
||||||
general administrative operations. Currently, the only operations
|
|
||||||
that can be performed is a GET operation to get reseller or site admin
|
|
||||||
information.
|
|
||||||
|
|
||||||
Get Admin Info
|
|
||||||
--------------
|
|
||||||
|
|
||||||
A GET request at the swauth endpoint will return reseller information
|
|
||||||
for the account specified in the ``X-Auth-Admin-User`` header.
|
|
||||||
Currently, the information returned is limited to a list of accounts
|
|
||||||
for the reseller or site admin.
|
|
||||||
|
|
||||||
Valid return codes:
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
GET /auth/<api version>/ HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -D - https://<endpoint>/auth/v2/ \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Result::
|
|
||||||
|
|
||||||
HTTP/1.1 200 OK
|
|
||||||
|
|
||||||
{ "accounts":
|
|
||||||
[
|
|
||||||
{ "name": "account1" },
|
|
||||||
{ "name": "account2" },
|
|
||||||
{ "name": "account3" }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Account Services
|
|
||||||
================
|
|
||||||
|
|
||||||
There are API request to get account details, create, and delete
|
|
||||||
accounts, mapping logically to the REST verbs GET, PUT, and DELETE.
|
|
||||||
These actions are performed against an account URI, in the following
|
|
||||||
general request structure::
|
|
||||||
|
|
||||||
METHOD /auth/<version>/<account> HTTP/1.1
|
|
||||||
|
|
||||||
The methods that can be used are detailed below.
|
|
||||||
|
|
||||||
Get Account Details
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
Account details can be retrieved by performing a GET request against
|
|
||||||
an account URI. On success, a JSON dictionary will be returned
|
|
||||||
containing the keys `account_id`, `services`, and `users`. The
|
|
||||||
`account_id` is the value used when creating service accounts. The
|
|
||||||
`services` value is a dict that represents valid storage cluster
|
|
||||||
endpoints, and which endpoint is the default. The 'users' value is a
|
|
||||||
list of dicts, each dict representing a user and currently only
|
|
||||||
containing the single key 'name'.
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
GET /auth/<api version>/<account> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -D - https://<endpoint>/auth/v2/<account> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 200 OK
|
|
||||||
|
|
||||||
{ "services":
|
|
||||||
{ "storage":
|
|
||||||
{ "default": "local",
|
|
||||||
"local": "https://<storage endpoint>/v1/<account_id>" },
|
|
||||||
},
|
|
||||||
"account_id": "<account_id>",
|
|
||||||
"users": [ { "name": "user1" },
|
|
||||||
{ "name": "user2" } ]
|
|
||||||
}
|
|
||||||
|
|
||||||
Create Account
|
|
||||||
--------------
|
|
||||||
|
|
||||||
An account can be created with a PUT request against a non-existent
|
|
||||||
account. By default, a newly created UUID4 will be used with the
|
|
||||||
reseller prefix as the account ID used when creating corresponding
|
|
||||||
service accounts. However, you can provide an X-Account-Suffix header
|
|
||||||
to replace the UUDI4 part.
|
|
||||||
|
|
||||||
Valid return codes:
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
GET /auth/<api version>/<new_account> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -D - https://<endpoint>/auth/v2/<new_account> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 201 Created
|
|
||||||
|
|
||||||
|
|
||||||
Delete Account
|
|
||||||
--------------
|
|
||||||
|
|
||||||
An account can be deleted with a DELETE request against an existing
|
|
||||||
account.
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
* 204: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 404: Account not found
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
DELETE /auth/<api version>/<account> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -XDELETE -D - https://<endpoint>/auth/v2/<account> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 204 No Content
|
|
||||||
|
|
||||||
|
|
||||||
User Services
|
|
||||||
=============
|
|
||||||
|
|
||||||
Each account in swauth contains zero or more users. These users can
|
|
||||||
be determined with the 'Get Account Details' API request against an
|
|
||||||
account.
|
|
||||||
|
|
||||||
Users in an account can be created, modified, and detailed as
|
|
||||||
described below by apply the appropriate REST verbs to a user URI, in
|
|
||||||
the following general request structure::
|
|
||||||
|
|
||||||
METHOD /auth/<version>/<account>/<user> HTTP/1.1
|
|
||||||
|
|
||||||
The methods that can be used are detailed below.
|
|
||||||
|
|
||||||
Get User Details
|
|
||||||
----------------
|
|
||||||
|
|
||||||
User details can be retrieved by performing a GET request against
|
|
||||||
a user URI. On success, a JSON dictionary will be returned as
|
|
||||||
described::
|
|
||||||
|
|
||||||
{"groups": [ # List of groups the user is a member of
|
|
||||||
{"name": "<act>:<usr>"},
|
|
||||||
# The first group is a unique user identifier
|
|
||||||
{"name": "<account>"},
|
|
||||||
# The second group is the auth account name
|
|
||||||
{"name": "<additional-group>"}
|
|
||||||
# There may be additional groups, .admin being a
|
|
||||||
# special group indicating an account admin and
|
|
||||||
# .reseller_admin indicating a reseller admin.
|
|
||||||
],
|
|
||||||
"auth": "<auth-type>:<key>"
|
|
||||||
# The auth-type and key for the user; currently only
|
|
||||||
# plaintext and sha1 are implemented as auth types.
|
|
||||||
}
|
|
||||||
|
|
||||||
For example::
|
|
||||||
|
|
||||||
{"groups": [{"name": "test:tester"}, {"name": "test"},
|
|
||||||
{"name": ".admin"}],
|
|
||||||
"auth": "plaintext:testing"}
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 404: Unknown account
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
GET /auth/<api version>/<account>/<user> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -D - https://<endpoint>/auth/v2/<account>/<user> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 200 Ok
|
|
||||||
|
|
||||||
{ "groups": [ { "name": "<account>:<user>" },
|
|
||||||
{ "name": "<user>" },
|
|
||||||
{ "name": ".admin" } ],
|
|
||||||
"auth" : "plaintext:password" }
|
|
||||||
|
|
||||||
|
|
||||||
Create User
|
|
||||||
-----------
|
|
||||||
|
|
||||||
A user can be created with a PUT request against a non-existent
|
|
||||||
user URI. The new user's password must be set using the
|
|
||||||
``X-Auth-User-Key`` header. The user name MUST NOT start with a
|
|
||||||
period ('.'). This requirement is enforced by the API, and will
|
|
||||||
result in a 400 error.
|
|
||||||
|
|
||||||
Optional Headers:
|
|
||||||
|
|
||||||
* ``X-Auth-User-Admin: true``: create the user as an account admin
|
|
||||||
* ``X-Auth-User-Reseller-Admin: true``: create the user as a reseller
|
|
||||||
admin
|
|
||||||
|
|
||||||
Reseller admin accounts can only be created by the site admin, while
|
|
||||||
regular accounts (or account admin accounts) can be created by an
|
|
||||||
account admin, an appropriate reseller admin, or the site admin.
|
|
||||||
|
|
||||||
Note that PUT requests are idempotent, and the PUT request serves as
|
|
||||||
both a request and modify action.
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
* 200: Success
|
|
||||||
* 400: Invalid request (missing required headers)
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key, or insufficient priv
|
|
||||||
* 404: Unknown account
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
PUT /auth/<api version>/<account>/<user> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
X-Auth-User-Admin: true
|
|
||||||
X-Auth-User-Key: secret
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -XPUT -D - https://<endpoint>/auth/v2/<account>/<user> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey" \
|
|
||||||
-H "X-Auth-User-Admin: true" \
|
|
||||||
-H "X-Auth-User-Key: secret"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 201 Created
|
|
||||||
|
|
||||||
Delete User
|
|
||||||
-----------
|
|
||||||
|
|
||||||
A user can be deleted by performing a DELETE request against a user
|
|
||||||
URI. This action can only be performed by an account admin,
|
|
||||||
appropriate reseller admin, or site admin.
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key, or insufficient priv
|
|
||||||
* 404: Unknown account or user
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
DELETE /auth/<api version>/<account>/<user> HTTP/1.1
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -XDELETE -D - https://<endpoint>/auth/v2/<account>/<user> \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey"
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 204 No Content
|
|
||||||
|
|
||||||
|
|
||||||
Other Services
|
|
||||||
==============
|
|
||||||
|
|
||||||
There are several other swauth functions that can be performed, mostly
|
|
||||||
done via "pseudo-user" accounts. These are well-known user names that
|
|
||||||
are unable to be actually provisioned. These pseudo-users are
|
|
||||||
described below.
|
|
||||||
|
|
||||||
.. _api_set_service_endpoints:
|
|
||||||
|
|
||||||
Set Service Endpoints
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
Service endpoint information can be retrived using the _`Get Account
|
|
||||||
Details` API method.
|
|
||||||
|
|
||||||
This function allows setting values within this section for
|
|
||||||
the <account>, allowing the addition of new service end points
|
|
||||||
or updating existing ones by performing a POST to the URI
|
|
||||||
corresponding to the pseudo-user ".services".
|
|
||||||
|
|
||||||
The body of the POST request should contain a JSON dict with
|
|
||||||
the following format::
|
|
||||||
|
|
||||||
{"service_name": {"end_point_name": "end_point_value"}}
|
|
||||||
|
|
||||||
There can be multiple services and multiple end points in the
|
|
||||||
same call.
|
|
||||||
|
|
||||||
Any new services or end points will be added to the existing
|
|
||||||
set of services and end points. Any existing services with the
|
|
||||||
same service name will be merged with the new end points. Any
|
|
||||||
existing end points with the same end point name will have
|
|
||||||
their values updated.
|
|
||||||
|
|
||||||
The updated services dictionary will be returned on success.
|
|
||||||
|
|
||||||
Valid Responses:
|
|
||||||
|
|
||||||
* 200: Success
|
|
||||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
|
||||||
* 404: Account not found
|
|
||||||
* 5xx: Internal error
|
|
||||||
|
|
||||||
Example Request::
|
|
||||||
|
|
||||||
POST /auth/<api version>/<account>/.services HTTP/1.0
|
|
||||||
X-Auth-Admin-User: .super_admin
|
|
||||||
X-Auth-Admin-Key: swauthkey
|
|
||||||
|
|
||||||
{"storage": { "local": "<new endpoint>" }}
|
|
||||||
|
|
||||||
Example Curl Request::
|
|
||||||
|
|
||||||
curl -XPOST -D - https://<endpoint>/auth/v2/<account>/.services \
|
|
||||||
-H "X-Auth-Admin-User: .super_admin" \
|
|
||||||
-H "X-Auth-Admin-Key: swauthkey" --data-binary \
|
|
||||||
'{ "storage": { "local": "<new endpoint>" }}'
|
|
||||||
|
|
||||||
Example Response::
|
|
||||||
|
|
||||||
HTTP/1.1 200 OK
|
|
||||||
|
|
||||||
{"storage": {"default": "local", "local": "<new endpoint>" }}
|
|
||||||
|
|
||||||
Get Account Groups
------------------

Individual user group information can be retrieved using the `Get User Details`_ API method.

This function allows retrieving all group information for all users in
an existing account. This can be achieved using a GET action against
a user URI with the pseudo-user ".groups".

The JSON dictionary returned will be a "groups" dictionary similar to
that documented in the `Get User Details`_ method, but representing
the summary of all groups utilized by all active users in the account.

Valid Responses:

* 200: Success
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
* 404: Account not found
* 5xx: Internal error

Example Request::

    GET /auth/<api version>/<account>/.groups
    X-Auth-Admin-User: .super_admin
    X-Auth-Admin-Key: swauthkey

Example Curl Request::

    curl -D - https://<endpoint>/auth/v2/<account>/.groups \
    -H "X-Auth-Admin-User: .super_admin" \
    -H "X-Auth-Admin-Key: swauthkey"

Example Response::

    HTTP/1.1 200 OK

    { "groups": [ { "name": ".admin" },
                  { "name": "<account>" },
                  { "name": "<account>:user1" },
                  { "name": "<account>:user2" } ] }
@ -1,10 +0,0 @@
.. _swauth_authtypes_module:

swauth.authtypes
=================

.. automodule:: swauth.authtypes
    :members:
    :undoc-members:
    :show-inheritance:
    :noindex:
@ -1,233 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#
# Swauth documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 14 19:34:51 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

import swauth

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Swauth'
copyright = u'2010-2011, OpenStack, LLC'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(str(v) for v in swauth.version_info[:2])
# The full version, including alpha/beta/rc tags.
release = swauth.version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Swauthdoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'Swauth.tex', u'Swauth Documentation',
   u'OpenStack, LLC', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'swauth', u'Swauth Documentation',
     [u'OpenStack, LLC'], 1)
]
@ -1,159 +0,0 @@
----------------------
Implementation Details
----------------------

The Swauth system is a scalable authentication and authorization system that
uses Swift itself as its backing store. This section will describe how it
stores its data.

.. note::

    You can access Swauth's internal .auth account by using the account:user of
    .super_admin:.super_admin and the super admin key you have set in your
    configuration. Here's an example using `st` on a standard SAIO: ``st -A
    http://127.0.0.1:8080/auth/v1.0 -U .super_admin:.super_admin -K swauthkey
    stat``

At the topmost level, the auth system has its own Swift account it stores its
own account information within. This Swift account is known as
self.auth_account in the code and its name is in the format
self.reseller_prefix + ".auth". In this text, we'll refer to this account as
<auth_account>.

The containers whose names do not begin with a period represent the accounts
within the auth service. For example, the <auth_account>/test container would
represent the "test" account.

The objects within each container represent the users for that auth service
account. For example, the <auth_account>/test/bob object would represent the
user "bob" within the auth service account of "test". Each of these user
objects contains a JSON dictionary of the format::

    {"auth": "<auth_type>:<auth_value>", "groups": <groups_array>}

The `<auth_type>` specifies how the user key is encoded. The default is
`plaintext`, which saves the user's key in plaintext in the `<auth_value>`
field. The value `sha1` is supported as well, which stores the user's key as
a salted SHA1 hash. Note that using a one-way hash like SHA1 will likely
inhibit future use of key-signing request types, assuming such support is
added. The `<auth_type>` can be specified in the swauth section of the proxy
server's config file, along with the salt value, in the following way::

    auth_type = <auth_type>
    auth_type_salt = <salt-value>

Both fields are optional. auth_type defaults to `plaintext` and
auth_type_salt defaults to "swauthsalt". Additional auth types can be
implemented along with existing ones in the authtypes.py module.
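
For illustration, here is a minimal sketch (in Python 3 syntax) of how a
salted `sha1` credential is formed and checked, mirroring the `Sha1` class in
the authtypes.py module::

    import hashlib

    def sha1_encode(key, salt="swauthsalt"):
        # Hash of salt + key, stored as "sha1:<salt>$<hexdigest>".
        digest = hashlib.sha1((salt + key).encode("utf-8")).hexdigest()
        return "sha1:%s$%s" % (salt, digest)

    def sha1_match(key, creds, salt="swauthsalt"):
        # A stored credential matches when re-encoding the supplied key
        # reproduces it exactly.
        return sha1_encode(key, salt) == creds
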
The `<groups_array>` contains at least two groups. The first is a unique group
identifying that user, and its name is of the format `<account>:<user>`. The
second group is the `<account>` itself. Additional groups of `.admin` for
account administrators and `.reseller_admin` for reseller administrators may
exist. Here's an example user JSON dictionary::

    {"auth": "plaintext:testing",
     "groups": [{"name": "test:tester"}, {"name": "test"}, {"name": ".admin"}]}

To map an auth service account to a Swift storage account, the Service Account
Id string is stored in the `X-Container-Meta-Account-Id` header for the
<auth_account>/<account> container. To map back the other way, an
<auth_account>/.account_id/<account_id> object is created with the contents of
the corresponding auth service's account name.

Also, to support a future where the auth service will support multiple Swift
clusters or even multiple services for the same auth service account, an
<auth_account>/<account>/.services object is created with its contents having a
JSON dictionary of the format::

    {"storage": {"default": "local", "local": <url>}}

The "default" is always "local" right now, and "local" is always the single
Swift cluster URL; but in the future there can be more than one cluster with
various names instead of just "local", and the "default" key's value will
contain the primary cluster to use for that account. Also, there may be more
services in addition to the current "storage" service right now.

Here's an example .services dictionary at the moment::

    {"storage":
        {"default": "local",
         "local": "http://127.0.0.1:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}

But, here's an example of what the dictionary may look like in the future::

    {"storage":
        {"default": "dfw",
         "dfw": "http://dfw.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "ord": "http://ord.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "sat": "http://sat.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"},
     "servers":
        {"default": "dfw",
         "dfw": "http://dfw.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "ord": "http://ord.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
         "sat": "http://sat.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}

Lastly, the tokens themselves are stored as objects in the
`<auth_account>/.token_[0-f]` containers. The names of the objects are the
token strings themselves, such as `AUTH_tked86bbd01864458aa2bd746879438d5a`.
The exact `.token_[0-f]` container chosen is based on the final hexadecimal
digit of the token name, such as `.token_a` for the token
`AUTH_tked86bbd01864458aa2bd746879438d5a`. The contents of the token objects
are JSON dictionaries of the format::

    {"account": <account>,
     "user": <user>,
     "account_id": <account_id>,
     "groups": <groups_array>,
     "expires": <time.time() value>}

The `<account>` is the auth service account's name for that token. The `<user>`
is the user within the account for that token. The `<account_id>` is the
same as the `X-Container-Meta-Account-Id` for the auth service's account,
as described above. The `<groups_array>` is the user's groups, as described
above with the user object. The "expires" value indicates when the token is no
longer valid, as compared to Python's time.time() value.

Here's an example token object's JSON dictionary::

    {"account": "test",
     "user": "tester",
     "account_id": "AUTH_8980f74b1cda41e483cbe0a925f448a9",
     "groups": [{"name": "test:tester"}, {"name": "test"}, {"name": ".admin"}],
     "expires": 1291273147.1624689}
To easily map a user to an already issued token, the token name is stored in
the user object's `X-Object-Meta-Auth-Token` header.

Here is an example full listing of an <auth_account>::

    .account_id
        AUTH_2282f516-559f-4966-b239-b5c88829e927
        AUTH_f6f57a3c-33b5-4e85-95a5-a801e67505c8
        AUTH_fea96a36-c177-4ca4-8c7e-b8c715d9d37b
    .token_0
    .token_1
    .token_2
    .token_3
    .token_4
    .token_5
    .token_6
        AUTH_tk9d2941b13d524b268367116ef956dee6
    .token_7
    .token_8
        AUTH_tk93627c6324c64f78be746f1e6a4e3f98
    .token_9
    .token_a
    .token_b
    .token_c
    .token_d
    .token_e
        AUTH_tk0d37d286af2c43ffad06e99112b3ec4e
    .token_f
        AUTH_tk766bbde93771489982d8dc76979d11cf
    reseller
        .services
        reseller
    test
        .services
        tester
        tester3
    test2
        .services
        tester2
@ -1,142 +0,0 @@
.. Swauth documentation master file, created by
   sphinx-quickstart on Mon Feb 14 19:34:51 2011.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Swauth
======

Copyright (c) 2010-2012 OpenStack, LLC

An Auth Service for Swift as WSGI Middleware that uses Swift itself as a
backing store. Sphinx-built docs at: http://gholt.github.com/swauth/
Source available at: https://github.com/gholt/swauth

See also https://github.com/openstack/keystone for the standard OpenStack
auth service.

Overview
--------

Before discussing how to install Swauth within a Swift system, it might help
to first understand how Swauth works.

1. Swauth is middleware installed in the Swift Proxy's WSGI pipeline.

2. It intercepts requests to ``/auth/`` (by default).

3. It also uses Swift's `authorize callback <http://swift.openstack.org/development_auth.html>`_ and `acl callback <http://swift.openstack.org/misc.html#module-swift.common.middleware.acl>`_ features to authorize Swift requests.

4. Swauth will also make various internal calls to the Swift WSGI pipeline it's installed in to manipulate containers and objects within an ``AUTH_.auth`` (by default) Swift account. These containers and objects are what store account and user information.

5. Instead of #4, Swauth can be configured to call out to another remote Swauth to perform #4 on its behalf (using the swauth_remote config value).

6. When managing accounts and users with the various ``swauth-`` command line tools, these tools are actually just performing HTTP requests against the ``/auth/`` end point referenced in #2. You can make your own tools that use the same :ref:`API <api_top>`.

7. In the special case of creating a new account, Swauth will do its usual WSGI-internal requests as per #4 but will also call out to the Swift cluster to create the actual Swift account.

   a. This Swift cluster callout is an account PUT request to the URL defined by the ``default_swift_cluster`` config value.

   b. This callout end point is also saved when the account is created so that it can be given to the users of that account in the future.

   c. Sometimes, due to public/private network routing or firewalling, the URL Swauth should use internally is different from the URL Swauth should give the users later. That is why the ``default_swift_cluster`` config value can accept two URLs (the first is the one for users, the second is the one for Swauth itself).

   d. Once an account is created, the URL given to users for that account will not change, even if the ``default_swift_cluster`` config value changes. This is so that you can use multiple clusters with the same Swauth system; ``default_swift_cluster`` just points to the one where you want new users to go.

   e. You can change the stored URL for an account if need be with the ``swauth-set-account-service`` command line tool or a POST request (see :ref:`API <api_set_service_endpoints>`).


Install
-------

1) Install Swauth with ``sudo python setup.py install`` or ``sudo python
   setup.py develop`` or via whatever packaging system you may be using.

2) Alter your ``proxy-server.conf`` pipeline to have ``swauth`` instead of ``tempauth``:

   Was::

       [pipeline:main]
       pipeline = catch_errors cache tempauth proxy-server

   Change To::

       [pipeline:main]
       pipeline = catch_errors cache swauth proxy-server

3) Add to your ``proxy-server.conf`` the section for the Swauth WSGI filter::

       [filter:swauth]
       use = egg:swauth#swauth
       set log_name = swauth
       super_admin_key = swauthkey
       default_swift_cluster = <your setting as discussed below>

   The ``default_swift_cluster`` setting can be confusing.

   a. If you're using an all-in-one type configuration where everything will be run on the local host on port 8080, you can omit the ``default_swift_cluster`` setting completely and it will default to ``local#http://127.0.0.1:8080/v1``.

   b. If you're using a single Swift proxy you can just set ``default_swift_cluster = cluster_name#https://<public_ip>:<port>/v1`` and that URL will be given to users as well as used by Swauth internally. (Quick note: be sure ``http`` vs. ``https`` is set right depending on if you're using SSL.)

   c. If you're using multiple Swift proxies behind a load balancer, you'll probably want ``default_swift_cluster = cluster_name#https://<load_balancer_ip>:<port>/v1#http://127.0.0.1:<port>/v1`` so that Swauth gives out the first URL but uses the second URL internally. Remember to double-check the ``http`` vs. ``https`` settings for each of the URLs; they might be different if you're terminating SSL at the load balancer. (The parts of the cluster string are sketched just below.)

   Also see the ``proxy-server.conf-sample`` for more config options, such as the ability to have a remote Swauth in a multiple Swift cluster configuration.
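
   For illustration, the two- or three-part cluster string can be split with
   a minimal sketch like this (a hypothetical helper, not Swauth's actual
   parser)::

       def parse_cluster(value):
           # "name#public_url" or "name#public_url#private_url"; with no
           # private URL given, the public one is also used internally.
           parts = value.split('#')
           name, public = parts[0], parts[1]
           private = parts[2] if len(parts) > 2 else public
           return name, public, private
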
4) Be sure your Swift proxy allows account management in the ``proxy-server.conf``::

       [app:proxy-server]
       ...
       allow_account_management = true

   For greater security, you can leave this off any public proxies and just have one or two private proxies with it turned on.

5) Restart your proxy server: ``swift-init proxy reload``

6) Initialize the Swauth backing store in Swift: ``swauth-prep -K swauthkey``

7) Add an account/user: ``swauth-add-user -A http[s]://<host>:<port>/auth/ -K
   swauthkey -a test tester testing``

8) Ensure it works: ``swift -A http[s]://<host>:<port>/auth/v1.0 -U test:tester -K testing stat -v``


If anything goes wrong, it's best to start checking the proxy server logs. The
client command line utilities often don't get enough information to help. I
will often just ``tail -F`` the appropriate proxy log (``/var/log/syslog`` or
however you have it configured) and then run the Swauth command to see exactly
what requests are happening to try to determine where things fail.

As a general note, I find I occasionally just forget to reload the proxies
after a config change, so that's the first thing you might try. Or, if you
suspect the proxies aren't reloading properly, you might try ``swift-init
proxy stop``, ensure all the processes died, then ``swift-init proxy start``.

Also, it's quite common to get the ``/auth/v1.0`` vs. just ``/auth/`` URL paths
confused. The usual rule is: Swauth tools use just ``/auth/`` and Swift tools
use ``/auth/v1.0``.


Web Admin Install
-----------------

1) If you installed from packages, you'll need to cd to the webadmin directory
   the package installed. This is ``/usr/share/doc/python-swauth/webadmin``
   with the Lucid packages. If you installed from source, you'll need to cd to
   the webadmin directory in the source directory.

2) Upload the Web Admin files with ``swift -A http[s]://<host>:<port>/auth/v1.0
   -U .super_admin:.super_admin -K swauthkey upload .webadmin .``

3) Open ``http[s]://<host>:<port>/auth/`` in your browser.


Contents
--------

.. toctree::
    :maxdepth: 2

    license
    details
    swauth
    middleware
    api
    authtypes


Indices and tables
------------------

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
@ -1,225 +0,0 @@
.. _license:

*******
LICENSE
*******

::

    Copyright (c) 2010-2011 OpenStack, LLC

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    implied.
    See the License for the specific language governing permissions and
    limitations under the License.


                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

    1. Definitions.

       "License" shall mean the terms and conditions for use, reproduction,
       and distribution as defined by Sections 1 through 9 of this document.

       "Licensor" shall mean the copyright owner or entity authorized by
       the copyright owner that is granting the License.

       "Legal Entity" shall mean the union of the acting entity and all
       other entities that control, are controlled by, or are under common
       control with that entity. For the purposes of this definition,
       "control" means (i) the power, direct or indirect, to cause the
       direction or management of such entity, whether by contract or
       otherwise, or (ii) ownership of fifty percent (50%) or more of the
       outstanding shares, or (iii) beneficial ownership of such entity.

       "You" (or "Your") shall mean an individual or Legal Entity
       exercising permissions granted by this License.

       "Source" form shall mean the preferred form for making modifications,
       including but not limited to software source code, documentation
       source, and configuration files.

       "Object" form shall mean any form resulting from mechanical
       transformation or translation of a Source form, including but
       not limited to compiled object code, generated documentation,
       and conversions to other media types.

       "Work" shall mean the work of authorship, whether in Source or
       Object form, made available under the License, as indicated by a
       copyright notice that is included in or attached to the work
       (an example is provided in the Appendix below).

       "Derivative Works" shall mean any work, whether in Source or Object
       form, that is based on (or derived from) the Work and for which the
       editorial revisions, annotations, elaborations, or other modifications
       represent, as a whole, an original work of authorship. For the purposes
       of this License, Derivative Works shall not include works that remain
       separable from, or merely link (or bind by name) to the interfaces of,
       the Work and Derivative Works thereof.

       "Contribution" shall mean any work of authorship, including
       the original version of the Work and any modifications or additions
       to that Work or Derivative Works thereof, that is intentionally
       submitted to Licensor for inclusion in the Work by the copyright owner
       or by an individual or Legal Entity authorized to submit on behalf of
       the copyright owner. For the purposes of this definition, "submitted"
       means any form of electronic, verbal, or written communication sent
       to the Licensor or its representatives, including but not limited to
       communication on electronic mailing lists, source code control systems,
       and issue tracking systems that are managed by, or on behalf of, the
       Licensor for the purpose of discussing and improving the Work, but
       excluding communication that is conspicuously marked or otherwise
       designated in writing by the copyright owner as "Not a Contribution."

       "Contributor" shall mean Licensor and any individual or Legal Entity
       on behalf of whom a Contribution has been received by Licensor and
       subsequently incorporated within the Work.

    2. Grant of Copyright License. Subject to the terms and conditions of
       this License, each Contributor hereby grants to You a perpetual,
       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
       copyright license to reproduce, prepare Derivative Works of,
       publicly display, publicly perform, sublicense, and distribute the
       Work and such Derivative Works in Source or Object form.

    3. Grant of Patent License. Subject to the terms and conditions of
       this License, each Contributor hereby grants to You a perpetual,
       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
       (except as stated in this section) patent license to make, have made,
       use, offer to sell, sell, import, and otherwise transfer the Work,
       where such license applies only to those patent claims licensable
       by such Contributor that are necessarily infringed by their
       Contribution(s) alone or by combination of their Contribution(s)
       with the Work to which such Contribution(s) was submitted. If You
       institute patent litigation against any entity (including a
       cross-claim or counterclaim in a lawsuit) alleging that the Work
       or a Contribution incorporated within the Work constitutes direct
       or contributory patent infringement, then any patent licenses
       granted to You under this License for that Work shall terminate
       as of the date such litigation is filed.

    4. Redistribution. You may reproduce and distribute copies of the
       Work or Derivative Works thereof in any medium, with or without
       modifications, and in Source or Object form, provided that You
       meet the following conditions:

       (a) You must give any other recipients of the Work or
           Derivative Works a copy of this License; and

       (b) You must cause any modified files to carry prominent notices
           stating that You changed the files; and

       (c) You must retain, in the Source form of any Derivative Works
           that You distribute, all copyright, patent, trademark, and
           attribution notices from the Source form of the Work,
           excluding those notices that do not pertain to any part of
           the Derivative Works; and

       (d) If the Work includes a "NOTICE" text file as part of its
           distribution, then any Derivative Works that You distribute must
           include a readable copy of the attribution notices contained
           within such NOTICE file, excluding those notices that do not
           pertain to any part of the Derivative Works, in at least one
           of the following places: within a NOTICE text file distributed
           as part of the Derivative Works; within the Source form or
           documentation, if provided along with the Derivative Works; or,
           within a display generated by the Derivative Works, if and
           wherever such third-party notices normally appear. The contents
           of the NOTICE file are for informational purposes only and
           do not modify the License. You may add Your own attribution
           notices within Derivative Works that You distribute, alongside
           or as an addendum to the NOTICE text from the Work, provided
           that such additional attribution notices cannot be construed
           as modifying the License.

       You may add Your own copyright statement to Your modifications and
       may provide additional or different license terms and conditions
       for use, reproduction, or distribution of Your modifications, or
       for any such Derivative Works as a whole, provided Your use,
       reproduction, and distribution of the Work otherwise complies with
       the conditions stated in this License.

    5. Submission of Contributions. Unless You explicitly state otherwise,
       any Contribution intentionally submitted for inclusion in the Work
       by You to the Licensor shall be under the terms and conditions of
       this License, without any additional terms or conditions.
       Notwithstanding the above, nothing herein shall supersede or modify
       the terms of any separate license agreement you may have executed
       with Licensor regarding such Contributions.

    6. Trademarks. This License does not grant permission to use the trade
       names, trademarks, service marks, or product names of the Licensor,
       except as required for reasonable and customary use in describing the
       origin of the Work and reproducing the content of the NOTICE file.

    7. Disclaimer of Warranty. Unless required by applicable law or
       agreed to in writing, Licensor provides the Work (and each
       Contributor provides its Contributions) on an "AS IS" BASIS,
       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
       implied, including, without limitation, any warranties or conditions
       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
       PARTICULAR PURPOSE. You are solely responsible for determining the
       appropriateness of using or redistributing the Work and assume any
       risks associated with Your exercise of permissions under this License.

    8. Limitation of Liability. In no event and under no legal theory,
       whether in tort (including negligence), contract, or otherwise,
       unless required by applicable law (such as deliberate and grossly
       negligent acts) or agreed to in writing, shall any Contributor be
       liable to You for damages, including any direct, indirect, special,
       incidental, or consequential damages of any character arising as a
       result of this License or out of the use or inability to use the
       Work (including but not limited to damages for loss of goodwill,
       work stoppage, computer failure or malfunction, or any and all
       other commercial damages or losses), even if such Contributor
       has been advised of the possibility of such damages.

    9. Accepting Warranty or Additional Liability. While redistributing
       the Work or Derivative Works thereof, You may choose to offer,
       and charge a fee for, acceptance of support, warranty, indemnity,
       or other liability obligations and/or rights consistent with this
       License. However, in accepting such obligations, You may act only
       on Your own behalf and on Your sole responsibility, not on behalf
       of any other Contributor, and only if You agree to indemnify,
       defend, and hold each Contributor harmless for any liability
       incurred by, or claims asserted against, such Contributor by reason
       of your accepting any such warranty or additional liability.

    END OF TERMS AND CONDITIONS

    APPENDIX: How to apply the Apache License to your work.

       To apply the Apache License to your work, attach the following
       boilerplate notice, with the fields enclosed by brackets "[]"
       replaced with your own identifying information. (Don't include
       the brackets!) The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
       file or class name and description of purpose be included on the
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

    Copyright [yyyy] [name of copyright owner]

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
@ -1,9 +0,0 @@
.. _swauth_middleware_module:

swauth.middleware
=================

.. automodule:: swauth.middleware
    :members:
    :undoc-members:
    :show-inheritance:
@ -1,9 +0,0 @@
.. _swauth_module:

swauth
======

.. automodule:: swauth
    :members:
    :undoc-members:
    :show-inheritance:
@ -1,78 +0,0 @@
[DEFAULT]
# Standard from Swift

[pipeline:main]
# Standard from Swift, this is just an example of where to put swauth
pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server

[app:proxy-server]
# Standard from Swift, main point to note is the inclusion of
# allow_account_management = true (only for the proxy servers where you want to
# be able to create/delete accounts).
use = egg:swift#proxy
allow_account_management = true

[filter:swauth]
use = egg:swauth#swauth
# You can override the default log routing for this filter here:
# set log_name = swauth
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = False
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# reseller_prefix = AUTH
# If you wish to use a Swauth service on a remote cluster with this cluster:
# swauth_remote = http://remotehost:port/auth
# swauth_remote_timeout = 10
# When using swauth_remote, the rest of these settings have no effect.
#
# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, creating accounts, users, etc.
# auth_prefix = /auth/
# Cluster strings are of the format name#url where name is a short name for the
# Swift cluster and url is the url to the proxy server(s) for the cluster.
# default_swift_cluster = local#http://127.0.0.1:8080/v1
# You may also use the format name#url#url where the first url is the one
# given to users to access their account (public url) and the second is the one
# used by swauth itself to create and delete accounts (private url). This is
# useful when a load balancer url should be used by users, but swauth itself is
# behind the load balancer. Example:
# default_swift_cluster = local#https://public.com:8080/v1#http://private.com:8080/v1
# Number of seconds a newly issued token should be valid for, by default.
# token_life = 86400
# Maximum number of seconds a newly issued token can be valid for.
# max_token_life = <same as token_life>
# Specifies how the user key is stored. The default is 'plaintext', leaving the
# key unsecured but available for key-signing features if such are ever added.
# An alternative is 'sha1' which stores only a one-way hash of the key leaving
# it secure but unavailable for key-signing.
# auth_type = plaintext
# Used if the auth_type is sha1 or another method that can make use of a salt.
# auth_type_salt = swauthsalt
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
# Highly recommended to change this. If you comment this out, the Swauth
# administration features will be disabled for this proxy.
super_admin_key = swauthkey

[filter:ratelimit]
# Standard from Swift
use = egg:swift#ratelimit

[filter:cache]
# Standard from Swift
use = egg:swift#memcache

[filter:healthcheck]
# Standard from Swift
use = egg:swift#healthcheck

[filter:catch_errors]
# Standard from Swift
use = egg:swift#catch_errors
@ -1,30 +0,0 @@
# Translations template for swauth.
# Copyright (C) 2011 ORGANIZATION
# This file is distributed under the same license as the swauth project.
# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: swauth 1.0.1.dev\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
"POT-Creation-Date: 2011-05-26 10:35+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 0.9.4\n"

#: swauth/middleware.py:94
msgid "No super_admin_key set in conf file! Exiting."
msgstr ""

#: swauth/middleware.py:637
#, python-format
msgid ""
"ERROR: Exception while trying to communicate with "
"%(scheme)s://%(host)s:%(port)s/%(path)s"
msgstr ""
@ -1,23 +0,0 @@
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

[compile_catalog]
directory = locale
domain = swauth

[update_catalog]
domain = swauth
output_dir = locale
input_file = locale/swauth.pot

[extract_messages]
keywords = _ l_ lazy_gettext
mapping_file = babel.cfg
output_file = locale/swauth.pot
@ -1,89 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup, find_packages
from setuptools.command.sdist import sdist
import os
import subprocess
try:
    from babel.messages import frontend
except ImportError:
    frontend = None

from swauth import __version__ as version


class local_sdist(sdist):
    """Customized sdist hook - builds the ChangeLog file from VC first"""

    def run(self):
        if os.path.isdir('.bzr'):
            # We're in a bzr branch

            log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
                                       stdout=subprocess.PIPE)
            changelog = log_cmd.communicate()[0]
            with open("ChangeLog", "w") as changelog_file:
                changelog_file.write(changelog)
        sdist.run(self)


name = 'swauth'


cmdclass = {'sdist': local_sdist}


if frontend:
    cmdclass.update({
        'compile_catalog': frontend.compile_catalog,
        'extract_messages': frontend.extract_messages,
        'init_catalog': frontend.init_catalog,
        'update_catalog': frontend.update_catalog,
    })


setup(
    name=name,
    version=version,
    description='Swauth',
    license='Apache License (2.0)',
    author='OpenStack, LLC.',
    author_email='swauth@brim.net',
    url='https://github.com/gholt/swauth',
    packages=find_packages(exclude=['test_swauth', 'bin']),
    test_suite='nose.collector',
    cmdclass=cmdclass,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6',
        'Environment :: No Input/Output (Daemon)',
    ],
    install_requires=[],  # removed for better compat
    scripts=[
        'bin/swauth-add-account', 'bin/swauth-add-user',
        'bin/swauth-cleanup-tokens', 'bin/swauth-delete-account',
        'bin/swauth-delete-user', 'bin/swauth-list', 'bin/swauth-prep',
        'bin/swauth-set-account-service',
    ],
    entry_points={
        'paste.filter_factory': [
            'swauth=swauth.middleware:filter_factory',
        ],
    },
)
@ -1,23 +0,0 @@
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gettext


#: Version information (major, minor, revision[, 'dev']).
version_info = (1, 0, 9, 'dev')
#: Version string 'major.minor.revision'.
version = __version__ = ".".join(map(str, version_info))
gettext.install('swauth')
@ -1,103 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pablo Llopis 2011


"""
This module hosts available auth types for encoding and matching user keys.
For adding a new auth type, simply write a class that satisfies the following
conditions:

- For the class name, capitalize first letter only. This makes sure the user
  can specify an all-lowercase config option such as "plaintext" or "sha1".
  Swauth takes care of capitalizing the first letter before instantiating it.
- Write an encode(key) method that will take a single argument, the user's key,
  and return the encoded string. For plaintext, this would be
  "plaintext:<key>"
- Write a match(key, creds) method that will take two arguments: the user's
  key, and the user's retrieved credentials. Return a boolean value that
  indicates whether the match is True or False.

Note that, since some of the encodings will be hashes, swauth supports the
notion of salts. Thus, self.salt will be set to either a user-specified salt
value or to a default value.
"""

import hashlib


#: Maximum length any valid token should ever be.
MAX_TOKEN_LENGTH = 5000


class Plaintext(object):
    """
    Provides a plaintext auth type for encoding and matching user keys.

    This class must be all lowercase except for the first character, which
    must be capitalized. encode and match methods must be provided and are
    the only ones that will be used by swauth.
    """
    def encode(self, key):
        """
        Encodes a user key into a particular format. The result of this method
        will be used by swauth for storing user credentials.

        :param key: User's secret key
        :returns: A string representing user credentials
        """
        return "plaintext:%s" % key

    def match(self, key, creds):
        """
        Checks whether the user-provided key matches the user's credentials

        :param key: User-supplied key
        :param creds: User's stored credentials
        :returns: True if the supplied key is valid, False otherwise
        """
        return self.encode(key) == creds


class Sha1(object):
    """
    Provides a salted-SHA1 auth type for encoding and matching user keys.

    This class must be all lowercase except for the first character, which
    must be capitalized. encode and match methods must be provided and are
    the only ones that will be used by swauth.
    """
    def encode(self, key):
        """
        Encodes a user key into a particular format. The result of this method
        will be used by swauth for storing user credentials.

        :param key: User's secret key
        :returns: A string representing user credentials
        """
        enc_key = '%s%s' % (self.salt, key)
        enc_val = hashlib.sha1(enc_key).hexdigest()
        return "sha1:%s$%s" % (self.salt, enc_val)

    def match(self, key, creds):
        """
        Checks whether the user-provided key matches the user's credentials

        :param key: User-supplied key
        :param creds: User's stored credentials
        :returns: True if the supplied key is valid, False otherwise
        """
        return self.encode(key) == creds
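

# For illustration only: a hypothetical additional auth type written to the
# contract described in the module docstring above (an encode/match pair,
# with self.salt supplied by swauth). It is not part of swauth; the name and
# digest choice here are assumptions.
class Sha512(object):
    """
    Provides a salted-SHA512 auth type, following the same conventions as
    Plaintext and Sha1 above.
    """
    def encode(self, key):
        # Same salting scheme as Sha1, but with the stronger SHA-512 digest.
        enc_key = '%s%s' % (self.salt, key)
        enc_val = hashlib.sha512(enc_key).hexdigest()
        return "sha512:%s$%s" % (self.salt, enc_val)

    def match(self, key, creds):
        # A supplied key matches when re-encoding reproduces the stored value.
        return self.encode(key) == creds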
File diff suppressed because it is too large
@ -1,71 +0,0 @@
|
|||||||
import swift


MAJOR = None
MINOR = None
REVISION = None
FINAL = None


def parse(value):
    parts = value.split('.')
    if parts[-1].endswith('-dev'):
        final = False
        parts[-1] = parts[-1][:-4]
    else:
        final = True
    major = int(parts.pop(0))
    minor = int(parts.pop(0))
    if parts:
        revision = int(parts.pop(0))
    else:
        revision = 0
    return major, minor, revision, final


def newer_than(value):
    global MAJOR, MINOR, REVISION, FINAL
    major, minor, revision, final = parse(value)
    if MAJOR is None:
        MAJOR, MINOR, REVISION, FINAL = parse(swift.__version__)
    if MAJOR < major:
        return False
    elif MAJOR == major:
        if MINOR < minor:
            return False
        elif MINOR == minor:
            if REVISION < revision:
                return False
            elif REVISION == revision:
                if not FINAL or final:
                    return False
    return True


def run_tests():
    global MAJOR, MINOR, REVISION, FINAL
    MAJOR, MINOR, REVISION, FINAL = parse('1.3')
    assert(newer_than('1.2'))
    assert(newer_than('1.2.9'))
    assert(newer_than('1.3-dev'))
    assert(newer_than('1.3.0-dev'))
    assert(not newer_than('1.3'))
    assert(not newer_than('1.3.0'))
    assert(not newer_than('1.3.1-dev'))
    assert(not newer_than('1.3.1'))
    assert(not newer_than('1.4'))
    assert(not newer_than('2.0'))
    MAJOR, MINOR, REVISION, FINAL = parse('1.7.7-dev')
    assert(newer_than('1.6'))
    assert(newer_than('1.7'))
    assert(newer_than('1.7.6-dev'))
    assert(newer_than('1.7.6'))
    assert(not newer_than('1.7.7'))
    assert(not newer_than('1.7.8-dev'))
    assert(not newer_than('1.7.8'))
    assert(not newer_than('1.8.0'))
    assert(not newer_than('2.0'))


if __name__ == '__main__':
    run_tests()
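A sketch of how a caller might gate a feature on the installed Swift version with newer_than(); the import name swift_version is a placeholder, since the surrounding diff does not show this file's name:

# Hypothetical usage; the module name below is an assumption.
import swift_version

if swift_version.newer_than('1.8.0'):
    print "Swift is newer than 1.8.0"
else:
    print "Swift is 1.8.0 or older"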
@@ -1,6 +0,0 @@
# See http://code.google.com/p/python-nose/issues/detail?id=373
# The code below enables nosetests to work with i18n _() blocks

import __builtin__

setattr(__builtin__, '_', lambda x: x)
@@ -1,63 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pablo Llopis 2011

import unittest
from swauth import authtypes


class TestPlaintext(unittest.TestCase):

    def setUp(self):
        self.auth_encoder = authtypes.Plaintext()

    def test_plaintext_encode(self):
        enc_key = self.auth_encoder.encode('keystring')
        self.assertEquals('plaintext:keystring', enc_key)

    def test_plaintext_valid_match(self):
        creds = 'plaintext:keystring'
        match = self.auth_encoder.match('keystring', creds)
        self.assertEquals(match, True)

    def test_plaintext_invalid_match(self):
        creds = 'plaintext:other-keystring'
        match = self.auth_encoder.match('keystring', creds)
        self.assertEquals(match, False)


class TestSha1(unittest.TestCase):

    def setUp(self):
        self.auth_encoder = authtypes.Sha1()
        self.auth_encoder.salt = 'salt'

    def test_sha1_encode(self):
        enc_key = self.auth_encoder.encode('keystring')
        self.assertEquals('sha1:salt$d50dc700c296e23ce5b41f7431a0e01f69010f06',
                          enc_key)

    def test_sha1_valid_match(self):
        creds = 'sha1:salt$d50dc700c296e23ce5b41f7431a0e01f69010f06'
        match = self.auth_encoder.match('keystring', creds)
        self.assertEquals(match, True)

    def test_sha1_invalid_match(self):
        creds = 'sha1:salt$deadbabedeadbabedeadbabec0ffeebadc0ffeee'
        match = self.auth_encoder.match('keystring', creds)
        self.assertEquals(match, False)


if __name__ == '__main__':
    unittest.main()
File diff suppressed because it is too large
@@ -1,552 +0,0 @@
<html>
<head>
<style type="text/css">
body {font-family: sans-serif}
table {border-collapse: collapse}
td {padding-left: 1ex; padding-right: 1ex}
.account {color: #0000ff; padding-left: 3ex; cursor: pointer}
.add_account_heading {text-align: right; padding-right: 0}
.service {padding-left: 3ex; vertical-align: top}
.service_detail {padding-left: 0}
.user {color: #0000ff; padding-left: 3ex; cursor: pointer}
.group {padding-left: 3ex}
.add_user_heading {text-align: right; padding-right: 0}
.shadow_delement {color: #0000ff; cursor: pointer}
.shadow_felement {display: none}
#swauth {font-size: 200%; font-weight: bold; font-style: italic; margin: 0px; padding: 0px}
#creds_area {float: right}
#logout {color: #0000ff; padding-left: 3ex; cursor: pointer}
#refresh_accounts {color: #0000ff; padding-left: 1ex; cursor: pointer}
#add_account {color: #0000ff; padding-left: 1ex; padding-right: 1ex; cursor: pointer}
#add_account_title {padding-top: 1ex; padding-bottom: 1ex}
#add_account_cancel {color: #0000ff; padding-top: 1ex; padding-left: 3ex; cursor: pointer}
#add_account_save {color: #0000ff; text-align: right; padding-top: 1ex; padding-right: 3ex; cursor: pointer}
#account_area {background: #ddeeff}
#add_user {color: #0000ff; padding-left: 1ex; padding-right: 1ex; cursor: pointer}
#add_user_title {padding-top: 1ex; padding-bottom: 1ex}
#add_user_cancel {color: #0000ff; padding-top: 1ex; padding-left: 3ex; cursor: pointer}
#add_user_save {color: #0000ff; text-align: right; padding-top: 1ex; padding-right: 3ex; cursor: pointer}
#delete_account {color: #0000ff; text-align: right; margin-left: 45ex; padding-right: 1ex; cursor: pointer}
#user_area {background: #aaccff}
#delete_user {color: #0000ff; text-align: right; margin-left: 45ex; padding-right: 1ex; cursor: pointer}
#auth_view {display: none}
#auth_toggler {color: #0000ff; cursor: pointer}
#auth_update {color: #0000ff; padding-left: 1ex; cursor: pointer}
#auth_update_field {display: none}
</style>
<script type="text/javascript">
var request = null;
var creds_user = '';
var creds_key = '';
var creds_logged_in = true;
var account = '';
var user = '';
var account_selection = -1;
var user_selection = -1;
var swauth_area_selected_background = '#ddeeff';
var account_area_selected_background = '#aaccff';
var endpoints;

function get_bounds(element) {
    bounds = {};
    bounds.top = 0;
    bounds.left = 0;
    bounds.width = element.offsetWidth;
    bounds.height = element.offsetHeight;
    if (element.offsetParent) {
        do {
            bounds.top += element.offsetTop;
            bounds.left += element.offsetLeft;
        } while (element = element.offsetParent);
    }
    return bounds;
}

function shadow_edit(delement) {
    felement = document.getElementById('f' + delement.id.substring(1));
    felement.value = delement.innerHTML;
    delement.style.display = 'none';
    felement.style.display = 'inline';
    felement.focus();
}

function shadow_submitter(felement, evnt, func) {
    keycode = 0;
    if (window.event) {
        keycode = window.event.keyCode;
    } else if (evnt) {
        keycode = evnt.which;
    }
    if (keycode == 13) {
        func(felement);
        return false;
    }
    return true;
}

function shadow_escaper(felement, evnt) {
    keycode = 0;
    if (window.event) {
        keycode = window.event.keyCode;
    } else if (evnt) {
        keycode = evnt.which;
    }
    if (keycode == 27) {
        felement.style.display = 'none';
        document.getElementById('d' + felement.id.substring(1)).style.display = 'inline';
        return false;
    }
    return true;
}

function creds_clicked() {
    creds_area = document.getElementById('creds_area');
    if (creds_logged_in) {
        creds_user = '';
        creds_key = '';
        creds_area.innerHTML = 'User: <input id="creds_user" type="text" size="10" /> Key: <input id="creds_key" type="password" size="10" onkeypress="return creds_submitter(event)" />';
        document.getElementById('swauth_area').innerHTML = '';
        creds_logged_in = false;
        document.getElementById("creds_user").focus();
    } else {
        creds_user = document.getElementById('creds_user').value;
        creds_key = document.getElementById('creds_key').value;
        creds_area.innerHTML = '<div>Logged in as ' + creds_user + ' <span id="logout" onclick="creds_clicked()">Logout</span></div>';
        creds_logged_in = true;
        swauth_area_load();
    }
}

function creds_submitter(e) {
    keycode = 0;
    if (window.event) {
        keycode = window.event.keyCode;
    } else if (e) {
        keycode = e.which;
    }
    if (keycode == 13) {
        creds_clicked();
        return false;
    }
    return true;
}

function swauth_area_reset() {
    account_area_reset();
    document.getElementById('swauth_area').innerHTML = '';
}

function account_area_reset() {
    user_area_reset();
    element = document.getElementById('add_account')
    if (element) {
        element.style.background = 'none';
    }
    if (account_selection != -1) {
        document.getElementById('account_' + account_selection).style.background = 'none';
    }
    account = '';
    account_selection = -1;
    document.getElementById('account_area').innerHTML = '';
}

function user_area_reset() {
    element = document.getElementById('add_user')
    if (element) {
        element.style.background = 'none';
    }
    if (user_selection != -1) {
        document.getElementById('user_' + user_selection).style.background = 'none';
    }
    user = '';
    user_selection = -1;
    document.getElementById('user_area').innerHTML = '';
}

function swauth_area_load() {
    swauth_area_reset();
    request = new XMLHttpRequest();
    request.onreadystatechange = swauth_area_load2;
    request.open('GET', '/auth/v2/', true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send();
}

function swauth_area_load2() {
    if (request.readyState == 4) {
        swauth_area = document.getElementById('swauth_area');
        if (request.status >= 200 && request.status <= 299) {
            data = JSON.parse(request.responseText);
            content = '<table><tr><td>Accounts <span id="refresh_accounts" onclick="swauth_area_load()">Refresh</span> <span id="add_account" onclick="add_account()">Add</span></td></tr>';
            for (ix = 0; ix < data.accounts.length; ix++) {
                content += '<tr><td id="account_' + ix + '" onclick="account_area_load(' + ix + ')" class="account">' + data.accounts[ix].name + '</td></tr>';
            }
            content += '</table>';
            swauth_area.innerHTML = content;
        } else {
            swauth_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
        }
    }
}

function add_account() {
    account_area_reset();
    document.getElementById('add_account').style.background = swauth_area_selected_background;
    account_area = document.getElementById('account_area');
    account_area.innerHTML = '<table><tr><td id="add_account_title" colspan="2">New Account</td></tr><tr><td class="add_account_heading">Name</td><td><input id="add_account_name" type="text" size="20" /></td></tr><tr><td class="add_account_heading">Suffix</td><td><input id="add_account_suffix" type="text" size="20" /> (Optional)</td></tr><tr><td id="add_account_cancel" onclick="swauth_area_load()">Cancel</td><td id="add_account_save" onclick="add_account_save()">Add</td></tr></table>';
    bounds = get_bounds(document.getElementById('add_account'));
    account_area.style.position = 'absolute';
    account_area.style.top = bounds.top;
    account_area.style.left = bounds.left + bounds.width;
    document.getElementById("add_account_name").focus();
}

function add_account_save() {
    request = new XMLHttpRequest();
    request.onreadystatechange = add_account_save2;
    request.open('PUT', '/auth/v2/' + document.getElementById('add_account_name').value, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.setRequestHeader('X-Account-Suffix', document.getElementById('add_account_suffix').value);
    request.send();
}

function add_account_save2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            swauth_area_load();
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function account_area_load(account_index) {
    account_area_reset();
    account_element = document.getElementById('account_' + account_index);
    account_element.style.background = swauth_area_selected_background;
    account_selection = account_index;
    account = account_element.innerHTML;
    request = new XMLHttpRequest();
    request.onreadystatechange = account_area_load2;
    request.open('GET', '/auth/v2/' + account, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send();
}

function account_area_load2() {
    account_area = document.getElementById('account_area');
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            data = JSON.parse(request.responseText);
            content = '<div id="delete_account" onclick="delete_account()">Delete</div><table><tr><td>Account Id</td><td>' + data.account_id + '</td></tr></table><table><tr><td>Services</td></tr>';
            services = [];
            for (service in data.services) {
                services.push(service);
            }
            services.sort();
            for (ix = 0; ix < services.length; ix++) {
                content += '<tr><td class="service">' + services[ix] + '</td><td class="service_detail"><table>';
                if (data.services[services[ix]]['default']) {
                    content += '<tr><td>default</td><td><span id="d-' + services[ix] + '" class="shadow_delement" onclick="shadow_edit(this)">' + data.services[services[ix]]['default'] + '</span><input id="f-' + services[ix] + '" class="shadow_felement" type="text" size="40" onkeypress="return shadow_submitter(this, event, endpoint_save)" onkeydown="return shadow_escaper(this, event)" /></td></tr>';
                }
                endpoints = [];
                for (name in data.services[services[ix]]) {
                    if (name != 'default') {
                        endpoints.push(name);
                    }
                }
                endpoints.sort();
                for (iy = 0; iy < endpoints.length; iy++) {
                    content += '<tr><td>' + endpoints[iy] + '</td><td><span id="d' + iy + '-' + services[ix] + '" class="shadow_delement" onclick="shadow_edit(this)">' + data.services[services[ix]][endpoints[iy]] + '</span><input id="f' + iy + '-' + services[ix] + '" class="shadow_felement" type="text" size="40" onkeypress="return shadow_submitter(this, event, endpoint_save)" onkeydown="return shadow_escaper(this, event)" /></td></tr>';
                }
                content += '</table></td></tr>';
            }
            content += '</table><table><tr><td>Users <span id="add_user" onclick="add_user()">Add</span></td></tr>';
            for (ix = 0; ix < data.users.length; ix++) {
                content += '<tr><td id="user_' + ix + '" onclick="user_area_load(' + ix + ')" class="user">' + data.users[ix].name + '</td></tr>';
            }
            content += '</table>';
            account_area.innerHTML = content;
        } else {
            account_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
        }
        bounds = get_bounds(document.getElementById('account_' + account_selection));
        account_area.style.position = 'absolute';
        account_area.style.top = bounds.top;
        account_area.style.left = bounds.left + bounds.width;
    }
}

function endpoint_save(field) {
    service = field.id.substring(field.id.indexOf('-') + 1)
    index = field.id.substring(1, field.id.indexOf('-'))
    if (index) {
        endpoint = endpoints[index];
    } else {
        endpoint = 'default';
    }
    services = {};
    services[service] = {};
    services[service][endpoint] = field.value;
    request = new XMLHttpRequest();
    request.onreadystatechange = endpoint_save2;
    request.open('POST', '/auth/v2/' + account + '/.services', true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send(JSON.stringify(services));
}

function endpoint_save2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            account_area_load(account_selection);
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function add_user() {
    user_area_reset();
    document.getElementById('add_user').style.background = account_area_selected_background;
    user_area = document.getElementById('user_area');
    user_area.innerHTML = '<table><tr><td id="add_user_title" colspan="2">New User</td></tr><tr><td class="add_user_heading">Name</td><td><input id="add_user_name" type="text" size="20" /></td></tr><tr><td class="add_user_heading">Auth Key</td><td><input id="add_user_key" type="password" size="20" /></td></tr><tr><td class="add_user_heading">Account Admin</td><td><input id="add_user_admin" type="checkbox" /></td></tr><tr><td class="add_user_heading">Reseller Admin</td><td><input id="add_user_reseller_admin" type="checkbox" /></td></tr><tr><td id="add_user_cancel" onclick="add_user_cancel()">Cancel</td><td id="add_user_save" onclick="add_user_save()">Add</td></tr></table>';
    bounds = get_bounds(document.getElementById('add_user'));
    user_area.style.position = 'absolute';
    user_area.style.top = bounds.top;
    user_area.style.left = bounds.left + bounds.width;
    document.getElementById("add_user_name").focus();
}

function add_user_cancel() {
    document.getElementById('add_user').style.background = 'none';
    document.getElementById('user_area').innerHTML = '';
}

function add_user_save() {
    request = new XMLHttpRequest();
    request.onreadystatechange = add_user_save2;
    request.open('PUT', '/auth/v2/' + account + '/' + document.getElementById('add_user_name').value, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.setRequestHeader('X-Auth-User-Key', document.getElementById('add_user_key').value);
    // These inputs are checkboxes, so test .checked rather than .value.
    if (document.getElementById('add_user_admin').checked) {
        request.setRequestHeader('X-Auth-User-Admin', 'true');
    }
    if (document.getElementById('add_user_reseller_admin').checked) {
        request.setRequestHeader('X-Auth-User-Reseller-Admin', 'true');
    }
    request.send();
}

function add_user_save2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            account_area_load(account_selection);
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function delete_account() {
    request = new XMLHttpRequest();
    request.onreadystatechange = delete_account2;
    request.open('DELETE', '/auth/v2/' + account, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send();
}

function delete_account2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            swauth_area_load();
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function user_area_load(account_area_user_index) {
    user_area_reset();
    user_element = document.getElementById('user_' + account_area_user_index);
    user_element.style.background = account_area_selected_background;
    user_selection = account_area_user_index;
    user = user_element.innerHTML;
    request = new XMLHttpRequest();
    request.onreadystatechange = user_area_load2;
    request.open('GET', '/auth/v2/' + account + '/' + user, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send();
}

function user_area_load2() {
    user_area = document.getElementById('user_area');
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            data = JSON.parse(request.responseText);
            content = '<div id="delete_user" onclick="delete_user()">Delete</div><table><tr><td>Auth</td><td><span id="auth_toggler" onclick="auth_toggle()">Show</span> <span id="auth_view">' + data.auth + '</span></td><td><input id="auth_update_field" type="password" size="20" onkeypress="return auth_submitter(event)" onkeydown="return auth_escaper(event)" /> <span id="auth_update" onclick="auth_update()">Update</span></td></tr></table><table><tr><td>Groups</td></tr>';
            groups = [];
            for (ix = 0; ix < data.groups.length; ix++) {
                groups.push(data.groups[ix].name);
            }
            groups.sort();
            for (ix = 0; ix < groups.length; ix++) {
                content += '<tr><td class="group">' + groups[ix] + '</td></tr>';
            }
            content += '</table>';
            user_area.innerHTML = content;
        } else {
            user_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
        }
        bounds = get_bounds(document.getElementById('user_' + user_selection));
        user_area.style.position = 'absolute';
        user_area.style.top = bounds.top;
        user_area.style.left = bounds.left + bounds.width;
    }
}

function delete_user() {
    request = new XMLHttpRequest();
    request.onreadystatechange = delete_user2;
    request.open('DELETE', '/auth/v2/' + account + '/' + user, true);
    request.setRequestHeader('X-Auth-Admin-User', creds_user);
    request.setRequestHeader('X-Auth-Admin-Key', creds_key);
    request.send();
}

function delete_user2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            account_area_load(account_selection);
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function auth_toggle() {
    to_toggle = document.getElementById('auth_view');
    toggler = document.getElementById('auth_toggler');
    if (to_toggle.style.display && to_toggle.style.display != 'none') {
        toggler.innerHTML = 'Show';
        to_toggle.style.display = 'none';
    } else {
        toggler.innerHTML = 'Hide';
        to_toggle.style.display = 'inline';
    }
}

function auth_update() {
    field = document.getElementById('auth_update_field');
    trigger = document.getElementById('auth_update');
    if (field.style.display && field.style.display != 'none') {
        auth_save();
    } else {
        field.style.display = 'inline';
        trigger.style.display = 'none';
        field.focus();
    }
}

function auth_submitter(e) {
    keycode = 0;
    if (window.event) {
        keycode = window.event.keyCode;
    } else if (e) {
        keycode = e.which;
    }
    if (keycode == 13) {
        auth_save();
        return false;
    }
    return true;
}

function auth_escaper(e) {
    keycode = 0;
    if (window.event) {
        keycode = window.event.keyCode;
    } else if (e) {
        keycode = e.which;
    }
    if (keycode == 27) {
        field = document.getElementById('auth_update_field');
        field.value = '';
        field.style.display = 'none';
        document.getElementById('auth_update').style.display = 'inline';
        return false;
    }
    return true;
}

function auth_save() {
    document.getElementById('auth_update_field').style.display = 'none';
    if (document.getElementById('auth_update_field').value) {
        request = new XMLHttpRequest();
        request.onreadystatechange = auth_save2;
        request.open('GET', '/auth/v2/' + account + '/' + user, true);
        request.setRequestHeader('X-Auth-Admin-User', creds_user);
        request.setRequestHeader('X-Auth-Admin-Key', creds_key);
        request.send();
    }
}

function auth_save2() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            data = JSON.parse(request.responseText);
            request = new XMLHttpRequest();
            request.onreadystatechange = auth_save3;
            request.open('PUT', '/auth/v2/' + account_element.innerHTML + '/' + user_element.innerHTML, true);
            request.setRequestHeader('X-Auth-Admin-User', creds_user);
            request.setRequestHeader('X-Auth-Admin-Key', creds_key);
            request.setRequestHeader('X-Auth-User-Key', document.getElementById('auth_update_field').value);
            admin = false;
            reseller_admin = false;
            for (ix = 0; ix < data.groups.length; ix++) {
                if (data.groups[ix].name == '.admin') {
                    admin = true;
                } else if (data.groups[ix].name == '.reseller_admin') {
                    reseller_admin = true;
                }
            }
            if (admin) {
                request.setRequestHeader('X-Auth-User-Admin', 'true');
            }
            if (reseller_admin) {
                request.setRequestHeader('X-Auth-User-Reseller-Admin', 'true');
            }
            request.send();
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}

function auth_save3() {
    if (request.readyState == 4) {
        if (request.status >= 200 && request.status <= 299) {
            user_area_load(user_selection);
        } else {
            alert('Server returned status: ' + request.status + ' ' + request.statusText);
        }
    }
}
</script>
</head>
<body onload="creds_clicked()">
<form onsubmit="return false">
<div id="creds_area"></div>
<div id="swauth">Swauth</div>
<div id="swauth_area"></div>
<div id="account_area"></div>
<div id="user_area"></div>
</form>
</body>
</html>
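The page above drives swauth's admin REST API directly from the browser; for reference, a sketch of the same account-listing call from Python 2 (the endpoint, admin user, and key below are illustrative assumptions, not values from this diff):

# Sketch of the GET /auth/v2/ call issued by swauth_area_load().
import json
import urllib2

req = urllib2.Request('http://127.0.0.1:8080/auth/v2/')
req.add_header('X-Auth-Admin-User', '.super_admin')  # assumed admin account
req.add_header('X-Auth-Admin-Key', 'swauthkey')      # assumed admin key
resp = urllib2.urlopen(req)
for acct in json.loads(resp.read())['accounts']:
    print acct['name']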
@@ -1,38 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from swift.common.utils import readconf, config_true_value

config_file = {}
try:
    config_file = readconf("/etc/swift/proxy-server.conf",
                           section_name="filter:cache")
except SystemExit:
    pass

MEMCACHE_SERVERS = config_file.get('memcache_servers', None)

config_file = {}

try:
    config_file = readconf("/etc/swift/proxy-server.conf",
                           section_name="filter:kerbauth")
except SystemExit:
    pass

TOKEN_LIFE = int(config_file.get('token_life', 86400))
RESELLER_PREFIX = config_file.get('reseller_prefix', "AUTH_")
DEBUG_HEADERS = config_true_value(config_file.get('debug_headers', 'yes'))
@@ -1,12 +0,0 @@
<Location /cgi-bin/swift-auth>
    AuthType Kerberos
    AuthName "Swift Authentication"
    KrbMethodNegotiate On
    KrbMethodK5Passwd On
    KrbSaveCredentials On
    KrbServiceName HTTP/client.example.com
    KrbAuthRealms EXAMPLE.COM
    Krb5KeyTab /etc/httpd/conf/http.keytab
    KrbVerifyKDC Off
    Require valid-user
</Location>
@@ -1,70 +0,0 @@
#!/usr/bin/python

# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Requires the following command to be run:
# setsebool -P httpd_can_network_connect 1
# setsebool -P httpd_can_network_memcache 1

import os
import cgi
from swift.common.memcached import MemcacheRing
from time import time, ctime
from swiftkerbauth import MEMCACHE_SERVERS, TOKEN_LIFE, DEBUG_HEADERS
from swiftkerbauth.kerbauth_utils import get_remote_user, get_auth_data, \
    generate_token, set_auth_data, get_groups_from_username


def main():
    try:
        username = get_remote_user(os.environ)
    except RuntimeError:
        print "Status: 401 Unauthorized\n"
        print "Malformed REMOTE_USER"
        return

    if not MEMCACHE_SERVERS:
        print "Status: 500 Internal Server Error\n"
        print "Memcache not configured in /etc/swift/proxy-server.conf"
        return

    mc_servers = [s.strip() for s in MEMCACHE_SERVERS.split(',') if s.strip()]
    mc = MemcacheRing(mc_servers)

    token, expires, groups = get_auth_data(mc, username)

    if not token:
        token = generate_token()
        expires = time() + TOKEN_LIFE
        groups = get_groups_from_username(username)
        set_auth_data(mc, username, token, expires, groups)

    print "X-Auth-Token: %s" % token
    print "X-Storage-Token: %s" % token

    # For debugging.
    if DEBUG_HEADERS:
        print "X-Debug-Remote-User: %s" % username
        print "X-Debug-Groups: %s" % groups
        print "X-Debug-Token-Life: %ss" % TOKEN_LIFE
        print "X-Debug-Token-Expires: %s" % ctime(expires)

    print ""

try:
    print("Content-Type: text/html")
    main()
except:
    cgi.print_exception()
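This script depends on Apache (configured as shown above) placing the authenticated Kerberos principal in REMOTE_USER. A sketch of the parsing that get_remote_user(), defined in kerbauth_utils later in this diff, applies to it; the principal below is an example value:

# Example of the REMOTE_USER shape the Apache config produces.
import re

remote_user = 'auth_admin@EXAMPLE.COM'  # assumed example principal
matches = re.match('([^@]+)@.*', remote_user)
print matches.group(1)  # -> auth_admin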
@@ -1,463 +0,0 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
from time import time, ctime
from traceback import format_exc
from eventlet import Timeout
from urllib import unquote

from swift.common.swob import Request, Response
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
    HTTPSeeOther, HTTPUnauthorized, HTTPServerError

from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import cache_from_env, get_logger, \
    split_path, config_true_value
from gluster.swift.common.middleware.swiftkerbauth.kerbauth_utils import \
    get_auth_data, generate_token, \
    set_auth_data, run_kinit, get_groups_from_username


class KerbAuth(object):
    """
    Test authentication and authorization system.

    Add to your pipeline in proxy-server.conf, such as::

        [pipeline:main]
        pipeline = catch_errors cache kerbauth proxy-server

    Set account auto creation to true in proxy-server.conf::

        [app:proxy-server]
        account_autocreate = true

    And add a kerbauth filter section, such as::

        [filter:kerbauth]
        use = egg:swiftkerbauth#kerbauth

    See the proxy-server.conf-sample for more information.

    :param app: The next WSGI app in the pipeline
    :param conf: The dict of configuration values
    """

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='kerbauth')
        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
        if self.reseller_prefix and self.reseller_prefix[-1] != '_':
            self.reseller_prefix += '_'
        self.logger.set_statsd_prefix('kerbauth.%s' % (
            self.reseller_prefix if self.reseller_prefix else 'NONE',))
        self.auth_prefix = conf.get('auth_prefix', '/auth/')
        if not self.auth_prefix or not self.auth_prefix.strip('/'):
            self.logger.warning('Rewriting invalid auth prefix "%s" to '
                                '"/auth/" (Non-empty auth prefix path '
                                'is required)' % self.auth_prefix)
            self.auth_prefix = '/auth/'
        if self.auth_prefix[0] != '/':
            self.auth_prefix = '/' + self.auth_prefix
        if self.auth_prefix[-1] != '/':
            self.auth_prefix += '/'
        self.token_life = int(conf.get('token_life', 86400))
        self.auth_method = conf.get('auth_method', 'passive')
        self.debug_headers = config_true_value(
            conf.get('debug_headers', 'yes'))
        self.realm_name = conf.get('realm_name', None)
        self.allow_overrides = config_true_value(
            conf.get('allow_overrides', 't'))
        self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
        self.ext_authentication_url = conf.get('ext_authentication_url')
        if not self.ext_authentication_url:
            raise RuntimeError("Missing filter parameter ext_authentication_"
                               "url in /etc/swift/proxy-server.conf")

    def __call__(self, env, start_response):
        """
        Accepts a standard WSGI application call, authenticating the request
        and installing callback hooks for authorization and ACL header
        validation. For an authenticated request, REMOTE_USER will be set to
        a comma separated list of the user's groups.

        If the request matches the self.auth_prefix, the request will be
        routed through the internal auth request handler (self.handle).
        This is to handle granting tokens, etc.
        """
        if self.allow_overrides and env.get('swift.authorize_override', False):
            return self.app(env, start_response)
        if env.get('PATH_INFO', '').startswith(self.auth_prefix):
            return self.handle(env, start_response)
        token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
        if token and token.startswith(self.reseller_prefix):
            groups = self.get_groups(env, token)
            if groups:
                user = groups and groups.split(',', 1)[0] or ''
                trans_id = env.get('swift.trans_id')
                self.logger.debug('User: %s uses token %s (trans_id %s)' %
                                  (user, token, trans_id))
                env['REMOTE_USER'] = groups
                env['swift.authorize'] = self.authorize
                env['swift.clean_acl'] = clean_acl
                if '.reseller_admin' in groups:
                    env['reseller_request'] = True
            else:
                # Invalid token (may be expired)
                if self.auth_method == "active":
                    return HTTPSeeOther(
                        location=self.ext_authentication_url)(env,
                                                              start_response)
                elif self.auth_method == "passive":
                    self.logger.increment('unauthorized')
                    return HTTPUnauthorized()(env, start_response)
        else:
            # With a non-empty reseller_prefix, I would like to be called
            # back for anonymous access to accounts I know I'm the
            # definitive auth for.
            try:
                version, rest = split_path(env.get('PATH_INFO', ''),
                                           1, 2, True)
            except ValueError:
                version, rest = None, None
                self.logger.increment('errors')
            # Not my token, not my account, I can't authorize this request,
            # deny all is a good idea if not already set...
            if 'swift.authorize' not in env:
                env['swift.authorize'] = self.denied_response

        return self.app(env, start_response)

    def get_groups(self, env, token):
        """
        Get groups for the given token.

        :param env: The current WSGI environment dictionary.
        :param token: Token to validate and return a group string for.

        :returns: None if the token is invalid or a string containing a comma
                  separated list of groups the authenticated user is a member
                  of. The first group in the list is also considered a unique
                  identifier for that user.
        """
        groups = None
        memcache_client = cache_from_env(env)
        if not memcache_client:
            raise Exception('Memcache required')
        memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
        cached_auth_data = memcache_client.get(memcache_token_key)
        if cached_auth_data:
            expires, groups = cached_auth_data
            if expires < time():
                groups = None

        return groups

    def authorize(self, req):
        """
        Returns None if the request is authorized to continue or a standard
        WSGI response callable if not.

        Assumes that user groups are all lower case, which is true when Red
        Hat Enterprise Linux Identity Management is used.
        """
        try:
            version, account, container, obj = req.split_path(1, 4, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)

        if not account or not account.startswith(self.reseller_prefix):
            self.logger.debug("Account name: %s doesn't start with "
                              "reseller_prefix: %s."
                              % (account, self.reseller_prefix))
            return self.denied_response(req)

        user_groups = (req.remote_user or '').split(',')
        account_user = user_groups[1] if len(user_groups) > 1 else None
        # If the user is in the reseller_admin group for our prefix, he gets
        # full access to all accounts we manage. For the default reseller
        # prefix, the group name is auth_reseller_admin.
        admin_group = ("%sreseller_admin" % self.reseller_prefix).lower()
        if admin_group in user_groups and \
                account != self.reseller_prefix and \
                account[len(self.reseller_prefix)] != '.':
            req.environ['swift_owner'] = True
            return None

        # The "account" is part of the request URL, and already contains the
        # reseller prefix, like in "/v1/AUTH_vol1/pictures/pic1.png".
        if account.lower() in user_groups and \
                (req.method not in ('DELETE', 'PUT') or container):
            # If the user is admin for the account and is not trying to do an
            # account DELETE or PUT...
            req.environ['swift_owner'] = True
            self.logger.debug("User %s has admin authorizing."
                              % account_user)
            return None

        if (req.environ.get('swift_sync_key')
                and (req.environ['swift_sync_key'] ==
                     req.headers.get('x-container-sync-key', None))
                and 'x-timestamp' in req.headers):
            self.logger.debug("Allow request with container sync-key: %s."
                              % req.environ['swift_sync_key'])
            return None

        if req.method == 'OPTIONS':
            # allow OPTIONS requests to proceed as normal
            self.logger.debug("Allow OPTIONS request.")
            return None

        referrers, groups = parse_acl(getattr(req, 'acl', None))

        if referrer_allowed(req.referer, referrers):
            if obj or '.rlistings' in groups:
                self.logger.debug("Allow authorizing %s via referer ACL."
                                  % req.referer)
                return None

        for user_group in user_groups:
            if user_group in groups:
                self.logger.debug("User %s allowed in ACL: %s authorizing."
                                  % (account_user, user_group))
                return None

        return self.denied_response(req)

    def denied_response(self, req):
        """
        Returns a standard WSGI response callable with the status of 403 or
        401 depending on whether the REMOTE_USER is set or not.
        """
        if req.remote_user:
            self.logger.increment('forbidden')
            return HTTPForbidden(request=req)
        else:
            if self.auth_method == "active":
                return HTTPSeeOther(location=self.ext_authentication_url)
            elif self.auth_method == "passive":
                self.logger.increment('unauthorized')
                return HTTPUnauthorized(request=req)

    def handle(self, env, start_response):
        """
        WSGI entry point for auth requests (ones that match the
        self.auth_prefix).
        Wraps env in swob.Request object and passes it down.

        :param env: WSGI environment dictionary
        :param start_response: WSGI callable
        """
        try:
            req = Request(env)
            if self.auth_prefix:
                req.path_info_pop()
            req.bytes_transferred = '-'
            req.client_disconnect = False
            if 'x-storage-token' in req.headers and \
                    'x-auth-token' not in req.headers:
                req.headers['x-auth-token'] = req.headers['x-storage-token']
            return self.handle_request(req)(env, start_response)
        except (Exception, Timeout):
            print "EXCEPTION IN handle: %s: %s" % (format_exc(), env)
            self.logger.increment('errors')
            start_response('500 Server Error',
                           [('Content-Type', 'text/plain')])
            return ['Internal server error.\n']

    def handle_request(self, req):
        """
        Entry point for auth requests (ones that match the self.auth_prefix).
        Should return a WSGI-style callable (such as webob.Response).

        :param req: swob.Request object
        """
        req.start_time = time()
        handler = None
        try:
            version, account, user, _junk = req.split_path(1, 4, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        if version in ('v1', 'v1.0', 'auth'):
            if req.method == 'GET':
                handler = self.handle_get_token
        if not handler:
            self.logger.increment('errors')
            req.response = HTTPBadRequest(request=req)
        else:
            req.response = handler(req)
        return req.response

    def handle_get_token(self, req):
        """
        Handles the various `request for token and service end point(s)`
        calls. There are various formats to support the various auth servers
        in the past.

        "Active Mode" usage:
        All formats require GSS (Kerberos) authentication.

            GET <auth-prefix>/v1/<act>/auth
            GET <auth-prefix>/auth
            GET <auth-prefix>/v1.0

        On successful authentication, the response will have X-Auth-Token
        and X-Storage-Token set to the token to use with Swift.

        "Passive Mode" usage::

            GET <auth-prefix>/v1/<act>/auth
                X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
                X-Auth-Key: <key> or X-Storage-Pass: <key>
            GET <auth-prefix>/auth
                X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
                X-Auth-Key: <key> or X-Storage-Pass: <key>
            GET <auth-prefix>/v1.0
                X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
                X-Auth-Key: <key> or X-Storage-Pass: <key>

        Values should be url encoded, "act%3Ausr" instead of "act:usr" for
        example; however, for backwards compatibility the colon may be
        included unencoded.

        On successful authentication, the response will have X-Auth-Token
        and X-Storage-Token set to the token to use with Swift and
        X-Storage-URL set to the URL to the default Swift cluster to use.

        :param req: The swob.Request to process.
        :returns: swob.Response, 2xx on success with data set as explained
                  above.
        """
        # Validate the request info
        try:
            pathsegs = split_path(req.path_info, 1, 3, True)
        except ValueError:
            self.logger.increment('errors')
            return HTTPNotFound(request=req)
        if not ((pathsegs[0] == 'v1' and pathsegs[2] == 'auth')
                or pathsegs[0] in ('auth', 'v1.0')):
            return HTTPBadRequest(request=req)

        # Client is inside the domain
        if self.auth_method == "active":
            return HTTPSeeOther(location=self.ext_authentication_url)

        # Client is outside the domain
        elif self.auth_method == "passive":
            account, user, key = None, None, None
            # Extract user, account and key from request
            if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
                account = pathsegs[1]
                user = req.headers.get('x-storage-user')
                if not user:
                    user = unquote(req.headers.get('x-auth-user', ''))
                    if user:
                        if ':' not in user:
                            return HTTPUnauthorized(request=req)
                        else:
                            account2, user = user.split(':', 1)
                            if account != account2:
                                return HTTPUnauthorized(request=req)
                key = req.headers.get('x-storage-pass')
                if not key:
                    key = unquote(req.headers.get('x-auth-key', ''))
            elif pathsegs[0] in ('auth', 'v1.0'):
                user = unquote(req.headers.get('x-auth-user', ''))
                if not user:
                    user = req.headers.get('x-storage-user')
                if user:
                    if ':' not in user:
                        return HTTPUnauthorized(request=req)
                    else:
                        account, user = user.split(':', 1)
                key = unquote(req.headers.get('x-auth-key', ''))
                if not key:
                    key = req.headers.get('x-storage-pass')

            if not (account or user or key):
                # If all are not given, client may be part of the domain
                return HTTPSeeOther(location=self.ext_authentication_url)
            elif None in (key, user, account):
                # If only one or two of them is given, but not all
                return HTTPUnauthorized(request=req)

            # Run kinit on the user
            if self.realm_name and "@" not in user:
                user = user + "@" + self.realm_name
            try:
                ret = run_kinit(user, key)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    return HTTPServerError("kinit command not found\n")
            if ret != 0:
                self.logger.warning("Failed: kinit %s", user)
                if ret == -1:
                    self.logger.warning("Failed: kinit: Password has probably "
                                        "expired.")
                    return HTTPServerError("Kinit is taking too long.\n")
                return HTTPUnauthorized(request=req)
            self.logger.debug("kinit succeeded")

            if "@" in user:
                user = user.split("@")[0]

            # Check if user really belongs to the account
            groups_list = get_groups_from_username(user).strip().split(",")
            user_group = ("%s%s" % (self.reseller_prefix, account)).lower()
            reseller_admin_group = \
                ("%sreseller_admin" % self.reseller_prefix).lower()
            if user_group not in groups_list:
                # Check if user is reseller_admin. If not, return Unauthorized.
                # On AD/IdM server, auth_reseller_admin is a separate group
                if reseller_admin_group not in groups_list:
                    return HTTPUnauthorized(request=req)

            mc = cache_from_env(req.environ)
            if not mc:
                raise Exception('Memcache required')
            token, expires, groups = get_auth_data(mc, user)
            if not token:
                token = generate_token()
                expires = time() + self.token_life
                groups = get_groups_from_username(user)
                set_auth_data(mc, user, token, expires, groups)

            headers = {'X-Auth-Token': token,
                       'X-Storage-Token': token}

            if self.debug_headers:
                # Note: the original header name contained a stray trailing
                # colon ('X-Debug-Groups:'), fixed here.
                headers.update({'X-Debug-Remote-User': user,
                                'X-Debug-Groups': groups,
                                'X-Debug-Token-Life': self.token_life,
                                'X-Debug-Token-Expires': ctime(expires)})

            resp = Response(request=req, headers=headers)
            resp.headers['X-Storage-Url'] = \
                '%s/v1/%s%s' % (resp.host_url, self.reseller_prefix, account)
            return resp


def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_filter(app):
        return KerbAuth(app, conf)
    return auth_filter
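To illustrate the passive-mode flow described in handle_get_token's docstring, a sketch of a token request against a proxy running this filter; the address and the test:tester/testing credentials are assumptions, not values from this diff:

# Passive-mode token request sketch (Python 2, standard library only).
import urllib2

req = urllib2.Request('http://127.0.0.1:8080/auth/v1.0')
req.add_header('X-Auth-User', 'test:tester')  # assumed account:user
req.add_header('X-Auth-Key', 'testing')       # assumed key
resp = urllib2.urlopen(req)
print resp.info().getheader('X-Auth-Token')
print resp.info().getheader('X-Storage-Url')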
@@ -1,137 +0,0 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import random
import grp
import signal
from subprocess import Popen, PIPE
from time import time
from gluster.swift.common.middleware.swiftkerbauth \
    import TOKEN_LIFE, RESELLER_PREFIX


def get_remote_user(env):
    """Retrieve REMOTE_USER set by Apache from environment."""
    remote_user = env.get('REMOTE_USER', "")
    matches = re.match('([^@]+)@.*', remote_user)
    if not matches:
        raise RuntimeError("Malformed REMOTE_USER \"%s\"" % remote_user)
    return matches.group(1)


def get_auth_data(mc, username):
    """
    Returns the token, expiry time and groups for the user if they already
    exist in memcache. Returns None otherwise.

    :param mc: MemcacheRing object
    :param username: swift user
    """
    token, expires, groups = None, None, None
    memcache_user_key = '%s/user/%s' % (RESELLER_PREFIX, username)
    candidate_token = mc.get(memcache_user_key)
    if candidate_token:
        memcache_token_key = '%s/token/%s' % (RESELLER_PREFIX, candidate_token)
        cached_auth_data = mc.get(memcache_token_key)
        if cached_auth_data:
            expires, groups = cached_auth_data
            if expires > time():
                token = candidate_token
            else:
                expires, groups = None, None
    return (token, expires, groups)


def set_auth_data(mc, username, token, expires, groups):
    """
    Stores the following key value pairs on Memcache:
        (token, expires+groups)
        (user, token)
    """
    auth_data = (expires, groups)
    memcache_token_key = "%s/token/%s" % (RESELLER_PREFIX, token)
    mc.set(memcache_token_key, auth_data, time=TOKEN_LIFE)

    # Record the token with the user info for future use.
    memcache_user_key = '%s/user/%s' % (RESELLER_PREFIX, username)
    mc.set(memcache_user_key, token, time=TOKEN_LIFE)


def generate_token():
    """Generates a random token."""
    # We don't use uuid.uuid4() here because importing the uuid module
    # causes (harmless) SELinux denials in the audit log on RHEL 6. If this
    # is a security concern, a custom SELinux policy module could be
    # written to not log those denials.
    r = random.SystemRandom()
    token = '%stk%s' % \
        (RESELLER_PREFIX,
         ''.join(r.choice('abcdef0123456789') for x in range(32)))
    return token


def get_groups_from_username(username):
    """Return the groups to which the user belongs."""
    # Retrieve the numerical group IDs. We cannot list the group names
    # because group names from Active Directory may contain spaces, and
    # we wouldn't be able to split the list of group names into its
    # elements.
    p = Popen(['id', '-G', username], stdout=PIPE)
    # Consume the output before checking the exit status, which avoids a
    # potential deadlock on a full stdout pipe.
    (p_stdout, p_stderr) = p.communicate()
    if p.returncode != 0:
        raise RuntimeError("Failure running id -G for %s" % username)

    # Convert the group numbers into group names.
    groups = []
    for gid in p_stdout.strip().split(" "):
        groups.append(grp.getgrgid(int(gid))[0])

    # The first element of the list is considered a unique identifier
    # for the user. We add the username to accomplish this.
    if username in groups:
        groups.remove(username)
    groups = [username] + groups
    groups = ','.join(groups)
    return groups


def run_kinit(username, password):
    """Runs kinit command as a child process and returns the status code."""
    kinit = Popen(['kinit', username],
                  stdin=PIPE, stdout=PIPE, stderr=PIPE)
    kinit.stdin.write('%s\n' % password)

    # The following code handles a corner case where the Kerberos password
    # has expired and a prompt is displayed to enter new password. Ideally,
    # we would want to read from stdout but these are blocked reads. This is
    # a hack to kill the process if it's taking too long!

    class Alarm(Exception):
        pass

    def signal_handler(signum, frame):
        raise Alarm
|
|
||||||
# Set the signal handler and a 1-second alarm
|
|
||||||
signal.signal(signal.SIGALRM, signal_handler)
|
|
||||||
signal.alarm(1)
|
|
||||||
try:
|
|
||||||
kinit.wait() # Wait for the child to exit
|
|
||||||
signal.alarm(0) # Reset the alarm
|
|
||||||
return kinit.returncode # Exit status of child on graceful exit
|
|
||||||
except Alarm:
|
|
||||||
# Taking too long, kill and return error
|
|
||||||
kinit.kill()
|
|
||||||
return -1
|
|
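
The two memcache keys above form a token cache: user -> token, and token -> (expires, groups). A small, self-contained sketch (not part of the commit; the dict-backed store is a hypothetical stand-in for MemcacheRing) of that lookup path:

    from time import time

    RESELLER_PREFIX = 'AUTH'  # mirrors the module-level constant
    store = {}                # hypothetical stand-in for the memcache client

    token = '%stk%s' % (RESELLER_PREFIX, 'deadbeef' * 4)
    store['%s/user/%s' % (RESELLER_PREFIX, 'alice')] = token
    store['%s/token/%s' % (RESELLER_PREFIX, token)] = \
        (time() + 86400, 'alice,admins')

    # Lookup: username -> candidate token -> (expires, groups)
    candidate = store['%s/user/%s' % (RESELLER_PREFIX, 'alice')]
    expires, groups = store['%s/token/%s' % (RESELLER_PREFIX, candidate)]
    assert expires > time() and groups.split(',')[0] == 'alice'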
@@ -1,156 +0,0 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import errno
from ConfigParser import ConfigParser
from swift.common.ring import ring
from swift.common.utils import search_tree
from gluster.swift.common.Glusterfs import SWIFT_DIR

reseller_prefix = "AUTH_"
conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf')
if conf_files:
    conf_file = conf_files[0]

_conf = ConfigParser()
if conf_files and _conf.read(conf_file):
    if _conf.defaults().get("reseller_prefix", None):
        reseller_prefix = _conf.defaults().get("reseller_prefix")
    else:
        for key, value in _conf._sections.items():
            if value.get("reseller_prefix", None):
                reseller_prefix = value["reseller_prefix"]
                break

if not reseller_prefix.endswith('_'):
    reseller_prefix = reseller_prefix + '_'


class Ring(ring.Ring):

    def __init__(self, serialized_path, reload_time=15, ring_name=None):
        self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1',
                           'id': 0, 'meta': '', 'device': 'volume_not_in_ring',
                           'port': 6012}
        self.account_list = []

        if ring_name:
            _serialized_path = os.path.join(serialized_path,
                                            ring_name + '.ring.gz')
        else:
            _serialized_path = os.path.join(serialized_path)

        if not os.path.exists(_serialized_path):
            raise OSError(errno.ENOENT, 'No such file or directory',
                          '%s ring file does not exist, aborting '
                          'proxy-server start.' % _serialized_path)

        ring.Ring.__init__(self, serialized_path, reload_time, ring_name)

    def _get_part_nodes(self, part):
        seen_ids = set()

        try:
            account = self.account_list[part]
        except IndexError:
            return [self.false_node]
        else:
            nodes = []
            for dev in self._devs:
                if dev['device'] == account:
                    if dev['id'] not in seen_ids:
                        seen_ids.add(dev['id'])
                        nodes.append(dev)
            if not nodes:
                nodes = [self.false_node]
        return nodes

    def get_part_nodes(self, part):
        """
        Get the nodes that are responsible for the partition. If one
        node is responsible for more than one replica of the same
        partition, it will only appear in the output once.

        :param part: partition to get nodes for
        :returns: list of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        """
        return self._get_part_nodes(part)

    def get_part(self, account, container=None, obj=None):
        """
        Get the partition for an account/container/object.

        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: the partition number
        """
        if account.startswith(reseller_prefix):
            account = account.replace(reseller_prefix, '', 1)

        # Save the account name in the table.
        # This makes part the index of the location of the account
        # in the list.
        try:
            part = self.account_list.index(account)
        except ValueError:
            self.account_list.append(account)
            part = self.account_list.index(account)

        return part

    def get_nodes(self, account, container=None, obj=None):
        """
        Get the partition and nodes for an account/container/object.
        If a node is responsible for more than one replica, it will
        only appear in the output once.

        :param account: account name
        :param container: container name
        :param obj: object name
        :returns: a tuple of (partition, list of node dicts)

        Each node dict will have at least the following keys:

        ======  ===============================================================
        id      unique integer identifier amongst devices
        weight  a float of the relative weight of this device as compared to
                others; this indicates how many partitions the builder will try
                to assign to this device
        zone    integer indicating which zone the device is in; a given
                partition will not be assigned to multiple devices within the
                same zone
        ip      the ip address of the device
        port    the tcp port of the device
        device  the device's name on disk (sdb1, for example)
        meta    general use 'extra' field; for example: the online date, the
                hardware description
        ======  ===============================================================
        """
        part = self.get_part(account, container, obj)
        return part, self._get_part_nodes(part)

    def get_more_nodes(self, part):
        """
        Generator to get extra nodes for a partition for hinted handoff.

        :param part: partition to get handoff nodes for
        :returns: generator of node dicts

        See :func:`get_nodes` for a description of the node dicts.
        Should never be called in the swift UFO environment, so yield nothing
        """
        return []
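
The Ring subclass above replaces consistent hashing with a simple lookup table: the "partition" for an account is just that account's index in account_list, and every device whose 'device' field matches the account is returned as a node. A self-contained sketch (not part of the commit) of the mapping:

    account_list = []

    def get_part(account, reseller_prefix='AUTH_'):
        if account.startswith(reseller_prefix):
            account = account.replace(reseller_prefix, '', 1)
        try:
            return account_list.index(account)
        except ValueError:
            account_list.append(account)
            return account_list.index(account)

    assert get_part('AUTH_test') == 0
    assert get_part('AUTH_test2') == 1
    assert get_part('AUTH_test') == 0  # stable for repeated lookups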
@@ -16,37 +16,29 @@
 import os
 import stat
 import errno
+import random
 import logging
 from hashlib import md5
 from eventlet import sleep
 import cPickle as pickle
 from gluster.swift.common.exceptions import GlusterFileSystemIOError
 from swift.common.exceptions import DiskFileNoSpace
-from gluster.swift.common.fs_utils import do_getctime, do_getmtime, do_stat, \
-    do_listdir, do_walk, do_rmdir, do_log_rl, get_filename_from_fd, do_open, \
-    do_isdir, do_getsize, do_getxattr, do_setxattr, do_removexattr, do_read, \
-    do_close, do_dup, do_lseek, do_fstat
-from gluster.swift.common import Glusterfs
+from gluster.swift.common.fs_utils import do_stat, \
+    do_walk, do_rmdir, do_log_rl, get_filename_from_fd, do_open, \
+    do_getxattr, do_setxattr, do_removexattr, do_read, \
+    do_close, do_dup, do_lseek, do_fstat, do_fsync, do_rename
 
 X_CONTENT_TYPE = 'Content-Type'
 X_CONTENT_LENGTH = 'Content-Length'
 X_TIMESTAMP = 'X-Timestamp'
-X_PUT_TIMESTAMP = 'X-PUT-Timestamp'
 X_TYPE = 'X-Type'
 X_ETAG = 'ETag'
-X_OBJECTS_COUNT = 'X-Object-Count'
-X_BYTES_USED = 'X-Bytes-Used'
-X_CONTAINER_COUNT = 'X-Container-Count'
 X_OBJECT_TYPE = 'X-Object-Type'
 DIR_TYPE = 'application/directory'
-ACCOUNT = 'Account'
 METADATA_KEY = 'user.swift.metadata'
 MAX_XATTR_SIZE = 65536
-CONTAINER = 'container'
 DIR_NON_OBJECT = 'dir'
 DIR_OBJECT = 'marker_dir'
-TEMP_DIR = 'tmp'
-ASYNCDIR = 'async_pending'  # Keep in sync with swift.obj.server.ASYNCDIR
 FILE = 'file'
 FILE_TYPE = 'application/octet-stream'
 OBJECT = 'Object'
@@ -169,49 +161,6 @@ def clean_metadata(path_or_fd):
         key += 1
 
 
-def validate_container(metadata):
-    if not metadata:
-        logging.warn('validate_container: No metadata')
-        return False
-
-    if X_TYPE not in metadata.keys() or \
-            X_TIMESTAMP not in metadata.keys() or \
-            X_PUT_TIMESTAMP not in metadata.keys() or \
-            X_OBJECTS_COUNT not in metadata.keys() or \
-            X_BYTES_USED not in metadata.keys():
-        return False
-
-    (value, timestamp) = metadata[X_TYPE]
-    if value == CONTAINER:
-        return True
-
-    logging.warn('validate_container: metadata type is not CONTAINER (%r)',
-                 value)
-    return False
-
-
-def validate_account(metadata):
-    if not metadata:
-        logging.warn('validate_account: No metadata')
-        return False
-
-    if X_TYPE not in metadata.keys() or \
-            X_TIMESTAMP not in metadata.keys() or \
-            X_PUT_TIMESTAMP not in metadata.keys() or \
-            X_OBJECTS_COUNT not in metadata.keys() or \
-            X_BYTES_USED not in metadata.keys() or \
-            X_CONTAINER_COUNT not in metadata.keys():
-        return False
-
-    (value, timestamp) = metadata[X_TYPE]
-    if value == ACCOUNT:
-        return True
-
-    logging.warn('validate_account: metadata type is not ACCOUNT (%r)',
-                 value)
-    return False
-
-
 def validate_object(metadata):
     if not metadata:
         return False
@@ -232,86 +181,6 @@ def validate_object(metadata):
     return False
 
 
-def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
-                 bytes_used=0, obj_list=[]):
-    # strip the prefix off, also stripping the leading and trailing slashes
-    obj_path = path.replace(cont_path, '').strip(os.path.sep)
-
-    for obj_name in src_list:
-        # If it is not a reg_file then it is a directory.
-        if not reg_file and not Glusterfs._implicit_dir_objects:
-            # Now check if this is a dir object or a gratuitously created
-            # directory
-            metadata = \
-                read_metadata(os.path.join(cont_path, obj_path, obj_name))
-            if not dir_is_object(metadata):
-                continue
-
-        if obj_path:
-            obj_list.append(os.path.join(obj_path, obj_name))
-        else:
-            obj_list.append(obj_name)
-
-        object_count += 1
-
-        if reg_file and Glusterfs._do_getsize:
-            bytes_used += do_getsize(os.path.join(path, obj_name))
-            sleep()
-
-    return object_count, bytes_used
-
-
-def update_list(path, cont_path, dirs=[], files=[], object_count=0,
-                bytes_used=0, obj_list=[]):
-    if files:
-        object_count, bytes_used = _update_list(path, cont_path, files, True,
-                                                object_count, bytes_used,
-                                                obj_list)
-    if dirs:
-        object_count, bytes_used = _update_list(path, cont_path, dirs, False,
-                                                object_count, bytes_used,
-                                                obj_list)
-    return object_count, bytes_used
-
-
-def get_container_details(cont_path):
-    """
-    get container details by traversing the filesystem
-    """
-    bytes_used = 0
-    object_count = 0
-    obj_list = []
-
-    if do_isdir(cont_path):
-        for (path, dirs, files) in do_walk(cont_path):
-            object_count, bytes_used = update_list(path, cont_path, dirs,
-                                                   files, object_count,
-                                                   bytes_used, obj_list)
-
-            sleep()
-
-    return obj_list, object_count, bytes_used
-
-
-def get_account_details(acc_path):
-    """
-    Return container_list and container_count.
-    """
-    container_list = []
-    container_count = 0
-
-    if do_isdir(acc_path):
-        for name in do_listdir(acc_path):
-            if name.lower() == TEMP_DIR \
-                    or name.lower() == ASYNCDIR \
-                    or not do_isdir(os.path.join(acc_path, name)):
-                continue
-            container_count += 1
-            container_list.append(name)
-
-    return container_list, container_count
-
-
 def _read_for_etag(fp):
     etag = md5()
     while True:
@@ -381,49 +250,6 @@ def get_object_metadata(obj_path_or_fd):
     return metadata
 
 
-def _add_timestamp(metadata_i):
-    # At this point we have a simple key/value dictionary, turn it into
-    # key/(value,timestamp) pairs.
-    timestamp = 0
-    metadata = {}
-    for key, value_i in metadata_i.iteritems():
-        if not isinstance(value_i, tuple):
-            metadata[key] = (value_i, timestamp)
-        else:
-            metadata[key] = value_i
-    return metadata
-
-
-def get_container_metadata(cont_path):
-    objects = []
-    object_count = 0
-    bytes_used = 0
-    objects, object_count, bytes_used = get_container_details(cont_path)
-    metadata = {X_TYPE: CONTAINER,
-                X_TIMESTAMP: normalize_timestamp(
-                    do_getctime(cont_path)),
-                X_PUT_TIMESTAMP: normalize_timestamp(
-                    do_getmtime(cont_path)),
-                X_OBJECTS_COUNT: object_count,
-                X_BYTES_USED: bytes_used}
-    return _add_timestamp(metadata)
-
-
-def get_account_metadata(acc_path):
-    containers = []
-    container_count = 0
-    containers, container_count = get_account_details(acc_path)
-    metadata = {X_TYPE: ACCOUNT,
-                X_TIMESTAMP: normalize_timestamp(
-                    do_getctime(acc_path)),
-                X_PUT_TIMESTAMP: normalize_timestamp(
-                    do_getmtime(acc_path)),
-                X_OBJECTS_COUNT: 0,
-                X_BYTES_USED: 0,
-                X_CONTAINER_COUNT: container_count}
-    return _add_timestamp(metadata)
-
-
 def restore_metadata(path, metadata):
     meta_orig = read_metadata(path)
     if meta_orig:
@@ -444,18 +270,6 @@ def create_object_metadata(obj_path_or_fd):
     return restore_metadata(obj_path_or_fd, metadata)
 
 
-def create_container_metadata(cont_path):
-    metadata = get_container_metadata(cont_path)
-    rmd = restore_metadata(cont_path, metadata)
-    return rmd
-
-
-def create_account_metadata(acc_path):
-    metadata = get_account_metadata(acc_path)
-    rmd = restore_metadata(acc_path, metadata)
-    return rmd
-
-
 # The following dir_xxx calls should definitely be replaced
 # with a Metadata class to encapsulate their implementation.
 # :FIXME: For now we have them as functions, but we should
@@ -530,3 +344,41 @@ def rmobjdir(dir_path):
             raise
     else:
         return True
+
+
+def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
+    """
+    Ensure that a pickle file gets written to disk. The file is first written
+    to a tmp file location in the destination directory path, ensured it is
+    synced to disk, then moved to its final destination name.
+
+    This version takes advantage of Gluster's dot-prefix-dot-suffix naming
+    where a file named ".thefile.name.9a7aasv" is hashed to the same
+    Gluster node as "thefile.name". This ensures the renaming of a temp file
+    once written does not move it to another Gluster node.
+
+    :param obj: python object to be pickled
+    :param dest: path of final destination file
+    :param tmp: path to tmp to use, defaults to None (ignored)
+    :param pickle_protocol: protocol to pickle the obj with, defaults to 0
+    """
+    dirname = os.path.dirname(dest)
+    # Create destination directory
+    try:
+        os.makedirs(dirname)
+    except OSError as err:
+        if err.errno != errno.EEXIST:
+            raise
+    basename = os.path.basename(dest)
+    tmpname = '.' + basename + '.' + \
+        md5(basename + str(random.random())).hexdigest()
+    tmppath = os.path.join(dirname, tmpname)
+    with open(tmppath, 'wb') as fo:
+        pickle.dump(obj, fo, pickle_protocol)
+        # TODO: This flush() method call turns into a flush() system call.
+        # We'll need to wrap this as well, but we would do this by writing
+        # a context manager for our own open() method which returns an object
+        # in fo which makes the gluster API call.
+        fo.flush()
+        do_fsync(fo)
+    do_rename(tmppath, dest)
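
A quick illustration (not part of the commit) of the temp name write_pickle() derives from the destination: a dot prefix, the original basename, a dot, then an md5 hex suffix. The destination path below is hypothetical.

    import os
    import random
    from hashlib import md5

    dest = '/mnt/swiftonfile/test/async_pending/f00/abc123-0000000001.00000'
    basename = os.path.basename(dest)
    tmpname = '.' + basename + '.' + \
        md5(basename + str(random.random())).hexdigest()
    # e.g. .abc123-0000000001.00000.<32 hex chars>
    print(os.path.join(os.path.dirname(dest), tmpname))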
@@ -1,85 +0,0 @@
# Copyright (c) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Container Server for Gluster Swift UFO """

# Simply importing this monkey patches the constraint handling to fit our
# needs
import gluster.swift.common.constraints  # noqa

from swift.container import server
from gluster.swift.common.DiskDir import DiskDir
from swift.common.utils import public, timing_stats
from swift.common.exceptions import DiskFileNoSpace
from swift.common.swob import HTTPInsufficientStorage


class ContainerController(server.ContainerController):
    """
    Subclass of the container server's ContainerController which replaces the
    _get_container_broker() method so that we can use Gluster's DiskDir
    duck-type of the container DatabaseBroker object, and make the
    account_update() method a no-op (information is simply stored on disk and
    already updated by virtue of performing the file system operations
    directly).
    """

    def _get_container_broker(self, drive, part, account, container, **kwargs):
        """
        Overridden to provide the GlusterFS specific broker that talks to
        Gluster for the information related to servicing a given request
        instead of talking to a database.

        :param drive: drive that holds the container
        :param part: partition the container is in
        :param account: account name
        :param container: container name
        :returns: DiskDir object, a duck-type of DatabaseBroker
        """
        return DiskDir(self.root, drive, account, container, self.logger,
                       **kwargs)

    def account_update(self, req, account, container, broker):
        """
        Update the account server(s) with latest container info.

        For Gluster, this is just a no-op, since an account is just the
        directory holding all the container directories.

        :param req: swob.Request object
        :param account: account name
        :param container: container name
        :param broker: container DB broker object
        :returns: None.
        """
        return None

    @public
    @timing_stats()
    def PUT(self, req):
        try:
            return server.ContainerController.PUT(self, req)
        except DiskFileNoSpace:
            # As container=directory in gluster-swift, we might run out of
            # space or exceed quota when creating containers.
            drive = req.split_path(1, 1, True)
            return HTTPInsufficientStorage(drive=drive, request=req)


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI container server apps."""
    conf = global_conf.copy()
    conf.update(local_conf)
    return ContainerController(conf)
@@ -23,7 +23,6 @@ except ImportError:
 import random
 import logging
 import time
-from collections import defaultdict
 from socket import gethostname
 from hashlib import md5
 from eventlet import sleep
@@ -31,36 +30,32 @@ from greenlet import getcurrent
 from contextlib import contextmanager
 from gluster.swift.common.exceptions import AlreadyExistsAsFile, \
     AlreadyExistsAsDir
-from swift.common.utils import TRUE_VALUES, ThreadPool, config_true_value
+from swift.common.utils import TRUE_VALUES, ThreadPool, hash_path, \
+    normalize_timestamp
 from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
     DiskFileNoSpace, DiskFileDeviceUnavailable, DiskFileNotOpen, \
     DiskFileExpired
 from swift.common.swob import multi_range_iterator
 
 from gluster.swift.common.exceptions import GlusterFileSystemOSError
-from gluster.swift.common.Glusterfs import mount
 from gluster.swift.common.fs_utils import do_fstat, do_open, do_close, \
     do_unlink, do_chown, do_fsync, do_fchown, do_stat, do_write, do_read, \
     do_fadvise64, do_rename, do_fdatasync, do_lseek, do_mkdir
 from gluster.swift.common.utils import read_metadata, write_metadata, \
     validate_object, create_object_metadata, rmobjdir, dir_is_object, \
-    get_object_metadata
+    get_object_metadata, write_pickle
 from gluster.swift.common.utils import X_CONTENT_TYPE, \
     X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
     FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT, \
     X_ETAG, X_CONTENT_LENGTH
 from ConfigParser import ConfigParser, NoSectionError, NoOptionError
+from swift.obj.diskfile import DiskFileManager as SwiftDiskFileManager
+from swift.obj.diskfile import get_async_dir
 
 # FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
 # be back ported. See http://www.python.org/dev/peps/pep-0433/
 O_CLOEXEC = 02000000
 
-DEFAULT_DISK_CHUNK_SIZE = 65536
-DEFAULT_KEEP_CACHE_SIZE = (5 * 1024 * 1024)
-DEFAULT_MB_PER_SYNC = 512
-# keep these lower-case
-DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
 
 MAX_RENAME_ATTEMPTS = 10
 MAX_OPEN_ATTEMPTS = 10
@@ -183,13 +178,6 @@ def make_directory(full_path, uid, gid, metadata=None):
 
 _fs_conf = ConfigParser()
 if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
-    try:
-        _mkdir_locking = _fs_conf.get('DEFAULT', 'mkdir_locking', "no") \
-            in TRUE_VALUES
-        logging.warn("The option mkdir_locking has been deprecated and is"
-                     " no longer supported")
-    except (NoSectionError, NoOptionError):
-        pass
     try:
         _use_put_mount = _fs_conf.get('DEFAULT', 'use_put_mount', "no") \
             in TRUE_VALUES
@@ -223,7 +211,7 @@ def _adjust_metadata(metadata):
     return metadata
 
 
-class OnDiskManager(object):
+class DiskFileManager(SwiftDiskFileManager):
     """
     Management class for devices, providing common place for shared parameters
     and methods not provided by the DiskFile class (which primarily services
@@ -241,41 +229,33 @@ class OnDiskManager(object):
     :param logger: caller provided logger
     """
     def __init__(self, conf, logger):
-        self.logger = logger
-        self.disk_chunk_size = int(conf.get('disk_chunk_size',
-                                            DEFAULT_DISK_CHUNK_SIZE))
-        self.keep_cache_size = int(conf.get('keep_cache_size',
-                                            DEFAULT_KEEP_CACHE_SIZE))
-        self.bytes_per_sync = int(conf.get('mb_per_sync',
-                                           DEFAULT_MB_PER_SYNC)) * 1024 * 1024
-        self.devices = conf.get('devices', '/srv/node/')
-        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
-        threads_per_disk = int(conf.get('threads_per_disk', '0'))
-        self.threadpools = defaultdict(
-            lambda: ThreadPool(nthreads=threads_per_disk))
+        super(DiskFileManager, self).__init__(conf, logger)
+        self.reseller_prefix = \
+            conf.get('reseller_prefix', 'AUTH_').strip()  # Not used, currently
 
-    def _get_dev_path(self, device):
-        """
-        Return the path to a device, checking to see that it is a proper mount
-        point based on a configuration parameter.
-
-        :param device: name of target device
-        :returns: full path to the device, None if the path to the device is
-                  not a proper mount point.
-        """
-        if self.mount_check and not mount(self.devices, device):
-            dev_path = None
-        else:
-            dev_path = os.path.join(self.devices, device)
-        return dev_path
-
-    def get_diskfile(self, device, account, container, obj,
-                     **kwargs):
-        dev_path = self._get_dev_path(device)
+    def get_diskfile(self, device, partition, account, container, obj,
+                     policy_idx=0, **kwargs):
+        dev_path = self.get_dev_path(device)
         if not dev_path:
             raise DiskFileDeviceUnavailable()
         return DiskFile(self, dev_path, self.threadpools[device],
-                        account, container, obj, **kwargs)
+                        partition, account, container, obj,
+                        policy_idx=policy_idx, **kwargs)
+
+    def pickle_async_update(self, device, account, container, obj, data,
+                            timestamp, policy_idx):
+        # This method invokes swiftonfile's write_pickle method.
+        # Would patching just write_pickle and calling the parent method
+        # be better?
+        device_path = self.construct_dev_path(device)
+        async_dir = os.path.join(device_path, get_async_dir(policy_idx))
+        ohash = hash_path(account, container, obj)
+        self.threadpools[device].run_in_thread(
+            write_pickle,
+            data,
+            os.path.join(async_dir, ohash[-3:], ohash + '-' +
+                         normalize_timestamp(timestamp)),
+            os.path.join(device_path, 'tmp'))
+        self.logger.increment('async_pendings')
 
 
 class DiskFileWriter(object):
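
The async-pending path that pickle_async_update() composes is <device>/<async dir for policy>/<last 3 hex of hash>/<hash>-<timestamp>. Below is a self-contained sketch (not part of the commit); the stubs only mimic the output shape of Swift's hash_path() and get_async_dir(), and the device path is hypothetical.

    import os
    from hashlib import md5

    def stub_hash_path(account, container, obj):
        # Swift's hash_path() also mixes in cluster-wide hash prefix/suffix
        # values from swift.conf; this stub skips that.
        return md5('/%s/%s/%s' % (account, container, obj)).hexdigest()

    def stub_get_async_dir(policy_idx):
        # 'async_pending' for policy 0, 'async_pending-<N>' otherwise.
        if policy_idx == 0:
            return 'async_pending'
        return 'async_pending-%d' % policy_idx

    ohash = stub_hash_path('AUTH_test', 'cont', 'obj')
    print(os.path.join('/mnt/swiftonfile/test', stub_get_async_dir(2),
                       ohash[-3:], ohash + '-' + '0000000001.00000'))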
@@ -593,8 +573,10 @@ class DiskFile(object):
     :param uid: user ID disk object should assume (file or directory)
     :param gid: group ID disk object should assume (file or directory)
     """
-    def __init__(self, mgr, dev_path, threadpool, account, container, obj,
-                 uid=DEFAULT_UID, gid=DEFAULT_GID):
+    def __init__(self, mgr, dev_path, threadpool, partition,
+                 account=None, container=None, obj=None,
+                 policy_idx=0, uid=DEFAULT_UID, gid=DEFAULT_GID):
+        # The partition and policy_idx arguments are currently unused.
         self._mgr = mgr
         self._device_path = dev_path
         self._threadpool = threadpool or ThreadPool(nthreads=0)
@@ -607,7 +589,14 @@ class DiskFile(object):
         # Don't store a value for data_file until we know it exists.
         self._data_file = None
 
-        self._container_path = os.path.join(self._device_path, container)
+        # The account name contains the reseller_prefix, which is retained
+        # and not stripped. This conforms to Swift's behavior, where the
+        # account name entry in account DBs contains the reseller_prefix.
+        self._account = account
+        self._container = container
+
+        self._container_path = \
+            os.path.join(self._device_path, self._account, self._container)
         obj = obj.strip(os.path.sep)
         obj_path, self._obj = os.path.split(obj)
         if obj_path:
@@ -862,6 +851,13 @@ class DiskFile(object):
         :raises AlreadyExistsAsFile: if path or part of a path is not a \
                                      directory
         """
+        # Create /account/container directory structure on mount point root
+        try:
+            os.makedirs(self._container_path)
+        except OSError as err:
+            if err.errno != errno.EEXIST:
+                raise
+
         data_file = os.path.join(self._put_datadir, self._obj)
 
         # Assume the full directory path exists to the file already, and
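
With the account name retained, the on-disk layout gains one level: objects for a container now live under <mount>/<account>/<container>/... instead of <mount>/<container>/... A tiny illustration (not part of the commit; paths hypothetical):

    import os

    device_path = '/mnt/swiftonfile/test'
    old_style = os.path.join(device_path, 'mycontainer')
    new_style = os.path.join(device_path, 'AUTH_test', 'mycontainer')
    print(old_style)  # /mnt/swiftonfile/test/mycontainer
    print(new_style)  # /mnt/swiftonfile/test/AUTH_test/mycontainer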
@@ -15,8 +15,6 @@
 
 """ Object Server for Gluster for Swift """
 
-# Simply importing this monkey patches the constraint handling to fit our
-# needs
 import gluster.swift.common.constraints  # noqa
 from swift.common.swob import HTTPConflict
 from swift.common.utils import public, timing_stats
@@ -26,15 +24,7 @@ from swift.common.request_helpers import split_and_validate_path
 
 from swift.obj import server
 
-from gluster.swift.obj.diskfile import OnDiskManager
+from gluster.swift.obj.diskfile import DiskFileManager
 
-import os
-from swift.common.exceptions import ConnectionTimeout
-from swift.common.bufferedhttp import http_connect
-from eventlet import Timeout
-from swift.common.http import is_success
-from gluster.swift.common.ring import Ring
-from swift import gettext_ as _
 
 
 class ObjectController(server.ObjectController):
@@ -54,11 +44,10 @@ class ObjectController(server.ObjectController):
         """
         # Common on-disk hierarchy shared across account, container and object
         # servers.
-        self._ondisk_mgr = OnDiskManager(conf, self.logger)
-        self.swift_dir = conf.get('swift_dir', '/etc/swift')
+        self._diskfile_mgr = DiskFileManager(conf, self.logger)
 
     def get_diskfile(self, device, partition, account, container, obj,
-                     **kwargs):
+                     policy_idx, **kwargs):
         """
         Utility method for instantiating a DiskFile object supporting a given
         REST API.
@@ -67,108 +56,15 @@ class ObjectController(server.ObjectController):
         DiskFile class would simply over-ride this method to provide that
         behavior.
         """
-        return self._ondisk_mgr.get_diskfile(device, account, container, obj,
-                                             **kwargs)
+        return self._diskfile_mgr.get_diskfile(
+            device, partition, account, container, obj, policy_idx, **kwargs)
 
-    def container_update(self, *args, **kwargs):
-        """
-        Update the container when objects are updated.
-
-        For Gluster, this is just a no-op, since a container is just the
-        directory holding all the objects (sub-directory hierarchy of files).
-        """
-        return
-
-    def get_object_ring(self):
-        if hasattr(self, 'object_ring'):
-            if not self.object_ring:
-                self.object_ring = Ring(self.swift_dir, ring_name='object')
-        else:
-            self.object_ring = Ring(self.swift_dir, ring_name='object')
-        return self.object_ring
-
-    def async_update(self, op, account, container, obj, host, partition,
-                     contdevice, headers_out, objdevice):
-        """
-        In OpenStack Swift, this method is called by:
-            * container_update (a no-op in gluster-swift)
-            * delete_at_update (to PUT objects into .expiring_objects account)
-
-        Swift's version of async_update only sends the request to the
-        container-server to PUT the object. The container-server calls the
-        container_update method, which makes an entry for the object in its
-        database. No actual object is created on disk.
-
-        But in gluster-swift, container_update is a no-op, so we'll
-        have to PUT an actual object. We override async_update to create a
-        container first and then the corresponding "tracker object" which
-        tracks expired objects scheduled for deletion.
-        """
-
-        headers_out['user-agent'] = 'obj-server %s' % os.getpid()
-        if all([host, partition, contdevice]):
-            # PUT the container. Send request directly to container-server
-            container_path = '/%s/%s' % (account, container)
-            try:
-                with ConnectionTimeout(self.conn_timeout):
-                    ip, port = host.rsplit(':', 1)
-                    conn = http_connect(ip, port, contdevice, partition, op,
-                                        container_path, headers_out)
-                with Timeout(self.node_timeout):
-                    response = conn.getresponse()
-                    response.read()
-                    if not is_success(response.status):
-                        self.logger.error(_(
-                            'async_update : '
-                            'ERROR Container update failed :%(status)d '
-                            'response from %(ip)s:%(port)s/%(dev)s'),
-                            {'status': response.status, 'ip': ip, 'port': port,
-                             'dev': contdevice})
-                        return
-            except (Exception, Timeout):
-                self.logger.exception(_(
-                    'async_update : '
-                    'ERROR Container update failed :%(ip)s:%(port)s/%(dev)s'),
-                    {'ip': ip, 'port': port, 'dev': contdevice})
-
-        # PUT the tracker object. Send request directly to object-server
-        object_path = '/%s/%s/%s' % (account, container, obj)
-        headers_out['Content-Length'] = 0
-        headers_out['Content-Type'] = 'text/plain'
-        try:
-            with ConnectionTimeout(self.conn_timeout):
-                # FIXME: Assuming that get_nodes returns a single node
-                part, nodes = self.get_object_ring().get_nodes(account,
-                                                               container,
-                                                               obj)
-                ip = nodes[0]['ip']
-                port = nodes[0]['port']
-                objdevice = nodes[0]['device']
-                conn = http_connect(ip, port, objdevice, partition, op,
-                                    object_path, headers_out)
-            with Timeout(self.node_timeout):
-                response = conn.getresponse()
-                response.read()
-                if is_success(response.status):
-                    return
-                else:
-                    self.logger.error(_(
-                        'async_update : '
-                        'ERROR Object PUT failed : %(status)d '
-                        'response from %(ip)s:%(port)s/%(dev)s'),
-                        {'status': response.status, 'ip': ip, 'port': port,
-                         'dev': objdevice})
-        except (Exception, Timeout):
-            self.logger.exception(_(
-                'async_update : '
-                'ERROR Object PUT failed :%(ip)s:%(port)s/%(dev)s'),
-                {'ip': ip, 'port': port, 'dev': objdevice})
-        return
 
     @public
     @timing_stats()
     def PUT(self, request):
         try:
+            server.check_object_creation = \
+                gluster.swift.common.constraints.sof_check_object_creation
             return server.ObjectController.PUT(self, request)
         except (AlreadyExistsAsFile, AlreadyExistsAsDir):
             device = \
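
The two lines added to PUT() rebind a module-level name in swift.obj.server so that later lookups inside Swift's PUT path use swiftonfile's constraint check. A generic, self-contained sketch (not part of the commit) of that monkey-patch pattern; all names here are hypothetical:

    # Stands in for the patched module, e.g. swift.obj.server
    def stock_check(req):
        return 'stock check'

    check_object_creation = stock_check

    def handle_put(req):
        # The global is looked up at call time, so rebinding takes effect.
        return check_object_creation(req)

    def custom_check(req):
        return 'project-specific check'

    check_object_creation = custom_check
    print(handle_put(None))  # project-specific check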
@@ -1,31 +0,0 @@
# Copyright (c) 2012-2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Simply importing this monkey patches the constraint handling to fit our
# needs
import gluster.swift.common.constraints  # noqa

from swift.proxy.server import Application, mimetypes  # noqa
from swift.proxy.controllers import AccountController  # noqa
from swift.proxy.controllers import ObjectController  # noqa
from swift.proxy.controllers import ContainerController  # noqa


def app_factory(global_conf, **local_conf):  # noqa
    """paste.deploy app factory for creating WSGI proxy apps."""
    conf = global_conf.copy()
    conf.update(local_conf)
    return Application(conf)
@@ -1 +0,0 @@
Subproject commit f310006fae1af991097eee5929a1c73051eb1e00
setup.py (20 lines changed)
@@ -41,31 +41,11 @@ setup(
     ],
     install_requires=[],
     scripts=[
-        'bin/gluster-swift-gen-builders',
         'bin/gluster-swift-print-metadata',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-add-account',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-add-user',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-cleanup-tokens',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-delete-account',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-delete-user',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-list',
-        'gluster/swift/common/middleware/gswauth/bin/gswauth-prep',
-        'gluster/swift/common/middleware/gswauth/bin/'
-        'gswauth-set-account-service',
-
     ],
     entry_points={
         'paste.app_factory': [
-            'proxy=gluster.swift.proxy.server:app_factory',
             'object=gluster.swift.obj.server:app_factory',
-            'container=gluster.swift.container.server:app_factory',
-            'account=gluster.swift.account.server:app_factory',
-        ],
-        'paste.filter_factory': [
-            'gswauth=gluster.swift.common.middleware.gswauth.swauth.'
-            'middleware:filter_factory',
-            'kerbauth=gluster.swift.common.middleware.'
-            'swiftkerbauth.kerbauth:filter_factory',
         ],
     },
 )
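
After this change the package ships a single paste.deploy app entry point. A small sketch (not part of the commit) of how "use = egg:gluster_swift#object" in a server config resolves through that entry point; it assumes the gluster_swift package is installed:

    import pkg_resources

    entry_map = pkg_resources.get_entry_map('gluster_swift')
    ep = entry_map['paste.app_factory']['object']
    print(ep)  # object = gluster.swift.obj.server:app_factory
    app_factory = ep.load()  # imports and returns the factory callable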
@@ -53,12 +53,12 @@ def get_config(section_name=None, defaults=None):
     :param section_name: the section to read (all sections if not defined)
     :param defaults: an optional dictionary namespace of defaults
     """
-    config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
-                                 '/etc/swift/test.conf')
     config = {}
     if defaults is not None:
         config.update(defaults)
 
+    config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
+                                 '/etc/swift/test.conf')
     try:
         config = readconf(config_file, section_name)
     except SystemExit:
@@ -1,32 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = true
bind_port = 6012
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = account-server

[app:account-server]
use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off
@@ -1,35 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = true
bind_port = 6011
#
# Override swift's default behaviour for fallocate.
disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1

[pipeline:main]
pipeline = container-server

[app:container-server]
use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = off

#enable object versioning for functional test
allow_versions = on
@@ -1,19 +0,0 @@
[DEFAULT]
#
# IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost

# Performance optimization parameter. When turned off, the filesystem will
# see a reduced number of stat calls, resulting in substantially faster
# response time for GET and HEAD container requests on containers with large
# numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works
# fine.
#
# *** Keep on for Functional Tests ***
accurate_size_in_listing = on

# *** Keep on for Functional Tests ***
container_update_object_count = on
account_update_container_count = on
@@ -1,17 +0,0 @@
[DEFAULT]

[object-expirer]
# auto_create_account_prefix = .

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:swift#proxy

[filter:cache]
use = egg:swift#memcache
memcache_servers = 127.0.0.1:11211

[filter:catch_errors]
use = egg:swift#catch_errors
@@ -1,48 +0,0 @@
[DEFAULT]
devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = true
bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value.
workers = 1
# Override swift's default behaviour for fallocate.
disable_fallocate = true

[pipeline:main]
pipeline = object-server

[app:object-server]
use = egg:gluster_swift#object
user = root
log_facility = LOG_LOCAL2
log_level = WARN
#
# For performance, after ensuring things are running in a stable manner, you
# can turn off normal request logging for the object server to reduce the
# per-request overhead and unclutter the log files. Warnings and errors will
# still be logged.
log_requests = off
#
# Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point
# for tuning this value.
disk_chunk_size = 65536
#
# Adjust this value to match whatever is set for the disk_chunk_size initially.
# This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556
@ -1,78 +0,0 @@
|
|||||||
[DEFAULT]
bind_port = 8080
user = root
# Consider using 1 worker per CPU
workers = 1

[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache gswauth proxy-logging proxy-server

[app:proxy-server]
use = egg:gluster_swift#proxy
log_facility = LOG_LOCAL1
log_level = WARN
# The API allows for account creation and deletion, but since Gluster/Swift
# automounts a Gluster volume for a given account, there is no way to create
# or delete an account. So leave this off.
allow_account_management = false
account_autocreate = true
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
# of the entire object given that all metadata is stored in the object
# extended attributes (no .meta file used after creation) and no container
# sync feature is present.
object_post_as_copy = false
# Only need to recheck the account exists once a day
recheck_account_existence = 86400
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Timeout clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
# stripe width (not stripe element size) of your storage volume is a good
# starting point. See below for sizing information.
object_chunk_size = 65536
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
# be queued to the object server for processing. Given one proxy server worker
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worst case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10

[filter:catch_errors]
use = egg:swift#catch_errors

[filter:proxy-logging]
use = egg:swift#proxy_logging

[filter:healthcheck]
use = egg:swift#healthcheck

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

[filter:gswauth]
use = egg:gluster_swift#gswauth
set log_name = gswauth
super_admin_key = gswauthkey
metadata_volume = gsmetadata

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211
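
The worst-case figure quoted in the put_queue_depth comment above is easy to verify; a short Python sketch (illustrative arithmetic only, using the default values named in that comment):

# Worst-case memory for queued PUT buffers in one proxy worker.
put_queue_depth = 10
object_chunk_size = 65536   # bytes per queued buffer
max_clients = 1024          # connections one worker can handle
worst_case = put_queue_depth * object_chunk_size * max_clients
print("%d bytes (~%d MiB)" % (worst_case, worst_case // 2 ** 20))  # ~640 MiB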
@ -1,85 +0,0 @@
[DEFAULT]

[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = gluster

# The swift-constraints section sets the basic constraints on data
# saved in the swift cluster.

[swift-constraints]

# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to less than 1 MiB will cause
# some tests to fail.
# Default is 1 TiB (2**40 bytes)
max_file_size = 1099511627776

# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.
#max_meta_name_length = 128

# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value
#max_meta_value_length = 256

# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object
#max_meta_count = 90

# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)
#max_meta_overall_size = 4096

# max_object_name_length is the max number of bytes in the utf8 encoding of an
# object name: Gluster FS can handle much longer file names, but the length
# between the slashes of the URL is handled below. Remember that most web
# clients can't handle anything greater than 2048, and those that do are
# rather clumsy.
max_object_name_length = 2048

# max_object_name_component_length (GlusterFS) is the max number of bytes in
# the utf8 encoding of an object name component (the part between the
# slashes); this is a limit imposed by the underlying file system (for XFS it
# is 255 bytes).
max_object_name_component_length = 255

# container_listing_limit is the default (and max) number of items
# returned for a container listing request
#container_listing_limit = 10000

# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000

# max_account_name_length is the max number of bytes in the utf8 encoding of
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
# size as max_object_name_component_length above.
max_account_name_length = 255

# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name: Gluster FS Filename limit (XFS limit?), must be the
# same size as max_object_name_component_length above.
max_container_name_length = 255
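
The two object-name limits above (the total name length and the per-component limit inherited from the backing file system) can be restated as a short Python sketch; this is not the actual SwiftOnFile validation code, just the constraint logic for illustration:

# Illustrative check of the object-name constraints above (byte lengths).
MAX_OBJECT_NAME_LENGTH = 2048
MAX_COMPONENT_LENGTH = 255  # XFS file-name limit

def object_name_ok(name):
    if len(name) > MAX_OBJECT_NAME_LENGTH:
        return False
    # each part between slashes becomes a directory or file name on disk
    return all(len(part) <= MAX_COMPONENT_LENGTH for part in name.split('/'))

assert object_name_ok('dir1/subdir1/file2')
assert not object_name_ok('a' * 256)  # one component longer than 255 bytes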
@ -1,58 +0,0 @@
[func_test]
# sample config
auth_host = 127.0.0.1
auth_port = 8080
auth_ssl = no
auth_prefix = /auth/
## sample config for Swift with Keystone
#auth_version = 2
#auth_host = localhost
#auth_port = 5000
#auth_ssl = no
#auth_prefix = /v2.0/

# GSWauth internal admin user configuration information
admin_key = gswauthkey
admin_user = .super_admin

# Gluster setup information
devices = /mnt/gluster-object
gsmetadata_volume = gsmetadata

# Primary functional test account (needs admin access to the account)
account = test
username = tester
password = testing

# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2

# User on same account as first, but without admin access
username3 = tester3
password3 = testing3

# Default constraints: if not defined here, the test runner will try
# to set them from /etc/swift/swift.conf. If that file isn't found,
# the test runner will skip tests that depend on these values.
# Note that the cluster must have "sane" values for the test suite to pass.
#max_file_size = 5368709122
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_object_name_length = 1024
#container_listing_limit = 10000
#account_listing_limit = 10000
#max_account_name_length = 256
#max_container_name_length = 256
normalized_urls = True

collate = C

[unit_test]
fake_syslog = False

[probe_test]
# check_server_timeout = 30
@ -1,106 +0,0 @@
#!/bin/bash

# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This program expects to be run against a locally deployed swiftonfile
# application. This test also expects three glusterfs volumes to have
# been created: 'test', 'test2', and 'gsmetadata'.

cleanup()
{
    service memcached stop
    swift-init main stop
    if [ -x /etc/swift.bak ] ; then
        rm -rf /etc/swift > /dev/null 2>&1
        mv /etc/swift.bak /etc/swift > /dev/null 2>&1
    fi
    rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
    setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
    gswauth_cleanup
}

gswauth_cleanup()
{
    rm -rf /mnt/gluster-object/gsmetadata/.* > /dev/null 2>&1
    rm -rf /mnt/gluster-object/gsmetadata/* > /dev/null 2>&1
    setfattr -x user.swift.metadata /mnt/gluster-object/gsmetadata > /dev/null 2>&1
}

quit()
{
    echo "$1"
    exit 1
}


fail()
{
    cleanup
    quit "$1"
}

run_generic_tests()
{
    # clean up gsmetadata dir
    gswauth_cleanup

    #swauth-prep
    gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"
    gswauth-add-user -K gswauthkey -a test tester testing || fail "Unable to add user test"
    gswauth-add-user -K gswauthkey -a test2 tester2 testing2 || fail "Unable to add user test2"
    gswauth-add-user -K gswauthkey test tester3 testing3 || fail "Unable to add user test3"

    nosetests -v --exe \
        --with-xunit \
        --xunit-file functional_tests/gluster-swift-gswauth-generic-functional-TC-report.xml \
        test/functional || fail "Functional tests failed"
    nosetests -v --exe \
        --with-xunit \
        --xunit-file functional_tests/gluster-swift-gswauth-functionalnosetests-TC-report.xml \
        test/functionalnosetests || fail "Functional-nose tests failed"
}

### MAIN ###

# Backup the swift directory if it already exists
if [ -x /etc/swift ] ; then
    mv /etc/swift /etc/swift.bak
fi

export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf

# Install the configuration files
mkdir /etc/swift > /dev/null 2>&1
cp -r test/deploy/glusterfs/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
gluster-swift-gen-builders test test2 gsmetadata || fail "Unable to create ring files"

# Start the services
service memcached start || fail "Unable to start memcached"
swift-init main start || fail "Unable to start swift"

#swauth-prep
gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"

mkdir functional_tests > /dev/null 2>&1
nosetests -v --exe \
    --with-xunit \
    --xunit-file functional_tests/gluster-swift-gswauth-functional-TC-report.xml \
    test/functional_auth/gswauth || fail "Functional gswauth test failed"

run_generic_tests

cleanup
exit 0
@ -0,0 +1,729 @@
# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import pickle
import socket
import locale
import eventlet
import eventlet.debug
import functools
import random
from time import time, sleep
from httplib import HTTPException
from urlparse import urlparse
from nose import SkipTest
from contextlib import closing
from gzip import GzipFile
from shutil import rmtree
from tempfile import mkdtemp

from test import get_config
from test.functional.swift_test_client import Connection, ResponseError
# This has the side effect of mocking out the xattr module so that unit tests
# (and in this case, when in-process functional tests are called for) can run
# on file systems that don't support extended attributes.
from test.unit import debug_logger, FakeMemcache

from swift.common import constraints, utils, ring, storage_policy
from swift.common.wsgi import monkey_patch_mimetools
from swift.common.middleware import catch_errors, gatekeeper, healthcheck, \
    proxy_logging, container_sync, bulk, tempurl, slo, dlo, ratelimit, \
    tempauth, container_quotas, account_quotas
from swift.common.utils import config_true_value
from swift.proxy import server as proxy_server
from swift.account import server as account_server
from swift.container import server as container_server
from swift.obj import server as object_server
import swift.proxy.controllers.obj

# In order to get the proper blocking behavior of sockets without using
# threads, where we can set an arbitrary timeout for some piece of code under
# test, we use eventlet with the standard socket library patched. We have to
# perform this setup at module import time, since all the socket module
# bindings in the swiftclient code will have been made by the time nose
# invokes the package or class setup methods.
eventlet.hubs.use_hub(utils.get_hub())
eventlet.patcher.monkey_patch(all=False, socket=True)
eventlet.debug.hub_exceptions(False)

from swiftclient import get_auth, http_connection

has_insecure = False
try:
    from swiftclient import __version__ as client_version
    # Prevent a ValueError in StrictVersion with '2.0.3.68.ga99c2ff'
    client_version = '.'.join(client_version.split('.')[:3])
except ImportError:
    # Pre-PBR we had version, not __version__. Anyhow...
    client_version = '1.2'
from distutils.version import StrictVersion
if StrictVersion(client_version) >= StrictVersion('2.0'):
    has_insecure = True


config = {}
web_front_end = None
normalized_urls = None

# If no config was read, we will fall back to old school env vars
swift_test_auth_version = None
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None]
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None]
swift_test_tenant = ['', '', '']
swift_test_perm = ['', '', '']

skip, skip2, skip3 = False, False, False

orig_collate = ''
insecure = False

orig_hash_path_suff_pref = ('', '')
orig_swift_conf_name = None

in_process = False
_testdir = _test_servers = _test_sockets = _test_coros = None


class FakeMemcacheMiddleware(object):
    """
    Caching middleware that fakes out caching in swift.
    """

    def __init__(self, app, conf):
        self.app = app
        self.memcache = FakeMemcache()

    def __call__(self, env, start_response):
        env['swift.cache'] = self.memcache
        return self.app(env, start_response)


def fake_memcache_filter_factory(conf):
    def filter_app(app):
        return FakeMemcacheMiddleware(app, conf)
    return filter_app


# swift.conf contents for in-process functional test runs
functests_swift_conf = '''
[swift-hash]
swift_hash_path_suffix = inprocfunctests
swift_hash_path_prefix = inprocfunctests

[swift-constraints]
max_file_size = %d
''' % ((8 * 1024 * 1024) + 2)  # 8 MB + 2

def in_process_setup(the_object_server=object_server):
    print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'

    monkey_patch_mimetools()

    global _testdir
    _testdir = os.path.join(mkdtemp(), 'tmp_functional')
    utils.mkdirs(_testdir)
    rmtree(_testdir)
    utils.mkdirs(os.path.join(_testdir, 'sda1'))
    utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1'))
    utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))

    swift_conf = os.path.join(_testdir, "swift.conf")
    with open(swift_conf, "w") as scfp:
        scfp.write(functests_swift_conf)

    global orig_swift_conf_name
    orig_swift_conf_name = utils.SWIFT_CONF_FILE
    utils.SWIFT_CONF_FILE = swift_conf
    constraints.reload_constraints()
    storage_policy.SWIFT_CONF_FILE = swift_conf
    storage_policy.reload_storage_policies()
    global config
    if constraints.SWIFT_CONSTRAINTS_LOADED:
        # Use the swift constraints that are loaded for the test framework
        # configuration
        config.update(constraints.EFFECTIVE_CONSTRAINTS)
    else:
        # In-process swift constraints were not loaded; something is wrong
        raise SkipTest
    global orig_hash_path_suff_pref
    orig_hash_path_suff_pref = utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX
    utils.validate_hash_conf()

    # We create the proxy server listening socket to get its port number so
    # that we can add it as the "auth_port" value for the functional test
    # clients.
    prolis = eventlet.listen(('localhost', 0))

    # The following set of configuration values is used both for the
    # functional test framework and for the various proxy, account, container
    # and object servers.
    config.update({
        # Values needed by the various in-process swift servers
        'devices': _testdir,
        'swift_dir': _testdir,
        'mount_check': 'false',
        'client_timeout': 4,
        'allow_account_management': 'true',
        'account_autocreate': 'true',
        'allowed_headers':
        'content-disposition, content-encoding, x-delete-at,'
        ' x-object-manifest, x-static-large-object',
        'allow_versions': 'True',
        # Below are values used by the functional test framework, as well as
        # by the various in-process swift servers
        'auth_host': '127.0.0.1',
        'auth_port': str(prolis.getsockname()[1]),
        'auth_ssl': 'no',
        'auth_prefix': '/auth/',
        # Primary functional test account (needs admin access to the
        # account)
        'account': 'test',
        'username': 'tester',
        'password': 'testing',
        # User on a second account (needs admin access to the account)
        'account2': 'test2',
        'username2': 'tester2',
        'password2': 'testing2',
        # User on same account as first, but without admin access
        'username3': 'tester3',
        'password3': 'testing3',
        # For tempauth middleware
        'user_admin_admin': 'admin .admin .reseller_admin',
        'user_test_tester': 'testing .admin',
        'user_test2_tester2': 'testing2 .admin',
        'user_test_tester3': 'testing3'
    })

    acc1lis = eventlet.listen(('localhost', 0))
    acc2lis = eventlet.listen(('localhost', 0))
    con1lis = eventlet.listen(('localhost', 0))
    con2lis = eventlet.listen(('localhost', 0))
    obj1lis = eventlet.listen(('localhost', 0))
    obj2lis = eventlet.listen(('localhost', 0))
    global _test_sockets
    _test_sockets = \
        (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)

    account_ring_path = os.path.join(_testdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': acc1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': acc2lis.getsockname()[1]}], 30),
                    f)
    container_ring_path = os.path.join(_testdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': con1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': con2lis.getsockname()[1]}], 30),
                    f)
    object_ring_path = os.path.join(_testdir, 'object.ring.gz')
    with closing(GzipFile(object_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': obj1lis.getsockname()[1]},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': obj2lis.getsockname()[1]}], 30),
                    f)

    eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_request = lambda *a: None
    logger = utils.get_logger(config, 'wsgi-server', log_route='wsgi')
    # Redirect logging other messages by the underlying WSGI software.
    eventlet.wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    # Default to only 4 seconds for in-process functional test runs
    eventlet.wsgi.WRITE_TIMEOUT = 4

    prosrv = proxy_server.Application(config, logger=debug_logger('proxy'))
    acc1srv = account_server.AccountController(
        config, logger=debug_logger('acct1'))
    acc2srv = account_server.AccountController(
        config, logger=debug_logger('acct2'))
    con1srv = container_server.ContainerController(
        config, logger=debug_logger('cont1'))
    con2srv = container_server.ContainerController(
        config, logger=debug_logger('cont2'))
    obj1srv = the_object_server.ObjectController(
        config, logger=debug_logger('obj1'))
    obj2srv = the_object_server.ObjectController(
        config, logger=debug_logger('obj2'))
    global _test_servers
    _test_servers = \
        (prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)

    pipeline = [
        catch_errors.filter_factory,
        gatekeeper.filter_factory,
        healthcheck.filter_factory,
        proxy_logging.filter_factory,
        fake_memcache_filter_factory,
        container_sync.filter_factory,
        bulk.filter_factory,
        tempurl.filter_factory,
        slo.filter_factory,
        dlo.filter_factory,
        ratelimit.filter_factory,
        tempauth.filter_factory,
        container_quotas.filter_factory,
        account_quotas.filter_factory,
        proxy_logging.filter_factory,
    ]
    app = prosrv
    import mock
    for filter_factory in reversed(pipeline):
        app_filter = filter_factory(config)
        with mock.patch('swift.common.utils') as mock_utils:
            mock_utils.get_logger.return_value = None
            app = app_filter(app)
        app.logger = prosrv.logger

    nl = utils.NullLogger()
    prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
    acc1spa = eventlet.spawn(eventlet.wsgi.server, acc1lis, acc1srv, nl)
    acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
    con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
    con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
    obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
    obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
    global _test_coros
    _test_coros = \
        (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)

    # Create accounts "test" and "test2"
    def create_account(act):
        ts = utils.normalize_timestamp(time())
        partition, nodes = prosrv.account_ring.get_nodes(act)
        for node in nodes:
            # Note: we are just using the http_connect method in the object
            # controller here to talk to the account server nodes.
            conn = swift.proxy.controllers.obj.http_connect(
                node['ip'], node['port'], node['device'], partition, 'PUT',
                '/' + act, {'X-Timestamp': ts, 'x-trans-id': act})
            resp = conn.getresponse()
            assert(resp.status == 201)

    create_account('AUTH_test')
    create_account('AUTH_test2')


cluster_info = {}


def get_cluster_info():
    # The fallback constraints used for testing will come from the current
    # effective constraints.
    eff_constraints = dict(constraints.EFFECTIVE_CONSTRAINTS)

    # We'll update those constraints based on what the /info API provides, if
    # anything.
    global cluster_info
    try:
        conn = Connection(config)
        conn.authenticate()
        cluster_info.update(conn.cluster_info())
    except (ResponseError, socket.error):
        # Failed to get cluster information via the /info API, so fall back
        # on test.conf data
        pass
    else:
        eff_constraints.update(cluster_info.get('swift', {}))

    # Finally, we'll allow any constraint present in the swift-constraints
    # section of test.conf to override everything. Note that only those
    # constraints defined in the constraints module are converted to integers.
    test_constraints = get_config('swift-constraints')
    for k in constraints.DEFAULT_CONSTRAINTS:
        try:
            test_constraints[k] = int(test_constraints[k])
        except KeyError:
            pass
        except ValueError:
            print >>sys.stderr, "Invalid constraint value: %s = %s" % (
                k, test_constraints[k])
    eff_constraints.update(test_constraints)

    # Just make it look like these constraints were loaded from a /info call,
    # even if the /info call failed, or when they are overridden by values
    # from the swift-constraints section of test.conf
    cluster_info['swift'] = eff_constraints
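

# The precedence implemented above (loaded defaults, then /info values, then
# test.conf overrides) boils down to two dict updates; made-up numbers for
# illustration only, not part of the original change:
#
#     eff = {'max_file_size': 2 ** 40, 'max_meta_count': 90}  # defaults
#     eff.update({'max_file_size': 5 * 2 ** 30})              # from /info
#     eff.update({'max_meta_count': 45})                      # from test.conf
#     assert eff == {'max_file_size': 5 * 2 ** 30, 'max_meta_count': 45}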


def setup_package():
    in_process_env = os.environ.get('SWIFT_TEST_IN_PROCESS')
    if in_process_env is not None:
        use_in_process = utils.config_true_value(in_process_env)
    else:
        use_in_process = None

    global in_process

    if use_in_process:
        # Explicitly set to True, so barrel on ahead with in-process
        # functional test setup.
        in_process = True
        # NOTE: No attempt is made to read a local test.conf file.
    else:
        if use_in_process is None:
            # Not explicitly set, default to using in-process functional tests
            # if the test.conf file is not found, or does not provide a usable
            # configuration.
            config.update(get_config('func_test'))
            if config:
                in_process = False
            else:
                in_process = True
        else:
            # Explicitly set to False, do not attempt to use in-process
            # functional tests, be sure we attempt to read from local
            # test.conf file.
            in_process = False
            config.update(get_config('func_test'))

    if in_process:
        in_process_setup()

    global web_front_end
    web_front_end = config.get('web_front_end', 'integral')
    global normalized_urls
    normalized_urls = config.get('normalized_urls', False)

    global orig_collate
    orig_collate = locale.setlocale(locale.LC_COLLATE)
    locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))

    global insecure
    insecure = config_true_value(config.get('insecure', False))

    global swift_test_auth_version
    global swift_test_auth
    global swift_test_user
    global swift_test_key
    global swift_test_tenant
    global swift_test_perm

    if config:
        swift_test_auth_version = str(config.get('auth_version', '1'))

        swift_test_auth = 'http'
        if config_true_value(config.get('auth_ssl', 'no')):
            swift_test_auth = 'https'
        if 'auth_prefix' not in config:
            config['auth_prefix'] = '/'
        try:
            suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % config
            swift_test_auth += suffix
        except KeyError:
            pass  # skip

        if swift_test_auth_version == "1":
            swift_test_auth += 'v1.0'

            try:
                if 'account' in config:
                    swift_test_user[0] = '%(account)s:%(username)s' % config
                else:
                    swift_test_user[0] = '%(username)s' % config
                swift_test_key[0] = config['password']
            except KeyError:
                # bad config, no account/username configured, tests cannot be
                # run
                pass
            try:
                swift_test_user[1] = '%s%s' % (
                    '%s:' % config['account2'] if 'account2' in config else '',
                    config['username2'])
                swift_test_key[1] = config['password2']
            except KeyError:
                pass  # old config, no second account tests can be run
            try:
                swift_test_user[2] = '%s%s' % (
                    '%s:' % config['account'] if 'account'
                    in config else '', config['username3'])
                swift_test_key[2] = config['password3']
            except KeyError:
                pass  # old config, no third account tests can be run

            for _ in range(3):
                swift_test_perm[_] = swift_test_user[_]

        else:
            swift_test_user[0] = config['username']
            swift_test_tenant[0] = config['account']
            swift_test_key[0] = config['password']
            swift_test_user[1] = config['username2']
            swift_test_tenant[1] = config['account2']
            swift_test_key[1] = config['password2']
            swift_test_user[2] = config['username3']
            swift_test_tenant[2] = config['account']
            swift_test_key[2] = config['password3']

            for _ in range(3):
                swift_test_perm[_] = swift_test_tenant[_] + ':' \
                    + swift_test_user[_]

    global skip
    skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
    if skip:
        print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'

    global skip2
    skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
    if not skip and skip2:
        print >>sys.stderr, \
            'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS' \
            ' DUE TO NO CONFIG FOR THEM'

    global skip3
    skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
    if not skip and skip3:
        print >>sys.stderr, \
            'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'

    get_cluster_info()


def teardown_package():
    global orig_collate
    locale.setlocale(locale.LC_COLLATE, orig_collate)

    global in_process
    if in_process:
        try:
            for server in _test_coros:
                server.kill()
        except Exception:
            pass
        try:
            rmtree(os.path.dirname(_testdir))
        except Exception:
            pass
        utils.HASH_PATH_PREFIX, utils.HASH_PATH_SUFFIX = \
            orig_hash_path_suff_pref
        utils.SWIFT_CONF_FILE = orig_swift_conf_name
        constraints.reload_constraints()


class AuthError(Exception):
    pass


class InternalServerError(Exception):
    pass


url = [None, None, None]
token = [None, None, None]
parsed = [None, None, None]
conn = [None, None, None]


def connection(url):
    if has_insecure:
        return http_connection(url, insecure=insecure)
    return http_connection(url)


def retry(func, *args, **kwargs):
    """
    You can use the kwargs to override:
      'retries' (default: 5)
      'use_account' (default: 1) - which user's token to pass
      'url_account' (default: matches 'use_account') - which user's storage URL
      'resource' (default: url[url_account]) - URL to connect to; retry()
        will interpolate the variable :storage_url: if present
    """
    global url, token, parsed, conn
    retries = kwargs.get('retries', 5)
    attempts, backoff = 0, 1

    # use account #1 by default; turn user's 1-indexed account into 0-indexed
    use_account = kwargs.pop('use_account', 1) - 1

    # access our own account by default
    url_account = kwargs.pop('url_account', use_account + 1) - 1

    while attempts <= retries:
        attempts += 1
        try:
            if not url[use_account] or not token[use_account]:
                url[use_account], token[use_account] = \
                    get_auth(swift_test_auth, swift_test_user[use_account],
                             swift_test_key[use_account],
                             snet=False,
                             tenant_name=swift_test_tenant[use_account],
                             auth_version=swift_test_auth_version,
                             os_options={})
                parsed[use_account] = conn[use_account] = None
            if not parsed[use_account] or not conn[use_account]:
                parsed[use_account], conn[use_account] = \
                    connection(url[use_account])

            # default resource is the account url[url_account]
            resource = kwargs.pop('resource', '%(storage_url)s')
            template_vars = {'storage_url': url[url_account]}
            parsed_result = urlparse(resource % template_vars)
            return func(url[url_account], token[use_account],
                        parsed_result, conn[url_account],
                        *args, **kwargs)
        except (socket.error, HTTPException):
            if attempts > retries:
                raise
            parsed[use_account] = conn[use_account] = None
        except AuthError:
            url[use_account] = token[use_account] = None
            continue
        except InternalServerError:
            pass
        if attempts <= retries:
            sleep(backoff)
            backoff *= 2
    raise Exception('No result after %s retries.' % retries)


def check_response(conn):
    resp = conn.getresponse()
    if resp.status == 401:
        resp.read()
        raise AuthError()
    elif resp.status // 100 == 5:
        resp.read()
        raise InternalServerError()
    return resp
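

# A typical caller pairs retry() with check_response(); this hypothetical
# helper (not part of the original change) mirrors the reset_acl() helper
# defined further below:
def head_account(url, token, parsed, conn):
    conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
    return check_response(conn)

# e.g., HEAD the second test account with its own credentials:
#   resp = retry(head_account, use_account=2)
#   resp.read()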


def load_constraint(name):
    global cluster_info
    try:
        c = cluster_info['swift'][name]
    except KeyError:
        raise SkipTest("Missing constraint: %s" % name)
    if not isinstance(c, int):
        raise SkipTest("Bad value, %r, for constraint: %s" % (c, name))
    return c
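

# Illustrative use (sketch only): a test sizes its payload from the
# advertised constraint and skips automatically when the value is missing
# or unusable, since load_constraint() raises SkipTest in either case.
#
#     max_file_size = load_constraint('max_file_size')
#     too_big = 'x' * (max_file_size + 1)  # expect the cluster to reject this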


def get_storage_policy_from_cluster_info(info):
    policies = info['swift'].get('policies', {})
    default_policy = []
    non_default_policies = []
    for p in policies:
        if p.get('default', {}):
            default_policy.append(p)
        else:
            non_default_policies.append(p)
    return default_policy, non_default_policies
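

# Feeding the helper above a made-up /info payload shows the split it
# performs (illustration only, not part of the original change):
sample_info = {'swift': {'policies': [{'name': 'gold', 'default': True},
                                      {'name': 'silver'}]}}
sample_default, sample_others = \
    get_storage_policy_from_cluster_info(sample_info)
assert [p['name'] for p in sample_default] == ['gold']
assert [p['name'] for p in sample_others] == ['silver']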


def reset_acl():
    def post(url, token, parsed, conn):
        conn.request('POST', parsed.path, '', {
            'X-Auth-Token': token,
            'X-Account-Access-Control': '{}'
        })
        return check_response(conn)
    resp = retry(post, use_account=1)
    resp.read()


def requires_acls(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        global skip, cluster_info
        if skip or not cluster_info:
            raise SkipTest
        # Determine whether this cluster has account ACLs; if not, skip test
        if not cluster_info.get('tempauth', {}).get('account_acls'):
            raise SkipTest
        if 'keystoneauth' in cluster_info:
            # remove when keystoneauth supports account acls
            raise SkipTest
        reset_acl()
        try:
            rv = f(*args, **kwargs)
        finally:
            reset_acl()
        return rv
    return wrapper


class FunctionalStoragePolicyCollection(object):

    def __init__(self, policies):
        self._all = policies
        self.default = None
        for p in self:
            if p.get('default', False):
                assert self.default is None, 'Found multiple default ' \
                    'policies %r and %r' % (self.default, p)
                self.default = p

    @classmethod
    def from_info(cls, info=None):
        if not (info or cluster_info):
            get_cluster_info()
        info = info or cluster_info
        try:
            policy_info = info['swift']['policies']
        except KeyError:
            raise AssertionError('Did not find any policy info in %r' % info)
        policies = cls(policy_info)
        assert policies.default, \
            'Did not find default policy in %r' % policy_info
        return policies

    def __len__(self):
        return len(self._all)

    def __iter__(self):
        return iter(self._all)

    def __getitem__(self, index):
        return self._all[index]

    def filter(self, **kwargs):
        return self.__class__([p for p in self if all(
            p.get(k) == v for k, v in kwargs.items())])

    def exclude(self, **kwargs):
        return self.__class__([p for p in self if all(
            p.get(k) != v for k, v in kwargs.items())])

    def select(self):
        return random.choice(self)
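

# Illustrative use (sketch only; requires a live cluster that advertises
# its policies): pick a non-default policy at random, as a policy-aware
# test might, then pin a new container to it by name.
#
#     policies = FunctionalStoragePolicyCollection.from_info()
#     non_default = policies.exclude(default=True).select()
#     hdrs = {'X-Storage-Policy': non_default['name']}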


def requires_policies(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if skip:
            raise SkipTest
        try:
            self.policies = FunctionalStoragePolicyCollection.from_info()
        except AssertionError:
            raise SkipTest("Unable to determine available policies")
        if len(self.policies) < 2:
            raise SkipTest("Multiple policies not enabled")
        return f(self, *args, **kwargs)

    return wrapper
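

# Hypothetical illustration (not part of this change): applied to a test
# method, the decorator populates self.policies before the body runs.
#
#     class TestPolicyUpload(unittest.TestCase):  # assumes `import unittest`
#         @requires_policies
#         def test_upload_to_non_default_policy(self):
#             policy = self.policies.exclude(default=True).select()
#             # ... create a container with
#             #     {'X-Storage-Policy': policy['name']} ...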
21
test/functional/conf/swift.conf
Normal file
@ -0,0 +1,21 @@
[swift-hash]
# random unique strings that can never change (DO NOT LOSE)
swift_hash_path_prefix = changeme
swift_hash_path_suffix = changeme

[storage-policy:0]
name = gold

[storage-policy:1]
name = silver

# SwiftOnFile
[storage-policy:2]
name = swiftonfile
default = yes

[swift-constraints]
max_object_name_length = 221
max_account_name_length = 255
max_container_name_length = 255
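
With the policies above in place, a client can pin a container to the SwiftOnFile policy by name at creation time; a Python sketch using python-swiftclient (the URL and credentials are the SAIO tempauth defaults used elsewhere in this change, not values from this file):

# Sketch: create a container stored under the 'swiftonfile' policy.
from swiftclient import client

url, token = client.get_auth('http://127.0.0.1:8080/auth/v1.0',
                             'test:tester', 'testing')
client.put_container(url, token, 'sof-container',
                     headers={'X-Storage-Policy': 'swiftonfile'})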
69
test/functional/conf/test.conf
Normal file
@ -0,0 +1,69 @@
[func_test]
# sample config for Swift with tempauth
auth_host = 127.0.0.1
auth_port = 8080
auth_ssl = no
auth_prefix = /auth/
## sample config for Swift with Keystone
#auth_version = 2
#auth_host = localhost
#auth_port = 5000
#auth_ssl = no
#auth_prefix = /v2.0/

# Primary functional test account (needs admin access to the account)
account = test
username = tester
password = testing

# User on a second account (needs admin access to the account)
account2 = test2
username2 = tester2
password2 = testing2

# User on same account as first, but without admin access
username3 = tester3
password3 = testing3

collate = C

[unit_test]
fake_syslog = False

[probe_test]
# check_server_timeout = 30
# validate_rsync = false

[swift-constraints]
# The functional test runner will try to use the constraint values provided in
# the swift-constraints section of test.conf.
#
# If a constraint value does not exist in that section, or the
# swift-constraints section itself does not exist, the constraint values
# found in the /info API call (if successful) will be used.
#
# If a constraint value cannot be found in the /info results, either because
# the /info API call failed or the value is not present, the constraint value
# used will fall back to the one loaded by the constraints module at import
# time (which will attempt to load /etc/swift/swift.conf; see the
# swift.common.constraints module for more information).
#
# Note that the cluster must have "sane" values for the test suite to pass
# (for some definition of sane).
#
#max_file_size = 1099511
#max_meta_name_length = 128
#max_meta_value_length = 256
#max_meta_count = 90
#max_meta_overall_size = 4096
#max_header_size = 8192
max_object_name_length = 221
#container_listing_limit = 10000
#account_listing_limit = 10000
max_account_name_length = 255
max_container_name_length = 255

# Newer swift versions default to strict cors mode, but older ones were the
# opposite.
#strict_cors_mode = true
#
@ -1,385 +0,0 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" OpenStack Swift based functional tests for Gluster for Swift"""

import random
import os
import sys
import re
import hashlib
from nose import SkipTest

from test.functional.tests import config, locale, Base, Base2, Utils, \
    TestFileEnv
from test.functional.swift_test_client import Account, Connection, File, \
    ResponseError

web_front_end = config.get('web_front_end', 'integral')

class TestFile(Base):
    env = TestFileEnv
    set_up = False

    def testObjectManifest(self):
        if (web_front_end == 'apache2'):
            raise SkipTest()
        data = File.random_data(10000)
        parts = random.randrange(2, 10)
        charsEachPart = len(data) / parts
        for i in range(parts + 1):
            if i == 0:
                file = self.env.container.file('objectmanifest')
                hdrs = {}
                hdrs['Content-Length'] = '0'
                hdrs['X-Object-Manifest'] = \
                    str(self.env.container.name) + '/objectmanifest'
                self.assert_(file.write('', hdrs=hdrs))
                self.assert_(file.name in self.env.container.files())
                self.assert_(file.read() == '')
            elif i == parts:
                file = self.env.container.file('objectmanifest' + '-' + str(i))
                segment = data[(i - 1) * charsEachPart:]
                self.assertTrue(file.write(segment))
            else:
                file = self.env.container.file('objectmanifest' + '-' + str(i))
                segment = data[(i - 1) * charsEachPart:i * charsEachPart]
                self.assertTrue(file.write(segment))
        # match the manifest file content with the original data, as the
        # etag won't match
        file = self.env.container.file('objectmanifest')
        data_read = file.read()
        self.assertEquals(data, data_read)

    def test_PUT_large_object(self):
        file_item = self.env.container.file(Utils.create_name())
        data = File.random_data(1024 * 1024 * 2)
        self.assertTrue(file_item.write(data))
        self.assert_status(201)
        self.assertTrue(data == file_item.read())
        self.assert_status(200)

    def testInvalidHeadersPUT(self):
        # TODO: Although we now support x-delete-at and x-delete-after,
        # this test case is retained as we may add some other header to
        # the unsupported list in the future
        raise SkipTest()
        file = self.env.container.file(Utils.create_name())
        self.assertRaises(ResponseError,
                          file.write_random,
                          self.env.file_size,
                          hdrs={'X-Delete-At': '9876545321'})
        self.assert_status(400)
        self.assertRaises(ResponseError,
                          file.write_random,
                          self.env.file_size,
                          hdrs={'X-Delete-After': '60'})
        self.assert_status(400)

    def testInvalidHeadersPOST(self):
        # TODO: Although we now support x-delete-at and x-delete-after,
        # this test case is retained as we may add some other header to
        # the unsupported list in the future
        raise SkipTest()
        file = self.env.container.file(Utils.create_name())
        file.write_random(self.env.file_size)
        headers = file.make_headers(cfg={})
        headers.update({'X-Delete-At': '987654321'})
        # Need to call conn.make_request instead of file.sync_metadata
        # because sync_metadata calls make_headers. make_headers()
        # overwrites any headers in file.metadata as 'user' metadata
        # by appending 'X-Object-Meta-' to any of the headers
        # in file.metadata.
        file.conn.make_request('POST', file.path, hdrs=headers, cfg={})
        self.assertEqual(400, file.conn.response.status)

        headers = file.make_headers(cfg={})
        headers.update({'X-Delete-After': '60'})
        file.conn.make_request('POST', file.path, hdrs=headers, cfg={})
        self.assertEqual(400, file.conn.response.status)


class TestFileUTF8(Base2, TestFile):
    set_up = False

class TestContainerPathsEnv:
    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()

        cls.file_size = 8

        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.dirs = [
            'dir1',
            'dir2',
            'dir1/subdir1',
            'dir1/subdir2',
            'dir1/subdir1/subsubdir1',
            'dir1/subdir1/subsubdir2',
            'dir1/subdir with spaces',
            'dir1/subdir+with{whatever',
        ]

        cls.files = [
            'file1',
            'file A',
            'dir1/file2',
            'dir1/subdir1/file2',
            'dir1/subdir1/file3',
            'dir1/subdir1/file4',
            'dir1/subdir1/subsubdir1/file5',
            'dir1/subdir1/subsubdir1/file6',
            'dir1/subdir1/subsubdir1/file7',
            'dir1/subdir1/subsubdir1/file8',
            'dir1/subdir1/subsubdir2/file9',
            'dir1/subdir1/subsubdir2/file0',
            'dir1/subdir with spaces/file B',
            'dir1/subdir+with{whatever/file D',
        ]

        stored_files = set()
        for d in cls.dirs:
            file = cls.container.file(d)
            file.write(hdrs={'Content-Type': 'application/directory'})
        for f in cls.files:
            file = cls.container.file(f)
            file.write_random(cls.file_size, hdrs={'Content-Type':
                                                   'application/octet-stream'})
            stored_files.add(f)
        cls.stored_files = sorted(stored_files)
        cls.sorted_objects = sorted(set(cls.dirs + cls.files))

class TestContainerPaths(Base):
    env = TestContainerPathsEnv
    set_up = False

    def testTraverseContainer(self):
        found_files = []
        found_dirs = []

        def recurse_path(path, count=0):
            if count > 10:
                raise ValueError('too deep recursion')

            for file in self.env.container.files(parms={'path': path}):
                self.assert_(file.startswith(path))
                if file in self.env.dirs:
                    recurse_path(file, count + 1)
                    found_dirs.append(file)
                else:
                    found_files.append(file)

        recurse_path('')
        for file in self.env.stored_files:
            self.assert_(file in found_files)
            self.assert_(file not in found_dirs)

    def testContainerListing(self):
        for format in (None, 'json', 'xml'):
            files = self.env.container.files(parms={'format': format})
            self.assertFalse(len(files) == 0)

            if isinstance(files[0], dict):
                files = [str(x['name']) for x in files]

            self.assertEquals(files, self.env.sorted_objects)

        for format in ('json', 'xml'):
            for file in self.env.container.files(parms={'format': format}):
                self.assert_(int(file['bytes']) >= 0)
                self.assert_('last_modified' in file)
                if file['name'] in self.env.dirs:
                    self.assertEquals(file['content_type'],
                                      'application/directory')
                else:
                    self.assertEquals(file['content_type'],
                                      'application/octet-stream')

    def testStructure(self):
        def assert_listing(path, list):
            files = self.env.container.files(parms={'path': path})
            self.assertEquals(sorted(list, cmp=locale.strcoll), files)

        assert_listing('', ['file1', 'dir1', 'dir2', 'file A'])
        assert_listing('dir1', ['dir1/file2', 'dir1/subdir1',
                                'dir1/subdir2', 'dir1/subdir with spaces',
                                'dir1/subdir+with{whatever'])
        assert_listing('dir1/subdir1',
                       ['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2',
                        'dir1/subdir1/file2', 'dir1/subdir1/file3',
                        'dir1/subdir1/subsubdir1'])
        assert_listing('dir1/subdir1/subsubdir1',
                       ['dir1/subdir1/subsubdir1/file7',
                        'dir1/subdir1/subsubdir1/file5',
                        'dir1/subdir1/subsubdir1/file8',
                        'dir1/subdir1/subsubdir1/file6'])
        assert_listing('dir1/subdir with spaces',
                       ['dir1/subdir with spaces/file B'])

class TestObjectVersioningEnv:
    @classmethod
    def setUp(cls):
        cls.conn = Connection(config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, config.get('account',
                                                   config['username']))
        cls.account.delete_containers()
        cls.containers = {}
        # create two containers: one for the objects, the other for the
        # versions of those objects
        for i in range(2):
            hdrs = {}
            if i == 0:
                hdrs = {'X-Versions-Location': 'versions'}
                cont = cls.containers['object'] = \
                    cls.account.container('object')
            else:
                cont = cls.containers['versions'] = \
                    cls.account.container('versions')
            if not cont.create(hdrs=hdrs):
                raise ResponseError(cls.conn.response)


class TestObjectVersioning(Base):
    env = TestObjectVersioningEnv
    set_up = False

    def testObjectVersioning(self):
        versions = random.randrange(2, 10)
        dataArr = []
        # create versions
        for i in range(versions):
            data = File.random_data(10000 * (i + 1))
            file = self.env.containers['object'].file('object')
            self.assertTrue(file.write(data))
            dataArr.append(data)
        cont = self.env.containers['versions']
        info = cont.info()
        self.assertEquals(info['object_count'], versions - 1)
        # match the current version of the object with data in arr and
        # delete it
        for i in range(versions):
            data = dataArr[-(i + 1)]
            file = self.env.containers['object'].file('object')
            self.assertEquals(data, file.read())
            self.assert_(file.delete())
            self.assert_status(204)

class TestMultiProtocolAccessEnv:
|
|
||||||
@classmethod
|
|
||||||
def setUp(cls):
|
|
||||||
cls.conn = Connection(config)
|
|
||||||
cls.conn.authenticate()
|
|
||||||
cls.account = Account(cls.conn, config.get('account',
|
|
||||||
config['username']))
|
|
||||||
cls.root_dir = os.path.join('/mnt/gluster-object',cls.account.conn.storage_url.split('/')[2].split('_')[1])
|
|
||||||
cls.account.delete_containers()
|
|
||||||
|
|
||||||
cls.file_size = 8
|
|
||||||
cls.container = cls.account.container(Utils.create_name())
|
|
||||||
if not cls.container.create():
|
|
||||||
raise ResponseError(cls.conn.response)
|
|
||||||
|
|
||||||
cls.dirs = [
|
|
||||||
'dir1',
|
|
||||||
'dir2',
|
|
||||||
'dir1/subdir1',
|
|
||||||
'dir1/subdir2',
|
|
||||||
'dir1/subdir1/subsubdir1',
|
|
||||||
'dir1/subdir1/subsubdir2',
|
|
||||||
'dir1/subdir with spaces',
|
|
||||||
'dir1/subdir+with{whatever',
|
|
||||||
]
|
|
||||||
|
|
||||||
cls.files = [
|
|
||||||
'file1',
|
|
||||||
'file A',
|
|
||||||
'dir1/file2',
|
|
||||||
'dir1/subdir1/file2',
|
|
||||||
'dir1/subdir1/file3',
|
|
||||||
'dir1/subdir1/file4',
|
|
||||||
'dir1/subdir1/subsubdir1/file5',
|
|
||||||
'dir1/subdir1/subsubdir1/file6',
|
|
||||||
'dir1/subdir1/subsubdir1/file7',
|
|
||||||
'dir1/subdir1/subsubdir1/file8',
|
|
||||||
'dir1/subdir1/subsubdir2/file9',
|
|
||||||
'dir1/subdir1/subsubdir2/file0',
|
|
||||||
'dir1/subdir with spaces/file B',
|
|
||||||
'dir1/subdir+with{whatever/file D',
|
|
||||||
]
|
|
||||||
|
|
||||||
stored_files = set()
|
|
||||||
for d in cls.dirs:
|
|
||||||
file = cls.container.file(d)
|
|
||||||
file.write(hdrs={'Content-Type': 'application/directory'})
|
|
||||||
for f in cls.files:
|
|
||||||
file = cls.container.file(f)
|
|
||||||
file.write_random(cls.file_size, hdrs={'Content-Type':
|
|
||||||
'application/octet-stream'})
|
|
||||||
stored_files.add(f)
|
|
||||||
cls.stored_files = sorted(stored_files)
|
|
||||||
cls.sorted_objects = sorted(set(cls.dirs + cls.files))
|
|
||||||
|
|
||||||
|
|
||||||
class TestMultiProtocolAccess(Base):
|
|
||||||
env = TestMultiProtocolAccessEnv
|
|
||||||
set_up = False
|
|
||||||
|
|
||||||
def testObjectsFromMountPoint(self):
|
|
||||||
found_files = []
|
|
||||||
found_dirs = []
|
|
||||||
|
|
||||||
def recurse_path(path, count=0):
|
|
||||||
if count > 10:
|
|
||||||
raise ValueError('too deep recursion')
|
|
||||||
self.assert_(os.path.exists(path))
|
|
||||||
for file in os.listdir(path):
|
|
||||||
if os.path.isdir(os.path.join(path,file)):
|
|
||||||
recurse_path(os.path.join(path,file), count + 1)
|
|
||||||
found_dirs.append(file)
|
|
||||||
elif os.path.isfile(os.path.join(path,file)):
|
|
||||||
filename=os.path.join(os.path.relpath(path,os.path.join(self.env.root_dir,self.env.container.name)),file)
|
|
||||||
if re.match('^[\.]',filename):
|
|
||||||
filename=filename[2:]
|
|
||||||
found_files.append(filename)
|
|
||||||
else:
|
|
||||||
pass #Just a Place holder
|
|
||||||
|
|
||||||
recurse_path(os.path.join(self.env.root_dir,self.env.container.name))
|
|
||||||
for file in self.env.stored_files:
|
|
||||||
self.assert_(file in found_files)
|
|
||||||
self.assert_(file not in found_dirs)
|
|
||||||
|
|
||||||
def testObjectContentFromMountPoint(self):
|
|
||||||
file_name = Utils.create_name()
|
|
||||||
file_item = self.env.container.file(file_name)
|
|
||||||
data = file_item.write_random()
|
|
||||||
self.assert_status(201)
|
|
||||||
file_info = file_item.info()
|
|
||||||
fhOnMountPoint = open(os.path.join(self.env.root_dir,self.env.container.name,file_name),'r')
|
|
||||||
data_read_from_mountP = fhOnMountPoint.read()
|
|
||||||
md5_returned = hashlib.md5(data_read_from_mountP).hexdigest()
|
|
||||||
self.assertEquals(md5_returned,file_info['etag'])
|
|
||||||
fhOnMountPoint.close()
|
|
test/functional/swift_on_file_tests.py (new file, 137 lines)
@ -0,0 +1,137 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" OpenStack Swift based functional tests for Swift on File"""

import os
import re
import hashlib
from shutil import rmtree

from test.functional.tests import Base, Utils
from test.functional.swift_test_client import Account, Connection, \
    ResponseError
import test.functional as tf


class TestSwiftOnFileEnv:
    @classmethod
    def setUp(cls):
        cls.conn = Connection(tf.config)
        cls.conn.authenticate()
        cls.account = Account(cls.conn, tf.config.get('account',
                              tf.config['username']))
        cls.root_dir = os.path.join('/mnt/swiftonfile/test')
        cls.account.delete_containers()

        cls.file_size = 8
        cls.container = cls.account.container(Utils.create_name())
        if not cls.container.create():
            raise ResponseError(cls.conn.response)

        cls.dirs = [
            'dir1',
            'dir2',
            'dir1/subdir1',
            'dir1/subdir2',
            'dir1/subdir1/subsubdir1',
            'dir1/subdir1/subsubdir2',
            'dir1/subdir with spaces',
            'dir1/subdir+with{whatever',
        ]

        cls.files = [
            'file1',
            'file A',
            'dir1/file2',
            'dir1/subdir1/file2',
            'dir1/subdir1/file3',
            'dir1/subdir1/file4',
            'dir1/subdir1/subsubdir1/file5',
            'dir1/subdir1/subsubdir1/file6',
            'dir1/subdir1/subsubdir1/file7',
            'dir1/subdir1/subsubdir1/file8',
            'dir1/subdir1/subsubdir2/file9',
            'dir1/subdir1/subsubdir2/file0',
            'dir1/subdir with spaces/file B',
            'dir1/subdir+with{whatever/file D',
        ]

        stored_files = set()
        for d in cls.dirs:
            file = cls.container.file(d)
            file.write(hdrs={'Content-Type': 'application/directory'})
        for f in cls.files:
            file = cls.container.file(f)
            file.write_random(cls.file_size, hdrs={'Content-Type':
                              'application/octet-stream'})
            stored_files.add(f)
        cls.stored_files = sorted(stored_files)
        cls.sorted_objects = sorted(set(cls.dirs + cls.files))


class TestSwiftOnFile(Base):
    env = TestSwiftOnFileEnv
    set_up = False

    @classmethod
    def tearDownClass(self):
        self.env.account.delete_containers()
        for account_dir in os.listdir(self.env.root_dir):
            rmtree(os.path.join(self.env.root_dir, account_dir))

    def testObjectsFromMountPoint(self):
        found_files = []
        found_dirs = []

        def recurse_path(path, count=0):
            if count > 10:
                raise ValueError('too deep recursion')
            self.assert_(os.path.exists(path))
            for file in os.listdir(path):
                if os.path.isdir(os.path.join(path, file)):
                    recurse_path(os.path.join(path, file), count + 1)
                    found_dirs.append(file)
                elif os.path.isfile(os.path.join(path, file)):
                    filename = os.path.join(os.path.relpath(path, os.path.join(
                        self.env.root_dir, 'AUTH_' + self.env.account.name,
                        self.env.container.name)), file)
                    if re.match('^[\.]', filename):
                        filename = filename[2:]
                    found_files.append(filename)
                else:
                    pass  # just a placeholder

        recurse_path(os.path.join(self.env.root_dir,
                                  'AUTH_' + self.env.account.name,
                                  self.env.container.name))
        for file in self.env.stored_files:
            self.assert_(file in found_files)
            self.assert_(file not in found_dirs)

    def testObjectContentFromMountPoint(self):
        file_name = Utils.create_name()
        file_item = self.env.container.file(file_name)
        file_item.write_random()
        self.assert_status(201)
        file_info = file_item.info()
        fhOnMountPoint = open(os.path.join(self.env.root_dir,
                                           'AUTH_' + self.env.account.name,
                                           self.env.container.name,
                                           file_name), 'r')
        data_read_from_mountP = fhOnMountPoint.read()
        md5_returned = hashlib.md5(data_read_from_mountP).hexdigest()
        self.assertEquals(md5_returned, file_info['etag'])
        fhOnMountPoint.close()
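testObjectContentFromMountPoint above relies on the fact that, for a plain (non-segmented) object, Swift's ETag header is the hex MD5 digest of the object's bytes, so hashing the file read straight off the XFS/GlusterFS mount must reproduce the header value. A minimal standalone sketch of that invariant (the payload and temp file here are made up, not part of the test suite):

    import hashlib
    import os
    import tempfile

    body = b'some object payload'      # stand-in for an uploaded object
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(body)                  # what the object server persists
        path = f.name

    with open(path, 'rb') as f:
        on_disk_md5 = hashlib.md5(f.read()).hexdigest()

    # For plain objects the ETag header carries this same digest.
    assert on_disk_md5 == hashlib.md5(body).hexdigest()
    os.remove(path)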
@ -103,7 +103,8 @@ class Connection(object):
     def __init__(self, config):
         for key in 'auth_host auth_port auth_ssl username password'.split():
             if key not in config:
-                raise SkipTest
+                raise SkipTest(
+                    "Missing required configuration parameter: %s" % key)

         self.auth_host = config['auth_host']
         self.auth_port = int(config['auth_port'])
@ -117,6 +118,7 @@ class Connection(object):

         self.storage_host = None
         self.storage_port = None
+        self.storage_url = None

         self.conn_class = None

@ -184,7 +186,7 @@ class Connection(object):
         """
         status = self.make_request('GET', '/info',
                                    cfg={'absolute_path': True})
-        if status == 404:
+        if status // 100 == 4:
             return {}
         if not 200 <= status <= 299:
             raise ResponseError(self.response, 'GET', '/info')
@ -195,7 +197,12 @@ class Connection(object):
                                         port=self.storage_port)
         #self.connection.set_debuglevel(3)

-    def make_path(self, path=[], cfg={}):
+    def make_path(self, path=None, cfg=None):
+        if path is None:
+            path = []
+        if cfg is None:
+            cfg = {}
+
         if cfg.get('version_only_path'):
             return '/' + self.storage_url.split('/')[1]
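The make_path hunk above is the first of many below that retire the same Python pitfall: a mutable default argument (`[]` or `{}`) is created once, when the `def` statement executes, so every call that mutates it leaks state into later calls. The `None`-sentinel idiom allocates a fresh object per call. A minimal sketch of the difference (both function names are illustrative, not part of the test client):

    def broken(headers={}):        # one dict, built at definition time
        headers.setdefault('X-Auth-Token', 'stale')
        return headers

    def fixed(headers=None):       # fresh dict on every call
        if headers is None:
            headers = {}
        headers.setdefault('X-Auth-Token', 'stale')
        return headers

    assert broken() is broken()    # same shared object: state leaks
    assert fixed() is not fixed()  # independent objects: no leakage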
@ -208,7 +215,9 @@ class Connection(object):
         else:
             return self.storage_url

-    def make_headers(self, hdrs, cfg={}):
+    def make_headers(self, hdrs, cfg=None):
+        if cfg is None:
+            cfg = {}
         headers = {}

         if not cfg.get('no_auth_token'):
@ -218,8 +227,16 @@ class Connection(object):
             headers.update(hdrs)
         return headers

-    def make_request(self, method, path=[], data='', hdrs={}, parms={},
-                     cfg={}):
+    def make_request(self, method, path=None, data='', hdrs=None, parms=None,
+                     cfg=None):
+        if path is None:
+            path = []
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         if not cfg.get('absolute_path'):
             # Set absolute_path=True to make a request to exactly the given
             # path, not storage path + given path. Useful for
@ -277,7 +294,14 @@ class Connection(object):
                                   'Attempts: %s, Failures: %s' %
                                   (request, len(fail_messages), fail_messages))

-    def put_start(self, path, hdrs={}, parms={}, cfg={}, chunked=False):
+    def put_start(self, path, hdrs=None, parms=None, cfg=None, chunked=False):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         self.http_connect()

         path = self.make_path(path, cfg)
@ -322,7 +346,10 @@ class Base(object):
     def __str__(self):
         return self.name

-    def header_fields(self, required_fields, optional_fields=()):
+    def header_fields(self, required_fields, optional_fields=None):
+        if optional_fields is None:
+            optional_fields = ()
+
         headers = dict(self.conn.response.getheaders())
         ret = {}

@ -352,7 +379,11 @@ class Account(Base):
         self.conn = conn
         self.name = str(name)

-    def update_metadata(self, metadata={}, cfg={}):
+    def update_metadata(self, metadata=None, cfg=None):
+        if metadata is None:
+            metadata = {}
+        if cfg is None:
+            cfg = {}
         headers = dict(("X-Account-Meta-%s" % k, v)
                        for k, v in metadata.items())

@ -365,7 +396,14 @@ class Account(Base):
     def container(self, container_name):
         return Container(self.conn, self.name, container_name)

-    def containers(self, hdrs={}, parms={}, cfg={}):
+    def containers(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         format_type = parms.get('format', None)
         if format_type not in [None, 'json', 'xml']:
             raise RequestError('Invalid format: %s' % format_type)
@ -411,7 +449,13 @@ class Account(Base):

         return listing_empty(self.containers)

-    def info(self, hdrs={}, parms={}, cfg={}):
+    def info(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                   parms=parms, cfg=cfg) != 204:

@ -435,11 +479,21 @@ class Container(Base):
         self.account = str(account)
         self.name = str(name)

-    def create(self, hdrs={}, parms={}, cfg={}):
+    def create(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         return self.conn.make_request('PUT', self.path, hdrs=hdrs,
                                       parms=parms, cfg=cfg) in (201, 202)

-    def delete(self, hdrs={}, parms={}):
+    def delete(self, hdrs=None, parms=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
         return self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                       parms=parms) == 204

@ -457,7 +511,13 @@ class Container(Base):
     def file(self, file_name):
         return File(self.conn, self.account, self.name, file_name)

-    def files(self, hdrs={}, parms={}, cfg={}):
+    def files(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         format_type = parms.get('format', None)
         if format_type not in [None, 'json', 'xml']:
             raise RequestError('Invalid format: %s' % format_type)
@ -507,7 +567,13 @@ class Container(Base):
             raise ResponseError(self.conn.response, 'GET',
                                 self.conn.make_path(self.path))

-    def info(self, hdrs={}, parms={}, cfg={}):
+    def info(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                parms=parms, cfg=cfg)

@ -538,7 +604,9 @@ class File(Base):
         self.size = None
         self.metadata = {}

-    def make_headers(self, cfg={}):
+    def make_headers(self, cfg=None):
+        if cfg is None:
+            cfg = {}
         headers = {}
         if not cfg.get('no_content_length'):
             if cfg.get('set_content_length'):
@ -575,7 +643,13 @@ class File(Base):
         data.seek(0)
         return checksum.hexdigest()

-    def copy(self, dest_cont, dest_file, hdrs={}, parms={}, cfg={}):
+    def copy(self, dest_cont, dest_file, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         if 'destination' in cfg:
             headers = {'Destination': cfg['destination']}
         elif cfg.get('no_destination'):
@ -590,7 +664,11 @@ class File(Base):
         return self.conn.make_request('COPY', self.path, hdrs=headers,
                                       parms=parms) == 201

-    def delete(self, hdrs={}, parms={}):
+    def delete(self, hdrs=None, parms=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
         if self.conn.make_request('DELETE', self.path, hdrs=hdrs,
                                   parms=parms) != 204:

@ -599,7 +677,13 @@ class File(Base):

         return True

-    def info(self, hdrs={}, parms={}, cfg={}):
+    def info(self, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
         if self.conn.make_request('HEAD', self.path, hdrs=hdrs,
                                   parms=parms, cfg=cfg) != 200:

@ -615,7 +699,11 @@ class File(Base):
             header_fields['etag'] = header_fields['etag'].strip('"')
         return header_fields

-    def initialize(self, hdrs={}, parms={}):
+    def initialize(self, hdrs=None, parms=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
         if not self.name:
             return False

@ -660,7 +748,11 @@ class File(Base):
         return data

     def read(self, size=-1, offset=0, hdrs=None, buffer=None,
-             callback=None, cfg={}, parms={}):
+             callback=None, cfg=None, parms=None):
+        if cfg is None:
+            cfg = {}
+        if parms is None:
+            parms = {}

         if size > 0:
             range_string = 'bytes=%d-%d' % (offset, (offset + size) - 1)
@ -717,7 +809,12 @@ class File(Base):
         finally:
             fobj.close()

-    def sync_metadata(self, metadata={}, cfg={}):
+    def sync_metadata(self, metadata=None, cfg=None):
+        if metadata is None:
+            metadata = {}
+        if cfg is None:
+            cfg = {}
+
         self.metadata.update(metadata)

         if self.metadata:
@ -737,7 +834,14 @@ class File(Base):

         return True

-    def chunked_write(self, data=None, hdrs={}, parms={}, cfg={}):
+    def chunked_write(self, data=None, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         if data is not None and self.chunked_write_in_progress:
             self.conn.put_data(data, True)
         elif data is not None:
@ -756,8 +860,15 @@ class File(Base):
         else:
             raise RuntimeError

-    def write(self, data='', hdrs={}, parms={}, callback=None, cfg={},
-              return_resp=False):
+    def write(self, data='', hdrs=None, parms=None, callback=None, cfg=None,
+              return_resp=False):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         block_size = 2 ** 20

         if isinstance(data, file):
@ -778,13 +889,15 @@ class File(Base):

         transferred = 0
         buff = data.read(block_size)
+        buff_len = len(buff)
         try:
-            while len(buff) > 0:
+            while buff_len > 0:
                 self.conn.put_data(buff)
-                buff = data.read(block_size)
-                transferred += len(buff)
+                transferred += buff_len
                 if callable(callback):
                     callback(transferred, self.size)
+                buff = data.read(block_size)
+                buff_len = len(buff)

             self.conn.put_end()
         except socket.timeout as err:
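Besides the default-argument cleanups, the write() hunk above fixes the upload loop's progress accounting: the old body read the next block before bumping `transferred`, so the callback was handed the size of the upcoming chunk instead of the bytes just sent, leaving the running total short by the first block. Carrying `buff_len` alongside `buff` sends first and counts second. A self-contained sketch of the corrected pattern (`send_in_blocks` and its arguments are stand-ins, not the test client's API):

    import io

    def send_in_blocks(data, put_data, block_size=2 ** 20, callback=None):
        transferred = 0
        buff = data.read(block_size)
        buff_len = len(buff)
        while buff_len > 0:
            put_data(buff)                # send the block we already have
            transferred += buff_len       # count exactly what was sent
            if callable(callback):
                callback(transferred)
            buff = data.read(block_size)  # then read ahead
            buff_len = len(buff)
        return transferred

    sent = send_in_blocks(io.BytesIO(b'x' * 3000), lambda chunk: None,
                          block_size=1024)
    assert sent == 3000                   # every byte accounted for once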
@ -806,7 +919,14 @@ class File(Base):

         return True

-    def write_random(self, size=None, hdrs={}, parms={}, cfg={}):
+    def write_random(self, size=None, hdrs=None, parms=None, cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         data = self.random_data(size)
         if not self.write(data, hdrs=hdrs, parms=parms, cfg=cfg):
             raise ResponseError(self.conn.response, 'PUT',
@ -814,7 +934,15 @@ class File(Base):
         self.md5 = self.compute_md5sum(StringIO.StringIO(data))
         return data

-    def write_random_return_resp(self, size=None, hdrs={}, parms={}, cfg={}):
+    def write_random_return_resp(self, size=None, hdrs=None, parms=None,
+                                 cfg=None):
+        if hdrs is None:
+            hdrs = {}
+        if parms is None:
+            parms = {}
+        if cfg is None:
+            cfg = {}
+
         data = self.random_data(size)
         resp = self.write(data, hdrs=hdrs, parms=parms, cfg=cfg,
                           return_resp=True)
@ -1,231 +0,0 @@ (file deleted)
# Copyright (c) 2010-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from httplib import HTTPException
import os
import socket
import sys
from time import sleep
from urlparse import urlparse
import functools
from nose import SkipTest

from test import get_config

from swiftclient import get_auth, http_connection
from test.functional.swift_test_client import Connection

conf = get_config('func_test')
web_front_end = conf.get('web_front_end', 'integral')
normalized_urls = conf.get('normalized_urls', False)

# If no conf was read, we will fall back to old school env vars
swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None]
swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None]
swift_test_tenant = ['', '', '']
swift_test_perm = ['', '', '']

if conf:
    swift_test_auth_version = str(conf.get('auth_version', '1'))

    swift_test_auth = 'http'
    if conf.get('auth_ssl', 'no').lower() in ('yes', 'true', 'on', '1'):
        swift_test_auth = 'https'
    if 'auth_prefix' not in conf:
        conf['auth_prefix'] = '/'
    try:
        suffix = '://%(auth_host)s:%(auth_port)s%(auth_prefix)s' % conf
        swift_test_auth += suffix
    except KeyError:
        pass  # skip

    if swift_test_auth_version == "1":
        swift_test_auth += 'v1.0'

        if 'account' in conf:
            swift_test_user[0] = '%(account)s:%(username)s' % conf
        else:
            swift_test_user[0] = '%(username)s' % conf
        swift_test_key[0] = conf['password']
        try:
            swift_test_user[1] = '%s%s' % (
                '%s:' % conf['account2'] if 'account2' in conf else '',
                conf['username2'])
            swift_test_key[1] = conf['password2']
        except KeyError as err:
            pass  # old conf, no second account tests can be run
        try:
            swift_test_user[2] = '%s%s' % ('%s:' % conf['account'] if 'account'
                                           in conf else '', conf['username3'])
            swift_test_key[2] = conf['password3']
        except KeyError as err:
            pass  # old conf, no third account tests can be run

        for _ in range(3):
            swift_test_perm[_] = swift_test_user[_]

    else:
        swift_test_user[0] = conf['username']
        swift_test_tenant[0] = conf['account']
        swift_test_key[0] = conf['password']
        swift_test_user[1] = conf['username2']
        swift_test_tenant[1] = conf['account2']
        swift_test_key[1] = conf['password2']
        swift_test_user[2] = conf['username3']
        swift_test_tenant[2] = conf['account']
        swift_test_key[2] = conf['password3']

        for _ in range(3):
            swift_test_perm[_] = swift_test_tenant[_] + ':' \
                + swift_test_user[_]

skip = not all([swift_test_auth, swift_test_user[0], swift_test_key[0]])
if skip:
    print >>sys.stderr, 'SKIPPING FUNCTIONAL TESTS DUE TO NO CONFIG'

skip2 = not all([not skip, swift_test_user[1], swift_test_key[1]])
if not skip and skip2:
    print >>sys.stderr, \
        'SKIPPING SECOND ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'

skip3 = not all([not skip, swift_test_user[2], swift_test_key[2]])
if not skip and skip3:
    print >>sys.stderr, \
        'SKIPPING THIRD ACCOUNT FUNCTIONAL TESTS DUE TO NO CONFIG FOR THEM'


class AuthError(Exception):
    pass


class InternalServerError(Exception):
    pass


url = [None, None, None]
token = [None, None, None]
parsed = [None, None, None]
conn = [None, None, None]


def retry(func, *args, **kwargs):
    """
    You can use the kwargs to override:
      'retries' (default: 5)
      'use_account' (default: 1) - which user's token to pass
      'url_account' (default: matches 'use_account') - which user's storage URL
      'resource' (default: url[url_account] - URL to connect to; retry()
                  will interpolate the variable :storage_url: if present
    """
    global url, token, parsed, conn
    retries = kwargs.get('retries', 5)
    attempts, backoff = 0, 1

    # use account #1 by default; turn user's 1-indexed account into 0-indexed
    use_account = kwargs.pop('use_account', 1) - 1

    # access our own account by default
    url_account = kwargs.pop('url_account', use_account + 1) - 1

    while attempts <= retries:
        attempts += 1
        try:
            if not url[use_account] or not token[use_account]:
                url[use_account], token[use_account] = \
                    get_auth(swift_test_auth, swift_test_user[use_account],
                             swift_test_key[use_account],
                             snet=False,
                             tenant_name=swift_test_tenant[use_account],
                             auth_version=swift_test_auth_version,
                             os_options={})
                parsed[use_account] = conn[use_account] = None
            if not parsed[use_account] or not conn[use_account]:
                parsed[use_account], conn[use_account] = \
                    http_connection(url[use_account])

            # default resource is the account url[url_account]
            resource = kwargs.pop('resource', '%(storage_url)s')
            template_vars = {'storage_url': url[url_account]}
            parsed_result = urlparse(resource % template_vars)
            return func(url[url_account], token[use_account],
                        parsed_result, conn[url_account],
                        *args, **kwargs)
        except (socket.error, HTTPException):
            if attempts > retries:
                raise
            parsed[use_account] = conn[use_account] = None
        except AuthError:
            url[use_account] = token[use_account] = None
            continue
        except InternalServerError:
            pass
        if attempts <= retries:
            sleep(backoff)
            backoff *= 2
    raise Exception('No result after %s retries.' % retries)


def check_response(conn):
    resp = conn.getresponse()
    if resp.status == 401:
        resp.read()
        raise AuthError()
    elif resp.status // 100 == 5:
        resp.read()
        raise InternalServerError()
    return resp


cluster_info = {}


def get_cluster_info():
    conn = Connection(conf)
    conn.authenticate()
    global cluster_info
    cluster_info = conn.cluster_info()


def reset_acl():
    def post(url, token, parsed, conn):
        conn.request('POST', parsed.path, '', {
            'X-Auth-Token': token,
            'X-Account-Access-Control': '{}'
        })
        return check_response(conn)
    resp = retry(post, use_account=1)
    resp.read()


def requires_acls(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        if skip:
            raise SkipTest
        if not cluster_info:
            get_cluster_info()
        # Determine whether this cluster has account ACLs; if not, skip test
        if not cluster_info.get('tempauth', {}).get('account_acls'):
            raise SkipTest
        if 'keystoneauth' in cluster_info:
            # remove when keystoneauth supports account acls
            raise SkipTest
        reset_acl()
        try:
            rv = f(*args, **kwargs)
        finally:
            reset_acl()
        return rv
    return wrapper
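The module deleted above is not gone for good: the test_account.py hunk below now imports check_response, retry and requires_acls from test.functional, which provides the same helpers. retry() wraps a request callable with cached authentication and doubling backoff, calling it as func(storage_url, token, parsed_url, connection, *args). Typical usage, in the shape the tests below follow:

    def head_account(url, token, parsed, conn):
        # retry() supplies the authenticated connection and parsed URL
        conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
        return check_response(conn)

    resp = retry(head_account, use_account=1, retries=3)
    resp.read()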
@ -21,19 +21,53 @@ from uuid import uuid4
 from nose import SkipTest
 from string import letters

-from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
-    MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
 from swift.common.middleware.acl import format_acl
-from swift_testing import (check_response, retry, skip, skip2, skip3,
-                           web_front_end, requires_acls)
-import swift_testing
-from test.functional.tests import load_constraint
+from test.functional import check_response, retry, requires_acls, \
+    load_constraint
+import test.functional as tf


 class TestAccount(unittest.TestCase):

+    def setUp(self):
+        self.max_meta_count = load_constraint('max_meta_count')
+        self.max_meta_name_length = load_constraint('max_meta_name_length')
+        self.max_meta_overall_size = load_constraint('max_meta_overall_size')
+        self.max_meta_value_length = load_constraint('max_meta_value_length')
+
+        def head(url, token, parsed, conn):
+            conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
+            return check_response(conn)
+        resp = retry(head)
+        self.existing_metadata = set([
+            k for k, v in resp.getheaders() if
+            k.lower().startswith('x-account-meta')])
+
+    def tearDown(self):
+        def head(url, token, parsed, conn):
+            conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
+            return check_response(conn)
+        resp = retry(head)
+        resp.read()
+        new_metadata = set(
+            [k for k, v in resp.getheaders() if
+             k.lower().startswith('x-account-meta')])
+
+        def clear_meta(url, token, parsed, conn, remove_metadata_keys):
+            headers = {'X-Auth-Token': token}
+            headers.update((k, '') for k in remove_metadata_keys)
+            conn.request('POST', parsed.path, '', headers)
+            return check_response(conn)
+        extra_metadata = list(self.existing_metadata ^ new_metadata)
+        for i in range(0, len(extra_metadata), 90):
+            batch = extra_metadata[i:i + 90]
+            resp = retry(clear_meta, batch)
+            resp.read()
+            self.assertEqual(resp.status // 100, 2)
+
     def test_metadata(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, value):
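The new setUp/tearDown pair makes each test self-cleaning: setUp snapshots the account's X-Account-Meta-* headers, and tearDown diffs that snapshot against the post-test headers, then POSTs the leftover keys back with empty values (which deletes them in Swift), 90 keys per request to stay under the per-request metadata count cap. The diff-and-batch idea in isolation (the two snapshot sets are invented for the example):

    existing = {'x-account-meta-owner'}              # snapshot from setUp
    current = existing | {'x-account-meta-test'}     # headers after a test

    # symmetric difference = keys a test added or removed and left behind
    extra_metadata = list(existing ^ current)
    for i in range(0, len(extra_metadata), 90):      # batch under the cap
        batch = extra_metadata[i:i + 90]
        headers = dict((k, '') for k in batch)       # empty value deletes
        # in the real test: resp = retry(clear_meta, batch)
        assert headers == {'x-account-meta-test': ''}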
@ -73,6 +107,9 @@ class TestAccount(unittest.TestCase):
         self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')

     def test_invalid_acls(self):
+        if tf.skip:
+            raise SkipTest
+
         def post(url, token, parsed, conn, headers):
             new_headers = dict({'X-Auth-Token': token}, **headers)
             conn.request('POST', parsed.path, '', new_headers)
@ -109,7 +146,7 @@ class TestAccount(unittest.TestCase):
             resp.read()
             self.assertEqual(resp.status, 400)

-        acl_user = swift_testing.swift_test_user[1]
+        acl_user = tf.swift_test_user[1]
         acl = {'admin': [acl_user], 'invalid_key': 'invalid_value'}
         headers = {'x-account-access-control': format_acl(
             version=2, acl_dict=acl)}
@ -137,7 +174,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_read_only_acl(self):
-        if skip3:
+        if tf.skip3:
             raise SkipTest

         def get(url, token, parsed, conn):
@ -155,7 +192,7 @@ class TestAccount(unittest.TestCase):
             self.assertEquals(resp.status, 403)

         # grant read access
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'read-only': [acl_user]}
         headers = {'x-account-access-control': format_acl(
             version=2, acl_dict=acl)}
@ -188,7 +225,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_read_write_acl(self):
-        if skip3:
+        if tf.skip3:
             raise SkipTest

         def get(url, token, parsed, conn):
@ -206,7 +243,7 @@ class TestAccount(unittest.TestCase):
             self.assertEquals(resp.status, 403)

         # grant read-write access
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'read-write': [acl_user]}
         headers = {'x-account-access-control': format_acl(
             version=2, acl_dict=acl)}
@ -229,7 +266,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_admin_acl(self):
-        if skip3:
+        if tf.skip3:
             raise SkipTest

         def get(url, token, parsed, conn):
@ -247,7 +284,7 @@ class TestAccount(unittest.TestCase):
             self.assertEquals(resp.status, 403)

         # grant admin access
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'admin': [acl_user]}
         acl_json_str = format_acl(version=2, acl_dict=acl)
         headers = {'x-account-access-control': acl_json_str}
@ -287,7 +324,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_protected_tempurl(self):
-        if skip3:
+        if tf.skip3:
             raise SkipTest

         def get(url, token, parsed, conn):
@ -299,7 +336,7 @@ class TestAccount(unittest.TestCase):
             conn.request('POST', parsed.path, '', new_headers)
             return check_response(conn)

-        # add a account metadata, and temp-url-key to account
+        # add an account metadata, and temp-url-key to account
         value = str(uuid4())
         headers = {
             'x-account-meta-temp-url-key': 'secret',
@ -310,7 +347,7 @@ class TestAccount(unittest.TestCase):
         self.assertEqual(resp.status, 204)

         # grant read-only access to tester3
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'read-only': [acl_user]}
         acl_json_str = format_acl(version=2, acl_dict=acl)
         headers = {'x-account-access-control': acl_json_str}
@ -328,7 +365,7 @@ class TestAccount(unittest.TestCase):
         self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)

         # grant read-write access to tester3
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'read-write': [acl_user]}
         acl_json_str = format_acl(version=2, acl_dict=acl)
         headers = {'x-account-access-control': acl_json_str}
@ -346,7 +383,7 @@ class TestAccount(unittest.TestCase):
         self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)

         # grant admin access to tester3
-        acl_user = swift_testing.swift_test_user[2]
+        acl_user = tf.swift_test_user[2]
         acl = {'admin': [acl_user]}
         acl_json_str = format_acl(version=2, acl_dict=acl)
         headers = {'x-account-access-control': acl_json_str}
@ -381,7 +418,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_account_acls(self):
-        if skip2:
+        if tf.skip2:
             raise SkipTest

         def post(url, token, parsed, conn, headers):
@ -428,7 +465,7 @@ class TestAccount(unittest.TestCase):

         # User1 is swift_owner of their own account, so they can POST an
         # ACL -- let's do this and make User2 (test_user[1]) an admin
-        acl_user = swift_testing.swift_test_user[1]
+        acl_user = tf.swift_test_user[1]
         acl = {'admin': [acl_user]}
         headers = {'x-account-access-control': format_acl(
             version=2, acl_dict=acl)}
@ -505,7 +542,7 @@ class TestAccount(unittest.TestCase):

     @requires_acls
     def test_swift_account_acls(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, headers):
@ -568,7 +605,7 @@ class TestAccount(unittest.TestCase):
             resp.read()

     def test_swift_prohibits_garbage_account_acls(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, headers):
@ -635,7 +672,7 @@ class TestAccount(unittest.TestCase):
             resp.read()

     def test_unicode_metadata(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, name, value):
@ -648,7 +685,7 @@ class TestAccount(unittest.TestCase):
             return check_response(conn)
         uni_key = u'X-Account-Meta-uni\u0E12'
         uni_value = u'uni\u0E12'
-        if (web_front_end == 'integral'):
+        if (tf.web_front_end == 'integral'):
             resp = retry(post, uni_key, '1')
             resp.read()
             self.assertTrue(resp.status in (201, 204))
@ -664,7 +701,7 @@ class TestAccount(unittest.TestCase):
         self.assert_(resp.status in (200, 204), resp.status)
         self.assertEqual(resp.getheader('X-Account-Meta-uni'),
                          uni_value.encode('utf-8'))
-        if (web_front_end == 'integral'):
+        if (tf.web_front_end == 'integral'):
             resp = retry(post, uni_key, uni_value)
             resp.read()
             self.assertEqual(resp.status, 204)
@ -675,7 +712,7 @@ class TestAccount(unittest.TestCase):
                          uni_value.encode('utf-8'))

     def test_multi_metadata(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, name, value):
@ -704,7 +741,7 @@ class TestAccount(unittest.TestCase):
         self.assertEqual(resp.getheader('x-account-meta-two'), '2')

     def test_bad_metadata(self):
-        if skip:
+        if tf.skip:
             raise SkipTest

         def post(url, token, parsed, conn, extra_headers):
@ -714,54 +751,59 @@ class TestAccount(unittest.TestCase):
             return check_response(conn)

         resp = retry(post,
-                     {'X-Account-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
+                     {'X-Account-Meta-' + (
+                         'k' * self.max_meta_name_length): 'v'})
         resp.read()
         self.assertEqual(resp.status, 204)
         resp = retry(
             post,
-            {'X-Account-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
+            {'X-Account-Meta-' + ('k' * (
+                self.max_meta_name_length + 1)): 'v'})
         resp.read()
         self.assertEqual(resp.status, 400)

         resp = retry(post,
-                     {'X-Account-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
+                     {'X-Account-Meta-Too-Long': (
+                         'k' * self.max_meta_value_length)})
         resp.read()
         self.assertEqual(resp.status, 204)
         resp = retry(
             post,
-            {'X-Account-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
+            {'X-Account-Meta-Too-Long': 'k' * (
+                self.max_meta_value_length + 1)})
         resp.read()
         self.assertEqual(resp.status, 400)

         headers = {}
-        for x in xrange(MAX_META_COUNT):
+        for x in xrange(self.max_meta_count):
             headers['X-Account-Meta-%d' % x] = 'v'
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 204)
         headers = {}
-        for x in xrange(MAX_META_COUNT + 1):
+        for x in xrange(self.max_meta_count + 1):
             headers['X-Account-Meta-%d' % x] = 'v'
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 400)

         headers = {}
-        header_value = 'k' * MAX_META_VALUE_LENGTH
+        header_value = 'k' * self.max_meta_value_length
         size = 0
         x = 0
-        while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
-            size += 4 + MAX_META_VALUE_LENGTH
+        while size < (self.max_meta_overall_size - 4
+                      - self.max_meta_value_length):
+            size += 4 + self.max_meta_value_length
             headers['X-Account-Meta-%04d' % x] = header_value
             x += 1
-        if MAX_META_OVERALL_SIZE - size > 1:
+        if self.max_meta_overall_size - size > 1:
             headers['X-Account-Meta-k'] = \
-                'v' * (MAX_META_OVERALL_SIZE - size - 1)
+                'v' * (self.max_meta_overall_size - size - 1)
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 204)
         headers['X-Account-Meta-k'] = \
-            'v' * (MAX_META_OVERALL_SIZE - size)
+            'v' * (self.max_meta_overall_size - size)
         resp = retry(post, headers)
         resp.read()
         self.assertEqual(resp.status, 400)
Some files were not shown because too many files have changed in this diff.