Remove non SP related code
This commit only removes code that is not required when swiftonfile will be used as Storage Policy in Swift. This commit does NOT: * include code changes to support SP * fix functional tests * update documentation related to SP This commit removes the following: * Gen builders script and its man page. * Authentication guide, user guide and other docs. * Swiftkerbauth code, unit tests, func tests and doc. * gswauth code, unit tests, func tests and doc. * Object expirer code modifications, unit tests and doc. * Conf files of account, container, proxy. * Account, container and proxy server code - server.py of each. * Account, container and proxy unit and functional tests. * DiskDir class and corresponding unit tests. * Our overridden ring.py (enforces account = volume = device) and tests. * Functional tests for authentication filters. * modules/swift and test/deploy directories. * Proxy base controllers unit tests. NOTE: We may have to reintroduce some of the above functional and unit tests after SP related code changes - during fixing functional tests. This commit modifies: * setup.py to reflect the above code removals. * constraints.py to remove references to ring.py * object server.py to remove object-expirer changes * tox.ini to remove ksfunctest and swfunctest Signed-off-by: Prashanth Pai <ppai@redhat.com>
This commit is contained in:
parent
9f3687bc66
commit
ba39ade716
@ -1,89 +0,0 @@
|
||||
#!/bin/bash
# gluster-swift-gen-builders - (re)creates the account, container and object
# ring/builder files in /etc/swift, registering each GlusterFS volume named
# on the command line as one device in its own zone.

# Note that these port numbers must match the configured values for the
# various servers in their configuration files.
declare -A port=(["account.builder"]=6012 ["container.builder"]=6011 \
    ["object.builder"]=6010)

print_usage() {
    echo "
NAME
    gluster-swift-gen-builders - Registers GlusterFS volumes to be accessed by
    object storage.

SYNOPSIS
    gluster-swift-gen-builders [-v] [-h] volumes...

DESCRIPTION
    Register GlusterFS volumes to be accessed over OpenStack Swift object
    storage.

OPTIONS
    -v or --verbose
        Verbose
    -h or --help
        Prints help screen

EXAMPLES
    gluster-swift-gen-builders myvol1 myvol2
        -Creates new ring files with myvol1 and myvol2

    gluster-swift-gen-builders myvol2
        -Creates new ring files by removing myvol1
"
}

builder_files="account.builder container.builder object.builder"

# create <builder>: start a fresh builder file (part power 1, 1 replica,
# min_part_hours 1). Output goes to $outdev so it honors -v/--verbose
# (previously this appended to a hard-coded /tmp/out, ignoring verbosity
# and leaving a growing file behind).
function create {
    swift-ring-builder "$1" create 1 1 1 &> "$outdev"
}

# add <builder> <zone> <port> <volume>: register one volume as a device.
# NOTE(review): the trailing '_' appears to terminate the device name before
# the (empty) metadata field of swift-ring-builder's add syntax -- confirm
# against the swift-ring-builder documentation before changing it.
function add {
    swift-ring-builder "$1" add "z$2-127.0.0.1:$3/$4_" 100.0
}

# rebalance <builder>: distribute partitions across the registered devices.
function rebalance {
    swift-ring-builder "$1" rebalance
}

# build <builder>: print the builder contents (also writes the ring file).
function build {
    swift-ring-builder "$1"
}

verbose=0
outdev="/dev/null"

# -v/--verbose must be the first argument; it routes tool output to stdout.
if [ "$1" = "-v" ] || [ "$1" = "--verbose" ]; then
    verbose=1
    outdev="/dev/stdout"
    shift
fi

if [ "x$1" = "x" ]; then
    echo "Please specify the gluster volume name to use."
    print_usage
    exit 1
fi

if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
    print_usage
    exit 0
fi

# Builder files are always generated in the standard swift config directory.
# Bail out if it is missing rather than scattering builders into the cwd.
cd /etc/swift || exit 1

for builder_file in $builder_files
do
    create "$builder_file"

    # Each volume is placed in its own zone, numbered from 1.
    zone=1
    for volname in "$@"
    do
        add "$builder_file" "$zone" "${port[$builder_file]}" "$volname" &> "$outdev"
        zone=$((zone + 1))
    done

    rebalance "$builder_file" &> "$outdev"
    build "$builder_file" &> "$outdev"
done

echo "Ring files are prepared in /etc/swift. Please restart object store services"
|
@ -1,69 +0,0 @@
|
||||
.TH gluster-swift-gen-builders 8 "gluster-swift helper utility" "18 November 2013" "Red Hat Inc."
|
||||
.SH NAME
|
||||
\fBgluster-swift-gen-builders \fP- Registers GlusterFS volumes to be accessed by
|
||||
\fBOpenStack Swift.
|
||||
\fB
|
||||
.SH SYNOPSIS
|
||||
.nf
|
||||
.fam C
|
||||
\fBgluster-swift-gen-builders\fP [\fB-v\fP] [\fB-h\fP] volumes\.\.\.
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
.fam T
|
||||
.fi
|
||||
.SH DESCRIPTION
|
||||
Register GlusterFS volumes to be accessed over OpenStack Swift.
|
||||
.SH OPTIONS
|
||||
\fB-v\fP or \fB--verbose\fP
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Verbose
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
\fB-h\fP or \fB--help\fP
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Prints help screen
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
.SH EXAMPLES
|
||||
|
||||
\fBgluster-swift-gen-builders\fP vol1 vol2
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Creates new ring files with vol1 and vol2
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
\fBgluster-swift-gen-builders\fP vol2
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Creates new ring files by removing vol1
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
\fBgluster-swift-gen-builders\fP \fB-v\fP vol1
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Create new ring files with vol1, (Verbose).
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
\fBgluster-swift-gen-builders\fP \fB-h\fP
|
||||
.PP
|
||||
.nf
|
||||
.fam C
|
||||
Displays help screen
|
||||
|
||||
.fam T
|
||||
.fi
|
||||
.SH COPYRIGHT
|
||||
\fBCopyright\fP(c) 2013 RedHat, Inc. <http://www.redhat.com>
|
@ -1,479 +0,0 @@
|
||||
# Authentication Services Start Guide
|
||||
|
||||
## Contents
|
||||
* [Keystone](#keystone)
|
||||
* [Overview](#keystone_overview)
|
||||
* [Creation of swift accounts](#keystone_swift_accounts)
|
||||
* [Configuration](#keystone_configuration)
|
||||
* [Configuring keystone endpoint](#keystone_endpoint)
|
||||
* [GSwauth](#gswauth)
|
||||
* [Overview](#gswauth_overview)
|
||||
* [Installing GSwauth](#gswauth_install)
|
||||
* [User roles](#gswauth_user_roles)
|
||||
* [GSwauth Tools](#gswauth_tools)
|
||||
* [Authenticating a user](#gswauth_authenticate)
|
||||
* [Swiftkerbauth](#swiftkerbauth)
|
||||
* [Architecture](swiftkerbauth/architecture.md)
|
||||
* [RHEL IPA Server Guide](swiftkerbauth/ipa_server.md)
|
||||
* [RHEL IPA Client Guide](swiftkerbauth/ipa_client.md)
|
||||
* [Windows AD Server Guide](swiftkerbauth/AD_server.md)
|
||||
* [Windows AD Client Guide](swiftkerbauth/AD_client.md)
|
||||
* [Swiftkerbauth Guide](swiftkerbauth/swiftkerbauth_guide.md)
|
||||
|
||||
## <a name="keystone" />Keystone ##
|
||||
The Standard Openstack authentication service
|
||||
|
||||
### <a name="keystone_overview" />Overview ###
|
||||
[Keystone](https://wiki.openstack.org/wiki/Keystone) is the identity
|
||||
service for OpenStack, used for authentication and authorization when
|
||||
interacting with OpenStack services.
|
||||
|
||||
Configuring gluster-swift to authenticate against keystone is thus
|
||||
very useful because allows users to access a gluster-swift storage
|
||||
using the same credentials used for all other OpenStack services.
|
||||
|
||||
Currently, gluster-swift has a strict mapping of one account to a
|
||||
GlusterFS volume, and this volume has to be named after the **tenant
|
||||
id** (aka **project id**) of the user accessing it.
|
||||
|
||||
### <a name="keystone_installation" />Installation ###
|
||||
|
||||
Keystone authentication is performed using the
|
||||
[swift.common.middleware.keystone](http://docs.openstack.org/developer/swift/middleware.html#module-swift.common.middleware.keystoneauth)
|
||||
which is part of swift itself. It depends on keystone python APIs,
|
||||
contained in the package `python-keystoneclient`.
|
||||
|
||||
You can install `python-keystoneclient` from the packages of your
|
||||
distribution running:
|
||||
|
||||
* on Ubuntu:
|
||||
|
||||
sudo apt-get install python-keystoneclient
|
||||
|
||||
* on Fedora:
|
||||
|
||||
sudo yum install python-keystoneclient
|
||||
|
||||
otherwise you can install it via pip:
|
||||
|
||||
sudo pip install python-keystoneclient
|
||||
|
||||
### <a name="keystone_swift_accounts" />Creation of swift accounts ###
|
||||
|
||||
Due to current limitations of gluster-swift, you *must* create one
|
||||
volume for each Keystone tenant (project), and its name *must* match
|
||||
the *tenant id* of the tenant.
|
||||
|
||||
You can get the tenant id from the output of the command `keystone
|
||||
tenant-get`, for example:
|
||||
|
||||
# keystone tenant-get demo
|
||||
+-------------+----------------------------------+
|
||||
| Property | Value |
|
||||
+-------------+----------------------------------+
|
||||
| description | |
|
||||
| enabled | True |
|
||||
| id | a9b091f85e04499eb2282733ff7d183e |
|
||||
| name | demo |
|
||||
+-------------+----------------------------------+
|
||||
|
||||
will get the tenant id of the tenant `demo`.
|
||||
|
||||
Create the volume as usual
|
||||
|
||||
gluster volume create <tenant_id> <hostname>:<brick> ...
|
||||
gluster volume start <tenant_id>
|
||||
|
||||
Once you have created all the volumes you need you must re-generate
|
||||
the swift ring:
|
||||
|
||||
gluster-swift-gen-builders <tenant_id> [<tenant_id> ...]
|
||||
|
||||
After generation of swift rings you always have to restart the object,
|
||||
account and container servers.
|
||||
|
||||
### <a name="keystone_configuration" />Configuration of the proxy-server ###
|
||||
|
||||
You only need to configure the proxy-server in order to enable
|
||||
keystone authentication. The configuration is no different from what
|
||||
is done for a standard swift installation (cfr. for instance the
|
||||
related
|
||||
[swift documentation](http://docs.openstack.org/developer/swift/overview_auth.html#keystone-auth)),
|
||||
however we report it for completeness.
|
||||
|
||||
In the configuration file of the proxy server (usually
|
||||
`/etc/swift/proxy-server.conf`) you must modify the main pipeline and
|
||||
add `authtoken` and `keystoneauth`:
|
||||
|
||||
Was:
|
||||
~~~
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck cache ratelimit tempauth proxy-server
|
||||
~~~
|
||||
Change To:
|
||||
~~~
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck cache ratelimit authtoken keystoneauth proxy-server
|
||||
~~~
|
||||
|
||||
(note that we also removed `tempauth`, although this is not necessary)
|
||||
|
||||
Add configuration for the `authtoken` middleware by adding the following section:
|
||||
|
||||
[filter:authtoken]
|
||||
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||
auth_host = KEYSTONE_HOSTNAME
|
||||
auth_port = 35357
|
||||
auth_protocol = http
|
||||
auth_uri = http://KEYSTONE_HOSTNAME:5000/
|
||||
admin_tenant_name = TENANT_NAME
|
||||
admin_user = SWIFT_USERNAME
|
||||
admin_password = SWIFT_PASSWORD
|
||||
include_service_catalog = False
|
||||
|
||||
`SWIFT_USERNAME`, `SWIFT_PASSWORD` and `TENANT_NAME` will be used by
|
||||
swift to get an admin token from `KEYSTONE_HOSTNAME`, used to
|
||||
authorize user tokens so they must match an user in keystone with
|
||||
administrative privileges.
|
||||
|
||||
Add configuration for the `keystoneauth` middleware:
|
||||
|
||||
[filter:keystoneauth]
|
||||
use = egg:swift#keystoneauth
|
||||
# Operator roles is the role which user would be allowed to manage a
|
||||
# tenant and be able to create container or give ACL to others.
|
||||
operator_roles = Member, admin
|
||||
|
||||
Restart the `proxy-server` service.
|
||||
|
||||
### <a name="keystone_endpoint" />Configuring keystone endpoint ###
|
||||
|
||||
In order to be able to use the `swift` command line you also need to
|
||||
configure keystone by adding a service and its relative endpoint. Up
|
||||
to date documentation can be found in the OpenStack documentation, but
|
||||
we report it here for completeness:
|
||||
|
||||
First of all create the swift service of type `object-store`:
|
||||
|
||||
$ keystone service-create --name=swift \
|
||||
--type=object-store --description="Swift Service"
|
||||
    +-------------+----------------------------------+
|
||||
| Property | Value |
|
||||
+-------------+----------------------------------+
|
||||
| description | Swift Service |
|
||||
| id | 272efad2d1234376cbb911c1e5a5a6ed |
|
||||
| name | swift |
|
||||
| type | object-store |
|
||||
+-------------+----------------------------------+
|
||||
|
||||
and use the `id` of the service you just created to create the
|
||||
corresponding endpoint:
|
||||
|
||||
$ keystone endpoint-create \
|
||||
--region RegionOne \
|
||||
--service-id=<service_id> \
|
||||
--publicurl 'http://<swift-host>:8080/v1/AUTH_$(tenant_id)s' \
|
||||
--internalurl 'http://<swift-host>:8080/v1/AUTH_$(tenant_id)s' \
|
||||
--adminurl 'http://<swift-host>:8080/v1'
|
||||
|
||||
Now you should be able to use the swift command line to list the containers of your account with:
|
||||
|
||||
$ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
|
||||
-U <tenant-name>:<username> -K <password> list
|
||||
|
||||
to create a container
|
||||
|
||||
$ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
|
||||
-U <tenant-name>:<username> -K <password> post mycontainer
|
||||
|
||||
and upload a file
|
||||
|
||||
$ swift --os-auth-url http://<keystone-host>:5000/v2.0 \
|
||||
-U <tenant-name>:<username> -K <password> upload <filename>
|
||||
|
||||
## <a name="gswauth" />GSwauth ##
|
||||
|
||||
### <a name="gswauth_overview" />Overview ###
|
||||
An easily deployable GlusterFS aware authentication service based on [Swauth](http://gholt.github.com/swauth/).
|
||||
GSwauth is a WSGI Middleware that uses Swift itself as a backing store to
|
||||
maintain its metadata.
|
||||
|
||||
This model has the benefit of having the metadata available to all proxy servers
|
||||
and saving the data to a GlusterFS volume. To protect the metadata, the GlusterFS
|
||||
volume should only be able to be mounted by the systems running the proxy servers.
|
||||
|
||||
Currently, gluster-swift has a strict mapping of one account to a GlusterFS volume.
|
||||
Future releases, this will be enhanced to support multiple accounts per GlusterFS
|
||||
volume.
|
||||
|
||||
See <http://gholt.github.com/swauth/> for more information on Swauth.
|
||||
|
||||
### <a name="gswauth_install" />Installing GSwauth ###
|
||||
|
||||
1. GSwauth is installed by default with Gluster-Swift.
|
||||
|
||||
1. Create and start the `gsmetadata` gluster volume
|
||||
~~~
|
||||
gluster volume create gsmetadata <hostname>:<brick>
|
||||
gluster volume start gsmetadata
|
||||
~~~
|
||||
|
||||
1. run `gluster-swift-gen-builders` with all volumes that should be
|
||||
accessible by gluster-swift, including `gsmetadata`
|
||||
~~~
|
||||
gluster-swift-gen-builders gsmetadata <other volumes>
|
||||
~~~
|
||||
|
||||
1. Change your proxy-server.conf pipeline to have gswauth instead of tempauth:
|
||||
|
||||
Was:
|
||||
~~~
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache tempauth proxy-server
|
||||
~~~
|
||||
Change To:
|
||||
~~~
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache gswauth proxy-server
|
||||
~~~
|
||||
|
||||
1. Add to your proxy-server.conf the section for the GSwauth WSGI filter:
|
||||
~~~
|
||||
[filter:gswauth]
|
||||
use = egg:gluster_swift#gswauth
|
||||
set log_name = gswauth
|
||||
super_admin_key = gswauthkey
|
||||
metadata_volume = gsmetadata
|
||||
auth_type = sha1
|
||||
auth_type_salt = swauthsalt
|
||||
token_life = 86400
|
||||
max_token_life = 86400
|
||||
~~~
|
||||
|
||||
1. Restart your proxy server ``swift-init proxy reload``
|
||||
|
||||
##### Advanced options for GSwauth WSGI filter:
|
||||
|
||||
* `default-swift-cluster` - default storage-URL for newly created accounts. When attempting to authenticate with a user for the first time, the return information is the access token and the storage-URL where data for the given account is stored.
|
||||
|
||||
* `token_life` - set default token life. The default value is 86400 (24hrs).
|
||||
|
||||
* `max_token_life` - The maximum token life. Users can set a token lifetime when requesting a new token with header `x-auth-token-lifetime`. If the passed in value is bigger than the `max_token_life`, then `max_token_life` will be used.
|
||||
|
||||
### <a name="gswauth_user_roles" />User Roles
|
||||
There are only three user roles in GSwauth:
|
||||
|
||||
* A regular user has basically no rights. They need to be given both read/write privileges to any container.
|
||||
* The `admin` user is a super-user at the account level. This user can create and delete users for the account they are members of, and has both read and write privileges to all stored objects in that account.
|
||||
* The `reseller admin` user is a super-user at the cluster level. This user can create and delete accounts and users and has read/write privileges to all accounts under that cluster.
|
||||
|
||||
|
||||
| Role/Group | get list of accounts | get Account Details (users, etc) | Create Account | Delete Account | Get User Details | Create admin user | Create reseller-admin user | Create regular user | Delete admin user | Delete reseller-admin user | Delete regular user | Set Service Endpoints | Get Account Groups | Modify User |
|
||||
| ----------------------- |:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|
|
||||
| .super_admin (username) |x|x|x|x|x|x|x|x|x|x|x|x|x|x|
|
||||
| .reseller_admin (group) |x|x|x|x|x|x| |x|x| |x|x|x|x|
|
||||
| .admin (group) | |x| | |x|x| |x|x| |x| |x|x|
|
||||
| regular user (type) | | | | | | | | | | | | | | |
|
||||
|
||||
|
||||
### <a name="gswauth_tools" />GSwauth Tools
|
||||
GSwauth provides cli tools to facilitate managing accounts and users. All tools have some options in common:
|
||||
|
||||
#### Common Options:
|
||||
* -A, --admin-url: The URL to the auth
|
||||
* Default: `http://127.0.0.1:8080/auth/`
|
||||
* -U, --admin-user: The user with admin rights to perform action
|
||||
* Default: `.super_admin`
|
||||
* -K, --admin-key: The key for the user with admin rights to perform action
|
||||
* no default value
|
||||
|
||||
#### gswauth-prep:
|
||||
Prepare the gluster volume where gswauth will save its metadata.
|
||||
|
||||
~~~
|
||||
gswauth-prep [option]
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-prep -A http://10.20.30.40:8080/auth/ -K gswauthkey
|
||||
~~~
|
||||
|
||||
#### gswauth-add-account:
|
||||
Create account. Currently there's a requirement that an account must map to a gluster volume. The gluster volume must not exist at the time when the account is being created.
|
||||
|
||||
~~~
|
||||
gswauth-add-account [option] <account_name>
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-add-account -K gswauthkey <account_name>
|
||||
~~~
|
||||
|
||||
#### gswauth-add-user:
|
||||
Create user. If the provided account does not exist, it will be automatically created before creating the user.
|
||||
Use the `-r` flag to create a reseller admin user and the `-a` flag to create an admin user. To change the password or make the user an admin, just run the same command with the new information.
|
||||
|
||||
~~~
|
||||
gswauth-add-user [option] <account_name> <user> <password>
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-add-user -K gswauthkey -a test ana anapwd
|
||||
~~~
|
||||
|
||||
**Change password examples**
|
||||
|
||||
Command to update password/key of regular user:
|
||||
|
||||
~~~
|
||||
gswauth-add-user -U account1:user1 -K old_pass account1 user1 new_pass
|
||||
~~~
|
||||
|
||||
Command to update password/key of account admin:
|
||||
|
||||
~~~
|
||||
gswauth-add-user -U account1:admin -K old_pass -a account1 admin new_pass
|
||||
~~~
|
||||
|
||||
Command to update password/key of reseller_admin:
|
||||
|
||||
~~~
|
||||
gswauth-add-user -U account1:radmin -K old_pass -r account1 radmin new_pass
|
||||
~~~
|
||||
|
||||
#### gswauth-delete-account:
|
||||
Delete an account. An account cannot be deleted if it still contains users, an error will be returned.
|
||||
|
||||
~~~
|
||||
gswauth-delete-account [option] <account_name>
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-delete-account -K gswauthkey test
|
||||
~~~
|
||||
|
||||
#### gswauth-delete-user:
|
||||
Delete a user.
|
||||
|
||||
~~~
|
||||
gswauth-delete-user [option] <account_name> <user>
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-delete-user -K gswauthkey test ana
|
||||
~~~
|
||||
|
||||
#### gswauth-set-account-service:
|
||||
Sets a service URL for an account. Can only be set by a reseller admin.
|
||||
This command can be used to change the default storage URL for a given account.
|
||||
All accounts have the same storage-URL default value, which comes from the `default-swift-cluster`
|
||||
option.
|
||||
|
||||
~~~
|
||||
gswauth-set-account-service [options] <account> <service> <name> <value>
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-set-account-service -K gswauthkey test storage local http://newhost:8080/v1/AUTH_test
|
||||
~~~
|
||||
|
||||
#### gswauth-list:
|
||||
List information about accounts and users
|
||||
|
||||
* If `[account]` and `[user]` are omitted, a list of accounts will be output.
|
||||
* If `[account]` is included but not `[user]`, a list of users within the account will be output.
|
||||
* If `[account]` and `[user]` are included, a list of groups the user belongs to will be output.
|
||||
* If the `[user]` is `.groups`, the active groups for the account will be listed.
|
||||
|
||||
The default output format is tabular. `-p` changes the output to plain text. `-j` changes the
|
||||
output to JSON format. This will print all information about given account or user, including
|
||||
stored password
|
||||
|
||||
~~~
|
||||
gswauth-list [options] [account] [user]
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-list -K gswauthkey test ana
|
||||
+----------+
|
||||
| Groups |
|
||||
+----------+
|
||||
| test:ana |
|
||||
| test |
|
||||
| .admin |
|
||||
+----------+
|
||||
~~~
|
||||
|
||||
#### gswauth-cleanup-tokens:
|
||||
Delete expired tokens. Users also have the option to provide the expected life of tokens, delete all tokens or all tokens for a given account.
|
||||
|
||||
Options:
|
||||
|
||||
* `-t`, `--token-life`: The expected life of tokens, token objects modified more than this number of
|
||||
seconds ago will be checked for expiration (default: 86400).
|
||||
* `--purge`: Purge all tokens for a given account whether the tokens have expired or not.
|
||||
* `--purge-all`: Purges all tokens for all accounts and users whether the tokens have expired or not.
|
||||
|
||||
~~~
|
||||
gswauth-cleanup-tokens [options]
|
||||
~~~
|
||||
|
||||
Example:
|
||||
|
||||
~~~
|
||||
gswauth-cleanup-tokens -K gswauthkey --purge test
|
||||
~~~
|
||||
|
||||
### <a name="gswauth_authenticate" />Authenticating a user with swift client
|
||||
There are two methods of accessing data using the swift client. The first (and most simple one) is by providing the user name and password everytime. The swift client takes care of acquiring the token from gswauth. See example below:
|
||||
|
||||
~~~
|
||||
swift -A http://127.0.0.1:8080/auth/v1.0 -U test:ana -K anapwd upload container1 README.md
|
||||
~~~
|
||||
|
||||
The second method is a two-step process, but it allows users to only provide their username and password once. First users must authenticate with a username and password to get a token and the storage URL. Then, users can make the object requests to the storage URL with the given token.
|
||||
|
||||
It is important to remember that tokens expire, so the authentication process needs to be repeated every so often.
|
||||
|
||||
Authenticate a user with the curl command
|
||||
|
||||
~~~
|
||||
curl -v -H 'X-Storage-User: test:ana' -H 'X-Storage-Pass: anapwd' -k http://localhost:8080/auth/v1.0
|
||||
...
|
||||
< X-Auth-Token: AUTH_tk7e68ef4698f14c7f95af07ab7b298610
|
||||
< X-Storage-Url: http://127.0.0.1:8080/v1/AUTH_test
|
||||
...
|
||||
~~~
|
||||
Now, the user can access the object-storage using the swift client with the given token and storage URL
|
||||
|
||||
~~~
|
||||
bash-4.2$ swift --os-auth-token=AUTH_tk7e68ef4698f14c7f95af07ab7b298610 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 README.md
|
||||
README.md
|
||||
bash-4.2$
|
||||
bash-4.2$ swift --os-auth-token=AUTH_tk7e68ef4698f14c7f95af07ab7b298610 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test list container1
|
||||
README.md
|
||||
~~~
|
||||
**Note:** Reseller admins must always use the second method to acquire a token, in order to be given access to other accounts different than his own. The first method of using the username and password will give them access only to their own accounts.
|
||||
|
||||
## <a name="swiftkerbauth" />Swiftkerbauth ##
|
||||
Kerberos authentication filter
|
||||
|
||||
Carsten Clasohm implemented a new authentication filter for swift
|
||||
that uses Kerberos tickets for single sign on authentication, and
|
||||
grants administrator permissions based on the users group membership
|
||||
in a directory service like Red Hat Enterprise Linux Identity Management
|
||||
or Microsoft Active Directory.
|
@ -1,2 +0,0 @@
|
||||
# Overview and Concepts
|
||||
TBD
|
@ -1,75 +0,0 @@
|
||||
# Object Expiration
|
||||
|
||||
## Contents
|
||||
* [Overview](#overview)
|
||||
* [Setup](#setup)
|
||||
* [Using object expiration](#using)
|
||||
* [Running object-expirer daemon](#running-daemon)
|
||||
|
||||
<a name="overview" />
|
||||
## Overview
|
||||
The Object Expiration feature offers **scheduled deletion of objects**. The client would use the *X-Delete-At* or *X-Delete-After* headers during an object PUT or POST and the cluster would automatically quit serving that object at the specified time and would shortly thereafter remove the object from the GlusterFS volume.
|
||||
|
||||
Expired objects however do appear in container listings until they are deleted by object-expirer daemon. This behaviour is expected: https://bugs.launchpad.net/swift/+bug/1069849
|
||||
|
||||
<a name="setup" />
|
||||
## Setup
|
||||
Object expirer uses a separate account (a GlusterFS volume, for now, until multiple accounts per volume is implemented) named *gsexpiring*. You will have to [create a GlusterFS volume](quick_start_guide.md#gluster-volume-setup) by that name.
|
||||
|
||||
Object-expirer uses the */etc/swift/object-expirer.conf* configuration file. Make sure that it exists. If not, you can copy it from */etc* directory of gluster-swift source repo.
|
||||
|
||||
<a name="using" />
|
||||
## Using object expiration
|
||||
|
||||
**PUT an object with X-Delete-At header using curl**
|
||||
|
||||
~~~
|
||||
curl -v -X PUT -H 'X-Delete-At: 1392013619' http://127.0.0.1:8080/v1/AUTH_test/container1/object1 -T ./localfile
|
||||
~~~
|
||||
|
||||
**PUT an object with X-Delete-At header using swift client**
|
||||
|
||||
~~~
|
||||
swift --os-auth-token=AUTH_tk99a39aecc3dd4f80b2b1e801d00df846 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 ./localfile --header 'X-Delete-At: 1392013619'
|
||||
~~~
|
||||
|
||||
where *X-Delete-At* header takes a Unix Epoch timestamp in integer. For example, the current time in Epoch notation can be found by running this command:
|
||||
|
||||
~~~
|
||||
date +%s
|
||||
~~~
|
||||
|
||||
|
||||
**PUT an object with X-Delete-After header using curl**
|
||||
|
||||
~~~
|
||||
curl -v -X PUT -H 'X-Delete-After: 3600' http://127.0.0.1:8080/v1/AUTH_test/container1/object1 -T ./localfile
|
||||
~~~
|
||||
|
||||
**PUT an object with X-Delete-At header using swift client**
|
||||
|
||||
~~~
|
||||
swift --os-auth-token=AUTH_tk99a39aecc3dd4f80b2b1e801d00df846 --os-storage-url=http://127.0.0.1:8080/v1/AUTH_test upload container1 ./localfile --header 'X-Delete-After: 3600'
|
||||
~~~
|
||||
|
||||
where *X-Delete-After* header takes a integer number of seconds, after which the object expires. The proxy server that receives the request will convert this header into an X-Delete-At header using its current time plus the value given.
|
||||
|
||||
<a name="running-daemon" />
|
||||
## Running object-expirer daemon
|
||||
The object-expirer daemon runs a pass once every X seconds (configurable using *interval* option in config file). For every pass it makes, it queries the *gsexpiring* account for "tracker objects". Based on (timestamp, path) present in name of "tracker objects", object-expirer then deletes the actual object and the corresponding tracker object.
|
||||
|
||||
|
||||
To run object-expirer forever as a daemon:
|
||||
~~~
|
||||
swift-init object-expirer start
|
||||
~~~
|
||||
|
||||
To run just once:
|
||||
~~~
|
||||
swift-object-expirer -o -v /etc/swift/object-expirer.conf
|
||||
~~~
|
||||
|
||||
**For more information, visit:**
|
||||
http://docs.openstack.org/developer/swift/overview_expiring_objects.html
|
||||
|
||||
|
@ -1,67 +0,0 @@
|
||||
# Syncing Gluster-Swift with Swift
|
||||
|
||||
## Create a release
|
||||
Create a release in launchpad.net so that we can place the latest swift source for download. We'll place the source here, and it will allow tox in gluster-swift to download the latest code.
|
||||
|
||||
## Upload swift release
|
||||
|
||||
* Clone the git swift repo
|
||||
* Go to the release tag or just use the latest
|
||||
* Type the following to package the swift code:
|
||||
|
||||
```
|
||||
$ python setup.py sdist
|
||||
$ ls dist
|
||||
```
|
||||
|
||||
* Take the file in the `dist` directory and upload it to the new release we created it on launchpad.net.
|
||||
* Alternatively, if we are syncing with a Swift version which is already released, we can get the tar.gz file from Swift launchpad page and upload the same to gluster-swift launchpad.
|
||||
|
||||
## Setup Tox
|
||||
Now that the swift source is available on launchpad.net, copy its link location and update tox.ini in gluster-swift with the new link.
|
||||
|
||||
## Update tests
|
||||
This part is a little more complicated and now we need to *merge* the latest tests with ours.
|
||||
|
||||
[meld](http://meldmerge.org/) is a great tool to make this work easier. The 3-way comparison feature of meld comes handy to compare 3 version of same file from:
|
||||
|
||||
* Latest swift (say v1.13)
|
||||
* Previous swift (say v1.12)
|
||||
* gluster-swift (v1.12)
|
||||
|
||||
Files that need to be merged:
|
||||
|
||||
* Update unit tests
|
||||
|
||||
```
|
||||
$ export SWIFTDIR=../swift
|
||||
$ meld $SWIFTDIR/tox.ini tox.ini
|
||||
$ meld $SWIFTDIR/test-requirements.txt tools/test-requires
|
||||
$ meld $SWIFTDIR/requirements.txt tools/requirements.txt
|
||||
$ meld $SWIFTDIR/test/unit/proxy/test_servers.py test/unit/proxy/test_server.py
|
||||
$ cp $SWIFTDIR/test/unit/proxy/controllers/*.py test/unit/proxy/controllers
|
||||
$ meld $SWIFTDIR/test/unit/__init__.py test/unit/__init__.py
|
||||
```
|
||||
|
||||
* Update all the functional tests
|
||||
First check if there are any new files in the swift functional test directory. If there are, copy them over.
|
||||
|
||||
* Remember to `git add` any new files
|
||||
|
||||
* Now merge the existing ones:
|
||||
|
||||
```
|
||||
for i in $SWIFTDIR/test/functional/*.py ; do
|
||||
meld $i test/functional/`basename $i`
|
||||
done
|
||||
```
|
||||
|
||||
## Update the version
|
||||
If needed, update the version now in `gluster/swift/__init__.py`.
|
||||
|
||||
## Upload the patch
|
||||
Upload the patch to Gerrit.
|
||||
|
||||
## Update the release in launchpad.net
|
||||
Upload the gluster-swift*.tar.gz built by Jenkins to launchpad.net once the fix has been committed to the main branch.
|
||||
|
@ -1,206 +0,0 @@
|
||||
#AD client setup guide
|
||||
|
||||
###Contents
|
||||
* [Setup Overview] (#setup)
|
||||
* [Configure Network] (#network)
|
||||
* [Installing AD Client] (#AD-client)
|
||||
|
||||
<a name="setup" />
|
||||
###Setup Overview
|
||||
|
||||
This guide talks about adding fedora linux client to windows domain.
|
||||
The test setup included a client machine with Fedora 19 installed
|
||||
on it with all the latest packages updated. The crux is to add this linux
|
||||
machine to Windows Domain. This linux box is expected to act as RHS node and on which swiftkerbauth,
|
||||
apachekerbauth code would run.
|
||||
|
||||
Set hostname (FQDN) to fcclient.winad.com
|
||||
|
||||
# hostnamectl set-hostname "fcclient.winad.com"
|
||||
|
||||
# hostname "fcclient.winad.com"
|
||||
|
||||
|
||||
<a name="network" />
|
||||
### Configure client
|
||||
|
||||
* Deploy Fedora linux 19.
|
||||
|
||||
* Update the system with latest packages.
|
||||
|
||||
* Configure SELinux security parameters.
|
||||
|
||||
* Install & configure samba
|
||||
|
||||
* Configure DNS
|
||||
|
||||
* Synchronize the time services
|
||||
|
||||
* Join Domain
|
||||
|
||||
* Install / Configure Kerberos Client
|
||||
|
||||
|
||||
The document assumes that installing Fedora Linux and configuring SELinux
|
||||
parameters to 'permissive' is known already.
|
||||
|
||||
###Install & Configure Samba:
|
||||
# yum -y install samba samba-client samba-common samba-winbind
|
||||
samba-winbind-clients
|
||||
|
||||
# service smb start
|
||||
|
||||
# ps -aef | grep smb
|
||||
# chkconfig smb on
|
||||
|
||||
###Synchronize time services
|
||||
The kerberos authentication and most of the DNS functionality could fail with
|
||||
clock skew if times are not synchronized.
|
||||
|
||||
# cat /etc/ntp.conf
|
||||
server ns1.bos.redhat.com
|
||||
server 10.5.26.10
|
||||
|
||||
# service ntpd stop
|
||||
|
||||
# ntpdate 10.16.255.2
|
||||
|
||||
# service ntpd start
|
||||
|
||||
#chkconfig ntpd on
|
||||
|
||||
Check if Windows server in the whole environment is also time synchronized with
|
||||
same source.
|
||||
|
||||
# C:\Users\Administrator>w32tm /query /status | find "Source"
|
||||
|
||||
Source: ns1.xxx.xxx.com
|
||||
|
||||
###Configure DNS on client
|
||||
Improperly resolved hostname is the leading cause in authentication failures.
|
||||
Best practice is to configure fedora client to use Windows DNS.
|
||||
'nameserver' below is the IP address of the windows server.
|
||||
# cat /etc/resolv.conf
|
||||
domain server.winad.com
|
||||
search server.winad.com
|
||||
nameserver 10.nn.nnn.3
|
||||
|
||||
###Set the hostname of the client properly (FQDN)
|
||||
# cat /etc/sysconfig/network
|
||||
HOSTNAME=fcclient.winad.com
|
||||
|
||||
|
||||
###Install & Configure kerberos client
|
||||
|
||||
# yum -y install krb5-workstation
|
||||
|
||||
Edit the /etc/krb5.conf as follows:
|
||||
|
||||
# cat /etc/krb5.conf
|
||||
[logging]
|
||||
default = FILE:/var/log/krb5libs.log
|
||||
kdc = FILE:/var/log/krb5kdc.log
|
||||
admin_server = FILE:/var/log/kadmind.log
|
||||
|
||||
[libdefaults]
|
||||
default_realm = WINAD.COM
|
||||
dns_lookup_realm = false
|
||||
dns_lookup_kdc = false
|
||||
ticket_lifetime = 24h
|
||||
renew_lifetime = 7d
|
||||
forwardable = true
|
||||
|
||||
[realms]
|
||||
WINAD.COM = {
|
||||
kdc = server.winad.com
|
||||
admin_server = server.winad.com
|
||||
}
|
||||
[domain_realm]
|
||||
.demo = server.winad.com
|
||||
demo = server.winad.com
|
||||
|
||||
###Join Domain
|
||||
Fire command 'system-config-authentication' on client. This should display a
|
||||
graphical wizard. Below inputs would help configure this wizard.
|
||||
|
||||
- User account data base = winbind
|
||||
- winbind domain = winad
|
||||
- security model = ads
|
||||
- winbind ads realm = winad.com
|
||||
- winbind controller = server.winad.com
|
||||
- template shell = /bin/bash
|
||||
- let the other options be as is to default.
|
||||
- Perform Join domain and apply settings and quit. Please note this join should
|
||||
not see any errors. This makes the client fedora box to join the windows
|
||||
domain.
|
||||
|
||||
###Configure the kerberos client
|
||||
This would bring the users/groups from Windows Active directory to this
|
||||
fedora client.
|
||||
|
||||
Edit /etc/samba/smb.conf file to have below parameters in the global section.
|
||||
|
||||
# cat /etc/samba/smb.conf
|
||||
[global]
|
||||
workgroup = winad
|
||||
realm = winad.com
|
||||
server string = Samba Server Version %v
|
||||
security = ADS
|
||||
allow trusted domains = No
|
||||
password server = server.winad.com
|
||||
log file = /var/log/samba/log.%m
|
||||
max log size = 50
|
||||
idmap uid = 10000-19999
|
||||
idmap gid = 10000-19999
|
||||
template shell = /bin/bash
|
||||
winbind separator = +
|
||||
winbind use default domain = Yes
|
||||
idmap config REFARCHAD:range = 10000000-19999999
|
||||
idmap config REFARCHAD:backend = rid
|
||||
cups options = raw
|
||||
|
||||
|
||||
# service smb stop
|
||||
|
||||
# service winbind stop
|
||||
|
||||
# tar -cvf /var/tmp/samba-cache-backup.tar /var/lib/samba
|
||||
|
||||
# ls -la /var/tmp/samba-cache-backup.tar
|
||||
|
||||
# rm -f /var/lib/samba/*
|
||||
|
||||
|
||||
Verify that no kerberos ticket available and cached.
|
||||
|
||||
# kdestroy
|
||||
|
||||
# klist
|
||||
|
||||
Rejoin the domain.
|
||||
|
||||
# net join -S server -U Administrator
|
||||
|
||||
Test that client rejoined the domain.
|
||||
|
||||
# net ads info
|
||||
|
||||
Restart smb and winbind service.
|
||||
|
||||
# wbinfo --domain-users
|
||||
|
||||
Perform kinit for the domain users prepared on active directory. This obtains
|
||||
the kerberos ticket for user 'auth_admin'
|
||||
|
||||
# kinit auth_admin
|
||||
|
||||
# id -Gn auth_admin
|
||||
|
||||
###Notes
|
||||
Obtaining the HTTP service principal & keytab file and installing it with
|
||||
swiftkerbauth is added to swiftkerbauth_guide
|
||||
|
||||
###References
|
||||
Reference Document for adding Linux box to windows domain :
|
||||
Integrating Red Hat Enterprise Linux 6
|
||||
with Active Directory
|
@ -1,119 +0,0 @@
|
||||
#Windows Active Directory & Domain Controller Server Guide
|
||||
|
||||
###Contents
|
||||
* [Setup Overview] (#Setup)
|
||||
* [Installing Active Directory Services] (#AD-server)
|
||||
* [Configuring DNS] (#DNS)
|
||||
* [Adding Users and Groups] (#users-groups)
|
||||
|
||||
|
||||
<a name="Setup" />
|
||||
###Setup Overview
|
||||
|
||||
The setup includes a server machine installed with Windows 2008 R2 Server, with
|
||||
Domain Controller, Active Directory services & DNS server installed alongside.
|
||||
The steps to install windows operating system and above servers can be found
|
||||
on MicroSoft Documentation. This windows Active Directory server would act as an
|
||||
authentication server in the whole setup. This would provide the access control
|
||||
and permissions for users on certain data objects.
|
||||
|
||||
|
||||
Windows 2008 R2 deployment:
|
||||
|
||||
http://technet.microsoft.com/en-us/library/dd283085.aspx
|
||||
|
||||
|
||||
Configuring Active Directory, Domain Services, DNS server:
|
||||
|
||||
http://technet.microsoft.com/en-us/library/cc770946.aspx
|
||||
|
||||
|
||||
<a name="AD-server" />
|
||||
###Installing AD Server
|
||||
|
||||
Administrators need to follow simple instructions in Server Manager on Windows
|
||||
2008, and should add Active Directory Domain Services & DNS server. It is
|
||||
recommended to use static IP for DNS server. Preferred Hostname(FQDN) for
|
||||
Windows server could be of format hostname 'server.winad.com' where
|
||||
'winad.com' is a domain name.
|
||||
|
||||
Following tips would help prepare a test setup neatly.
|
||||
|
||||
- Select Active Directory Domain services wizard in Server Manager
|
||||
- Move on to install it with all the pre-requisites, e.g. .NET framework etc.
|
||||
- Configure Active directory after installation via expanding the 'Roles'
|
||||
section in the server manager.
|
||||
- Create a new Domain in the New Forest.
|
||||
- Type the FQDN, winad.com
|
||||
- Set Forest functional level Windows 2008 R2.
|
||||
- Select additional options for this domain controller as DNS server.
|
||||
- Leave the log locations to default provided by wizard.
|
||||
- Set the Administrator Password carefully.
|
||||
- That's it. You are done configuring active directory.
|
||||
|
||||
|
||||
<a name="dns" />
|
||||
###Configuring DNS
|
||||
|
||||
This section explains configuring the DNS server installed on Windows 2008 R2
|
||||
server. You must know about
|
||||
|
||||
- Forward lookup zone
|
||||
|
||||
- Reverse lookup zone
|
||||
|
||||
- Zone type
|
||||
|
||||
A forward lookup zone is simply a way to resolve hostnames to IP address.
|
||||
A reverse lookup zone is to lookup DNS hostname of the host IP.
|
||||
|
||||
Following tips would help configure the Zones on DNS server.
|
||||
|
||||
- Create a Forward lookup zone.
|
||||
- Create it a primary zone.
|
||||
- Add the Clients using their ip addresses and FQDN to this forward lookup
|
||||
zones.
|
||||
- This would add type 'A' record for that host on DNS server.
|
||||
- Similarly create a Reverse lookup zone.
|
||||
- Add clients 'PTR' record to this zone via browsing through the forward
|
||||
zones clients.
|
||||
|
||||
The above setup can be tested on client once it joins the domain using 'dig'
|
||||
command as mentioned below.
|
||||
|
||||
|
||||
On client:
|
||||
|
||||
# dig fcclient.winad.com
|
||||
This should yield an Answer section mentioning its IP address.
|
||||
|
||||
Reverse lookup can be tested using
|
||||
|
||||
# 'dig -t ptr 101.56.168.192.in-addr.arpa.'
|
||||
The answer section should state the FQDN of the client.
|
||||
|
||||
Repeat the above steps on client for Windows AD server as well.
|
||||
|
||||
|
||||
<a name="users-groups" />
|
||||
###Adding users and groups
|
||||
|
||||
The following convention is to be followed in creating group names:
|
||||
|
||||
<reseller-prefix>\_<volume-name>
|
||||
|
||||
<reseller-prefix>\_<account-name>
|
||||
|
||||
As of now, account=volume=group
|
||||
|
||||
For example:
|
||||
|
||||
AUTH\_test
|
||||
|
||||
Adding groups and users to the Windows domain is easy task.
|
||||
|
||||
- Start -> Administrative Tools -> Active Directory Users & Computers
|
||||
- Expand the domain name which was prepared earlier. e.g winad.com
|
||||
- Add groups with appropriate access rights.
|
||||
- Add users to the group with appropriate permissions.
|
||||
- Make sure you set password for users prepared on AD server.
|
@ -1,105 +0,0 @@
|
||||
# Architecture
|
||||
|
||||
The Swift API is HTTP-based. As described in the Swift documentation
|
||||
[1], clients first make a request to an authentication URL, providing
|
||||
a username and password. The reply contains a token which is used in
|
||||
all subsequent requests.
|
||||
|
||||
Swift has a chain of filters through which all client requests go. The
|
||||
filters to use are configured with the pipeline parameter in
|
||||
/etc/swift/proxy-server.conf:
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = healthcheck cache tempauth proxy-server
|
||||
|
||||
For the single sign authentication, we added a new filter called
|
||||
"kerbauth" and put it into the filter pipeline in place of tempauth.
|
||||
|
||||
The filter checks the URL for each client request. If it matches the
|
||||
authentication URL, the client is redirected to a URL on a different
|
||||
server (on the same machine). The URL is handled by a CGI script, which
|
||||
is set up to authenticate the client with Kerberos negotiation, retrieve
|
||||
the user's system groups [2], store them in a memcache ring shared with
|
||||
the Swift server, and return the authentication token to the client.
|
||||
|
||||
When the client provides the token as part of a resource request, the
|
||||
kerbauth filter checks it against its memcache, grants administrator
|
||||
rights based on the group membership retrieved from memcache, and
|
||||
either grants or denies the resource access.
|
||||
|
||||
[1] http://docs.openstack.org/api/openstack-object-storage/1.0/content/authentication-object-dev-guide.html
|
||||
|
||||
[2] The user data and system groups are usually provided by Red Hat
|
||||
Enterprise Linux identity Management or Microsoft Active
|
||||
Directory. The script relies on the system configuration to be set
|
||||
accordingly (/etc/nsswitch.conf).
|
||||
|
||||
*****
|
||||
|
||||
## kerbauth.py
|
||||
|
||||
The script kerbauth.py began as a copy of the tempauth.py script from
|
||||
from tempauth middleware. It contains the following modifications, among
|
||||
others:
|
||||
|
||||
In the __init__ method, we read the ext_authentication_url parameter
|
||||
from /etc/swift/proxy-server.conf. This is the URL that clients are
|
||||
redirected to when they access either the Swift authentication URL, or
|
||||
when they request a resource without a valid authentication token.
|
||||
|
||||
The configuration in proxy-server.conf looks like this:
|
||||
|
||||
[filter:kerbauth]
|
||||
use = egg:swiftkerbauth#kerbauth
|
||||
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
|
||||
|
||||
The authorize method was changed so that global administrator rights
|
||||
are granted if the user is a member of the auth_reseller_admin
|
||||
group. Administrator rights for a specific account like vol1 are
|
||||
granted if the user is a member of the auth_vol1 group. [3]
|
||||
|
||||
The denied_response method was changed to return a HTTP redirect to
|
||||
the external authentication URL if no valid token was provided by the
|
||||
client.
|
||||
|
||||
Most of the handle_get_token method was moved to the external
|
||||
authentication script. This method now returns a HTTP redirect.
|
||||
|
||||
In the __call__ and get_groups method, we removed support for the
|
||||
HTTP_AUTHORIZATION header, which is only needed when Amazon S3 is
|
||||
used.
|
||||
|
||||
Like tempauth.py, kerbauth.py uses a Swift wrapper to access
|
||||
memcache. This wrapper converts the key to an MD5 hash and uses the
|
||||
hash value to determine on which of a pre-defined list of servers to
|
||||
store the data.
|
||||
|
||||
[3] "auth" is the default reseller prefix, and would be different if
|
||||
the reseller_prefix parameter in proxy-server.conf was set.
|
||||
|
||||
## swift-auth CGI script
|
||||
|
||||
swift-auth resides on an Apache server and assumes that Apache is
|
||||
configured to authenticate the user before this script is
|
||||
executed. The script retrieves the username from the REMOTE_USER
|
||||
environment variable, and checks if there already is a token for this
|
||||
user in the memcache ring. If not, it generates a new one, retrieves
|
||||
the user's system groups with "id -Gn USERNAME", stores this
|
||||
information in the memcache ring, and returns the token to the client.
|
||||
|
||||
To allow the CGI script to connect to memcache, the SELinux booleans
|
||||
httpd_can_network_connect and httpd_can_network_memcache had to be
|
||||
set.
|
||||
|
||||
The tempauth filter uses the uuid module to generate token
|
||||
strings. This module creates and runs temporary files, which leads to
|
||||
AVC denial messages in /var/log/audit/audit.log when used from an
|
||||
Apache CGI script. While the module still works, the audit log would
|
||||
grow quickly. Instead of writing an SELinux policy module to allow or
|
||||
to silently ignore these accesses, the swift-auth script uses the
|
||||
"random" module for generating token strings.
|
||||
|
||||
Red Hat Enterprise Linux 6 comes with Python 2.6 which only provides
|
||||
method to list the locally defined user groups. To include groups from
|
||||
Red Hat Enterprise Linux Identity Management and in the future from
|
||||
Active Directory, the "id" command is run in a subprocess.
|
@ -1,80 +0,0 @@
|
||||
#IPA Client Guide
|
||||
|
||||
##Contents
|
||||
* [Setup Overview] (#setup)
|
||||
* [Configure Network] (#network)
|
||||
* [Installing IPA Client] (#ipa-client)
|
||||
|
||||
<a name="setup" />
|
||||
##Setup Overview
|
||||
We have used a F18 box as IPA client machine and used FreeIPA client.
|
||||
This document borrows instructions from the following more detailed guide.
|
||||
[RHEL 6 Identity Management Guide][]
|
||||
|
||||
|
||||
<a name="network" />
|
||||
## Configure network
|
||||
|
||||
Set hostname (FQDN) to client.rhelbox.com
|
||||
> hostnamectl set-hostname "client.rhelbox.com"
|
||||
>
|
||||
> hostname "client.rhelbox.com"
|
||||
|
||||
Add following to /etc/sysconfig/network:
|
||||
|
||||
HOSTNAME=client.rhelbox.com
|
||||
|
||||
Add the following to /etc/hostname
|
||||
|
||||
client.rhelbox.com
|
||||
|
||||
Add the following to /etc/hosts
|
||||
|
||||
192.168.56.110 server.rhelbox.com server
|
||||
192.168.56.101 client.rhelbox.com client
|
||||
|
||||
Logout and login again and verify hostname :
|
||||
> hostname --fqdn
|
||||
|
||||
Edit */etc/resolv.conf* to add this at beginning of file
|
||||
|
||||
nameserver 192.168.56.110
|
||||
|
||||
Warning: NetworkManager changes resolv.conf on restart
|
||||
|
||||
Turn off firewall
|
||||
> service iptables stop
|
||||
>
|
||||
> chkconfig iptables off
|
||||
|
||||
<a name="ipa-client" />
|
||||
## Installing IPA Client
|
||||
|
||||
Install IPA client packages:
|
||||
|
||||
For RHEL:
|
||||
> yum install ipa-client ipa-admintools
|
||||
|
||||
For Fedora:
|
||||
> yum install freeipa-client freeipa-admintools
|
||||
|
||||
Install IPA client and add to domain:
|
||||
>ipa-client-install --enable-dns-updates
|
||||
|
||||
Discovery was successful!
|
||||
Hostname: client.rhelbox.com
|
||||
Realm: RHELBOX.COM
|
||||
DNS Domain: rhelbox.com
|
||||
IPA Server: server.rhelbox.com
|
||||
BaseDN: dc=rhelbox,dc=com
|
||||
|
||||
Continue to configure the system with these values? [no]: yes
|
||||
User authorized to enroll computers: admin
|
||||
|
||||
Check if client is configured correctly:
|
||||
> kinit admin
|
||||
>
|
||||
> getent passwd admin
|
||||
|
||||
|
||||
[RHEL 6 Identity Management Guide]: https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Identity_Management_Guide/
|
@ -1,146 +0,0 @@
|
||||
#IPA Server Guide
|
||||
|
||||
##Contents
|
||||
* [Setup Overview] (#setup)
|
||||
* [Configure Network] (#network)
|
||||
* [Installing IPA Server] (#ipa-server)
|
||||
* [Configuring DNS] (#dns)
|
||||
* [Adding Users and Groups] (#users-groups)
|
||||
|
||||
|
||||
<a name="setup" />
|
||||
##Setup Overview
|
||||
We have used a RHEL 6.4 box as IPA and DNS server. This document borrows
|
||||
instructions from the following more detailed guide.
|
||||
[RHEL 6 Identity Management Guide][]
|
||||
|
||||
|
||||
<a name="network" />
|
||||
## Configure network
|
||||
|
||||
Change hostname (FQDN) to server.rhelbox.com
|
||||
> hostname "server.rhelbox.com"
|
||||
|
||||
Add following to */etc/sysconfig/network* file
|
||||
|
||||
HOSTNAME=server.rhelbox.com
|
||||
|
||||
Add the following to */etc/hosts* file
|
||||
|
||||
192.168.56.110 server.rhelbox.com server
|
||||
192.168.56.101 client.rhelbox.com client
|
||||
|
||||
Logout and login again and verify new hostname
|
||||
> hostname --fqdn
|
||||
|
||||
Turn off firewall
|
||||
> service iptables stop
|
||||
>
|
||||
> chkconfig iptables off
|
||||
|
||||
|
||||
<a name="ipa-server" />
|
||||
## Installing IPA Server
|
||||
|
||||
Install IPA server packages and DNS dependencies
|
||||
> yum install ipa-server bind bind-dyndb-ldap
|
||||
|
||||
Run the following interactive setup to install IPA server with DNS
|
||||
> ipa-server-install --setup-dns
|
||||
|
||||
The IPA Master Server will be configured with:
|
||||
Hostname: server.rhelbox.com
|
||||
IP address: 192.168.56.110
|
||||
Domain name: rhelbox.com
|
||||
Realm name: RHELBOX.COM
|
||||
|
||||
BIND DNS server will be configured to serve IPA domain with:
|
||||
Forwarders: No forwarders
|
||||
Reverse zone: 56.168.192.in-addr.arpa.
|
||||
|
||||
The installation may take some time.
|
||||
|
||||
Check if IPA is installed correctly :
|
||||
> kinit admin
|
||||
>
|
||||
> ipa user-find admin
|
||||
|
||||
|
||||
<a name="dns" />
|
||||
## Configuring DNS
|
||||
|
||||
Edit */etc/resolv.conf* to add this at beginning of file :
|
||||
|
||||
nameserver 192.168.56.110
|
||||
|
||||
Warning: NetworkManager changes resolv.conf on restart
|
||||
|
||||
Add a DNS A record and PTR record for the client under rhelbox.com zone
|
||||
> ipa dnsrecord-add rhelbox.com client --a-rec=192.168.56.101 --a-create-reverse
|
||||
|
||||
Check if DNS resolution is working by running :
|
||||
|
||||
> dig server.rhelbox.com
|
||||
|
||||
;; ANSWER SECTION:
|
||||
server.rhelbox.com. 1200 IN A 192.168.56.110
|
||||
|
||||
> dig client.rhelbox.com
|
||||
|
||||
;; ANSWER SECTION:
|
||||
client.rhelbox.com. 86400 IN A 192.168.56.101
|
||||
|
||||
Check if reverse resolution works :
|
||||
|
||||
> dig -t ptr 101.56.168.192.in-addr.arpa.
|
||||
|
||||
;; ANSWER SECTION:
|
||||
101.56.168.192.in-addr.arpa. 86400 IN PTR client.rhelbox.com.
|
||||
|
||||
|
||||
> dig -t ptr 110.56.168.192.in-addr.arpa.
|
||||
|
||||
;; ANSWER SECTION:
|
||||
110.56.168.192.in-addr.arpa. 86400 IN PTR server.rhelbox.com.
|
||||
|
||||
|
||||
<a name="users-groups" />
|
||||
## Adding users and groups
|
||||
|
||||
The following convention is to be followed in creating group names:
|
||||
|
||||
<reseller-prefix>\_<volume-name>
|
||||
|
||||
<reseller-prefix>\_<account-name>
|
||||
|
||||
As of now, account=volume=group
|
||||
|
||||
For example:
|
||||
|
||||
AUTH\_test
|
||||
|
||||
Create *auth_reseller_admin* user group
|
||||
> ipa group-add auth_reseller_admin --desc="Full access to all Swift accounts"
|
||||
|
||||
Create *auth_rhs_test* user group
|
||||
> ipa group-add auth_rhs_test --desc="Full access to rhs_test account"
|
||||
|
||||
Create user *auth_admin* user as member of *auth_reseller_admin* user group
|
||||
> ipa user-add auth_admin --first=Auth --last=Admin --password
|
||||
>
|
||||
> ipa group-add-member auth_reseller_admin --users=auth_admin
|
||||
|
||||
Create user *rhs_test_admin* as member of *auth_rhs_test* user group
|
||||
> ipa user-add rhs_test_admin --first=RHS --last=Admin --password
|
||||
>
|
||||
> ipa group-add-member auth_rhs_test --users=rhs_test_admin
|
||||
|
||||
Create user *jsmith* with no relevant group membership
|
||||
> ipa user-add jsmith --first=John --last=Smith --password
|
||||
|
||||
You can verify users have been added by running
|
||||
>ipa user-find admin
|
||||
|
||||
NOTE: Every user has to change password on first login.
|
||||
|
||||
[RHEL 6 Identity Management Guide]: https://access.redhat.com/site/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Identity_Management_Guide/
|
@ -1,517 +0,0 @@
|
||||
#swiftkerbauth
|
||||
|
||||
* [Installing Kerberos module for Apache] (#httpd-kerb-install)
|
||||
* [Creating HTTP Service Principal] (#http-principal)
|
||||
* [Installing and configuring swiftkerbauth] (#install-swiftkerbauth)
|
||||
* [Using swiftkerbauth] (#use-swiftkerbauth)
|
||||
* [Configurable Parameters] (#config-swiftkerbauth)
|
||||
* [Functional tests] (#swfunctest)
|
||||
|
||||
<a name="httpd-kerb-install" />
|
||||
## Installing Kerberos module for Apache on IPA client
|
||||
|
||||
Install httpd server with kerberos module:
|
||||
> yum install httpd mod_auth_kerb
|
||||
>
|
||||
> service httpd restart
|
||||
|
||||
Check if auth_kerb_module is loaded :
|
||||
> httpd -M | grep kerb
|
||||
|
||||
Change httpd log level to debug by adding/changing the following in
|
||||
*/etc/httpd/conf/httpd.conf* file
|
||||
|
||||
LogLevel debug
|
||||
|
||||
httpd logs are at */var/log/httpd/error_log* for troubleshooting
|
||||
|
||||
If SELinux is enabled, allow Apache to connect to memcache and
|
||||
activate the changes by running
|
||||
>setsebool -P httpd_can_network_connect 1
|
||||
>
|
||||
>setsebool -P httpd_can_network_memcache 1
|
||||
|
||||
*****
|
||||
|
||||
<a name="http-principal" />
|
||||
## Creating HTTP Service Principal on IPA server
|
||||
|
||||
Add a HTTP Kerberos service principal :
|
||||
> ipa service-add HTTP/client.rhelbox.com@RHELBOX.COM
|
||||
|
||||
Retrieve the HTTP service principal to a keytab file:
|
||||
> ipa-getkeytab -s server.rhelbox.com -p HTTP/client.rhelbox.com@RHELBOX.COM -k /tmp/http.keytab
|
||||
|
||||
Copy keytab file to client:
|
||||
> scp /tmp/http.keytab root@192.168.56.101:/etc/httpd/conf/http.keytab
|
||||
|
||||
## Creating HTTP Service Principal on Windows AD server
|
||||
|
||||
Add a HTTP Kerberos service principal:
|
||||
> c:\>ktpass.exe -princ HTTP/fcclient.winad.com@WINAD.COM -mapuser
|
||||
> auth_admin@WINAD.COM -pass Redhat*123 -out c:\HTTP.keytab -crypto DES-CBC-CRC
|
||||
> -kvno 0
|
||||
|
||||
Use winscp to copy HTTP.ketab file to /etc/httpd/conf/http.keytab
|
||||
|
||||
*****
|
||||
|
||||
<a name="install-swiftkerbauth" />
|
||||
##Installing and configuring swiftkerbauth on IPA client
|
||||
|
||||
Prerequisites for installing swiftkerbauth
|
||||
* swift (havana)
|
||||
* gluster-swift (optional)
|
||||
|
||||
You can install swiftkerbauth using one of these three ways:
|
||||
|
||||
Installing swiftkerbauth from source:
|
||||
> python setup.py install
|
||||
|
||||
Installing swiftkerbauth using pip:
|
||||
> pip install swiftkerbauth
|
||||
|
||||
Installing swiftkerbauth from RPMs:
|
||||
> ./makerpm.sh
|
||||
>
|
||||
> rpm -ivh dist/swiftkerbauth-1.0.0-1.noarch.rpm
|
||||
|
||||
Edit */etc/httpd/conf.d/swift-auth.conf* and change KrbServiceName, KrbAuthRealms and Krb5KeyTab parameters accordingly.
|
||||
More detail on configuring kerberos for apache can be found at:
|
||||
[auth_kerb_module Configuration][]
|
||||
|
||||
Make /etc/httpd/conf/http.keytab readable by any user :
|
||||
> chmod 644 /etc/httpd/conf/http.keytab
|
||||
|
||||
And preferably change owner of keytab file to apache :
|
||||
> chown apache:apache /etc/httpd/conf/http.keytab
|
||||
|
||||
Reload httpd
|
||||
> service httpd reload
|
||||
|
||||
Make authentication script executable:
|
||||
> chmod +x /var/www/cgi-bin/swift-auth
|
||||
|
||||
*****
|
||||
|
||||
<a name="#use-swiftkerbauth" />
|
||||
##Using swiftkerbauth
|
||||
|
||||
### Adding kerbauth filter in swift pipeline
|
||||
|
||||
Edit */etc/swift/proxy-server.conf* and add a new filter section as follows:
|
||||
|
||||
[filter:kerbauth]
|
||||
use = egg:swiftkerbauth#kerbauth
|
||||
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
|
||||
auth_mode=passive
|
||||
|
||||
Add kerbauth to pipeline
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck proxy-logging cache proxy-logging kerbauth proxy-server
|
||||
|
||||
If the Swift server is not one of your Gluster nodes, edit
|
||||
*/etc/swift/fs.conf* and change the following lines in the DEFAULT
|
||||
section:
|
||||
|
||||
mount_ip = RHS_NODE_HOSTNAME
|
||||
remote_cluster = yes
|
||||
|
||||
Restart swift to activate kerbauth filer
|
||||
> swift-init main restart
|
||||
|
||||
|
||||
###Examples
|
||||
|
||||
####Authenticate user and get Kerberos ticket
|
||||
|
||||
> kinit auth_admin
|
||||
|
||||
NOTE: curl ignores user specified in -u option. All further curl commands
|
||||
will use the currently authenticated auth_admin user.
|
||||
|
||||
####Get an authentication token:
|
||||
> curl -v -u : --negotiate --location-trusted http://client.rhelbox.com:8080/auth/v1.0
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> GET /auth/v1.0 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 303 See Other
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Location: http://client.rhelbox.com/cgi-bin/swift-auth
|
||||
< Content-Length: 0
|
||||
< X-Trans-Id: txecd415aae89b4320b6145-0052417ea5
|
||||
< Date: Tue, 24 Sep 2013 11:59:33 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
|
||||
* About to connect() to client.rhelbox.com port 80 (#1)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 80 (#1)
|
||||
> GET /cgi-bin/swift-auth HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 401 Unauthorized
|
||||
< Date: Tue, 24 Sep 2013 11:59:33 GMT
|
||||
< Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
|
||||
< WWW-Authenticate: Negotiate
|
||||
< WWW-Authenticate: Basic realm="Swift Authentication"
|
||||
< Content-Length: 381
|
||||
< Content-Type: text/html; charset=iso-8859-1
|
||||
<
|
||||
* Ignoring the response-body
|
||||
* Connection #1 to host client.rhelbox.com left intact
|
||||
* Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
|
||||
* Re-using existing connection! (#1) with host (nil)
|
||||
* Connected to (nil) (192.168.56.101) port 80 (#1)
|
||||
* Server auth using GSS-Negotiate with user ''
|
||||
> GET /cgi-bin/swift-auth HTTP/1.1
|
||||
> Authorization: Negotiate YIICYgYJKoZIhvcSAQICAQBuggJRMIICTaADAgEFoQMCAQ6iBwMFACAAAACjggFgYYIBXDCCAVigAwIBBaENGwtSSEVMQk9YLkNPTaIlMCOgAwIBA6EcMBobBEhUVFAbEmNsaWVudC5yaGVsYm94LmNvbaOCARkwggEVoAMCARKhAwIBAaKCAQcEggEDx9SH2R90RO4eAkhsNKow/DYfjv1rWhgxNRqj/My3yslASSgefls48VdDNHVVWqr1Kd6mB/9BIoumpA+of+KSAg2QfPtcWiVFj5n5Fa8fyCHyQPvV8c92KzUdrBPc8OVn0aldFp0I4P1MsYZbnddDRSH3kjVA5oSucHF59DhZWiGJV/F6sVimBSeoTBHQD38Cs5RhyDHNyUad9v3gZERVGCJXC76i7+yyaoIDA+N9s0hasHajhTnjs3XQBYfZFwp8lWl3Ub+sOtPO1Ng7mFlSAYXCM6ljlKTEaxRwaYoXUC1EoIqEOG/8pC9SJThS2M1G7MW1c5xm4lksNss72OH4gtPns6SB0zCB0KADAgESooHIBIHFrLtai5U8ajEWo1J9B26PnIUqLd+uA0KPd2Y2FjrH6rx4xT8qG2p8i36SVGubvwBVmfQ7lSJcXt6wUvb43qyPs/fMiSY7QxHxt7/btMgxQl6JWMagvXMhCNXnhEHNNaTdBcG5KFERDGeo0txaAD1bzZ4mnxCQmoqusGzZ6wdDw6+5wq1tK/hQTQUgk2NwxfXAg2J5K02/3fKjFR2h7zewI1pEyhhpeONRkkRETcyojkK2EbVzZ8kc3RsuwzFYsJ+9u5Qj3E4=
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Tue, 24 Sep 2013 11:59:33 GMT
|
||||
< Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
|
||||
< WWW-Authenticate: Negotiate YIGZBgkqhkiG9xIBAgICAG+BiTCBhqADAgEFoQMCAQ+iejB4oAMCARKicQRveeZTV/QRJSIOoOWPbZkEmtdug9V5ZcMGXWqAJvCAnrvw9gHbklMyLl8f8jU2e0wU3ehtchLEL4dVeAYgKsnUgw4wGhHu59AZBwSbHRKSpv3I6gWEZqC4NAEuZJFW9ipdUHOiclBQniVXXCsRF/5Y
|
||||
< X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
|
||||
< X-Debug-Remote-User: auth_admin
|
||||
< X-Debug-Groups: auth_admin,auth_reseller_admin
|
||||
< X-Debug-Token-Life: 86400s
|
||||
< X-Debug-Token-Expires: Wed Sep 25 17:29:33 2013
|
||||
< Content-Length: 0
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
<
|
||||
* Connection #1 to host (nil) left intact
|
||||
* Closing connection #0
|
||||
* Closing connection #1
|
||||
|
||||
The header *X-Auth-Token* in response contains the token *AUTH_tk083b8abc92f4a514f34224a181ed568a*.
|
||||
|
||||
####PUT a container
|
||||
>curl -v -X PUT -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> PUT /v1/AUTH_myvolume/c1 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
|
||||
>
|
||||
< HTTP/1.1 201 Created
|
||||
< Content-Length: 0
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< X-Trans-Id: txc420b0ebf9714445900e8-0052418863
|
||||
< Date: Tue, 24 Sep 2013 12:41:07 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Closing connection #0
|
||||
|
||||
####GET a container listing
|
||||
> curl -v -X GET -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> GET /v1/AUTH_myvolume HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Content-Length: 3
|
||||
< X-Account-Container-Count: 0
|
||||
< Accept-Ranges: bytes
|
||||
< X-Account-Object-Count: 0
|
||||
< X-Bytes-Used: 0
|
||||
< X-Timestamp: 1379997117.09468
|
||||
< X-Object-Count: 0
|
||||
< X-Account-Bytes-Used: 0
|
||||
< X-Type: Account
|
||||
< Content-Type: text/plain; charset=utf-8
|
||||
< X-Container-Count: 0
|
||||
< X-Trans-Id: tx89826736a1ab4d6aae6e3-00524188dc
|
||||
< Date: Tue, 24 Sep 2013 12:43:08 GMT
|
||||
<
|
||||
c1
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Closing connection #0
|
||||
|
||||
####PUT an object in container
|
||||
> curl -v -X PUT -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1 -d'Hello world'
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> PUT /v1/AUTH_myvolume/c1/object1 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
|
||||
> Content-Length: 11
|
||||
> Content-Type: application/x-www-form-urlencoded
|
||||
>
|
||||
* upload completely sent off: 11 out of 11 bytes
|
||||
< HTTP/1.1 201 Created
|
||||
< Last-Modified: Wed, 25 Sep 2013 06:08:00 GMT
|
||||
< Content-Length: 0
|
||||
< Etag: 3e25960a79dbc69b674cd4ec67a72c62
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< X-Trans-Id: tx01f1b5a430cf4af3897be-0052427dc0
|
||||
< Date: Wed, 25 Sep 2013 06:08:01 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Closing connection #0
|
||||
|
||||
####Give permission to jsmith to list and download objects from c1 container
|
||||
> curl -v -X POST -H 'X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a' -H 'X-Container-Read: jsmith' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> POST /v1/AUTH_myvolume/c1 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tk083b8abc92f4a514f34224a181ed568a
|
||||
> X-Container-Read: jsmith
|
||||
>
|
||||
< HTTP/1.1 204 No Content
|
||||
< Content-Length: 0
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< X-Trans-Id: txcedea3e2557d463eb591d-0052427f60
|
||||
< Date: Wed, 25 Sep 2013 06:14:56 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Closing connection #0
|
||||
|
||||
####Access container as jsmith
|
||||
|
||||
> kinit jsmith
|
||||
|
||||
Get token for jsmith
|
||||
> curl -v -u : --negotiate --location-trusted http://client.rhelbox.com:8080/auth/v1.0
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> GET /auth/v1.0 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 303 See Other
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
< Location: http://client.rhelbox.com/cgi-bin/swift-auth
|
||||
< Content-Length: 0
|
||||
< X-Trans-Id: txf51e1bf7f8c5496f8cc93-005242800b
|
||||
< Date: Wed, 25 Sep 2013 06:17:47 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
|
||||
* About to connect() to client.rhelbox.com port 80 (#1)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 80 (#1)
|
||||
> GET /cgi-bin/swift-auth HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 401 Unauthorized
|
||||
< Date: Wed, 25 Sep 2013 06:17:47 GMT
|
||||
< Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
|
||||
< WWW-Authenticate: Negotiate
|
||||
< WWW-Authenticate: Basic realm="Swift Authentication"
|
||||
< Content-Length: 381
|
||||
< Content-Type: text/html; charset=iso-8859-1
|
||||
<
|
||||
* Ignoring the response-body
|
||||
* Connection #1 to host client.rhelbox.com left intact
|
||||
* Issue another request to this URL: 'http://client.rhelbox.com/cgi-bin/swift-auth'
|
||||
* Re-using existing connection! (#1) with host (nil)
|
||||
* Connected to (nil) (192.168.56.101) port 80 (#1)
|
||||
* Server auth using GSS-Negotiate with user ''
|
||||
> GET /cgi-bin/swift-auth HTTP/1.1
|
||||
> Authorization: Negotiate YIICWAYJKoZIhvcSAQICAQBuggJHMIICQ6ADAgEFoQMCAQ6iBwMFACAAAACjggFbYYIBVzCCAVOgAwIBBaENGwtSSEVMQk9YLkNPTaIlMCOgAwIBA6EcMBobBEhUVFAbEmNsaWVudC5yaGVsYm94LmNvbaOCARQwggEQoAMCARKhAwIBAaKCAQIEgf/+3OaXYCSEjcsjU3t3lOLcYG84GBP9Kj9YTHc7yVMlcam4ivCwMqCkzxgvNo2E3a5KSWyFwngeX4b/QFbCKPXA4sfBibZRkeMk5gr2f0MLI3gWEAIYq7bJLre04bnkD2F0MzijPJrOLIx1KmFe08UGWCEmnG2uj07lvIR1RwV/7dMM4J1B+KKvDVKA0LxahwPIpx8oOON2yMGcstrBAHBBk5pmpt1Gg9Lh7xdNPsjP0IfI5Q0zkGCRBKpvpXymP1lQpQXlHbqkdBYOmG4+p/R+vIosO4ui1G6GWE9t71h3AqW61CcCj3/oOjZsG56k8HMSNk/+3mfUTP86nzLRGkekgc4wgcugAwIBEqKBwwSBwPsG9nGloEnOsA1abP4R1/yUDcikjjwKiacvZ+cu7bWEzu3L376k08U8C2YIClyUJy3Grt68LxhnfZ65VCZ5J5IOLiXOJnHBIoJ1L4GMYp4EgZzHvI7R3U3DApMzNWZwc1MsSF5UGhmLwxSevDLetJHjgKzKNteRyVN/8CFgjSBEjGSN1Qgy1RZHuQR9d3JHPczONZ4+ZgStfy+I1m2IUIgW3+4JGFVafHiBQVwSWRNfdXFgI3wBz7slntd7r3qMWA==
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Date: Wed, 25 Sep 2013 06:17:47 GMT
|
||||
< Server: Apache/2.4.6 (Fedora) mod_auth_kerb/5.4
|
||||
< WWW-Authenticate: Negotiate YIGYBgkqhkiG9xIBAgICAG+BiDCBhaADAgEFoQMCAQ+ieTB3oAMCARKicARuH2YpjFrtgIhGr5nO7gh/21EvGH9tayRo5A3pw5pxD1B1036ePLG/x98OdMrSflse5s8ttz8FmvRphCFJa8kfYtnWULgoFLF2F2a1zBdSo2oCA0R05YFwArNhkg6ou5o7wWZkERHK33CKlhudSj8=
|
||||
< X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
|
||||
< X-Debug-Remote-User: jsmith
|
||||
< X-Debug-Groups: jsmith
|
||||
< X-Debug-Token-Life: 86400s
|
||||
< X-Debug-Token-Expires: Thu Sep 26 11:47:47 2013
|
||||
< Content-Length: 0
|
||||
< Content-Type: text/html; charset=UTF-8
|
||||
<
|
||||
* Connection #1 to host (nil) left intact
|
||||
* Closing connection #0
|
||||
* Closing connection #1
|
||||
|
||||
List the container using authentication token for jsmith:
|
||||
> curl -v -X GET -H 'X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> GET /v1/AUTH_myvolume/c1 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Content-Length: 8
|
||||
< X-Container-Object-Count: 0
|
||||
< Accept-Ranges: bytes
|
||||
< X-Timestamp: 1
|
||||
< X-Container-Bytes-Used: 0
|
||||
< Content-Type: text/plain; charset=utf-8
|
||||
< X-Trans-Id: tx575215929c654d9f9f284-00524280a4
|
||||
< Date: Wed, 25 Sep 2013 06:20:20 GMT
|
||||
<
|
||||
object1
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
* Closing connection #0
|
||||
|
||||
Downloading the object as jsmith:
|
||||
> curl -v -X GET -H 'X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447' http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1
|
||||
|
||||
* About to connect() to client.rhelbox.com port 8080 (#0)
|
||||
* Trying 192.168.56.101...
|
||||
* connected
|
||||
* Connected to client.rhelbox.com (192.168.56.101) port 8080 (#0)
|
||||
> GET /v1/AUTH_myvolume/c1/object1 HTTP/1.1
|
||||
> User-Agent: curl/7.27.0
|
||||
> Host: client.rhelbox.com:8080
|
||||
> Accept: */*
|
||||
> X-Auth-Token: AUTH_tkb5a20eb8207a819e76619431c8410447
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Content-Length: 11
|
||||
< Accept-Ranges: bytes
|
||||
< Last-Modified: Wed, 25 Sep 2013 06:08:00 GMT
|
||||
< Etag: 3e25960a79dbc69b674cd4ec67a72c62
|
||||
< X-Timestamp: 1380089280.98829
|
||||
< Content-Type: application/x-www-form-urlencoded
|
||||
< X-Trans-Id: tx19b5cc3847854f40a6ca8-00524281aa
|
||||
< Date: Wed, 25 Sep 2013 06:24:42 GMT
|
||||
<
|
||||
* Connection #0 to host client.rhelbox.com left intact
|
||||
Hello world* Closing connection #0
|
||||
|
||||
For curl to follow the redirect, you need to specify additional
|
||||
options. With these, and with a current Kerberos ticket, you should
|
||||
get the Kerberos user's cached authentication token, or a new one if
|
||||
the previous token has expired.
|
||||
|
||||
> curl -v -u : --negotiate --location-trusted -X GET http://client.rhelbox.com:8080/v1/AUTH_myvolume/c1/object1
|
||||
|
||||
The --negotiate option is for curl to perform Kerberos authentication and
|
||||
--location-trusted is for curl to follow the redirect.
|
||||
|
||||
[auth_kerb_module Configuration]: http://modauthkerb.sourceforge.net/configure.html
|
||||
|
||||
|
||||
#### Get an authentication token when auth_mode=passive:
|
||||
> curl -v -H 'X-Auth-User: test:auth_admin' -H 'X-Auth-Key: Redhat*123' http://127.0.0.1:8080/auth/v1.0
|
||||
|
||||
**NOTE**: X-Storage-Url response header can be returned only in passive mode.
|
||||
|
||||
<a name="config-swiftkerbauth" />
|
||||
##Configurable Parameters
|
||||
|
||||
The kerbauth filter section in **/etc/swift/proxy-server.conf** looks something
|
||||
like this:
|
||||
|
||||
[filter:kerbauth]
|
||||
use = egg:swiftkerbauth#kerbauth
|
||||
ext_authentication_url = http://client.rhelbox.com/cgi-bin/swift-auth
|
||||
auth_method = active
|
||||
token_life = 86400
|
||||
debug_headers = yes
|
||||
realm_name = RHELBOX.COM
|
||||
|
||||
Of all the options listed above, specifying **ext\_authentication\_url** is
|
||||
mandatory. The rest of the options are optional and have default values.
|
||||
|
||||
#### ext\_authentication\_url
|
||||
A URL specifying location of the swift-auth CGI script. Avoid using IP address.
|
||||
Default value: None
|
||||
|
||||
#### token_life
|
||||
After how many seconds the cached information about an authentication token is
|
||||
discarded.
|
||||
Default value: 86400
|
||||
|
||||
#### debug_headers
|
||||
When turned on, the response headers sent to the user will contain additional
|
||||
debug information apart from the auth token.
|
||||
Default value: yes
|
||||
|
||||
#### auth_method
|
||||
Set this to **"active"** when you want to allow access **only to clients
|
||||
residing inside the domain**. In this mode, authentication is performed by
|
||||
mod\_auth\_kerb using the Kerberos ticket bundled with the client request.
|
||||
No username and password have to be specified to get a token.
|
||||
Set this to **"passive"** when you want to allow access to clients residing
|
||||
outside the domain. In this mode, authentication is performed by gleaning
|
||||
username and password from request headers (X-Auth-User and X-Auth-Key) and
|
||||
running kinit command against it.
|
||||
Default value: passive
|
||||
|
||||
#### realm_name
|
||||
This is applicable only when the auth_method=passive. This option specifies
|
||||
realm name if storage server belongs to more than one realm and realm name is not
|
||||
part of the username specified in X-Auth-User header.
|
||||
|
||||
<a name="swfunctest" />
|
||||
##Functional tests for SwiftkerbAuth
|
||||
|
||||
Functional tests to be run on the storage node after SwiftKerbAuth is setup using
|
||||
either IPA server or Windows AD. The gluster-swift/doc/markdown/swiftkerbauth
|
||||
directory contains the SwiftkerbAuth setup documents. There are two modes of
|
||||
working with SwiftKerbAuth. 'PASSIVE' mode indicates the client is outside the
|
||||
domain configured using SwiftKerbAuth. Client provides the 'Username' and
|
||||
'Password' while invoking a command. SwiftKerbAuth auth filter code then
|
||||
would get the ticket granting ticket from AD server or IPA server.
|
||||
In 'ACTIVE' mode of SwiftKerbAuth, User is already logged into storage node using
|
||||
its kerberos credentials. That user is authenticated across AD/IPA server.
|
||||
|
||||
In PASSIVE mode all the generic functional tests are run. ACTIVE mode has a
|
||||
different way of acquiring Ticket Granting Ticket. And hence the different
|
||||
framework of functional tests there.
|
||||
|
||||
The accounts, users, passwords must be prepared on AD/IPA server as per
|
||||
mentioned in test/functional_auth/swiftkerbauth/conf/test.conf
|
||||
|
||||
Command to invoke SwiftKerbAuth functional tests is
|
||||
> $tox -e swfunctest
|
||||
|
||||
This would run both ACTIVE and PASSIVE mode functional test cases.
|
@ -1,66 +0,0 @@
|
||||
# User Guide
|
||||
|
||||
## Installation
|
||||
|
||||
### GlusterFS Installation
|
||||
First, we need to install GlusterFS on the system by following the
|
||||
instructions on [GlusterFS QuickStart Guide][].
|
||||
|
||||
### Fedora/RHEL/CentOS
|
||||
Gluster for Swift depends on OpenStack Swift Grizzly, which can be
|
||||
obtained by using [RedHat's RDO][] packages as follows:
|
||||
|
||||
~~~
|
||||
yum install -y http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly.rpm
|
||||
~~~
|
||||
|
||||
### Download
|
||||
Gluster for Swift uses [Jenkins][] for continuous integration and
|
||||
creation of distribution builds. Download the latest RPM builds
|
||||
from one of the links below:
|
||||
|
||||
* RHEL/CentOS 6: [Download](http://build.gluster.org/job/gluster-swift-builds-cent6/lastSuccessfulBuild/artifact/build/)
|
||||
* Fedora 18+: [Download](http://build.gluster.org/job/gluster-swift-builds-f18/lastSuccessfulBuild/artifact/build/)
|
||||
|
||||
Install the downloaded RPM using the following command:
|
||||
|
||||
~~~
|
||||
yum install -y RPMFILE
|
||||
~~~
|
||||
|
||||
where *RPMFILE* is the RPM file downloaded from Jenkins.
|
||||
|
||||
## Configuration
|
||||
TBD
|
||||
|
||||
## Server Control
|
||||
Command to start the servers (TBD)
|
||||
|
||||
~~~
|
||||
swift-init main start
|
||||
~~~
|
||||
|
||||
Command to stop the servers (TBD)
|
||||
|
||||
~~~
|
||||
swift-init main stop
|
||||
~~~
|
||||
|
||||
Command to gracefully reload the servers
|
||||
|
||||
~~~
|
||||
swift-init main reload
|
||||
~~~
|
||||
|
||||
### Mounting your volumes
|
||||
TBD
|
||||
|
||||
Once this is done, you can access GlusterFS volumes via the Swift API where
|
||||
accounts are mounted volumes, containers are top-level directories,
|
||||
and objects are files and sub-directories of container directories.
|
||||
|
||||
|
||||
|
||||
[GlusterFS QuickStart Guide]: http://www.gluster.org/community/documentation/index.php/QuickStart
|
||||
[RedHat's RDO]: http://openstack.redhat.com/Quickstart
|
||||
[Jenkins]: http://jenkins-ci.org
|
@ -1,39 +0,0 @@
|
||||
[DEFAULT]
|
||||
#
|
||||
# Default gluster mount point to be used for object store,can be changed by
|
||||
# setting the following value in {account,container,object}-server.conf files.
|
||||
# It is recommended to keep this value same for all the three services but can
|
||||
# be kept different if environment demands.
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the account-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
mount_check = true
|
||||
bind_port = 6012
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:gluster_swift#account
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
# The following parameter is used by object-expirer and needs to be same
|
||||
# across all conf files!
|
||||
auto_create_account_prefix = gs
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the account server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
||||
|
@ -1,39 +0,0 @@
|
||||
[DEFAULT]
|
||||
#
|
||||
# Default gluster mount point to be used for object store,can be changed by
|
||||
# setting the following value in {account,container,object}-server.conf files.
|
||||
# It is recommended to keep this value same for all the three services but can
|
||||
# be kept different if environment demands.
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the container-server workers
|
||||
# start, you can *consider* setting this value to "false" to reduce the
|
||||
# per-request overhead it can incur.
|
||||
mount_check = true
|
||||
bind_port = 6011
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:gluster_swift#container
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
# The following parameters is used by object-expirer and needs to be same
|
||||
# across all conf files!
|
||||
auto_create_account_prefix = gs
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the container server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
||||
|
@ -1,27 +0,0 @@
|
||||
#TODO: Add documentation to explain various options
|
||||
#For now, refer: https://github.com/openstack/swift/blob/master/etc/object-expirer.conf-sample
|
||||
|
||||
[DEFAULT]
|
||||
|
||||
[object-expirer]
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = DEBUG
|
||||
# The following parameters are used by object-expirer and needs to be same
|
||||
# across all conf files!
|
||||
auto_create_account_prefix = gs
|
||||
expiring_objects_account_name = expiring
|
||||
|
||||
interval = 30
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:gluster_swift#proxy
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
@ -1,70 +0,0 @@
|
||||
[DEFAULT]
|
||||
bind_port = 8080
|
||||
user = root
|
||||
# Consider using 1 worker per CPU
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck proxy-logging cache proxy-logging proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:gluster_swift#proxy
|
||||
log_facility = LOG_LOCAL1
|
||||
log_level = WARN
|
||||
# The API allows for account creation and deletion, but since Gluster/Swift
|
||||
# automounts a Gluster volume for a given account, there is no way to create
|
||||
# or delete an account. So leave this off.
|
||||
allow_account_management = false
|
||||
account_autocreate = true
|
||||
# The following parameters are used by object-expirer and needs to be same
|
||||
# across all conf files!
|
||||
auto_create_account_prefix = gs
|
||||
expiring_objects_account_name = expiring
|
||||
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
|
||||
# of the entire object given that all metadata is stored in the object
|
||||
# extended attributes (no .meta file used after creation) and no container
|
||||
# sync feature to present.
|
||||
object_post_as_copy = false
|
||||
# Only need to recheck the account exists once a day
|
||||
recheck_account_existence = 86400
|
||||
# May want to consider bumping this up if containers are created and destroyed
|
||||
# infrequently.
|
||||
recheck_container_existence = 60
|
||||
# Timeout clients that don't read or write to the proxy server after 5
|
||||
# seconds.
|
||||
client_timeout = 5
|
||||
# Give more time to connect to the object, container or account servers in
|
||||
# cases of high load.
|
||||
conn_timeout = 5
|
||||
# For high load situations, once connected to an object, container or account
|
||||
# server, allow for delays communicating with them.
|
||||
node_timeout = 60
|
||||
# May want to consider bumping up this value to 1 - 4 MB depending on how much
|
||||
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
|
||||
# stripe width (not stripe element size) of your storage volume is a good
|
||||
# starting point. See below for sizing information.
|
||||
object_chunk_size = 65536
|
||||
# If you do decide to increase the object_chunk_size, then consider lowering
|
||||
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
|
||||
# be queued to the object server for processing. Given one proxy server worker
|
||||
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
|
||||
# * 1,024 bytes of memory in the worse case (default values). Be sure the
|
||||
# amount of memory available on the system can accommodate increased values
|
||||
# for object_chunk_size.
|
||||
put_queue_depth = 10
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
||||
|
||||
[filter:proxy-logging]
|
||||
use = egg:swift#proxy_logging
|
||||
access_log_level = WARN
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
# Update this line to contain a comma separated list of memcache servers
|
||||
# shared by all nodes running the proxy-server service.
|
||||
memcache_servers = localhost:11211
|
@ -1,46 +0,0 @@
|
||||
# Copyright (c) 2012-2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Account Server for Gluster Swift UFO """
|
||||
|
||||
# Simply importing this monkey patches the constraint handling to fit our
|
||||
# needs
|
||||
import gluster.swift.common.constraints # noqa
|
||||
|
||||
from swift.account import server
|
||||
from gluster.swift.common.DiskDir import DiskAccount
|
||||
|
||||
|
||||
class AccountController(server.AccountController):
|
||||
|
||||
def _get_account_broker(self, drive, part, account, **kwargs):
|
||||
"""
|
||||
Overriden to provide the GlusterFS specific broker that talks to
|
||||
Gluster for the information related to servicing a given request
|
||||
instead of talking to a database.
|
||||
|
||||
:param drive: drive that holds the container
|
||||
:param part: partition the container is in
|
||||
:param account: account name
|
||||
:returns: DiskDir object
|
||||
"""
|
||||
return DiskAccount(self.root, drive, account, self.logger, **kwargs)
|
||||
|
||||
|
||||
def app_factory(global_conf, **local_conf):
|
||||
"""paste.deploy app factory for creating WSGI account server apps."""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
return AccountController(conf)
|
@ -1,705 +0,0 @@
|
||||
# Copyright (c) 2012-2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import errno
|
||||
|
||||
from gluster.swift.common.fs_utils import dir_empty, mkdirs, do_chown, \
|
||||
do_exists, do_touch
|
||||
from gluster.swift.common.utils import validate_account, validate_container, \
|
||||
get_container_details, get_account_details, create_container_metadata, \
|
||||
create_account_metadata, DEFAULT_GID, get_container_metadata, \
|
||||
get_account_metadata, DEFAULT_UID, validate_object, \
|
||||
create_object_metadata, read_metadata, write_metadata, X_CONTENT_TYPE, \
|
||||
X_CONTENT_LENGTH, X_TIMESTAMP, X_PUT_TIMESTAMP, X_ETAG, X_OBJECTS_COUNT, \
|
||||
X_BYTES_USED, X_CONTAINER_COUNT, DIR_TYPE, rmobjdir, dir_is_object
|
||||
from gluster.swift.common import Glusterfs
|
||||
from gluster.swift.common.exceptions import FileOrDirNotFoundError
|
||||
|
||||
|
||||
DATADIR = 'containers'
|
||||
|
||||
# Create a dummy db_file in Glusterfs.RUN_DIR
|
||||
_db_file = ""
|
||||
|
||||
|
||||
def _read_metadata(dd):
|
||||
""" Filter read metadata so that it always returns a tuple that includes
|
||||
some kind of timestamp. With 1.4.8 of the Swift integration the
|
||||
timestamps were not stored. Here we fabricate timestamps for volumes
|
||||
where the existing data has no timestamp (that is, stored data is not
|
||||
a tuple), allowing us a measure of backward compatibility.
|
||||
|
||||
FIXME: At this time it does not appear that the timestamps on each
|
||||
metadata are used for much, so this should not hurt anything.
|
||||
"""
|
||||
metadata_i = read_metadata(dd)
|
||||
metadata = {}
|
||||
timestamp = 0
|
||||
for key, value in metadata_i.iteritems():
|
||||
if not isinstance(value, tuple):
|
||||
value = (value, timestamp)
|
||||
metadata[key] = value
|
||||
return metadata
|
||||
|
||||
|
||||
def filter_prefix(objects, prefix):
|
||||
"""
|
||||
Accept a sorted list of strings, returning all strings starting with the
|
||||
given prefix.
|
||||
"""
|
||||
found = False
|
||||
for object_name in objects:
|
||||
if object_name.startswith(prefix):
|
||||
yield object_name
|
||||
found = True
|
||||
else:
|
||||
# Since the list is assumed to be sorted, once we find an object
|
||||
# name that does not start with the prefix we know we won't find
|
||||
# any others, so we exit early.
|
||||
if found:
|
||||
break
|
||||
|
||||
|
||||
def filter_delimiter(objects, delimiter, prefix, marker, path=None):
|
||||
"""
|
||||
Accept a sorted list of strings, returning strings that:
|
||||
1. begin with "prefix" (empty string matches all)
|
||||
2. does not match the "path" argument
|
||||
3. does not contain the delimiter in the given prefix length
|
||||
"""
|
||||
assert delimiter
|
||||
assert prefix is not None
|
||||
skip_name = None
|
||||
for object_name in objects:
|
||||
if prefix and not object_name.startswith(prefix):
|
||||
break
|
||||
if path is not None:
|
||||
if object_name == path:
|
||||
continue
|
||||
if skip_name:
|
||||
if object_name < skip_name:
|
||||
continue
|
||||
else:
|
||||
skip_name = None
|
||||
end = object_name.find(delimiter, len(prefix))
|
||||
if end >= 0 and (len(object_name) > (end + 1)):
|
||||
skip_name = object_name[:end] + chr(ord(delimiter) + 1)
|
||||
continue
|
||||
else:
|
||||
if skip_name:
|
||||
if object_name < skip_name:
|
||||
continue
|
||||
else:
|
||||
skip_name = None
|
||||
end = object_name.find(delimiter, len(prefix))
|
||||
if end > 0:
|
||||
dir_name = object_name[:end + 1]
|
||||
if dir_name != marker:
|
||||
yield dir_name
|
||||
skip_name = object_name[:end] + chr(ord(delimiter) + 1)
|
||||
continue
|
||||
yield object_name
|
||||
|
||||
|
||||
def filter_marker(objects, marker):
|
||||
"""
|
||||
Accept sorted list of strings, return all strings whose value is strictly
|
||||
greater than the given marker value.
|
||||
"""
|
||||
for object_name in objects:
|
||||
if object_name > marker:
|
||||
yield object_name
|
||||
|
||||
|
||||
def filter_prefix_as_marker(objects, prefix):
|
||||
"""
|
||||
Accept sorted list of strings, return all strings whose value is greater
|
||||
than or equal to the given prefix value.
|
||||
"""
|
||||
for object_name in objects:
|
||||
if object_name >= prefix:
|
||||
yield object_name
|
||||
|
||||
|
||||
def filter_end_marker(objects, end_marker):
|
||||
"""
|
||||
Accept a list of strings, sorted, and return all the strings that are
|
||||
strictly less than the given end_marker string. We perform this as a
|
||||
generator to avoid creating potentially large intermediate object lists.
|
||||
"""
|
||||
for object_name in objects:
|
||||
if object_name < end_marker:
|
||||
yield object_name
|
||||
else:
|
||||
break
|
||||
|
||||
|
||||
class DiskCommon(object):
|
||||
"""
|
||||
Common fields and methods shared between DiskDir and DiskAccount classes.
|
||||
"""
|
||||
def __init__(self, root, drive, account, logger, pending_timeout=None,
|
||||
stale_reads_ok=False):
|
||||
# WARNING: The following four fields are referenced as fields by our
|
||||
# callers outside of this module, do not remove.
|
||||
# Create a dummy db_file in Glusterfs.RUN_DIR
|
||||
global _db_file
|
||||
if not _db_file:
|
||||
_db_file = os.path.join(Glusterfs.RUN_DIR, 'db_file.db')
|
||||
if not do_exists(_db_file):
|
||||
do_touch(_db_file)
|
||||
self.db_file = _db_file
|
||||
self.metadata = {}
|
||||
self.pending_timeout = pending_timeout or 10
|
||||
self.stale_reads_ok = stale_reads_ok
|
||||
# The following fields are common
|
||||
self.root = root
|
||||
assert logger is not None
|
||||
self.logger = logger
|
||||
self.account = account
|
||||
self.datadir = os.path.join(root, drive)
|
||||
self._dir_exists = None
|
||||
|
||||
def _dir_exists_read_metadata(self):
|
||||
self._dir_exists = do_exists(self.datadir)
|
||||
if self._dir_exists:
|
||||
self.metadata = _read_metadata(self.datadir)
|
||||
return self._dir_exists
|
||||
|
||||
def is_deleted(self):
|
||||
# The intention of this method is to check the file system to see if
|
||||
# the directory actually exists.
|
||||
return not do_exists(self.datadir)
|
||||
|
||||
def empty(self):
|
||||
# If it does not exist, then it is empty. A value of True is
|
||||
# what is expected by OpenStack Swift when the directory does
|
||||
# not exist. Check swift/common/db.py:ContainerBroker.empty()
|
||||
# and swift/container/server.py:ContainerController.DELETE()
|
||||
# for more information
|
||||
try:
|
||||
return dir_empty(self.datadir)
|
||||
except FileOrDirNotFoundError:
|
||||
return True
|
||||
|
||||
def update_metadata(self, metadata):
|
||||
assert self.metadata, "Valid container/account metadata should have " \
|
||||
"been created by now"
|
||||
if metadata:
|
||||
new_metadata = self.metadata.copy()
|
||||
new_metadata.update(metadata)
|
||||
if new_metadata != self.metadata:
|
||||
write_metadata(self.datadir, new_metadata)
|
||||
self.metadata = new_metadata
|
||||
|
||||
|
||||
class DiskDir(DiskCommon):
|
||||
"""
|
||||
Manage object files on disk.
|
||||
|
||||
:param path: path to devices on the node
|
||||
:param drive: gluster volume drive name
|
||||
:param account: account name for the object
|
||||
:param container: container name for the object
|
||||
:param logger: account or container server logging object
|
||||
:param uid: user ID container object should assume
|
||||
:param gid: group ID container object should assume
|
||||
|
||||
Usage pattern from container/server.py (Havana, 1.8.0+):
|
||||
DELETE:
|
||||
if auto-create and obj and not .db_file:
|
||||
# Creates container
|
||||
.initialize()
|
||||
if not .db_file:
|
||||
# Container does not exist
|
||||
return 404
|
||||
if obj:
|
||||
# Should be a NOOP
|
||||
.delete_object()
|
||||
else:
|
||||
if not .empty()
|
||||
# Gluster's definition of empty should mean only
|
||||
# sub-directories exist in Object-Only mode
|
||||
return conflict
|
||||
.get_info()['put_timestamp'] and not .is_deleted()
|
||||
# Deletes container
|
||||
.delete_db()
|
||||
if not .is_deleted():
|
||||
return conflict
|
||||
account_update():
|
||||
.get_info()
|
||||
PUT:
|
||||
if obj:
|
||||
if auto-create cont and not .db_file
|
||||
# Creates container
|
||||
.initialize()
|
||||
if not .db_file
|
||||
return 404
|
||||
.put_object()
|
||||
else:
|
||||
if not .db_file:
|
||||
# Creates container
|
||||
.initialize()
|
||||
else:
|
||||
# Update container timestamp
|
||||
.is_deleted()
|
||||
.update_put_timestamp()
|
||||
if .is_deleted()
|
||||
return conflict
|
||||
if metadata:
|
||||
if .metadata
|
||||
.set_x_container_sync_points()
|
||||
.update_metadata()
|
||||
account_update():
|
||||
.get_info()
|
||||
HEAD:
|
||||
.pending_timeout
|
||||
.stale_reads_ok
|
||||
if .is_deleted():
|
||||
return 404
|
||||
.get_info()
|
||||
.metadata
|
||||
GET:
|
||||
.pending_timeout
|
||||
.stale_reads_ok
|
||||
if .is_deleted():
|
||||
return 404
|
||||
.get_info()
|
||||
.metadata
|
||||
.list_objects_iter()
|
||||
POST:
|
||||
if .is_deleted():
|
||||
return 404
|
||||
.metadata
|
||||
.set_x_container_sync_points()
|
||||
.update_metadata()
|
||||
"""
|
||||
|
||||
def __init__(self, path, drive, account, container, logger,
|
||||
uid=DEFAULT_UID, gid=DEFAULT_GID, **kwargs):
|
||||
super(DiskDir, self).__init__(path, drive, account, logger, **kwargs)
|
||||
|
||||
self.uid = int(uid)
|
||||
self.gid = int(gid)
|
||||
|
||||
self.container = container
|
||||
self.datadir = os.path.join(self.datadir, self.container)
|
||||
|
||||
if not self._dir_exists_read_metadata():
|
||||
return
|
||||
|
||||
if not self.metadata:
|
||||
create_container_metadata(self.datadir)
|
||||
self.metadata = _read_metadata(self.datadir)
|
||||
else:
|
||||
if not validate_container(self.metadata):
|
||||
create_container_metadata(self.datadir)
|
||||
self.metadata = _read_metadata(self.datadir)
|
||||
|
||||
def list_objects_iter(self, limit, marker, end_marker,
|
||||
prefix, delimiter, path=None):
|
||||
"""
|
||||
Returns tuple of name, created_at, size, content_type, etag.
|
||||
"""
|
||||
assert limit >= 0
|
||||
assert not delimiter or (len(delimiter) == 1 and ord(delimiter) <= 254)
|
||||
|
||||
if path is not None:
|
||||
if path:
|
||||
prefix = path = path.rstrip('/') + '/'
|
||||
else:
|
||||
prefix = path
|
||||
delimiter = '/'
|
||||
elif delimiter and not prefix:
|
||||
prefix = ''
|
||||
|
||||
container_list = []
|
||||
|
||||
objects = self._update_object_count()
|
||||
if objects:
|
||||
objects.sort()
|
||||
else:
|
||||
return container_list
|
||||
|
||||
if end_marker:
|
||||
objects = filter_end_marker(objects, end_marker)
|
||||
|
||||
if marker and marker >= prefix:
|
||||
objects = filter_marker(objects, marker)
|
||||
elif prefix:
|
||||
objects = filter_prefix_as_marker(objects, prefix)
|
||||
|
||||
if prefix is None:
|
||||
# No prefix, we don't need to apply the other arguments, we just
|
||||
# return what we have.
|
||||
pass
|
||||
else:
|
||||
# We have a non-None (for all intents and purposes it is a string)
|
||||
# prefix.
|
||||
if not delimiter:
|
||||
if not prefix:
|
||||
# We have nothing more to do
|
||||
pass
|
||||
else:
|
||||
objects = filter_prefix(objects, prefix)
|
||||
else:
|
||||
objects = filter_delimiter(objects, delimiter, prefix, marker,
|
||||
path)
|
||||
|
||||
count = 0
|
||||
for obj in objects:
|
||||
obj_path = os.path.join(self.datadir, obj)
|
||||
metadata = read_metadata(obj_path)
|
||||
if not metadata or not validate_object(metadata):
|
||||
if delimiter == '/' and obj_path[-1] == delimiter:
|
||||
clean_obj_path = obj_path[:-1]
|
||||
else:
|
||||
clean_obj_path = obj_path
|
||||
try:
|
||||
metadata = create_object_metadata(clean_obj_path)
|
||||
except OSError as e:
|
||||
# FIXME - total hack to get upstream swift ported unit
|
||||
# test cases working for now.
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if not Glusterfs._implicit_dir_objects and metadata \
|
||||
and metadata[X_CONTENT_TYPE] == DIR_TYPE \
|
||||
and not dir_is_object(metadata):
|
||||
continue
|
||||
list_item = []
|
||||
list_item.append(obj)
|
||||
if metadata:
|
||||
list_item.append(metadata[X_TIMESTAMP])
|
||||
list_item.append(int(metadata[X_CONTENT_LENGTH]))
|
||||
list_item.append(metadata[X_CONTENT_TYPE])
|
||||
list_item.append(metadata[X_ETAG])
|
||||
container_list.append(list_item)
|
||||
count += 1
|
||||
if count >= limit:
|
||||
break
|
||||
|
||||
return container_list
|
||||
|
||||
def _update_object_count(self):
|
||||
objects, object_count, bytes_used = get_container_details(self.datadir)
|
||||
|
||||
if X_OBJECTS_COUNT not in self.metadata \
|
||||
or int(self.metadata[X_OBJECTS_COUNT][0]) != object_count \
|
||||
or X_BYTES_USED not in self.metadata \
|
||||
or int(self.metadata[X_BYTES_USED][0]) != bytes_used:
|
||||
self.metadata[X_OBJECTS_COUNT] = (object_count, 0)
|
||||
self.metadata[X_BYTES_USED] = (bytes_used, 0)
|
||||
write_metadata(self.datadir, self.metadata)
|
||||
|
||||
return objects
|
||||
|
||||
def get_info(self):
|
||||
"""
|
||||
Get global data for the container.
|
||||
:returns: dict with keys: account, container, object_count, bytes_used,
|
||||
hash, id, created_at, put_timestamp, delete_timestamp,
|
||||
reported_put_timestamp, reported_delete_timestamp,
|
||||
reported_object_count, and reported_bytes_used.
|
||||
"""
|
||||
if self._dir_exists and Glusterfs._container_update_object_count:
|
||||
self._update_object_count()
|
||||
|
||||
data = {'account': self.account, 'container': self.container,
|
||||
'object_count': self.metadata.get(
|
||||
X_OBJECTS_COUNT, ('0', 0))[0],
|
||||
'bytes_used': self.metadata.get(X_BYTES_USED, ('0', 0))[0],
|
||||
'hash': '', 'id': '', 'created_at': '1',
|
||||
'put_timestamp': self.metadata.get(
|
||||
X_PUT_TIMESTAMP, ('0', 0))[0],
|
||||
'delete_timestamp': '1',
|
||||
'reported_put_timestamp': '1',
|
||||
'reported_delete_timestamp': '1',
|
||||
'reported_object_count': '1', 'reported_bytes_used': '1',
|
||||
'x_container_sync_point1': self.metadata.get(
|
||||
'x_container_sync_point1', -1),
|
||||
'x_container_sync_point2': self.metadata.get(
|
||||
'x_container_sync_point2', -1),
|
||||
}
|
||||
return data
|
||||
|
||||
def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
|
||||
# NOOP - should never be called since object file creation occurs
|
||||
# within a directory implicitly.
|
||||
pass
|
||||
|
||||
def initialize(self, timestamp):
|
||||
"""
|
||||
Create and write metatdata to directory/container.
|
||||
:param metadata: Metadata to write.
|
||||
"""
|
||||
if not self._dir_exists:
|
||||
mkdirs(self.datadir)
|
||||
# If we create it, ensure we own it.
|
||||
do_chown(self.datadir, self.uid, self.gid)
|
||||
metadata = get_container_metadata(self.datadir)
|
||||
metadata[X_TIMESTAMP] = timestamp
|
||||
write_metadata(self.datadir, metadata)
|
||||
self.metadata = metadata
|
||||
self._dir_exists = True
|
||||
|
||||
def update_put_timestamp(self, timestamp):
|
||||
"""
|
||||
Update the PUT timestamp for the container.
|
||||
|
||||
If the container does not exist, create it using a PUT timestamp of
|
||||
the given value.
|
||||
|
||||
If the container does exist, update the PUT timestamp only if it is
|
||||
later than the existing value.
|
||||
"""
|
||||
if not do_exists(self.datadir):
|
||||
self.initialize(timestamp)
|
||||
else:
|
||||
if timestamp > self.metadata[X_PUT_TIMESTAMP]:
|
||||
self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
|
||||
write_metadata(self.datadir, self.metadata)
|
||||
|
||||
def delete_object(self, name, timestamp):
|
||||
# NOOP - should never be called since object file removal occurs
|
||||
# within a directory implicitly.
|
||||
return
|
||||
|
||||
def delete_db(self, timestamp):
|
||||
"""
|
||||
Delete the container (directory) if empty.
|
||||
|
||||
:param timestamp: delete timestamp
|
||||
"""
|
||||
# Let's check and see if it has directories that
|
||||
# where created by the code, but not by the
|
||||
# caller as objects
|
||||
rmobjdir(self.datadir)
|
||||
|
||||
def set_x_container_sync_points(self, sync_point1, sync_point2):
|
||||
self.metadata['x_container_sync_point1'] = sync_point1
|
||||
self.metadata['x_container_sync_point2'] = sync_point2
|
||||
|
||||
|
||||
class DiskAccount(DiskCommon):
|
||||
"""
|
||||
Usage pattern from account/server.py (Havana, 1.8.0+):
|
||||
DELETE:
|
||||
.is_deleted()
|
||||
.delete_db()
|
||||
PUT:
|
||||
container:
|
||||
.pending_timeout
|
||||
.db_file
|
||||
.initialize()
|
||||
.is_deleted()
|
||||
.put_container()
|
||||
account:
|
||||
.db_file
|
||||
.initialize()
|
||||
.is_status_deleted()
|
||||
.is_deleted()
|
||||
.update_put_timestamp()
|
||||
.is_deleted() ???
|
||||
.update_metadata()
|
||||
HEAD:
|
||||
.pending_timeout
|
||||
.stale_reads_ok
|
||||
.is_deleted()
|
||||
.get_info()
|
||||
.metadata
|
||||
GET:
|
||||
.pending_timeout
|
||||
.stale_reads_ok
|
||||
.is_deleted()
|
||||
.get_info()
|
||||
.metadata
|
||||
.list_containers_iter()
|
||||
POST:
|
||||
.is_deleted()
|
||||
.update_metadata()
|
||||
"""
|
||||
|
||||
def __init__(self, root, drive, account, logger, **kwargs):
|
||||
super(DiskAccount, self).__init__(root, drive, account, logger,
|
||||
**kwargs)
|
||||
|
||||
# Since accounts should always exist (given an account maps to a
|
||||
# gluster volume directly, and the mount has already been checked at
|
||||
# the beginning of the REST API handling), just assert that that
|
||||
# assumption still holds.
|
||||
assert self._dir_exists_read_metadata()
|
||||
assert self._dir_exists
|
||||
|
||||
if not self.metadata or not validate_account(self.metadata):
|
||||
create_account_metadata(self.datadir)
|
||||
self.metadata = _read_metadata(self.datadir)
|
||||
|
||||
def is_status_deleted(self):
|
||||
"""
|
||||
Only returns true if the status field is set to DELETED.
|
||||
"""
|
||||
# This function should always return False. Accounts are not created
|
||||
# and deleted, they exist if a Gluster volume can be mounted. There is
|
||||
# no way to delete accounts, so this could never return True.
|
||||
return False
|
||||
|
||||
def initialize(self, timestamp):
|
||||
"""
|
||||
Create and write metatdata to directory/account.
|
||||
:param metadata: Metadata to write.
|
||||
"""
|
||||
metadata = get_account_metadata(self.datadir)
|
||||
metadata[X_TIMESTAMP] = timestamp
|
||||
write_metadata(self.datadir, metadata)
|
||||
self.metadata = metadata
|
||||
|
||||
def update_put_timestamp(self, timestamp):
|
||||
# Since accounts always exists at this point, just update the account
|
||||
# PUT timestamp if this given timestamp is later than what we already
|
||||
# know.
|
||||
assert self._dir_exists
|
||||
|
||||
if timestamp > self.metadata[X_PUT_TIMESTAMP][0]:
|
||||
self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
|
||||
write_metadata(self.datadir, self.metadata)
|
||||
|
||||
def delete_db(self, timestamp):
|
||||
"""
|
||||
Mark the account as deleted
|
||||
|
||||
:param timestamp: delete timestamp
|
||||
"""
|
||||
# Deleting an account is a no-op, since accounts are one-to-one
|
||||
# mappings to gluster volumes.
|
||||
#
|
||||
# FIXME: This means the caller will end up returning a success status
|
||||
# code for an operation that really should not be allowed. Instead, we
|
||||
# should modify the account server to not allow the DELETE method, and
|
||||
# should probably modify the proxy account controller to not allow the
|
||||
# DELETE method as well.
|
||||
return
|
||||
|
||||
def put_container(self, container, put_timestamp, del_timestamp,
|
||||
object_count, bytes_used):
|
||||
"""
|
||||
Create a container with the given attributes.
|
||||
|
||||
:param name: name of the container to create
|
||||
:param put_timestamp: put_timestamp of the container to create
|
||||
:param delete_timestamp: delete_timestamp of the container to create
|
||||
:param object_count: number of objects in the container
|
||||
:param bytes_used: number of bytes used by the container
|
||||
"""
|
||||
# NOOP - should never be called since container directory creation
|
||||
# occurs from within the account directory implicitly.
|
||||
return
|
||||
|
||||
def _update_container_count(self):
|
||||
containers, container_count = get_account_details(self.datadir)
|
||||
|
||||
if X_CONTAINER_COUNT not in self.metadata \
|
||||
or int(self.metadata[X_CONTAINER_COUNT][0]) != container_count:
|
||||
self.metadata[X_CONTAINER_COUNT] = (container_count, 0)
|
||||
write_metadata(self.datadir, self.metadata)
|
||||
|
||||
return containers
|
||||
|
||||
def list_containers_iter(self, limit, marker, end_marker,
|
||||
prefix, delimiter):
|
||||
"""
|
||||
Return tuple of name, object_count, bytes_used, 0(is_subdir).
|
||||
Used by account server.
|
||||
"""
|
||||
if delimiter and not prefix:
|
||||
prefix = ''
|
||||
|
||||
account_list = []
|
||||
containers = self._update_container_count()
|
||||
if containers:
|
||||
containers.sort()
|
||||
else:
|
||||
return account_list
|
||||
|
||||
if containers and end_marker:
|
||||
containers = filter_end_marker(containers, end_marker)
|
||||
|
||||
if containers:
|
||||
if marker and marker >= prefix:
|
||||
containers = filter_marker(containers, marker)
|
||||
elif prefix:
|
||||
containers = filter_prefix_as_marker(containers, prefix)
|
||||
|
||||
if prefix is None:
|
||||
# No prefix, we don't need to apply the other arguments, we just
|
||||
# return what we have.
|
||||
pass
|
||||
else:
|
||||
# We have a non-None (for all intents and purposes it is a string)
|
||||
# prefix.
|
||||
if not delimiter:
|
||||
if not prefix:
|
||||
# We have nothing more to do
|
||||
pass
|
||||
else:
|
||||
containers = filter_prefix(containers, prefix)
|
||||
else:
|
||||
containers = filter_delimiter(containers, delimiter, prefix,
|
||||
marker)
|
||||
|
||||
count = 0
|
||||
for cont in containers:
|
||||
list_item = []
|
||||
metadata = None
|
||||
list_item.append(cont)
|
||||
cont_path = os.path.join(self.datadir, cont)
|
||||
metadata = _read_metadata(cont_path)
|
||||
if not metadata or not validate_container(metadata):
|
||||
try:
|
||||
metadata = create_container_metadata(cont_path)
|
||||
except OSError as e:
|
||||
# FIXME - total hack to get upstream swift ported unit
|
||||
# test cases working for now.
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if metadata:
|
||||
list_item.append(metadata[X_OBJECTS_COUNT][0])
|
||||
list_item.append(metadata[X_BYTES_USED][0])
|
||||
list_item.append(0)
|
||||
account_list.append(list_item)
|
||||
count += 1
|
||||
if count >= limit:
|
||||
break
|
||||
|
||||
return account_list
|
||||
|
||||
def get_info(self):
|
||||
"""
|
||||
Get global data for the account.
|
||||
:returns: dict with keys: account, created_at, put_timestamp,
|
||||
delete_timestamp, container_count, object_count,
|
||||
bytes_used, hash, id
|
||||
"""
|
||||
if Glusterfs._account_update_container_count:
|
||||
self._update_container_count()
|
||||
|
||||
data = {'account': self.account, 'created_at': '1',
|
||||
'put_timestamp': '1', 'delete_timestamp': '1',
|
||||
'container_count': self.metadata.get(
|
||||
X_CONTAINER_COUNT, (0, 0))[0],
|
||||
'object_count': self.metadata.get(X_OBJECTS_COUNT, (0, 0))[0],
|
||||
'bytes_used': self.metadata.get(X_BYTES_USED, (0, 0))[0],
|
||||
'hash': '', 'id': ''}
|
||||
return data
|
@ -19,8 +19,7 @@ try:
|
||||
except ImportError:
|
||||
from swift.common.swob import HTTPBadRequest
|
||||
import swift.common.constraints
|
||||
import swift.common.ring as _ring
|
||||
from gluster.swift.common import Glusterfs, ring
|
||||
from gluster.swift.common import Glusterfs
|
||||
|
||||
MAX_OBJECT_NAME_COMPONENT_LENGTH = 255
|
||||
UNSUPPORTED_HEADERS = []
|
||||
@ -130,9 +129,3 @@ swift.common.constraints.check_metadata = gluster_check_metadata
|
||||
|
||||
# Replace the original check mount with ours
|
||||
swift.common.constraints.check_mount = Glusterfs.mount
|
||||
|
||||
# Save the original Ring class
|
||||
__Ring = _ring.Ring
|
||||
|
||||
# Replace the original Ring class
|
||||
_ring.Ring = ring.Ring
|
||||
|
@ -1,3 +0,0 @@
|
||||
*.egg-info
|
||||
*.py[co]
|
||||
.DS_Store
|
@ -1,4 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
nosetests test_swauth/unit --exe --with-coverage --cover-package swauth --cover-erase
|
||||
rm -f .coverage
|
@ -1,39 +0,0 @@
|
||||
Maintainer
|
||||
----------
|
||||
Greg Holt
|
||||
|
||||
Original Authors
|
||||
----------------
|
||||
Chuck Thier
|
||||
Greg Holt
|
||||
Greg Lange
|
||||
Jay Payne
|
||||
John Dickinson
|
||||
Michael Barton
|
||||
Will Reese
|
||||
|
||||
Contributors
|
||||
------------
|
||||
Andrew Clay Shafer
|
||||
Anne Gentle
|
||||
Brian K. Jones
|
||||
Caleb Tennis
|
||||
Chmouel Boudjnah
|
||||
Christian Schwede
|
||||
Chris Wedgwood
|
||||
Clay Gerrard
|
||||
Colin Nicholson
|
||||
Conrad Weidenkeller
|
||||
Cory Wright
|
||||
David Goetz
|
||||
Ed Leafe
|
||||
Fujita Tomonori
|
||||
Kapil Thangavelu
|
||||
Monty Taylor
|
||||
Pablo Llopis
|
||||
Paul Jimenez
|
||||
Pete Zaitcev
|
||||
Russ Nelson
|
||||
Scott Simpson
|
||||
Soren Hansen
|
||||
Stephen Milton
|
@ -1,62 +0,0 @@
|
||||
swauth (1.0.8)
|
||||
|
||||
Added request.environ[reseller_request] = True if request is coming from an
|
||||
user in .reseller_admin group
|
||||
|
||||
Fixed to work with newer Swift versions whose memcache clients require a
|
||||
time keyword argument when the older versions required a timeout keyword
|
||||
argument.
|
||||
|
||||
swauth (1.0.7)
|
||||
|
||||
New X-Auth-Token-Lifetime header a user can set to how long they'd like
|
||||
their token to be good for.
|
||||
|
||||
New max_token_life config value for capping the above.
|
||||
|
||||
New X-Auth-Token-Expires header returned with the get token request.
|
||||
|
||||
Switchover to swift.common.swob instead of WebOb; requires Swift >= 1.7.6
|
||||
now.
|
||||
|
||||
swauth (1.0.6)
|
||||
|
||||
Apparently I haven't been keeping up with this CHANGELOG. I'll try to be
|
||||
better onward.
|
||||
|
||||
This release added passing OPTIONS requests through untouched, needed for
|
||||
CORS support in Swift.
|
||||
|
||||
Also, Swauth is a bit more restrictive in deciding when it's the definitive
|
||||
auth for a request.
|
||||
|
||||
swauth (1.0.3-dev)
|
||||
|
||||
This release is still under development. A full change log will be made at
|
||||
release. Until then, you can see what has changed with:
|
||||
|
||||
git log 1.0.2..HEAD
|
||||
|
||||
swauth (1.0.2)
|
||||
|
||||
Fixed bug rejecting requests when using multiple instances of Swauth or
|
||||
Swauth with other auth services.
|
||||
|
||||
Fixed bug interpreting URL-encoded user names and keys.
|
||||
|
||||
Added support for the Swift container sync feature.
|
||||
|
||||
Allowed /not/ setting super_admin_key to disable Swauth administration
|
||||
features.
|
||||
|
||||
Added swauth_remote mode so the Swauth middleware for one Swift cluster
|
||||
could be pointing to the Swauth service on another Swift cluster, sharing
|
||||
account/user data sets.
|
||||
|
||||
Added ability to purge stored tokens.
|
||||
|
||||
Added API documentation for internal Swauth API.
|
||||
|
||||
swauth (1.0.1)
|
||||
|
||||
Initial release after separation from Swift.
|
@ -1,202 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -1,4 +0,0 @@
|
||||
include AUTHORS LICENSE README.md .unittests test_swauth/__init__.py
|
||||
include CHANGELOG
|
||||
graft doc
|
||||
graft etc
|
@ -1,71 +0,0 @@
|
||||
Swauth
|
||||
------
|
||||
|
||||
An Auth Service for Swift as WSGI Middleware that uses Swift itself as a
|
||||
backing store. Sphinx-built docs at: <http://gholt.github.com/swauth/>
|
||||
|
||||
See also <https://github.com/openstack/keystone> for the standard OpenStack
|
||||
auth service.
|
||||
|
||||
|
||||
NOTE
|
||||
----
|
||||
|
||||
**Be sure to review the Sphinx-built docs at:
|
||||
<http://gholt.github.com/swauth/>**
|
||||
|
||||
|
||||
Quick Install
|
||||
-------------
|
||||
|
||||
1) Install Swauth with ``sudo python setup.py install`` or ``sudo python
|
||||
setup.py develop`` or via whatever packaging system you may be using.
|
||||
|
||||
2) Alter your proxy-server.conf pipeline to have swauth instead of tempauth:
|
||||
|
||||
Was:
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache tempauth proxy-server
|
||||
|
||||
Change To:
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache swauth proxy-server
|
||||
|
||||
3) Add to your proxy-server.conf the section for the Swauth WSGI filter:
|
||||
|
||||
[filter:swauth]
|
||||
use = egg:swauth#swauth
|
||||
set log_name = swauth
|
||||
super_admin_key = swauthkey
|
||||
|
||||
4) Be sure your proxy server allows account management:
|
||||
|
||||
[app:proxy-server]
|
||||
...
|
||||
allow_account_management = true
|
||||
|
||||
5) Restart your proxy server ``swift-init proxy reload``
|
||||
|
||||
6) Initialize the Swauth backing store in Swift ``swauth-prep -K swauthkey``
|
||||
|
||||
7) Add an account/user ``swauth-add-user -A http://127.0.0.1:8080/auth/ -K
|
||||
swauthkey -a test tester testing``
|
||||
|
||||
8) Ensure it works ``swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K
|
||||
testing stat -v``
|
||||
|
||||
|
||||
Web Admin Install
|
||||
-----------------
|
||||
|
||||
1) If you installed from packages, you'll need to cd to the webadmin directory
|
||||
the package installed. This is ``/usr/share/doc/python-swauth/webadmin``
|
||||
with the Lucid packages. If you installed from source, you'll need to cd to
|
||||
the webadmin directory in the source directory.
|
||||
|
||||
2) Upload the Web Admin files with ``swift -A http://127.0.0.1:8080/auth/v1.0
|
||||
-U .super_admin:.super_admin -K swauthkey upload .webadmin .``
|
||||
|
||||
3) Open ``http://127.0.0.1:8080/auth/`` in your browser.
|
@ -1,2 +0,0 @@
|
||||
[python: **.py]
|
||||
|
@ -1,80 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
import socket
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin', help='The user with admin rights to add '
|
||||
'accounts (default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 1:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
account = args[0]
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/%s' % (parsed_path, account)
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key,
|
||||
'Content-Length': '0'}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('Account creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('Account creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('Account creation failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('Account creation failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
else:
|
||||
exit('Account creation failed: %s %s' %
|
||||
(resp.status, resp.reason))
|
@ -1,127 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
import socket
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(
|
||||
usage='Usage: %prog [options] <account> <user> <password>')
|
||||
parser.add_option('-a', '--admin', dest='admin', action='store_true',
|
||||
default=False, help='Give the user administrator access; otherwise '
|
||||
'the user will only have access to containers specifically allowed '
|
||||
'with ACLs.')
|
||||
parser.add_option('-r', '--reseller-admin', dest='reseller_admin',
|
||||
action='store_true', default=False, help='Give the user full reseller '
|
||||
'administrator access, giving them full access to all accounts within '
|
||||
'the reseller, including the ability to create new accounts. Creating '
|
||||
'a new reseller admin requires super_admin rights.')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin', help='The user with admin rights to add users '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights to add users is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 3:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
account, user, password = args
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
|
||||
# Check if user is changing his own password. This is carried out by
|
||||
# making sure that the user changing the password and the user whose
|
||||
# password is being changed are the same.
|
||||
# If not, ensure that the account exists before creating new user.
|
||||
if not options.admin_user == (account + ':' + user):
|
||||
# GET the account
|
||||
path = '%sv2/%s' % (parsed_path, account)
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'GET', path,
|
||||
headers, ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
if resp.status // 100 != 2:
|
||||
# If the GET operation fails, it means the account does not
|
||||
# exist. Now we create the account by sending a PUT request.
|
||||
headers['Content-Length'] = '0'
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path,
|
||||
headers, ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
if resp.status // 100 != 2:
|
||||
print 'Account creation failed: %s %s' % \
|
||||
(resp.status, resp.reason)
|
||||
except socket.gaierror, err:
|
||||
exit('User creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('User creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
# Add the user
|
||||
path = '%sv2/%s/%s' % (parsed_path, account, user)
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key,
|
||||
'X-Auth-User-Key': password,
|
||||
'Content-Length': '0'}
|
||||
if options.admin:
|
||||
headers['X-Auth-User-Admin'] = 'true'
|
||||
if options.reseller_admin:
|
||||
headers['X-Auth-User-Reseller-Admin'] = 'true'
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'PUT', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('User creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('User creation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('User creation failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('User creation failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
else:
|
||||
exit('User creation failed: %s %s' %
|
||||
(resp.status, resp.reason))
|
@ -1,202 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
import gettext
|
||||
import re
|
||||
import socket
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
from time import sleep, time
|
||||
|
||||
from swiftclient.client import Connection, ClientException
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='Usage: %prog [options]')
|
||||
parser.add_option('-t', '--token-life', dest='token_life',
|
||||
default='86400', help='The expected life of tokens; token objects '
|
||||
'modified more than this number of seconds ago will be checked for '
|
||||
'expiration (default: 86400).')
|
||||
parser.add_option('-s', '--sleep', dest='sleep',
|
||||
default='0.1', help='The number of seconds to sleep between token '
|
||||
'checks (default: 0.1)')
|
||||
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
|
||||
default=False, help='Outputs everything done instead of just the '
|
||||
'deletions.')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for .super_admin is required.')
|
||||
parser.add_option('', '--purge', dest='purge_account', help='Purges all '
|
||||
'tokens for a given account whether the tokens have expired or not.'
|
||||
' Memcached restart is recommended. Old tokens may still persist in'
|
||||
' memcached.')
|
||||
parser.add_option('', '--purge-all', dest='purge_all', action='store_true',
|
||||
default=False, help='Purges all tokens for all accounts and users '
|
||||
'whether the tokens have expired or not.'
|
||||
' Memcached restart is recommended. Old tokens may still persist in'
|
||||
' memcached.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 0:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
|
||||
options.admin_url = options.admin_url.rstrip('/')
|
||||
if not options.admin_url.endswith('/v1.0'):
|
||||
options.admin_url += '/v1.0'
|
||||
options.admin_user = '.super_admin:.super_admin'
|
||||
|
||||
try:
|
||||
options.token_life = timedelta(0, float(options.token_life))
|
||||
options.sleep = float(options.sleep)
|
||||
except ValueError:
|
||||
parser.parse_args(['-h'])
|
||||
|
||||
conn = Connection(options.admin_url, options.admin_user, options.admin_key)
|
||||
if options.purge_account:
|
||||
marker = None
|
||||
while True:
|
||||
if options.verbose:
|
||||
print 'GET %s?marker=%s' % (options.purge_account, marker)
|
||||
try:
|
||||
objs = conn.get_container(options.purge_account,
|
||||
marker=marker)[1]
|
||||
except ClientException, e:
|
||||
if e.http_status == 404:
|
||||
exit('Account %s not found.' % (options.purge_account))
|
||||
elif e.http_status == 401:
|
||||
exit('Cleanup tokens failed: 401 Unauthorized: ' \
|
||||
'Invalid user/key provided')
|
||||
else:
|
||||
exit('Purging %s failed with status '
|
||||
'code %d' % (options.purge_account, e.http_status))
|
||||
except socket.error, (errno, msg):
|
||||
exit('Token clean-up failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
if objs:
|
||||
marker = objs[-1]['name']
|
||||
else:
|
||||
if options.verbose:
|
||||
print 'No more objects in %s' % options.purge_account
|
||||
break
|
||||
for obj in objs:
|
||||
if options.verbose:
|
||||
print 'HEAD %s/%s' % (options.purge_account, obj['name'])
|
||||
headers = conn.head_object(options.purge_account, obj['name'])
|
||||
if 'x-object-meta-auth-token' in headers:
|
||||
token = headers['x-object-meta-auth-token']
|
||||
container = '.token_%s' % token[-1]
|
||||
if options.verbose:
|
||||
print '%s/%s purge account %r; deleting' % \
|
||||
(container, token, options.purge_account)
|
||||
print 'DELETE %s/%s' % (container, token)
|
||||
try:
|
||||
conn.delete_object(container, token)
|
||||
except ClientException, err:
|
||||
if err.http_status != 404:
|
||||
raise
|
||||
continue
|
||||
if options.verbose:
|
||||
print 'Done.'
|
||||
exit(0)
|
||||
for x in xrange(16):
|
||||
container = '.token_%x' % x
|
||||
marker = None
|
||||
while True:
|
||||
if options.verbose:
|
||||
print 'GET %s?marker=%s' % (container, marker)
|
||||
try:
|
||||
objs = conn.get_container(container, marker=marker)[1]
|
||||
except ClientException, e:
|
||||
if e.http_status == 404:
|
||||
exit('Container %s not found. gswauth-prep needs to be '
|
||||
'rerun' % (container))
|
||||
elif e.http_status == 401:
|
||||
exit('Cleanup tokens failed: 401 Unauthorized: ' \
|
||||
'Invalid user/key provided')
|
||||
else:
|
||||
exit('Object listing on container %s failed with status '
|
||||
'code %d' % (container, e.http_status))
|
||||
except socket.error, (errno, msg):
|
||||
exit('Token clean-up failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if objs:
|
||||
marker = objs[-1]['name']
|
||||
else:
|
||||
if options.verbose:
|
||||
print 'No more objects in %s' % container
|
||||
break
|
||||
for obj in objs:
|
||||
if options.purge_all:
|
||||
if options.verbose:
|
||||
print '%s/%s purge all; deleting' % \
|
||||
(container, obj['name'])
|
||||
print 'DELETE %s/%s' % (container, obj['name'])
|
||||
try:
|
||||
conn.delete_object(container, obj['name'])
|
||||
except ClientException, err:
|
||||
if err.http_status != 404:
|
||||
raise
|
||||
continue
|
||||
last_modified = datetime(*map(int, re.split('[^\d]',
|
||||
obj['last_modified'])[:-1]))
|
||||
ago = datetime.utcnow() - last_modified
|
||||
if ago > options.token_life:
|
||||
if options.verbose:
|
||||
print '%s/%s last modified %ss ago; investigating' % \
|
||||
(container, obj['name'],
|
||||
ago.days * 86400 + ago.seconds)
|
||||
print 'GET %s/%s' % (container, obj['name'])
|
||||
detail = conn.get_object(container, obj['name'])[1]
|
||||
detail = json.loads(detail)
|
||||
if detail['expires'] < time():
|
||||
if options.verbose:
|
||||
print '%s/%s expired %ds ago; deleting' % \
|
||||
(container, obj['name'],
|
||||
time() - detail['expires'])
|
||||
print 'DELETE %s/%s' % (container, obj['name'])
|
||||
try:
|
||||
conn.delete_object(container, obj['name'])
|
||||
except ClientException, e:
|
||||
if e.http_status != 404:
|
||||
print 'DELETE of %s/%s failed with status ' \
|
||||
'code %d' % (container, obj['name'],
|
||||
e.http_status)
|
||||
elif options.verbose:
|
||||
print "%s/%s won't expire for %ds; skipping" % \
|
||||
(container, obj['name'],
|
||||
detail['expires'] - time())
|
||||
elif options.verbose:
|
||||
print '%s/%s last modified %ss ago; skipping' % \
|
||||
(container, obj['name'],
|
||||
ago.days * 86400 + ago.seconds)
|
||||
sleep(options.sleep)
|
||||
if options.verbose:
|
||||
print 'Done.'
|
||||
print 'Recommended to restart memcached as old invalid tokens may' \
|
||||
' still persist in memcached.'
|
@ -1,86 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
import socket
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='Usage: %prog [options] <account>')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin',
|
||||
help='The user with admin rights to delete accounts '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights to delete accounts '
|
||||
'is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 1:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
account = args[0]
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/%s' % (parsed_path, account)
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('Account deletion failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('Account deletion failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('Delete account failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('Delete account failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 404:
|
||||
exit('Delete account failed: %s %s: Account %s does not exist' %
|
||||
(resp.status, resp.reason, account))
|
||||
elif resp.status == 409:
|
||||
exit('Delete account failed: %s %s: Account %s contains active users. '
|
||||
'Delete all users first.' % (resp.status, resp.reason, account))
|
||||
else:
|
||||
exit('Delete account failed: %s %s' % (resp.status, resp.reason))
|
@ -1,83 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
import socket
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='Usage: %prog [options] <account> <user>')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin',
|
||||
help='The user with admin rights to delete users '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights to delete '
|
||||
'users is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 2:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
account, user = args
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/%s/%s' % (parsed_path, account, user)
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'DELETE', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('User deletion failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('User deletion failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('Delete user failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('Delete user failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 404:
|
||||
exit('Delete user failed: %s %s: User %s does not exist' %
|
||||
(resp.status, resp.reason, user))
|
||||
else:
|
||||
exit('Delete user failed: %s %s' % (resp.status, resp.reason))
|
@ -1,117 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
import gettext
|
||||
import socket
|
||||
import types
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
from prettytable import PrettyTable
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='''
|
||||
Usage: %prog [options] [account] [user]
|
||||
|
||||
If [account] and [user] are omitted, a list of accounts will be output.
|
||||
|
||||
If [account] is included but not [user], a list of users within the account
|
||||
will be output.
|
||||
|
||||
If [account] and [user] are included, a list of groups the user belongs to
|
||||
will be ouptput.
|
||||
|
||||
If the [user] is '.groups', the active groups for the account will be listed.
|
||||
'''.strip())
|
||||
parser.add_option('-p', '--plain-text', dest='plain_text',
|
||||
action='store_true', default=False, help='Changes the output from '
|
||||
'JSON to plain text. This will cause an account to list only the '
|
||||
'users and a user to list only the groups.')
|
||||
parser.add_option('-j', '--json', dest='json_format',
|
||||
action='store_true', default=False, help='Output in JSON format. '
|
||||
'This will print all information about given account or user, '
|
||||
'including stored password.')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin', help='The user with admin rights '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) > 2:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/%s' % (parsed_path, '/'.join(args))
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'GET', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('List failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('List failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
body = resp.read()
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('List failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('List failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
else:
|
||||
exit('List failed: %s %s' % (resp.status, resp.reason))
|
||||
if options.plain_text:
|
||||
info = json.loads(body)
|
||||
for group in info[['accounts', 'users', 'groups'][len(args)]]:
|
||||
print group['name']
|
||||
elif options.json_format:
|
||||
print body
|
||||
else:
|
||||
info = json.loads(body)
|
||||
h = ['accounts', 'users', 'groups'][len(args)]
|
||||
table = PrettyTable([h.title()])
|
||||
for group in info[h]:
|
||||
table.add_row([group['name']])
|
||||
print table
|
@ -1,75 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
import socket
|
||||
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='Usage: %prog [options]')
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin', help='The user with admin rights '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if args:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/.prep' % parsed_path
|
||||
headers = {'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('gswauth preparation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('gswauth preparation failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('gswauth preparation failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
else:
|
||||
exit('gswauth preparation failed: %s %s' %
|
||||
(resp.status, resp.reason))
|
@ -1,89 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
import gettext
|
||||
import socket
|
||||
from optparse import OptionParser
|
||||
from sys import argv, exit
|
||||
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from swift.common.utils import urlparse
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
gettext.install('gswauth', unicode=1)
|
||||
parser = OptionParser(usage='''
|
||||
Usage: %prog [options] <account> <service> <name> <value>
|
||||
|
||||
Sets a service URL for an account. Can only be set by a reseller admin.
|
||||
|
||||
Example: %prog -K gswauthkey test storage local http://127.0.0.1:8080/v1/AUTH_018c3946-23f8-4efb-a8fb-b67aae8e4162
|
||||
'''.strip())
|
||||
parser.add_option('-A', '--admin-url', dest='admin_url',
|
||||
default='http://127.0.0.1:8080/auth/', help='The URL to the auth '
|
||||
'subsystem (default: http://127.0.0.1:8080/auth/)')
|
||||
parser.add_option('-U', '--admin-user', dest='admin_user',
|
||||
default='.super_admin', help='The user with admin rights '
|
||||
'(default: .super_admin).')
|
||||
parser.add_option('-K', '--admin-key', dest='admin_key',
|
||||
help='The key for the user with admin rights is required.')
|
||||
args = argv[1:]
|
||||
if not args:
|
||||
args.append('-h')
|
||||
(options, args) = parser.parse_args(args)
|
||||
if len(args) != 4:
|
||||
parser.parse_args(['-h'])
|
||||
if options.admin_key is None:
|
||||
parser.parse_args(['-h'])
|
||||
account, service, name, url = args
|
||||
parsed = urlparse(options.admin_url)
|
||||
if parsed.scheme not in ('http', 'https'):
|
||||
raise Exception('Cannot handle protocol scheme %s for url %s' %
|
||||
(parsed.scheme, repr(options.admin_url)))
|
||||
parsed_path = parsed.path
|
||||
if not parsed_path:
|
||||
parsed_path = '/'
|
||||
elif parsed_path[-1] != '/':
|
||||
parsed_path += '/'
|
||||
path = '%sv2/%s/.services' % (parsed_path, account)
|
||||
body = json.dumps({service: {name: url}})
|
||||
headers = {'Content-Length': str(len(body)),
|
||||
'X-Auth-Admin-User': options.admin_user,
|
||||
'X-Auth-Admin-Key': options.admin_key}
|
||||
try:
|
||||
conn = http_connect(parsed.hostname, parsed.port, 'POST', path, headers,
|
||||
ssl=(parsed.scheme == 'https'))
|
||||
conn.send(body)
|
||||
resp = conn.getresponse()
|
||||
except socket.gaierror, err:
|
||||
exit('Service set failed: %s. ' \
|
||||
'Check that the admin_url is valid' % err)
|
||||
except socket.error, (errno, msg):
|
||||
exit('Service set failed: %s. ' \
|
||||
'Check that the admin_url is valid' % msg)
|
||||
if resp.status // 100 != 2:
|
||||
if resp.status == 401:
|
||||
exit('Service set failed: %s %s: Invalid user/key provided' %
|
||||
(resp.status, resp.reason))
|
||||
elif resp.status == 403:
|
||||
exit('Service set failed: %s %s: Insufficient privileges' %
|
||||
(resp.status, resp.reason))
|
||||
else:
|
||||
exit('Service set failed: %s %s' % (resp.status, resp.reason))
|
@ -1,466 +0,0 @@
|
||||
.. _api_top:
|
||||
|
||||
----------
|
||||
Swauth API
|
||||
----------
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
Swauth has its own internal versioned REST API for adding, removing,
|
||||
and editing accounts. This document explains the v2 API.
|
||||
|
||||
Authentication
|
||||
--------------
|
||||
|
||||
Each REST request against the swauth API requires the inclusion of a
|
||||
specific authorization user and key to be passed in a specific HTTP
|
||||
header. These headers are defined as ``X-Auth-Admin-User`` and
|
||||
``X-Auth-Admin-Key``.
|
||||
|
||||
Typically, these values are ``.super_admin`` (the site super admin
|
||||
user) with the key being specified in the swauth middleware
|
||||
configuration as ``super_admin_key``.
|
||||
|
||||
This could also be a reseller admin with the appropriate rights to
|
||||
perform actions on reseller accounts.
|
||||
|
||||
Endpoints
|
||||
---------
|
||||
|
||||
The swauth API endpoint is presented on the proxy servers, in the
|
||||
"/auth" namespace. In addition, the API is versioned, and the version
|
||||
documented is version 2. API versions subdivide the auth namespace by
|
||||
version, specified as a version identifier like "v2".
|
||||
|
||||
The auth endpoint described herein is therefore located at "/auth/v2/"
|
||||
as presented by the proxy servers.
|
||||
|
||||
Bear in mind that in order for the auth management API to be
|
||||
presented, it must be enabled in the proxy server config by setting
|
||||
``allow_account_managment`` to ``true`` in the ``[app:proxy-server]``
|
||||
stanza of your proxy-server.conf.
|
||||
|
||||
Responses
|
||||
---------
|
||||
|
||||
Responses from the auth APIs are returned as a JSON structure.
|
||||
Example return values in this document are edited for readability.
|
||||
|
||||
|
||||
Reseller/Admin Services
|
||||
=======================
|
||||
|
||||
Operations can be performed against the endpoint itself to perform
|
||||
general administrative operations. Currently, the only operations
|
||||
that can be performed is a GET operation to get reseller or site admin
|
||||
information.
|
||||
|
||||
Get Admin Info
|
||||
--------------
|
||||
|
||||
A GET request at the swauth endpoint will return reseller information
|
||||
for the account specified in the ``X-Auth-Admin-User`` header.
|
||||
Currently, the information returned is limited to a list of accounts
|
||||
for the reseller or site admin.
|
||||
|
||||
Valid return codes:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
GET /auth/<api version>/ HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -D - https://<endpoint>/auth/v2/ \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Result::
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
|
||||
{ "accounts":
|
||||
[
|
||||
{ "name": "account1" },
|
||||
{ "name": "account2" },
|
||||
{ "name": "account3" }
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
Account Services
|
||||
================
|
||||
|
||||
There are API request to get account details, create, and delete
|
||||
accounts, mapping logically to the REST verbs GET, PUT, and DELETE.
|
||||
These actions are performed against an account URI, in the following
|
||||
general request structure::
|
||||
|
||||
METHOD /auth/<version>/<account> HTTP/1.1
|
||||
|
||||
The methods that can be used are detailed below.
|
||||
|
||||
Get Account Details
|
||||
-------------------
|
||||
|
||||
Account details can be retrieved by performing a GET request against
|
||||
an account URI. On success, a JSON dictionary will be returned
|
||||
containing the keys `account_id`, `services`, and `users`. The
|
||||
`account_id` is the value used when creating service accounts. The
|
||||
`services` value is a dict that represents valid storage cluster
|
||||
endpoints, and which endpoint is the default. The 'users' value is a
|
||||
list of dicts, each dict representing a user and currently only
|
||||
containing the single key 'name'.
|
||||
|
||||
Valid Responses:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
GET /auth/<api version>/<account> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -D - https://<endpoint>/auth/v2/<account> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
|
||||
{ "services":
|
||||
{ "storage":
|
||||
{ "default": "local",
|
||||
"local": "https://<storage endpoint>/v1/<account_id>" },
|
||||
},
|
||||
"account_id": "<account_id>",
|
||||
"users": [ { "name": "user1" },
|
||||
{ "name": "user2" } ]
|
||||
}
|
||||
|
||||
Create Account
|
||||
--------------
|
||||
|
||||
An account can be created with a PUT request against a non-existent
|
||||
account. By default, a newly created UUID4 will be used with the
|
||||
reseller prefix as the account ID used when creating corresponding
|
||||
service accounts. However, you can provide an X-Account-Suffix header
|
||||
to replace the UUDI4 part.
|
||||
|
||||
Valid return codes:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
GET /auth/<api version>/<new_account> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -D - https://<endpoint>/auth/v2/<new_account> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 201 Created
|
||||
|
||||
|
||||
Delete Account
|
||||
--------------
|
||||
|
||||
An account can be deleted with a DELETE request against an existing
|
||||
account.
|
||||
|
||||
Valid Responses:
|
||||
* 204: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 404: Account not found
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
DELETE /auth/<api version>/<account> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -XDELETE -D - https://<endpoint>/auth/v2/<account> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 204 No Content
|
||||
|
||||
|
||||
User Services
|
||||
=============
|
||||
|
||||
Each account in swauth contains zero or more users. These users can
|
||||
be determined with the 'Get Account Details' API request against an
|
||||
account.
|
||||
|
||||
Users in an account can be created, modified, and detailed as
|
||||
described below by apply the appropriate REST verbs to a user URI, in
|
||||
the following general request structure::
|
||||
|
||||
METHOD /auth/<version>/<account>/<user> HTTP/1.1
|
||||
|
||||
The methods that can be used are detailed below.
|
||||
|
||||
Get User Details
|
||||
----------------
|
||||
|
||||
User details can be retrieved by performing a GET request against
|
||||
a user URI. On success, a JSON dictionary will be returned as
|
||||
described::
|
||||
|
||||
{"groups": [ # List of groups the user is a member of
|
||||
{"name": "<act>:<usr>"},
|
||||
# The first group is a unique user identifier
|
||||
{"name": "<account>"},
|
||||
# The second group is the auth account name
|
||||
{"name": "<additional-group>"}
|
||||
# There may be additional groups, .admin being a
|
||||
# special group indicating an account admin and
|
||||
# .reseller_admin indicating a reseller admin.
|
||||
],
|
||||
"auth": "<auth-type>:<key>"
|
||||
# The auth-type and key for the user; currently only
|
||||
# plaintext and sha1 are implemented as auth types.
|
||||
}
|
||||
|
||||
For example::
|
||||
|
||||
{"groups": [{"name": "test:tester"}, {"name": "test"},
|
||||
{"name": ".admin"}],
|
||||
"auth": "plaintext:testing"}
|
||||
|
||||
Valid Responses:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 404: Unknown account
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
GET /auth/<api version>/<account>/<user> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -D - https://<endpoint>/auth/v2/<account>/<user> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 200 Ok
|
||||
|
||||
{ "groups": [ { "name": "<account>:<user>" },
|
||||
{ "name": "<user>" },
|
||||
{ "name": ".admin" } ],
|
||||
"auth" : "plaintext:password" }
|
||||
|
||||
|
||||
Create User
|
||||
-----------
|
||||
|
||||
A user can be created with a PUT request against a non-existent
|
||||
user URI. The new user's password must be set using the
|
||||
``X-Auth-User-Key`` header. The user name MUST NOT start with a
|
||||
period ('.'). This requirement is enforced by the API, and will
|
||||
result in a 400 error.
|
||||
|
||||
Optional Headers:
|
||||
|
||||
* ``X-Auth-User-Admin: true``: create the user as an account admin
|
||||
* ``X-Auth-User-Reseller-Admin: true``: create the user as a reseller
|
||||
admin
|
||||
|
||||
Reseller admin accounts can only be created by the site admin, while
|
||||
regular accounts (or account admin accounts) can be created by an
|
||||
account admin, an appropriate reseller admin, or the site admin.
|
||||
|
||||
Note that PUT requests are idempotent, and the PUT request serves as
|
||||
both a request and modify action.
|
||||
|
||||
Valid Responses:
|
||||
* 200: Success
|
||||
* 400: Invalid request (missing required headers)
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key, or insufficient priv
|
||||
* 404: Unknown account
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
PUT /auth/<api version>/<account>/<user> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
X-Auth-User-Admin: true
|
||||
X-Auth-User-Key: secret
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -XPUT -D - https://<endpoint>/auth/v2/<account>/<user> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey" \
|
||||
-H "X-Auth-User-Admin: true" \
|
||||
-H "X-Auth-User-Key: secret"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 201 Created
|
||||
|
||||
Delete User
|
||||
-----------
|
||||
|
||||
A user can be deleted by performing a DELETE request against a user
|
||||
URI. This action can only be performed by an account admin,
|
||||
appropriate reseller admin, or site admin.
|
||||
|
||||
Valid Responses:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key, or insufficient priv
|
||||
* 404: Unknown account or user
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
DELETE /auth/<api version>/<account>/<user> HTTP/1.1
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -XDELETE -D - https://<endpoint>/auth/v2/<account>/<user> \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 204 No Content
|
||||
|
||||
|
||||
Other Services
|
||||
==============
|
||||
|
||||
There are several other swauth functions that can be performed, mostly
|
||||
done via "pseudo-user" accounts. These are well-known user names that
|
||||
are unable to be actually provisioned. These pseudo-users are
|
||||
described below.
|
||||
|
||||
.. _api_set_service_endpoints:
|
||||
|
||||
Set Service Endpoints
|
||||
---------------------
|
||||
|
||||
Service endpoint information can be retrived using the _`Get Account
|
||||
Details` API method.
|
||||
|
||||
This function allows setting values within this section for
|
||||
the <account>, allowing the addition of new service end points
|
||||
or updating existing ones by performing a POST to the URI
|
||||
corresponding to the pseudo-user ".services".
|
||||
|
||||
The body of the POST request should contain a JSON dict with
|
||||
the following format::
|
||||
|
||||
{"service_name": {"end_point_name": "end_point_value"}}
|
||||
|
||||
There can be multiple services and multiple end points in the
|
||||
same call.
|
||||
|
||||
Any new services or end points will be added to the existing
|
||||
set of services and end points. Any existing services with the
|
||||
same service name will be merged with the new end points. Any
|
||||
existing end points with the same end point name will have
|
||||
their values updated.
|
||||
|
||||
The updated services dictionary will be returned on success.
|
||||
|
||||
Valid Responses:
|
||||
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 404: Account not found
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
POST /auth/<api version>/<account>/.services HTTP/1.0
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
{"storage": { "local": "<new endpoint>" }}
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -XPOST -D - https://<endpoint>/auth/v2/<account>/.services \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey" --data-binary \
|
||||
'{ "storage": { "local": "<new endpoint>" }}'
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
|
||||
{"storage": {"default": "local", "local": "<new endpoint>" }}
|
||||
|
||||
Get Account Groups
|
||||
------------------
|
||||
|
||||
Individual user group information can be retrieved using the `Get User Details`_ API method.
|
||||
|
||||
This function allows retrieving all group information for all users in
|
||||
an existing account. This can be achieved using a GET action against
|
||||
a user URI with the pseudo-user ".groups".
|
||||
|
||||
The JSON dictionary returned will be a "groups" dictionary similar to
|
||||
that documented in the `Get User Details`_ method, but representing
|
||||
the summary of all groups utilized by all active users in the account.
|
||||
|
||||
Valid Responses:
|
||||
* 200: Success
|
||||
* 403: Invalid X-Auth-Admin-User/X-Auth-Admin-Key
|
||||
* 404: Account not found
|
||||
* 5xx: Internal error
|
||||
|
||||
Example Request::
|
||||
|
||||
GET /auth/<api version>/<account>/.groups
|
||||
X-Auth-Admin-User: .super_admin
|
||||
X-Auth-Admin-Key: swauthkey
|
||||
|
||||
Example Curl Request::
|
||||
|
||||
curl -D - https://<endpoint>/auth/v2/<account>/.groups \
|
||||
-H "X-Auth-Admin-User: .super_admin" \
|
||||
-H "X-Auth-Admin-Key: swauthkey"
|
||||
|
||||
Example Response::
|
||||
|
||||
HTTP/1.1 200 OK
|
||||
|
||||
{ "groups": [ { "name": ".admin" },
|
||||
{ "name": "<account>" },
|
||||
{ "name": "<account>:user1" },
|
||||
{ "name": "<account>:user2" } ] }
|
||||
|
@ -1,10 +0,0 @@
|
||||
.. _swauth_authtypes_module:
|
||||
|
||||
swauth.authtypes
|
||||
=================
|
||||
|
||||
.. automodule:: swauth.authtypes
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
||||
:noindex:
|
@ -1,233 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#
|
||||
# Swauth documentation build configuration file, created by
|
||||
# sphinx-quickstart on Mon Feb 14 19:34:51 2011.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys, os
|
||||
|
||||
import swauth
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'Swauth'
|
||||
copyright = u'2010-2011, OpenStack, LLC'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '.'.join(str(v) for v in swauth.version_info[:2])
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = swauth.version
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = []
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'Swauthdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'Swauth.tex', u'Swauth Documentation',
|
||||
u'OpenStack, LLC', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'swauth', u'Swauth Documentation',
|
||||
[u'OpenStack, LLC'], 1)
|
||||
]
|
@ -1,159 +0,0 @@
|
||||
----------------------
|
||||
Implementation Details
|
||||
----------------------
|
||||
|
||||
The Swauth system is a scalable authentication and authorization system that
|
||||
uses Swift itself as its backing store. This section will describe how it
|
||||
stores its data.
|
||||
|
||||
.. note::
|
||||
|
||||
You can access Swauth's internal .auth account by using the account:user of
|
||||
.super_admin:.super_admin and the super admin key you have set in your
|
||||
configuration. Here's an example using `st` on a standard SAIO: ``st -A
|
||||
http://127.0.0.1:8080/auth/v1.0 -U .super_admin:.super_admin -K swauthkey
|
||||
stat``
|
||||
|
||||
At the topmost level, the auth system has its own Swift account it stores its
|
||||
own account information within. This Swift account is known as
|
||||
self.auth_account in the code and its name is in the format
|
||||
self.reseller_prefix + ".auth". In this text, we'll refer to this account as
|
||||
<auth_account>.
|
||||
|
||||
The containers whose names do not begin with a period represent the accounts
|
||||
within the auth service. For example, the <auth_account>/test container would
|
||||
represent the "test" account.
|
||||
|
||||
The objects within each container represent the users for that auth service
|
||||
account. For example, the <auth_account>/test/bob object would represent the
|
||||
user "bob" within the auth service account of "test". Each of these user
|
||||
objects contain a JSON dictionary of the format::
|
||||
|
||||
{"auth": "<auth_type>:<auth_value>", "groups": <groups_array>}
|
||||
|
||||
The `<auth_type>` specifies how the user key is encoded. The default is `plaintext`,
|
||||
which saves the user's key in plaintext in the `<auth_value>` field.
|
||||
The value `sha1` is supported as well, which stores the user's key as a salted
|
||||
SHA1 hash. Note that using a one-way hash like SHA1 will likely inhibit future use of key-signing request types, assuming such support is added. The `<auth_type>` can be specified in the swauth section of the proxy server's
|
||||
config file, along with the salt value in the following way::
|
||||
|
||||
auth_type = <auth_type>
|
||||
auth_type_salt = <salt-value>
|
||||
|
||||
Both fields are optional. auth_type defaults to `plaintext` and auth_type_salt defaults to "swauthsalt". Additional auth types can be implemented along with existing ones in the authtypes.py module.
|
||||
|
||||
The `<groups_array>` contains at least two groups. The first is a unique group
|
||||
identifying that user and it's name is of the format `<user>:<account>`. The
|
||||
second group is the `<account>` itself. Additional groups of `.admin` for
|
||||
account administrators and `.reseller_admin` for reseller administrators may
|
||||
exist. Here's an example user JSON dictionary::
|
||||
|
||||
{"auth": "plaintext:testing",
|
||||
"groups": ["name": "test:tester", "name": "test", "name": ".admin"]}
|
||||
|
||||
To map an auth service account to a Swift storage account, the Service Account
|
||||
Id string is stored in the `X-Container-Meta-Account-Id` header for the
|
||||
<auth_account>/<account> container. To map back the other way, an
|
||||
<auth_account>/.account_id/<account_id> object is created with the contents of
|
||||
the corresponding auth service's account name.
|
||||
|
||||
Also, to support a future where the auth service will support multiple Swift
|
||||
clusters or even multiple services for the same auth service account, an
|
||||
<auth_account>/<account>/.services object is created with its contents having a
|
||||
JSON dictionary of the format::
|
||||
|
||||
{"storage": {"default": "local", "local": <url>}}
|
||||
|
||||
The "default" is always "local" right now, and "local" is always the single
|
||||
Swift cluster URL; but in the future there can be more than one cluster with
|
||||
various names instead of just "local", and the "default" key's value will
|
||||
contain the primary cluster to use for that account. Also, there may be more
|
||||
services in addition to the current "storage" service right now.
|
||||
|
||||
Here's an example .services dictionary at the moment::
|
||||
|
||||
{"storage":
|
||||
{"default": "local",
|
||||
"local": "http://127.0.0.1:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}
|
||||
|
||||
But, here's an example of what the dictionary may look like in the future::
|
||||
|
||||
{"storage":
|
||||
{"default": "dfw",
|
||||
"dfw": "http://dfw.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||
"ord": "http://ord.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||
"sat": "http://ord.storage.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"},
|
||||
"servers":
|
||||
{"default": "dfw",
|
||||
"dfw": "http://dfw.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||
"ord": "http://ord.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||
"sat": "http://ord.servers.com:8080/v1/AUTH_8980f74b1cda41e483cbe0a925f448a9"}}
|
||||
|
||||
Lastly, the tokens themselves are stored as objects in the
|
||||
`<auth_account>/.token_[0-f]` containers. The names of the objects are the
|
||||
token strings themselves, such as `AUTH_tked86bbd01864458aa2bd746879438d5a`.
|
||||
The exact `.token_[0-f]` container chosen is based on the final digit of the
|
||||
token name, such as `.token_a` for the token
|
||||
`AUTH_tked86bbd01864458aa2bd746879438d5a`. The contents of the token objects
|
||||
are JSON dictionaries of the format::
|
||||
|
||||
{"account": <account>,
|
||||
"user": <user>,
|
||||
"account_id": <account_id>,
|
||||
"groups": <groups_array>,
|
||||
"expires": <time.time() value>}
|
||||
|
||||
The `<account>` is the auth service account's name for that token. The `<user>`
|
||||
is the user within the account for that token. The `<account_id>` is the
|
||||
same as the `X-Container-Meta-Account-Id` for the auth service's account,
|
||||
as described above. The `<groups_array>` is the user's groups, as described
|
||||
above with the user object. The "expires" value indicates when the token is no
|
||||
longer valid, as compared to Python's time.time() value.
|
||||
|
||||
Here's an example token object's JSON dictionary::
|
||||
|
||||
{"account": "test",
|
||||
"user": "tester",
|
||||
"account_id": "AUTH_8980f74b1cda41e483cbe0a925f448a9",
|
||||
"groups": ["name": "test:tester", "name": "test", "name": ".admin"],
|
||||
"expires": 1291273147.1624689}
|
||||
|
||||
To easily map a user to an already issued token, the token name is stored in
|
||||
the user object's `X-Object-Meta-Auth-Token` header.
|
||||
|
||||
Here is an example full listing of an <auth_account>::
|
||||
|
||||
.account_id
|
||||
AUTH_2282f516-559f-4966-b239-b5c88829e927
|
||||
AUTH_f6f57a3c-33b5-4e85-95a5-a801e67505c8
|
||||
AUTH_fea96a36-c177-4ca4-8c7e-b8c715d9d37b
|
||||
.token_0
|
||||
.token_1
|
||||
.token_2
|
||||
.token_3
|
||||
.token_4
|
||||
.token_5
|
||||
.token_6
|
||||
AUTH_tk9d2941b13d524b268367116ef956dee6
|
||||
.token_7
|
||||
.token_8
|
||||
AUTH_tk93627c6324c64f78be746f1e6a4e3f98
|
||||
.token_9
|
||||
.token_a
|
||||
.token_b
|
||||
.token_c
|
||||
.token_d
|
||||
.token_e
|
||||
AUTH_tk0d37d286af2c43ffad06e99112b3ec4e
|
||||
.token_f
|
||||
AUTH_tk766bbde93771489982d8dc76979d11cf
|
||||
reseller
|
||||
.services
|
||||
reseller
|
||||
test
|
||||
.services
|
||||
tester
|
||||
tester3
|
||||
test2
|
||||
.services
|
||||
tester2
|
@ -1,142 +0,0 @@
|
||||
.. Swauth documentation master file, created by
|
||||
sphinx-quickstart on Mon Feb 14 19:34:51 2011.
|
||||
You can adapt this file completely to your liking, but it should at least
|
||||
contain the root `toctree` directive.
|
||||
|
||||
Swauth
|
||||
======
|
||||
|
||||
Copyright (c) 2010-2012 OpenStack, LLC
|
||||
|
||||
An Auth Service for Swift as WSGI Middleware that uses Swift itself as a
|
||||
backing store. Sphinx-built docs at: http://gholt.github.com/swauth/
|
||||
Source available at: https://github.com/gholt/swauth
|
||||
|
||||
See also https://github.com/openstack/keystone for the standard OpenStack
|
||||
auth service.
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
Before discussing how to install Swauth within a Swift system, it might help to understand how Swauth does it work first.
|
||||
|
||||
1. Swauth is middleware installed in the Swift Proxy's WSGI pipeline.
|
||||
|
||||
2. It intercepts requests to ``/auth/`` (by default).
|
||||
|
||||
3. It also uses Swift's `authorize callback <http://swift.openstack.org/development_auth.html>`_ and `acl callback <http://swift.openstack.org/misc.html#module-swift.common.middleware.acl>`_ features to authorize Swift requests.
|
||||
|
||||
4. Swauth will also make various internal calls to the Swift WSGI pipeline it's installed in to manipulate containers and objects within an ``AUTH_.auth`` (by default) Swift account. These containers and objects are what store account and user information.
|
||||
|
||||
5. Instead of #4, Swauth can be configured to call out to another remote Swauth to perform #4 on its behalf (using the swauth_remote config value).
|
||||
|
||||
6. When managing accounts and users with the various ``swauth-`` command line tools, these tools are actually just performing HTTP requests against the ``/auth/`` end point referenced in #2. You can make your own tools that use the same :ref:`API <api_top>`.
|
||||
|
||||
7. In the special case of creating a new account, Swauth will do its usual WSGI-internal requests as per #4 but will also call out to the Swift cluster to create the actual Swift account.
|
||||
|
||||
a. This Swift cluster callout is an account PUT request to the URL defined by the ``swift_default_cluster`` config value.
|
||||
|
||||
b. This callout end point is also saved when the account is created so that it can be given to the users of that account in the future.
|
||||
|
||||
c. Sometimes, due to public/private network routing or firewalling, the URL Swauth should use should be different than the URL Swauth should give the users later. That is why the ``default_swift_cluster`` config value can accept two URLs (first is the one for users, second is the one for Swauth).
|
||||
|
||||
d. Once an account is created, the URL given to users for that account will not change, even if the ``default_swift_cluster`` config value changes. This is so that you can use multiple clusters with the same Swauth system; ``default_swift_cluster`` just points to the one where you want new users to go.
|
||||
|
||||
f. You can change the stored URL for an account if need be with the ``swauth-set-account-service`` command line tool or a POST request (see :ref:`API <api_set_service_endpoints>`).
|
||||
|
||||
|
||||
Install
|
||||
-------
|
||||
|
||||
1) Install Swauth with ``sudo python setup.py install`` or ``sudo python
|
||||
setup.py develop`` or via whatever packaging system you may be using.
|
||||
|
||||
2) Alter your ``proxy-server.conf`` pipeline to have ``swauth`` instead of ``tempauth``:
|
||||
|
||||
Was::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache tempauth proxy-server
|
||||
|
||||
Change To::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache swauth proxy-server
|
||||
|
||||
3) Add to your ``proxy-server.conf`` the section for the Swauth WSGI filter::
|
||||
|
||||
[filter:swauth]
|
||||
use = egg:swauth#swauth
|
||||
set log_name = swauth
|
||||
super_admin_key = swauthkey
|
||||
default_swift_cluster = <your setting as discussed below>
|
||||
|
||||
The ``default_swift_cluster`` setting can be confusing.
|
||||
|
||||
a. If you're using an all-in-one type configuration where everything will be run on the local host on port 8080, you can omit the ``default_swift_cluster`` completely and it will default to ``local#http://127.0.0.1:8080/v1``.
|
||||
|
||||
b. If you're using a single Swift proxy you can just set the ``default_swift_cluster = cluster_name#https://<public_ip>:<port>/v1`` and that URL will be given to users as well as used by Swauth internally. (Quick note: be sure the ``http`` vs. ``https`` is set right depending on if you're using SSL.)
|
||||
|
||||
c. If you're using multiple Swift proxies behind a load balancer, you'll probably want ``default_swift_cluster = cluster_name#https://<load_balancer_ip>:<port>/v1#http://127.0.0.1:<port>/v1`` so that Swauth gives out the first URL but uses the second URL internally. Remember to double-check the ``http`` vs. ``https`` settings for each of the URLs; they might be different if you're terminating SSL at the load balancer.
|
||||
|
||||
Also see the ``proxy-server.conf-sample`` for more config options, such as the ability to have a remote Swauth in a multiple Swift cluster configuration.
|
||||
|
||||
4) Be sure your Swift proxy allows account management in the ``proxy-server.conf``::
|
||||
|
||||
[app:proxy-server]
|
||||
...
|
||||
allow_account_management = true
|
||||
|
||||
For greater security, you can leave this off any public proxies and just have one or two private proxies with it turned on.
|
||||
|
||||
5) Restart your proxy server ``swift-init proxy reload``
|
||||
|
||||
6) Initialize the Swauth backing store in Swift ``swauth-prep -K swauthkey``
|
||||
|
||||
7) Add an account/user ``swauth-add-user -A http[s]://<host>:<port>/auth/ -K
|
||||
swauthkey -a test tester testing``
|
||||
|
||||
8) Ensure it works ``swift -A http[s]://<host>:<port>/auth/v1.0 -U test:tester -K testing stat -v``
|
||||
|
||||
|
||||
If anything goes wrong, it's best to start checking the proxy server logs. The client command line utilities often don't get enough information to help. I will often just ``tail -F`` the appropriate proxy log (``/var/log/syslog`` or however you have it configured) and then run the Swauth command to see exactly what requests are happening to try to determine where things fail.
|
||||
|
||||
General note, I find I occasionally just forget to reload the proxies after a config change; so that's the first thing you might try. Or, if you suspect the proxies aren't reloading properly, you might try ``swift-init proxy stop``, ensure all the processes died, then ``swift-init proxy start``.
|
||||
|
||||
Also, it's quite common to get the ``/auth/v1.0`` vs. just ``/auth/`` URL paths confused. Usual rule is: Swauth tools use just ``/auth/`` and Swift tools use ``/auth/v1.0``.
|
||||
|
||||
|
||||
Web Admin Install
|
||||
-----------------
|
||||
|
||||
1) If you installed from packages, you'll need to cd to the webadmin directory
|
||||
the package installed. This is ``/usr/share/doc/python-swauth/webadmin``
|
||||
with the Lucid packages. If you installed from source, you'll need to cd to
|
||||
the webadmin directory in the source directory.
|
||||
|
||||
2) Upload the Web Admin files with ``swift -A http[s]://<host>:<port>/auth/v1.0
|
||||
-U .super_admin:.super_admin -K swauthkey upload .webadmin .``
|
||||
|
||||
3) Open ``http[s]://<host>:<port>/auth/`` in your browser.
|
||||
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
||||
license
|
||||
details
|
||||
swauth
|
||||
middleware
|
||||
api
|
||||
authtypes
|
||||
|
||||
|
||||
Indices and tables
|
||||
------------------
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
@ -1,225 +0,0 @@
|
||||
.. _license:
|
||||
|
||||
*******
|
||||
LICENSE
|
||||
*******
|
||||
|
||||
::
|
||||
|
||||
Copyright (c) 2010-2011 OpenStack, LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -1,9 +0,0 @@
|
||||
.. _swauth_middleware_module:
|
||||
|
||||
swauth.middleware
|
||||
=================
|
||||
|
||||
.. automodule:: swauth.middleware
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
@ -1,9 +0,0 @@
|
||||
.. _swauth_module:
|
||||
|
||||
swauth
|
||||
======
|
||||
|
||||
.. automodule:: swauth
|
||||
:members:
|
||||
:undoc-members:
|
||||
:show-inheritance:
|
@ -1,78 +0,0 @@
|
||||
[DEFAULT]
|
||||
# Standard from Swift
|
||||
|
||||
[pipeline:main]
|
||||
# Standard from Swift, this is just an example of where to put swauth
|
||||
pipeline = catch_errors healthcheck cache ratelimit swauth proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
# Standard from Swift, main point to note is the inclusion of
|
||||
# allow_account_management = true (only for the proxy servers where you want to
|
||||
# be able to create/delete accounts).
|
||||
use = egg:swift#proxy
|
||||
allow_account_management = true
|
||||
|
||||
[filter:swauth]
|
||||
use = egg:swauth#swauth
|
||||
# You can override the default log routing for this filter here:
|
||||
# set log_name = swauth
|
||||
# set log_facility = LOG_LOCAL0
|
||||
# set log_level = INFO
|
||||
# set log_headers = False
|
||||
# The reseller prefix will verify a token begins with this prefix before even
|
||||
# attempting to validate it. Also, with authorization, only Swift storage
|
||||
# accounts with this prefix will be authorized by this middleware. Useful if
|
||||
# multiple auth systems are in use for one Swift cluster.
|
||||
# reseller_prefix = AUTH
|
||||
# If you wish to use a Swauth service on a remote cluster with this cluster:
|
||||
# swauth_remote = http://remotehost:port/auth
|
||||
# swauth_remote_timeout = 10
|
||||
# When using swauth_remote, the rest of these settings have no effect.
|
||||
#
|
||||
# The auth prefix will cause requests beginning with this prefix to be routed
|
||||
# to the auth subsystem, for granting tokens, creating accounts, users, etc.
|
||||
# auth_prefix = /auth/
|
||||
# Cluster strings are of the format name#url where name is a short name for the
|
||||
# Swift cluster and url is the url to the proxy server(s) for the cluster.
|
||||
# default_swift_cluster = local#http://127.0.0.1:8080/v1
|
||||
# You may also use the format name#url#url where the first url is the one
|
||||
# given to users to access their account (public url) and the second is the one
|
||||
# used by swauth itself to create and delete accounts (private url). This is
|
||||
# useful when a load balancer url should be used by users, but swauth itself is
|
||||
# behind the load balancer. Example:
|
||||
# default_swift_cluster = local#https://public.com:8080/v1#http://private.com:8080/v1
|
||||
# Number of seconds a newly issued token should be valid for, by default.
|
||||
# token_life = 86400
|
||||
# Maximum number of seconds a newly issued token can be valid for.
|
||||
# max_token_life = <same as token_life>
|
||||
# Specifies how the user key is stored. The default is 'plaintext', leaving the
|
||||
# key unsecured but available for key-signing features if such are ever added.
|
||||
# An alternative is 'sha1' which stores only a one-way hash of the key leaving
|
||||
# it secure but unavailable for key-signing.
|
||||
# auth_type = plaintext
|
||||
# Used if the auth_type is sha1 or another method that can make use of a salt.
|
||||
# auth_type_salt = swauthsalt
|
||||
# This allows middleware higher in the WSGI pipeline to override auth
|
||||
# processing, useful for middleware such as tempurl and formpost. If you know
|
||||
# you're not going to use such middleware and you want a bit of extra security,
|
||||
# you can set this to false.
|
||||
# allow_overrides = true
|
||||
# Highly recommended to change this. If you comment this out, the Swauth
|
||||
# administration features will be disabled for this proxy.
|
||||
super_admin_key = swauthkey
|
||||
|
||||
[filter:ratelimit]
|
||||
# Standard from Swift
|
||||
use = egg:swift#ratelimit
|
||||
|
||||
[filter:cache]
|
||||
# Standard from Swift
|
||||
use = egg:swift#memcache
|
||||
|
||||
[filter:healthcheck]
|
||||
# Standard from Swift
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:catch_errors]
|
||||
# Standard from Swift
|
||||
use = egg:swift#catch_errors
|
@ -1,30 +0,0 @@
|
||||
# Translations template for swauth.
|
||||
# Copyright (C) 2011 ORGANIZATION
|
||||
# This file is distributed under the same license as the swauth project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: swauth 1.0.1.dev\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2011-05-26 10:35+0000\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 0.9.4\n"
|
||||
|
||||
#: swauth/middleware.py:94
|
||||
msgid "No super_admin_key set in conf file! Exiting."
|
||||
msgstr ""
|
||||
|
||||
#: swauth/middleware.py:637
|
||||
#, python-format
|
||||
msgid ""
|
||||
"ERROR: Exception while trying to communicate with "
|
||||
"%(scheme)s://%(host)s:%(port)s/%(path)s"
|
||||
msgstr ""
|
||||
|
@ -1,23 +0,0 @@
|
||||
[build_sphinx]
|
||||
all_files = 1
|
||||
build-dir = doc/build
|
||||
source-dir = doc/source
|
||||
|
||||
[egg_info]
|
||||
tag_build =
|
||||
tag_date = 0
|
||||
tag_svn_revision = 0
|
||||
|
||||
[compile_catalog]
|
||||
directory = locale
|
||||
domain = swauth
|
||||
|
||||
[update_catalog]
|
||||
domain = swauth
|
||||
output_dir = locale
|
||||
input_file = locale/swauth.pot
|
||||
|
||||
[extract_messages]
|
||||
keywords = _ l_ lazy_gettext
|
||||
mapping_file = babel.cfg
|
||||
output_file = locale/swauth.pot
|
@ -1,89 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2010-2011 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from setuptools import setup, find_packages
|
||||
from setuptools.command.sdist import sdist
|
||||
import os
|
||||
import subprocess
|
||||
try:
|
||||
from babel.messages import frontend
|
||||
except ImportError:
|
||||
frontend = None
|
||||
|
||||
from swauth import __version__ as version
|
||||
|
||||
|
||||
class local_sdist(sdist):
|
||||
"""Customized sdist hook - builds the ChangeLog file from VC first"""
|
||||
|
||||
def run(self):
|
||||
if os.path.isdir('.bzr'):
|
||||
# We're in a bzr branch
|
||||
|
||||
log_cmd = subprocess.Popen(["bzr", "log", "--gnu"],
|
||||
stdout=subprocess.PIPE)
|
||||
changelog = log_cmd.communicate()[0]
|
||||
with open("ChangeLog", "w") as changelog_file:
|
||||
changelog_file.write(changelog)
|
||||
sdist.run(self)
|
||||
|
||||
|
||||
name = 'swauth'
|
||||
|
||||
|
||||
cmdclass = {'sdist': local_sdist}
|
||||
|
||||
|
||||
if frontend:
|
||||
cmdclass.update({
|
||||
'compile_catalog': frontend.compile_catalog,
|
||||
'extract_messages': frontend.extract_messages,
|
||||
'init_catalog': frontend.init_catalog,
|
||||
'update_catalog': frontend.update_catalog,
|
||||
})
|
||||
|
||||
|
||||
setup(
|
||||
name=name,
|
||||
version=version,
|
||||
description='Swauth',
|
||||
license='Apache License (2.0)',
|
||||
author='OpenStack, LLC.',
|
||||
author_email='swauth@brim.net',
|
||||
url='https://github.com/gholt/swauth',
|
||||
packages=find_packages(exclude=['test_swauth', 'bin']),
|
||||
test_suite='nose.collector',
|
||||
cmdclass=cmdclass,
|
||||
classifiers=[
|
||||
'Development Status :: 4 - Beta',
|
||||
'License :: OSI Approved :: Apache Software License',
|
||||
'Operating System :: POSIX :: Linux',
|
||||
'Programming Language :: Python :: 2.6',
|
||||
'Environment :: No Input/Output (Daemon)',
|
||||
],
|
||||
install_requires=[], # removed for better compat
|
||||
scripts=[
|
||||
'bin/swauth-add-account', 'bin/swauth-add-user',
|
||||
'bin/swauth-cleanup-tokens', 'bin/swauth-delete-account',
|
||||
'bin/swauth-delete-user', 'bin/swauth-list', 'bin/swauth-prep',
|
||||
'bin/swauth-set-account-service',
|
||||
],
|
||||
entry_points={
|
||||
'paste.filter_factory': [
|
||||
'swauth=swauth.middleware:filter_factory',
|
||||
],
|
||||
},
|
||||
)
|
@ -1,23 +0,0 @@
|
||||
# Copyright (c) 2010-2013 OpenStack, LLC.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import gettext
|
||||
|
||||
|
||||
#: Version information (major, minor, revision[, 'dev']).
|
||||
version_info = (1, 0, 9, 'dev')
|
||||
#: Version string 'major.minor.revision'.
|
||||
version = __version__ = ".".join(map(str, version_info))
|
||||
gettext.install('swauth')
|
@ -1,103 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Pablo Llopis 2011
|
||||
|
||||
|
||||
"""
|
||||
This module hosts available auth types for encoding and matching user keys.
|
||||
For adding a new auth type, simply write a class that satisfies the following
|
||||
conditions:
|
||||
|
||||
- For the class name, capitalize first letter only. This makes sure the user
|
||||
can specify an all-lowercase config option such as "plaintext" or "sha1".
|
||||
Swauth takes care of capitalizing the first letter before instantiating it.
|
||||
- Write an encode(key) method that will take a single argument, the user's key,
|
||||
and returns the encoded string. For plaintext, this would be
|
||||
"plaintext:<key>"
|
||||
- Write a match(key, creds) method that will take two arguments: the user's
|
||||
key, and the user's retrieved credentials. Return a boolean value that
|
||||
indicates whether the match is True or False.
|
||||
|
||||
Note that, since some of the encodings will be hashes, swauth supports the
|
||||
notion of salts. Thus, self.salt will be set to either a user-specified salt
|
||||
value or to a default value.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
|
||||
|
||||
#: Maximum length any valid token should ever be.
|
||||
MAX_TOKEN_LENGTH = 5000
|
||||
|
||||
|
||||
class Plaintext(object):
|
||||
"""
|
||||
Provides a particular auth type for encoding format for encoding and
|
||||
matching user keys.
|
||||
|
||||
This class must be all lowercase except for the first character, which
|
||||
must be capitalized. encode and match methods must be provided and are
|
||||
the only ones that will be used by swauth.
|
||||
"""
|
||||
def encode(self, key):
|
||||
"""
|
||||
Encodes a user key into a particular format. The result of this method
|
||||
will be used by swauth for storing user credentials.
|
||||
|
||||
:param key: User's secret key
|
||||
:returns: A string representing user credentials
|
||||
"""
|
||||
return "plaintext:%s" % key
|
||||
|
||||
def match(self, key, creds):
|
||||
"""
|
||||
Checks whether the user-provided key matches the user's credentials
|
||||
|
||||
:param key: User-supplied key
|
||||
:param creds: User's stored credentials
|
||||
:returns: True if the supplied key is valid, False otherwise
|
||||
"""
|
||||
return self.encode(key) == creds
|
||||
|
||||
|
||||
class Sha1(object):
|
||||
"""
|
||||
Provides a particular auth type for encoding format for encoding and
|
||||
matching user keys.
|
||||
|
||||
This class must be all lowercase except for the first character, which
|
||||
must be capitalized. encode and match methods must be provided and are
|
||||
the only ones that will be used by swauth.
|
||||
"""
|
||||
def encode(self, key):
|
||||
"""
|
||||
Encodes a user key into a particular format. The result of this method
|
||||
will be used by swauth for storing user credentials.
|
||||
|
||||
:param key: User's secret key
|
||||
:returns: A string representing user credentials
|
||||
"""
|
||||
enc_key = '%s%s' % (self.salt, key)
|
||||
enc_val = hashlib.sha1(enc_key).hexdigest()
|
||||
return "sha1:%s$%s" % (self.salt, enc_val)
|
||||
|
||||
def match(self, key, creds):
|
||||
"""
|
||||
Checks whether the user-provided key matches the user's credentials
|
||||
|
||||
:param key: User-supplied key
|
||||
:param creds: User's stored credentials
|
||||
:returns: True if the supplied key is valid, False otherwise
|
||||
"""
|
||||
return self.encode(key) == creds
|
File diff suppressed because it is too large
Load Diff
@ -1,71 +0,0 @@
|
||||
import swift
|
||||
|
||||
|
||||
MAJOR = None
|
||||
MINOR = None
|
||||
REVISION = None
|
||||
FINAL = None
|
||||
|
||||
|
||||
def parse(value):
|
||||
parts = value.split('.')
|
||||
if parts[-1].endswith('-dev'):
|
||||
final = False
|
||||
parts[-1] = parts[-1][:-4]
|
||||
else:
|
||||
final = True
|
||||
major = int(parts.pop(0))
|
||||
minor = int(parts.pop(0))
|
||||
if parts:
|
||||
revision = int(parts.pop(0))
|
||||
else:
|
||||
revision = 0
|
||||
return major, minor, revision, final
|
||||
|
||||
|
||||
def newer_than(value):
|
||||
global MAJOR, MINOR, REVISION, FINAL
|
||||
major, minor, revision, final = parse(value)
|
||||
if MAJOR is None:
|
||||
MAJOR, MINOR, REVISION, FINAL = parse(swift.__version__)
|
||||
if MAJOR < major:
|
||||
return False
|
||||
elif MAJOR == major:
|
||||
if MINOR < minor:
|
||||
return False
|
||||
elif MINOR == minor:
|
||||
if REVISION < revision:
|
||||
return False
|
||||
elif REVISION == revision:
|
||||
if not FINAL or final:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def run_tests():
|
||||
global MAJOR, MINOR, REVISION, FINAL
|
||||
MAJOR, MINOR, REVISION, FINAL = parse('1.3')
|
||||
assert(newer_than('1.2'))
|
||||
assert(newer_than('1.2.9'))
|
||||
assert(newer_than('1.3-dev'))
|
||||
assert(newer_than('1.3.0-dev'))
|
||||
assert(not newer_than('1.3'))
|
||||
assert(not newer_than('1.3.0'))
|
||||
assert(not newer_than('1.3.1-dev'))
|
||||
assert(not newer_than('1.3.1'))
|
||||
assert(not newer_than('1.4'))
|
||||
assert(not newer_than('2.0'))
|
||||
MAJOR, MINOR, REVISION, FINAL = parse('1.7.7-dev')
|
||||
assert(newer_than('1.6'))
|
||||
assert(newer_than('1.7'))
|
||||
assert(newer_than('1.7.6-dev'))
|
||||
assert(newer_than('1.7.6'))
|
||||
assert(not newer_than('1.7.7'))
|
||||
assert(not newer_than('1.7.8-dev'))
|
||||
assert(not newer_than('1.7.8'))
|
||||
assert(not newer_than('1.8.0'))
|
||||
assert(not newer_than('2.0'))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
run_tests()
|
@ -1,6 +0,0 @@
|
||||
# See http://code.google.com/p/python-nose/issues/detail?id=373
|
||||
# The code below enables nosetests to work with i18n _() blocks
|
||||
|
||||
import __builtin__
|
||||
|
||||
setattr(__builtin__, '_', lambda x: x)
|
@ -1,63 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Pablo Llopis 2011
|
||||
|
||||
import unittest
|
||||
from swauth import authtypes
|
||||
|
||||
|
||||
class TestPlaintext(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.auth_encoder = authtypes.Plaintext()
|
||||
|
||||
def test_plaintext_encode(self):
|
||||
enc_key = self.auth_encoder.encode('keystring')
|
||||
self.assertEquals('plaintext:keystring', enc_key)
|
||||
|
||||
def test_plaintext_valid_match(self):
|
||||
creds = 'plaintext:keystring'
|
||||
match = self.auth_encoder.match('keystring', creds)
|
||||
self.assertEquals(match, True)
|
||||
|
||||
def test_plaintext_invalid_match(self):
|
||||
creds = 'plaintext:other-keystring'
|
||||
match = self.auth_encoder.match('keystring', creds)
|
||||
self.assertEquals(match, False)
|
||||
|
||||
|
||||
class TestSha1(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.auth_encoder = authtypes.Sha1()
|
||||
self.auth_encoder.salt = 'salt'
|
||||
|
||||
def test_sha1_encode(self):
|
||||
enc_key = self.auth_encoder.encode('keystring')
|
||||
self.assertEquals('sha1:salt$d50dc700c296e23ce5b41f7431a0e01f69010f06',
|
||||
enc_key)
|
||||
|
||||
def test_sha1_valid_match(self):
|
||||
creds = 'sha1:salt$d50dc700c296e23ce5b41f7431a0e01f69010f06'
|
||||
match = self.auth_encoder.match('keystring', creds)
|
||||
self.assertEquals(match, True)
|
||||
|
||||
def test_sha1_invalid_match(self):
|
||||
creds = 'sha1:salt$deadbabedeadbabedeadbabec0ffeebadc0ffeee'
|
||||
match = self.auth_encoder.match('keystring', creds)
|
||||
self.assertEquals(match, False)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
File diff suppressed because it is too large
Load Diff
@ -1,552 +0,0 @@
|
||||
<html>
|
||||
<head>
|
||||
<style type="text/css">
|
||||
body {font-family: sans-serif}
|
||||
table {border-collapse: collapse}
|
||||
td {padding-left: 1ex; padding-right: 1ex}
|
||||
.account {color: #0000ff; padding-left: 3ex; cursor: pointer}
|
||||
.add_account_heading {text-align: right; padding-right: 0}
|
||||
.service {padding-left: 3ex; vertical-align: top}
|
||||
.service_detail {padding-left: 0}
|
||||
.user {color: #0000ff; padding-left: 3ex; cursor: pointer}
|
||||
.group {padding-left: 3ex}
|
||||
.add_user_heading {text-align: right; padding-right: 0}
|
||||
.shadow_delement {color: #0000ff; cursor: pointer}
|
||||
.shadow_felement {display: none}
|
||||
#swauth {font-size: 200%; font-weight: bold; font-style: italic; margin: 0px; padding: 0px}
|
||||
#creds_area {float: right}
|
||||
#logout {color: #0000ff; padding-left: 3ex; cursor: pointer}
|
||||
#refresh_accounts {color: #0000ff; padding-left: 1ex; cursor: pointer}
|
||||
#add_account {color: #0000ff; padding-left: 1ex; padding-right: 1ex; cursor: pointer}
|
||||
#add_account_title {padding-top: 1ex; padding-bottom: 1ex}
|
||||
#add_account_cancel {color: #0000ff; padding-top: 1ex; padding-left: 3ex; cursor: pointer}
|
||||
#add_account_save {color: #0000ff; text-align: right; padding-top: 1ex; padding-right: 3ex; cursor: pointer}
|
||||
#account_area {background: #ddeeff}
|
||||
#add_user {color: #0000ff; padding-left: 1ex; padding-right: 1ex; cursor: pointer}
|
||||
#add_user_title {padding-top: 1ex; padding-bottom: 1ex}
|
||||
#add_user_cancel {color: #0000ff; padding-top: 1ex; padding-left: 3ex; cursor: pointer}
|
||||
#add_user_save {color: #0000ff; text-align: right; padding-top: 1ex; padding-right: 3ex; cursor: pointer}
|
||||
#delete_account {color: #0000ff; text-align: right; margin-left: 45ex; padding-right: 1ex; cursor: pointer}
|
||||
#user_area {background: #aaccff}
|
||||
#delete_user {color: #0000ff; text-align: right; margin-left: 45ex; padding-right: 1ex; cursor: pointer}
|
||||
#auth_view {display: none}
|
||||
#auth_toggler {color: #0000ff; cursor: pointer}
|
||||
#auth_update {color: #0000ff; padding-left: 1ex; cursor: pointer}
|
||||
#auth_update_field {display: none}
|
||||
</style>
|
||||
<script type="text/javascript">
|
||||
var request = null;
|
||||
var creds_user = '';
|
||||
var creds_key = '';
|
||||
var creds_logged_in = true;
|
||||
var account = '';
|
||||
var user = '';
|
||||
var account_selection = -1;
|
||||
var user_selection = -1;
|
||||
var swauth_area_selected_background = '#ddeeff';
|
||||
var account_area_selected_background = '#aaccff';
|
||||
var endpoints;
|
||||
|
||||
function get_bounds(element) {
|
||||
bounds = {};
|
||||
bounds.top = 0;
|
||||
bounds.left = 0;
|
||||
bounds.width = element.offsetWidth;
|
||||
bounds.height = element.offsetHeight;
|
||||
if (element.offsetParent) {
|
||||
do {
|
||||
bounds.top += element.offsetTop;
|
||||
bounds.left += element.offsetLeft;
|
||||
} while (element = element.offsetParent);
|
||||
}
|
||||
return bounds;
|
||||
}
|
||||
|
||||
function shadow_edit(delement) {
|
||||
felement = document.getElementById('f' + delement.id.substring(1));
|
||||
felement.value = delement.innerHTML;
|
||||
delement.style.display = 'none';
|
||||
felement.style.display = 'inline';
|
||||
felement.focus();
|
||||
}
|
||||
|
||||
function shadow_submitter(felement, evnt, func) {
|
||||
keycode = 0;
|
||||
if (window.event) {
|
||||
keycode = window.event.keyCode;
|
||||
} else if (evnt) {
|
||||
keycode = evnt.which;
|
||||
}
|
||||
if (keycode == 13) {
|
||||
func(felement);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function shadow_escaper(felement, evnt) {
|
||||
keycode = 0;
|
||||
if (window.event) {
|
||||
keycode = window.event.keyCode;
|
||||
} else if (evnt) {
|
||||
keycode = evnt.which;
|
||||
}
|
||||
if (keycode == 27) {
|
||||
felement.style.display = 'none';
|
||||
document.getElementById('d' + felement.id.substring(1)).style.display = 'inline';
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function creds_clicked() {
|
||||
creds_area = document.getElementById('creds_area');
|
||||
if (creds_logged_in) {
|
||||
creds_user = '';
|
||||
creds_key = '';
|
||||
creds_area.innerHTML = 'User: <input id="creds_user" type="text" size="10" /> Key: <input id="creds_key" type="password" size="10" onkeypress="return creds_submitter(event)" />';
|
||||
document.getElementById('swauth_area').innerHTML = '';
|
||||
creds_logged_in = false;
|
||||
document.getElementById("creds_user").focus();
|
||||
} else {
|
||||
creds_user = document.getElementById('creds_user').value;
|
||||
creds_key = document.getElementById('creds_key').value;
|
||||
creds_area.innerHTML = '<div>Logged in as ' + creds_user + ' <span id="logout" onclick="creds_clicked()">Logout</span></div>';
|
||||
creds_logged_in = true;
|
||||
swauth_area_load();
|
||||
}
|
||||
}
|
||||
|
||||
function creds_submitter(e) {
|
||||
keycode = 0;
|
||||
if (window.event) {
|
||||
keycode = window.event.keyCode;
|
||||
} else if (e) {
|
||||
keycode = e.which;
|
||||
}
|
||||
if (keycode == 13) {
|
||||
creds_clicked();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function swauth_area_reset() {
|
||||
account_area_reset();
|
||||
document.getElementById('swauth_area').innerHTML = '';
|
||||
}
|
||||
|
||||
function account_area_reset() {
|
||||
user_area_reset();
|
||||
element = document.getElementById('add_account')
|
||||
if (element) {
|
||||
element.style.background = 'none';
|
||||
}
|
||||
if (account_selection != -1) {
|
||||
document.getElementById('account_' + account_selection).style.background = 'none';
|
||||
}
|
||||
account = '';
|
||||
account_selection = -1;
|
||||
document.getElementById('account_area').innerHTML = '';
|
||||
}
|
||||
|
||||
function user_area_reset() {
|
||||
element = document.getElementById('add_user')
|
||||
if (element) {
|
||||
element.style.background = 'none';
|
||||
}
|
||||
if (user_selection != -1) {
|
||||
document.getElementById('user_' + user_selection).style.background = 'none';
|
||||
}
|
||||
user = '';
|
||||
user_selection = -1;
|
||||
document.getElementById('user_area').innerHTML = '';
|
||||
}
|
||||
|
||||
function swauth_area_load() {
|
||||
swauth_area_reset();
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = swauth_area_load2;
|
||||
request.open('GET', '/auth/v2/', true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function swauth_area_load2() {
|
||||
if (request.readyState == 4) {
|
||||
swauth_area = document.getElementById('swauth_area');
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
data = JSON.parse(request.responseText);
|
||||
content = '<table><tr><td>Accounts <span id="refresh_accounts" onclick="swauth_area_load()">Refresh</span> <span id="add_account" onclick="add_account()">Add</span></td></tr>';
|
||||
for (ix = 0; ix < data.accounts.length; ix++) {
|
||||
content += '<tr><td id="account_' + ix + '" onclick="account_area_load(' + ix + ')" class="account">' + data.accounts[ix].name + '</td></tr>';
|
||||
}
|
||||
content += '</table>';
|
||||
swauth_area.innerHTML = content;
|
||||
} else {
|
||||
swauth_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function add_account() {
|
||||
account_area_reset();
|
||||
document.getElementById('add_account').style.background = swauth_area_selected_background;
|
||||
account_area = document.getElementById('account_area');
|
||||
account_area.innerHTML = '<table><tr><td id="add_account_title" colspan="2">New Account</td></tr><tr><td class="add_account_heading">Name</td><td><input id="add_account_name" type="text" size="20" /></td></tr><tr><td class="add_account_heading">Suffix</td><td><input id="add_account_suffix" type="text" size="20" /> (Optional)</td></tr><tr><td id="add_account_cancel" onclick="swauth_area_load()">Cancel</td><td id="add_account_save" onclick="add_account_save()">Add</td></tr></table>';
|
||||
bounds = get_bounds(document.getElementById('add_account'));
|
||||
account_area.style.position = 'absolute';
|
||||
account_area.style.top = bounds.top;
|
||||
account_area.style.left = bounds.left + bounds.width;
|
||||
document.getElementById("add_account_name").focus();
|
||||
}
|
||||
|
||||
function add_account_save() {
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = add_account_save2;
|
||||
request.open('PUT', '/auth/v2/' + document.getElementById('add_account_name').value, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.setRequestHeader('X-Account-Suffix', document.getElementById('add_account_suffix').value);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function add_account_save2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
swauth_area_load();
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function account_area_load(account_index) {
|
||||
account_area_reset();
|
||||
account_element = document.getElementById('account_' + account_index);
|
||||
account_element.style.background = swauth_area_selected_background;
|
||||
account_selection = account_index;
|
||||
account = account_element.innerHTML;
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = account_area_load2;
|
||||
request.open('GET', '/auth/v2/' + account, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function account_area_load2() {
|
||||
account_area = document.getElementById('account_area');
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
data = JSON.parse(request.responseText);
|
||||
content = '<div id="delete_account" onclick="delete_account()">Delete</div><table><tr><td>Account Id</td><td>' + data.account_id + '</td></tr></table><table><tr><td>Services</td></tr>';
|
||||
services = [];
|
||||
for (service in data.services) {
|
||||
services.push(service);
|
||||
}
|
||||
services.sort();
|
||||
for (ix = 0; ix < services.length; ix++) {
|
||||
content += '<tr><td class="service">' + services[ix] + '</td><td class="service_detail"><table>';
|
||||
if (data.services[services[ix]]['default']) {
|
||||
content += '<tr><td>default</td><td><span id="d-' + services[ix] + '" class="shadow_delement" onclick="shadow_edit(this)">' + data.services[services[ix]]['default'] + '</span><input id="f-' + services[ix] + '" class="shadow_felement" type="text" size="40" onkeypress="return shadow_submitter(this, event, endpoint_save)" onkeydown="return shadow_escaper(this, event)" /></td></tr>';
|
||||
}
|
||||
endpoints = [];
|
||||
for (name in data.services[services[ix]]) {
|
||||
if (name != 'default') {
|
||||
endpoints.push(name);
|
||||
}
|
||||
}
|
||||
endpoints.sort();
|
||||
for (iy = 0; iy < endpoints.length; iy++) {
|
||||
content += '<tr><td>' + endpoints[iy] + '</td><td><span id="d' + iy + '-' + services[ix] + '" class="shadow_delement" onclick="shadow_edit(this)">' + data.services[services[ix]][endpoints[iy]] + '</span><input id="f' + iy + '-' + services[ix] + '" class="shadow_felement" type="text" size="40" onkeypress="return shadow_submitter(this, event, endpoint_save)" onkeydown="return shadow_escaper(this, event)" /></td></tr>';
|
||||
}
|
||||
content += '</table></td></tr>';
|
||||
}
|
||||
content += '</table><table><tr><td>Users <span id="add_user" onclick="add_user()">Add</span></td></tr>';
|
||||
for (ix = 0; ix < data.users.length; ix++) {
|
||||
content += '<tr><td id="user_' + ix + '" onclick="user_area_load(' + ix + ')" class="user">' + data.users[ix].name + '</td></tr>';
|
||||
}
|
||||
content += '</table>';
|
||||
account_area.innerHTML = content;
|
||||
} else {
|
||||
account_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
|
||||
}
|
||||
bounds = get_bounds(document.getElementById('account_' + account_selection));
|
||||
account_area.style.position = 'absolute';
|
||||
account_area.style.top = bounds.top;
|
||||
account_area.style.left = bounds.left + bounds.width;
|
||||
}
|
||||
}
|
||||
|
||||
function endpoint_save(field) {
|
||||
service = field.id.substring(field.id.indexOf('-') + 1)
|
||||
index = field.id.substring(1, field.id.indexOf('-'))
|
||||
if (index) {
|
||||
endpoint = endpoints[index];
|
||||
} else {
|
||||
endpoint = 'default';
|
||||
}
|
||||
services = {};
|
||||
services[service] = {};
|
||||
services[service][endpoint] = field.value;
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = endpoint_save2;
|
||||
request.open('POST', '/auth/v2/' + account + '/.services', true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send(JSON.stringify(services));
|
||||
}
|
||||
|
||||
function endpoint_save2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
account_area_load(account_selection);
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function add_user() {
|
||||
user_area_reset();
|
||||
document.getElementById('add_user').style.background = account_area_selected_background;
|
||||
user_area = document.getElementById('user_area');
|
||||
user_area.innerHTML = '<table><tr><td id="add_user_title" colspan="2">New User</td></tr><tr><td class="add_user_heading">Name</td><td><input id="add_user_name" type="text" size="20" /></td></tr><tr><td class="add_user_heading">Auth Key</td><td><input id="add_user_key" type="password" size="20" /></td></tr><tr><td class="add_user_heading">Account Admin</td><td><input id="add_user_admin" type="checkbox" /></td></tr><tr><td class="add_user_heading">Reseller Admin</td><td><input id="add_user_reseller_admin" type="checkbox" /></td></tr><tr><td id="add_user_cancel" onclick="add_user_cancel()">Cancel</td><td id="add_user_save" onclick="add_user_save()">Add</td></tr></table>';
|
||||
bounds = get_bounds(document.getElementById('add_user'));
|
||||
user_area.style.position = 'absolute';
|
||||
user_area.style.top = bounds.top;
|
||||
user_area.style.left = bounds.left + bounds.width;
|
||||
document.getElementById("add_user_name").focus();
|
||||
}
|
||||
|
||||
function add_user_cancel() {
|
||||
document.getElementById('add_user').style.background = 'none';
|
||||
document.getElementById('user_area').innerHTML = '';
|
||||
}
|
||||
|
||||
function add_user_save() {
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = add_user_save2;
|
||||
request.open('PUT', '/auth/v2/' + account + '/' + document.getElementById('add_user_name').value, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.setRequestHeader('X-Auth-User-Key', document.getElementById('add_user_key').value);
|
||||
if (document.getElementById('add_user_admin').value) {
|
||||
request.setRequestHeader('X-Auth-User-Admin', 'true');
|
||||
}
|
||||
if (document.getElementById('add_user_reseller_admin').value) {
|
||||
request.setRequestHeader('X-Auth-User-Reseller-Admin', 'true');
|
||||
}
|
||||
request.send();
|
||||
}
|
||||
|
||||
function add_user_save2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
account_area_load(account_selection);
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function delete_account() {
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = delete_account2;
|
||||
request.open('DELETE', '/auth/v2/' + account, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function delete_account2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
swauth_area_load();
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function user_area_load(account_area_user_index) {
|
||||
user_area_reset();
|
||||
user_element = document.getElementById('user_' + account_area_user_index);
|
||||
user_element.style.background = account_area_selected_background;
|
||||
user_selection = account_area_user_index;
|
||||
user = user_element.innerHTML;
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = user_area_load2;
|
||||
request.open('GET', '/auth/v2/' + account + '/' + user, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function user_area_load2() {
|
||||
user_area = document.getElementById('user_area');
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
data = JSON.parse(request.responseText);
|
||||
content = '<div id="delete_user" onclick="delete_user()">Delete</div><table><tr><td>Auth</td><td><span id="auth_toggler" onclick="auth_toggle()">Show</span> <span id="auth_view">' + data.auth + '</span></td><td><input id="auth_update_field" type="password" size="20" onkeypress="return auth_submitter(event)" onkeydown="return auth_escaper(event)" /> <span id="auth_update" onclick="auth_update()">Update</span></td></tr></table><table><tr><td>Groups</td></tr>';
|
||||
groups = [];
|
||||
for (ix = 0; ix < data.groups.length; ix++) {
|
||||
groups.push(data.groups[ix].name);
|
||||
}
|
||||
groups.sort();
|
||||
for (ix = 0; ix < groups.length; ix++) {
|
||||
content += '<tr><td class="group">' + groups[ix] + '</td></tr>';
|
||||
}
|
||||
content += '</table>';
|
||||
user_area.innerHTML = content;
|
||||
} else {
|
||||
user_area.innerHTML = 'Server returned status: ' + request.status + ' ' + request.statusText;
|
||||
}
|
||||
bounds = get_bounds(document.getElementById('user_' + user_selection));
|
||||
user_area.style.position = 'absolute';
|
||||
user_area.style.top = bounds.top;
|
||||
user_area.style.left = bounds.left + bounds.width;
|
||||
}
|
||||
}
|
||||
|
||||
function delete_user() {
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = delete_user2;
|
||||
request.open('DELETE', '/auth/v2/' + account + '/' + user, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
|
||||
function delete_user2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
account_area_load(account_selection);
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function auth_toggle() {
|
||||
to_toggle = document.getElementById('auth_view');
|
||||
toggler = document.getElementById('auth_toggler');
|
||||
if (to_toggle.style.display && to_toggle.style.display != 'none') {
|
||||
toggler.innerHTML = 'Show';
|
||||
to_toggle.style.display = 'none';
|
||||
} else {
|
||||
toggler.innerHTML = 'Hide';
|
||||
to_toggle.style.display = 'inline';
|
||||
}
|
||||
}
|
||||
|
||||
function auth_update() {
|
||||
field = document.getElementById('auth_update_field');
|
||||
trigger = document.getElementById('auth_update');
|
||||
if (field.style.display && field.style.display != 'none') {
|
||||
auth_save();
|
||||
} else {
|
||||
field.style.display = 'inline';
|
||||
trigger.style.display = 'none';
|
||||
field.focus();
|
||||
}
|
||||
}
|
||||
|
||||
function auth_submitter(e) {
|
||||
keycode = 0;
|
||||
if (window.event) {
|
||||
keycode = window.event.keyCode;
|
||||
} else if (e) {
|
||||
keycode = e.which;
|
||||
}
|
||||
if (keycode == 13) {
|
||||
auth_save();
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function auth_escaper(e) {
|
||||
keycode = 0;
|
||||
if (window.event) {
|
||||
keycode = window.event.keyCode;
|
||||
} else if (e) {
|
||||
keycode = e.which;
|
||||
}
|
||||
if (keycode == 27) {
|
||||
field = document.getElementById('auth_update_field');
|
||||
field.value = '';
|
||||
field.style.display ='none';
|
||||
document.getElementById('auth_update').style.display ='inline';
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function auth_save() {
|
||||
document.getElementById('auth_update_field').style.display ='none';
|
||||
if (document.getElementById('auth_update_field').value) {
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = auth_save2;
|
||||
request.open('GET', '/auth/v2/' + account + '/' + user, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.send();
|
||||
}
|
||||
}
|
||||
|
||||
function auth_save2() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
data = JSON.parse(request.responseText);
|
||||
request = new XMLHttpRequest();
|
||||
request.onreadystatechange = auth_save3;
|
||||
request.open('PUT', '/auth/v2/' + account_element.innerHTML + '/' + user_element.innerHTML, true);
|
||||
request.setRequestHeader('X-Auth-Admin-User', creds_user);
|
||||
request.setRequestHeader('X-Auth-Admin-Key', creds_key);
|
||||
request.setRequestHeader('X-Auth-User-Key', document.getElementById('auth_update_field').value);
|
||||
admin = false;
|
||||
reseller_admin = false;
|
||||
for (ix = 0; ix < data.groups.length; ix++) {
|
||||
if (data.groups[ix].name == '.admin') {
|
||||
admin = true;
|
||||
} else if (data.groups[ix].name == '.reseller_admin') {
|
||||
reseller_admin = true;
|
||||
}
|
||||
}
|
||||
if (admin) {
|
||||
request.setRequestHeader('X-Auth-User-Admin', 'true');
|
||||
}
|
||||
if (reseller_admin) {
|
||||
request.setRequestHeader('X-Auth-User-Reseller-Admin', 'true');
|
||||
}
|
||||
request.send();
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function auth_save3() {
|
||||
if (request.readyState == 4) {
|
||||
if (request.status >= 200 && request.status <= 299) {
|
||||
user_area_load(user_selection);
|
||||
} else {
|
||||
alert('Server returned status: ' + request.status + ' ' + request.statusText);
|
||||
}
|
||||
}
|
||||
}
|
||||
</script>
|
||||
</head>
|
||||
<body onload="creds_clicked()">
|
||||
<form onsubmit="return false">
|
||||
<div id="creds_area"></div>
|
||||
<div id="swauth">Swauth</div>
|
||||
<div id="swauth_area"></div>
|
||||
<div id="account_area"></div>
|
||||
<div id="user_area"></div>
|
||||
</form>
|
||||
</body>
|
||||
</html>
|
@ -1,38 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from swift.common.utils import readconf, config_true_value
|
||||
|
||||
config_file = {}
|
||||
try:
|
||||
config_file = readconf("/etc/swift/proxy-server.conf",
|
||||
section_name="filter:cache")
|
||||
except SystemExit:
|
||||
pass
|
||||
|
||||
MEMCACHE_SERVERS = config_file.get('memcache_servers', None)
|
||||
|
||||
config_file = {}
|
||||
|
||||
try:
|
||||
config_file = readconf("/etc/swift/proxy-server.conf",
|
||||
section_name="filter:kerbauth")
|
||||
except SystemExit:
|
||||
pass
|
||||
|
||||
TOKEN_LIFE = int(config_file.get('token_life', 86400))
|
||||
RESELLER_PREFIX = config_file.get('reseller_prefix', "AUTH_")
|
||||
DEBUG_HEADERS = config_true_value(config_file.get('debug_headers', 'yes'))
|
@ -1,12 +0,0 @@
|
||||
<Location /cgi-bin/swift-auth>
|
||||
AuthType Kerberos
|
||||
AuthName "Swift Authentication"
|
||||
KrbMethodNegotiate On
|
||||
KrbMethodK5Passwd On
|
||||
KrbSaveCredentials On
|
||||
KrbServiceName HTTP/client.example.com
|
||||
KrbAuthRealms EXAMPLE.COM
|
||||
Krb5KeyTab /etc/httpd/conf/http.keytab
|
||||
KrbVerifyKDC Off
|
||||
Require valid-user
|
||||
</Location>
|
@ -1,70 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Requires the following command to be run:
|
||||
# setsebool -P httpd_can_network_connect 1
|
||||
# setsebool -P httpd_can_network_memcache 1
|
||||
|
||||
import os
|
||||
import cgi
|
||||
from swift.common.memcached import MemcacheRing
|
||||
from time import time, ctime
|
||||
from swiftkerbauth import MEMCACHE_SERVERS, TOKEN_LIFE, DEBUG_HEADERS
|
||||
from swiftkerbauth.kerbauth_utils import get_remote_user, get_auth_data, \
|
||||
generate_token, set_auth_data, get_groups_from_username
|
||||
|
||||
|
||||
def main():
|
||||
try:
|
||||
username = get_remote_user(os.environ)
|
||||
except RuntimeError:
|
||||
print "Status: 401 Unauthorized\n"
|
||||
print "Malformed REMOTE_USER"
|
||||
return
|
||||
|
||||
if not MEMCACHE_SERVERS:
|
||||
print "Status: 500 Internal Server Error\n"
|
||||
print "Memcache not configured in /etc/swift/proxy-server.conf"
|
||||
return
|
||||
|
||||
mc_servers = [s.strip() for s in MEMCACHE_SERVERS.split(',') if s.strip()]
|
||||
mc = MemcacheRing(mc_servers)
|
||||
|
||||
token, expires, groups = get_auth_data(mc, username)
|
||||
|
||||
if not token:
|
||||
token = generate_token()
|
||||
expires = time() + TOKEN_LIFE
|
||||
groups = get_groups_from_username(username)
|
||||
set_auth_data(mc, username, token, expires, groups)
|
||||
|
||||
print "X-Auth-Token: %s" % token
|
||||
print "X-Storage-Token: %s" % token
|
||||
|
||||
# For debugging.
|
||||
if DEBUG_HEADERS:
|
||||
print "X-Debug-Remote-User: %s" % username
|
||||
print "X-Debug-Groups: %s" % groups
|
||||
print "X-Debug-Token-Life: %ss" % TOKEN_LIFE
|
||||
print "X-Debug-Token-Expires: %s" % ctime(expires)
|
||||
|
||||
print ""
|
||||
|
||||
try:
|
||||
print("Content-Type: text/html")
|
||||
main()
|
||||
except:
|
||||
cgi.print_exception()
|
@ -1,463 +0,0 @@
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import errno
|
||||
from time import time, ctime
|
||||
from traceback import format_exc
|
||||
from eventlet import Timeout
|
||||
from urllib import unquote
|
||||
|
||||
from swift.common.swob import Request, Response
|
||||
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
|
||||
HTTPSeeOther, HTTPUnauthorized, HTTPServerError
|
||||
|
||||
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
|
||||
from swift.common.utils import cache_from_env, get_logger, \
|
||||
split_path, config_true_value
|
||||
from gluster.swift.common.middleware.swiftkerbauth.kerbauth_utils import \
|
||||
get_auth_data, generate_token, \
|
||||
set_auth_data, run_kinit, get_groups_from_username
|
||||
|
||||
|
||||
class KerbAuth(object):
|
||||
"""
|
||||
Test authentication and authorization system.
|
||||
|
||||
Add to your pipeline in proxy-server.conf, such as::
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache kerbauth proxy-server
|
||||
|
||||
Set account auto creation to true in proxy-server.conf::
|
||||
|
||||
[app:proxy-server]
|
||||
account_autocreate = true
|
||||
|
||||
And add a kerbauth filter section, such as::
|
||||
|
||||
[filter:kerbauth]
|
||||
use = egg:swiftkerbauth#kerbauth
|
||||
|
||||
See the proxy-server.conf-sample for more information.
|
||||
|
||||
:param app: The next WSGI app in the pipeline
|
||||
:param conf: The dict of configuration values
|
||||
"""
|
||||
|
||||
def __init__(self, app, conf):
|
||||
self.app = app
|
||||
self.conf = conf
|
||||
self.logger = get_logger(conf, log_route='kerbauth')
|
||||
self.log_headers = config_true_value(conf.get('log_headers', 'f'))
|
||||
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip()
|
||||
if self.reseller_prefix and self.reseller_prefix[-1] != '_':
|
||||
self.reseller_prefix += '_'
|
||||
self.logger.set_statsd_prefix('kerbauth.%s' % (
|
||||
self.reseller_prefix if self.reseller_prefix else 'NONE',))
|
||||
self.auth_prefix = conf.get('auth_prefix', '/auth/')
|
||||
if not self.auth_prefix or not self.auth_prefix.strip('/'):
|
||||
self.logger.warning('Rewriting invalid auth prefix "%s" to '
|
||||
'"/auth/" (Non-empty auth prefix path '
|
||||
'is required)' % self.auth_prefix)
|
||||
self.auth_prefix = '/auth/'
|
||||
if self.auth_prefix[0] != '/':
|
||||
self.auth_prefix = '/' + self.auth_prefix
|
||||
if self.auth_prefix[-1] != '/':
|
||||
self.auth_prefix += '/'
|
||||
self.token_life = int(conf.get('token_life', 86400))
|
||||
self.auth_method = conf.get('auth_method', 'passive')
|
||||
self.debug_headers = config_true_value(
|
||||
conf.get('debug_headers', 'yes'))
|
||||
self.realm_name = conf.get('realm_name', None)
|
||||
self.allow_overrides = config_true_value(
|
||||
conf.get('allow_overrides', 't'))
|
||||
self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
|
||||
self.ext_authentication_url = conf.get('ext_authentication_url')
|
||||
if not self.ext_authentication_url:
|
||||
raise RuntimeError("Missing filter parameter ext_authentication_"
|
||||
"url in /etc/swift/proxy-server.conf")
|
||||
|
||||
def __call__(self, env, start_response):
|
||||
"""
|
||||
Accepts a standard WSGI application call, authenticating the request
|
||||
and installing callback hooks for authorization and ACL header
|
||||
validation. For an authenticated request, REMOTE_USER will be set to a
|
||||
comma separated list of the user's groups.
|
||||
|
||||
If the request matches the self.auth_prefix, the request will be
|
||||
routed through the internal auth request handler (self.handle).
|
||||
This is to handle granting tokens, etc.
|
||||
"""
|
||||
if self.allow_overrides and env.get('swift.authorize_override', False):
|
||||
return self.app(env, start_response)
|
||||
if env.get('PATH_INFO', '').startswith(self.auth_prefix):
|
||||
return self.handle(env, start_response)
|
||||
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
|
||||
if token and token.startswith(self.reseller_prefix):
|
||||
groups = self.get_groups(env, token)
|
||||
if groups:
|
||||
user = groups and groups.split(',', 1)[0] or ''
|
||||
trans_id = env.get('swift.trans_id')
|
||||
self.logger.debug('User: %s uses token %s (trans_id %s)' %
|
||||
(user, token, trans_id))
|
||||
env['REMOTE_USER'] = groups
|
||||
env['swift.authorize'] = self.authorize
|
||||
env['swift.clean_acl'] = clean_acl
|
||||
if '.reseller_admin' in groups:
|
||||
env['reseller_request'] = True
|
||||
else:
|
||||
# Invalid token (may be expired)
|
||||
if self.auth_method == "active":
|
||||
return HTTPSeeOther(
|
||||
location=self.ext_authentication_url)(env,
|
||||
start_response)
|
||||
elif self.auth_method == "passive":
|
||||
self.logger.increment('unauthorized')
|
||||
return HTTPUnauthorized()(env, start_response)
|
||||
else:
|
||||
# With a non-empty reseller_prefix, I would like to be called
|
||||
# back for anonymous access to accounts I know I'm the
|
||||
# definitive auth for.
|
||||
try:
|
||||
version, rest = split_path(env.get('PATH_INFO', ''),
|
||||
1, 2, True)
|
||||
except ValueError:
|
||||
version, rest = None, None
|
||||
self.logger.increment('errors')
|
||||
# Not my token, not my account, I can't authorize this request,
|
||||
# deny all is a good idea if not already set...
|
||||
if 'swift.authorize' not in env:
|
||||
env['swift.authorize'] = self.denied_response
|
||||
|
||||
return self.app(env, start_response)
|
||||
|
||||
def get_groups(self, env, token):
|
||||
"""
|
||||
Get groups for the given token.
|
||||
|
||||
:param env: The current WSGI environment dictionary.
|
||||
:param token: Token to validate and return a group string for.
|
||||
|
||||
:returns: None if the token is invalid or a string containing a comma
|
||||
separated list of groups the authenticated user is a member
|
||||
of. The first group in the list is also considered a unique
|
||||
identifier for that user.
|
||||
"""
|
||||
groups = None
|
||||
memcache_client = cache_from_env(env)
|
||||
if not memcache_client:
|
||||
raise Exception('Memcache required')
|
||||
memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token)
|
||||
cached_auth_data = memcache_client.get(memcache_token_key)
|
||||
if cached_auth_data:
|
||||
expires, groups = cached_auth_data
|
||||
if expires < time():
|
||||
groups = None
|
||||
|
||||
return groups
|
||||
|
||||
def authorize(self, req):
|
||||
"""
|
||||
Returns None if the request is authorized to continue or a standard
|
||||
WSGI response callable if not.
|
||||
|
||||
Assumes that user groups are all lower case, which is true when Red Hat
|
||||
Enterprise Linux Identity Management is used.
|
||||
"""
|
||||
try:
|
||||
version, account, container, obj = req.split_path(1, 4, True)
|
||||
except ValueError:
|
||||
self.logger.increment('errors')
|
||||
return HTTPNotFound(request=req)
|
||||
|
||||
if not account or not account.startswith(self.reseller_prefix):
|
||||
self.logger.debug("Account name: %s doesn't start with "
|
||||
"reseller_prefix: %s."
|
||||
% (account, self.reseller_prefix))
|
||||
return self.denied_response(req)
|
||||
|
||||
user_groups = (req.remote_user or '').split(',')
|
||||
account_user = user_groups[1] if len(user_groups) > 1 else None
|
||||
# If the user is in the reseller_admin group for our prefix, he gets
|
||||
# full access to all accounts we manage. For the default reseller
|
||||
# prefix, the group name is auth_reseller_admin.
|
||||
admin_group = ("%sreseller_admin" % self.reseller_prefix).lower()
|
||||
if admin_group in user_groups and \
|
||||
account != self.reseller_prefix and \
|
||||
account[len(self.reseller_prefix)] != '.':
|
||||
req.environ['swift_owner'] = True
|
||||
return None
|
||||
|
||||
# The "account" is part of the request URL, and already contains the
|
||||
# reseller prefix, like in "/v1/AUTH_vol1/pictures/pic1.png".
|
||||
if account.lower() in user_groups and \
|
||||
(req.method not in ('DELETE', 'PUT') or container):
|
||||
# If the user is admin for the account and is not trying to do an
|
||||
# account DELETE or PUT...
|
||||
req.environ['swift_owner'] = True
|
||||
self.logger.debug("User %s has admin authorizing."
|
||||
% account_user)
|
||||
return None
|
||||
|
||||
if (req.environ.get('swift_sync_key')
|
||||
and (req.environ['swift_sync_key'] ==
|
||||
req.headers.get('x-container-sync-key', None))
|
||||
and 'x-timestamp' in req.headers):
|
||||
self.logger.debug("Allow request with container sync-key: %s."
|
||||
% req.environ['swift_sync_key'])
|
||||
return None
|
||||
|
||||
if req.method == 'OPTIONS':
|
||||
#allow OPTIONS requests to proceed as normal
|
||||
self.logger.debug("Allow OPTIONS request.")
|
||||
return None
|
||||
|
||||
referrers, groups = parse_acl(getattr(req, 'acl', None))
|
||||
|
||||
if referrer_allowed(req.referer, referrers):
|
||||
if obj or '.rlistings' in groups:
|
||||
self.logger.debug("Allow authorizing %s via referer ACL."
|
||||
% req.referer)
|
||||
return None
|
||||
|
||||
for user_group in user_groups:
|
||||
if user_group in groups:
|
||||
self.logger.debug("User %s allowed in ACL: %s authorizing."
|
||||
% (account_user, user_group))
|
||||
return None
|
||||
|
||||
return self.denied_response(req)
|
||||
|
||||
def denied_response(self, req):
|
||||
"""
|
||||
Returns a standard WSGI response callable with the status of 403 or 401
|
||||
depending on whether the REMOTE_USER is set or not.
|
||||
"""
|
||||
if req.remote_user:
|
||||
self.logger.increment('forbidden')
|
||||
return HTTPForbidden(request=req)
|
||||
else:
|
||||
if self.auth_method == "active":
|
||||
return HTTPSeeOther(location=self.ext_authentication_url)
|
||||
elif self.auth_method == "passive":
|
||||
self.logger.increment('unauthorized')
|
||||
return HTTPUnauthorized(request=req)
|
||||
|
||||
def handle(self, env, start_response):
|
||||
"""
|
||||
WSGI entry point for auth requests (ones that match the
|
||||
self.auth_prefix).
|
||||
Wraps env in swob.Request object and passes it down.
|
||||
|
||||
:param env: WSGI environment dictionary
|
||||
:param start_response: WSGI callable
|
||||
"""
|
||||
try:
|
||||
req = Request(env)
|
||||
if self.auth_prefix:
|
||||
req.path_info_pop()
|
||||
req.bytes_transferred = '-'
|
||||
req.client_disconnect = False
|
||||
if 'x-storage-token' in req.headers and \
|
||||
'x-auth-token' not in req.headers:
|
||||
req.headers['x-auth-token'] = req.headers['x-storage-token']
|
||||
return self.handle_request(req)(env, start_response)
|
||||
except (Exception, Timeout):
|
||||
print "EXCEPTION IN handle: %s: %s" % (format_exc(), env)
|
||||
self.logger.increment('errors')
|
||||
start_response('500 Server Error',
|
||||
[('Content-Type', 'text/plain')])
|
||||
return ['Internal server error.\n']
|
||||
|
||||
def handle_request(self, req):
|
||||
"""
|
||||
Entry point for auth requests (ones that match the self.auth_prefix).
|
||||
Should return a WSGI-style callable (such as webob.Response).
|
||||
|
||||
:param req: swob.Request object
|
||||
"""
|
||||
req.start_time = time()
|
||||
handler = None
|
||||
try:
|
||||
version, account, user, _junk = req.split_path(1, 4, True)
|
||||
except ValueError:
|
||||
self.logger.increment('errors')
|
||||
return HTTPNotFound(request=req)
|
||||
if version in ('v1', 'v1.0', 'auth'):
|
||||
if req.method == 'GET':
|
||||
handler = self.handle_get_token
|
||||
if not handler:
|
||||
self.logger.increment('errors')
|
||||
req.response = HTTPBadRequest(request=req)
|
||||
else:
|
||||
req.response = handler(req)
|
||||
return req.response
|
||||
|
||||
def handle_get_token(self, req):
|
||||
"""
|
||||
Handles the various `request for token and service end point(s)` calls.
|
||||
There are various formats to support the various auth servers in the
|
||||
past.
|
||||
|
||||
"Active Mode" usage:
|
||||
All formats require GSS (Kerberos) authentication.
|
||||
|
||||
GET <auth-prefix>/v1/<act>/auth
|
||||
GET <auth-prefix>/auth
|
||||
GET <auth-prefix>/v1.0
|
||||
|
||||
On successful authentication, the response will have X-Auth-Token
|
||||
and X-Storage-Token set to the token to use with Swift.
|
||||
|
||||
"Passive Mode" usage::
|
||||
|
||||
GET <auth-prefix>/v1/<act>/auth
|
||||
X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
|
||||
X-Auth-Key: <key> or X-Storage-Pass: <key>
|
||||
GET <auth-prefix>/auth
|
||||
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
|
||||
X-Auth-Key: <key> or X-Storage-Pass: <key>
|
||||
GET <auth-prefix>/v1.0
|
||||
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
|
||||
X-Auth-Key: <key> or X-Storage-Pass: <key>
|
||||
|
||||
Values should be url encoded, "act%3Ausr" instead of "act:usr" for
|
||||
example; however, for backwards compatibility the colon may be
|
||||
included unencoded.
|
||||
|
||||
On successful authentication, the response will have X-Auth-Token
|
||||
and X-Storage-Token set to the token to use with Swift and
|
||||
X-Storage-URL set to the URL to the default Swift cluster to use.
|
||||
|
||||
:param req: The swob.Request to process.
|
||||
:returns: swob.Response, 2xx on success with data set as explained
|
||||
above.
|
||||
"""
|
||||
# Validate the request info
|
||||
try:
|
||||
pathsegs = split_path(req.path_info, 1, 3, True)
|
||||
except ValueError:
|
||||
self.logger.increment('errors')
|
||||
return HTTPNotFound(request=req)
|
||||
if not ((pathsegs[0] == 'v1' and pathsegs[2] == 'auth')
|
||||
or pathsegs[0] in ('auth', 'v1.0')):
|
||||
return HTTPBadRequest(request=req)
|
||||
|
||||
# Client is inside the domain
|
||||
if self.auth_method == "active":
|
||||
return HTTPSeeOther(location=self.ext_authentication_url)
|
||||
|
||||
# Client is outside the domain
|
||||
elif self.auth_method == "passive":
|
||||
account, user, key = None, None, None
|
||||
# Extract user, account and key from request
|
||||
if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
|
||||
account = pathsegs[1]
|
||||
user = req.headers.get('x-storage-user')
|
||||
if not user:
|
||||
user = unquote(req.headers.get('x-auth-user', ''))
|
||||
if user:
|
||||
if ':' not in user:
|
||||
return HTTPUnauthorized(request=req)
|
||||
else:
|
||||
account2, user = user.split(':', 1)
|
||||
if account != account2:
|
||||
return HTTPUnauthorized(request=req)
|
||||
key = req.headers.get('x-storage-pass')
|
||||
if not key:
|
||||
key = unquote(req.headers.get('x-auth-key', ''))
|
||||
elif pathsegs[0] in ('auth', 'v1.0'):
|
||||
user = unquote(req.headers.get('x-auth-user', ''))
|
||||
if not user:
|
||||
user = req.headers.get('x-storage-user')
|
||||
if user:
|
||||
if ':' not in user:
|
||||
return HTTPUnauthorized(request=req)
|
||||
else:
|
||||
account, user = user.split(':', 1)
|
||||
key = unquote(req.headers.get('x-auth-key', ''))
|
||||
if not key:
|
||||
key = req.headers.get('x-storage-pass')
|
||||
|
||||
if not (account or user or key):
|
||||
# If all are not given, client may be part of the domain
|
||||
return HTTPSeeOther(location=self.ext_authentication_url)
|
||||
elif None in (key, user, account):
|
||||
# If only one or two of them is given, but not all
|
||||
return HTTPUnauthorized(request=req)
|
||||
|
||||
# Run kinit on the user
|
||||
if self.realm_name and "@" not in user:
|
||||
user = user + "@" + self.realm_name
|
||||
try:
|
||||
ret = run_kinit(user, key)
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return HTTPServerError("kinit command not found\n")
|
||||
if ret != 0:
|
||||
self.logger.warning("Failed: kinit %s", user)
|
||||
if ret == -1:
|
||||
self.logger.warning("Failed: kinit: Password has probably "
|
||||
"expired.")
|
||||
return HTTPServerError("Kinit is taking too long.\n")
|
||||
return HTTPUnauthorized(request=req)
|
||||
self.logger.debug("kinit succeeded")
|
||||
|
||||
if "@" in user:
|
||||
user = user.split("@")[0]
|
||||
|
||||
# Check if user really belongs to the account
|
||||
groups_list = get_groups_from_username(user).strip().split(",")
|
||||
user_group = ("%s%s" % (self.reseller_prefix, account)).lower()
|
||||
reseller_admin_group = \
|
||||
("%sreseller_admin" % self.reseller_prefix).lower()
|
||||
if user_group not in groups_list:
|
||||
# Check if user is reseller_admin. If not, return Unauthorized.
|
||||
# On AD/IdM server, auth_reseller_admin is a separate group
|
||||
if reseller_admin_group not in groups_list:
|
||||
return HTTPUnauthorized(request=req)
|
||||
|
||||
mc = cache_from_env(req.environ)
|
||||
if not mc:
|
||||
raise Exception('Memcache required')
|
||||
token, expires, groups = get_auth_data(mc, user)
|
||||
if not token:
|
||||
token = generate_token()
|
||||
expires = time() + self.token_life
|
||||
groups = get_groups_from_username(user)
|
||||
set_auth_data(mc, user, token, expires, groups)
|
||||
|
||||
headers = {'X-Auth-Token': token,
|
||||
'X-Storage-Token': token}
|
||||
|
||||
if self.debug_headers:
|
||||
headers.update({'X-Debug-Remote-User': user,
|
||||
'X-Debug-Groups:': groups,
|
||||
'X-Debug-Token-Life': self.token_life,
|
||||
'X-Debug-Token-Expires': ctime(expires)})
|
||||
|
||||
resp = Response(request=req, headers=headers)
|
||||
resp.headers['X-Storage-Url'] = \
|
||||
'%s/v1/%s%s' % (resp.host_url, self.reseller_prefix, account)
|
||||
return resp
|
||||
|
||||
|
||||
def filter_factory(global_conf, **local_conf):
|
||||
"""Returns a WSGI filter app for use with paste.deploy."""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
|
||||
def auth_filter(app):
|
||||
return KerbAuth(app, conf)
|
||||
return auth_filter
|
@ -1,137 +0,0 @@
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import re
|
||||
import random
|
||||
import grp
|
||||
import signal
|
||||
from subprocess import Popen, PIPE
|
||||
from time import time
|
||||
from gluster.swift.common.middleware.swiftkerbauth \
|
||||
import TOKEN_LIFE, RESELLER_PREFIX
|
||||
|
||||
|
||||
def get_remote_user(env):
|
||||
"""Retrieve REMOTE_USER set by Apache from environment."""
|
||||
remote_user = env.get('REMOTE_USER', "")
|
||||
matches = re.match('([^@]+)@.*', remote_user)
|
||||
if not matches:
|
||||
raise RuntimeError("Malformed REMOTE_USER \"%s\"" % remote_user)
|
||||
return matches.group(1)
|
||||
|
||||
|
||||
def get_auth_data(mc, username):
|
||||
"""
|
||||
Returns the token, expiry time and groups for the user if it already exists
|
||||
on memcache. Returns None otherwise.
|
||||
|
||||
:param mc: MemcacheRing object
|
||||
:param username: swift user
|
||||
"""
|
||||
token, expires, groups = None, None, None
|
||||
memcache_user_key = '%s/user/%s' % (RESELLER_PREFIX, username)
|
||||
candidate_token = mc.get(memcache_user_key)
|
||||
if candidate_token:
|
||||
memcache_token_key = '%s/token/%s' % (RESELLER_PREFIX, candidate_token)
|
||||
cached_auth_data = mc.get(memcache_token_key)
|
||||
if cached_auth_data:
|
||||
expires, groups = cached_auth_data
|
||||
if expires > time():
|
||||
token = candidate_token
|
||||
else:
|
||||
expires, groups = None, None
|
||||
return (token, expires, groups)
|
||||
|
||||
|
||||
def set_auth_data(mc, username, token, expires, groups):
|
||||
"""
|
||||
Stores the following key value pairs on Memcache:
|
||||
(token, expires+groups)
|
||||
(user, token)
|
||||
"""
|
||||
auth_data = (expires, groups)
|
||||
memcache_token_key = "%s/token/%s" % (RESELLER_PREFIX, token)
|
||||
mc.set(memcache_token_key, auth_data, time=TOKEN_LIFE)
|
||||
|
||||
# Record the token with the user info for future use.
|
||||
memcache_user_key = '%s/user/%s' % (RESELLER_PREFIX, username)
|
||||
mc.set(memcache_user_key, token, time=TOKEN_LIFE)
|
||||
|
||||
|
||||
def generate_token():
|
||||
"""Generates a random token."""
|
||||
# We don't use uuid.uuid4() here because importing the uuid module
|
||||
# causes (harmless) SELinux denials in the audit log on RHEL 6. If this
|
||||
# is a security concern, a custom SELinux policy module could be
|
||||
# written to not log those denials.
|
||||
r = random.SystemRandom()
|
||||
token = '%stk%s' % \
|
||||
(RESELLER_PREFIX,
|
||||
''.join(r.choice('abcdef0123456789') for x in range(32)))
|
||||
return token
|
||||
|
||||
|
||||
def get_groups_from_username(username):
|
||||
"""Return a set of groups to which the user belongs to."""
|
||||
# Retrieve the numerical group IDs. We cannot list the group names
|
||||
# because group names from Active Directory may contain spaces, and
|
||||
# we wouldn't be able to split the list of group names into its
|
||||
# elements.
|
||||
p = Popen(['id', '-G', username], stdout=PIPE)
|
||||
if p.wait() != 0:
|
||||
raise RuntimeError("Failure running id -G for %s" % username)
|
||||
(p_stdout, p_stderr) = p.communicate()
|
||||
|
||||
# Convert the group numbers into group names.
|
||||
groups = []
|
||||
for gid in p_stdout.strip().split(" "):
|
||||
groups.append(grp.getgrgid(int(gid))[0])
|
||||
|
||||
# The first element of the list is considered a unique identifier
|
||||
# for the user. We add the username to accomplish this.
|
||||
if username in groups:
|
||||
groups.remove(username)
|
||||
groups = [username] + groups
|
||||
groups = ','.join(groups)
|
||||
return groups
|
||||
|
||||
|
||||
def run_kinit(username, password):
|
||||
"""Runs kinit command as a child process and returns the status code."""
|
||||
kinit = Popen(['kinit', username],
|
||||
stdin=PIPE, stdout=PIPE, stderr=PIPE)
|
||||
kinit.stdin.write('%s\n' % password)
|
||||
|
||||
# The following code handles a corner case where the Kerberos password
|
||||
# has expired and a prompt is displayed to enter new password. Ideally,
|
||||
# we would want to read from stdout but these are blocked reads. This is
|
||||
# a hack to kill the process if it's taking too long!
|
||||
|
||||
class Alarm(Exception):
|
||||
pass
|
||||
|
||||
def signal_handler(signum, frame):
|
||||
raise Alarm
|
||||
# Set the signal handler and a 1-second alarm
|
||||
signal.signal(signal.SIGALRM, signal_handler)
|
||||
signal.alarm(1)
|
||||
try:
|
||||
kinit.wait() # Wait for the child to exit
|
||||
signal.alarm(0) # Reset the alarm
|
||||
return kinit.returncode # Exit status of child on graceful exit
|
||||
except Alarm:
|
||||
# Taking too long, kill and return error
|
||||
kinit.kill()
|
||||
return -1
|
@ -1,156 +0,0 @@
|
||||
# Copyright (c) 2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import errno
|
||||
from ConfigParser import ConfigParser
|
||||
from swift.common.ring import ring
|
||||
from swift.common.utils import search_tree
|
||||
from gluster.swift.common.Glusterfs import SWIFT_DIR
|
||||
|
||||
reseller_prefix = "AUTH_"
|
||||
conf_files = search_tree(SWIFT_DIR, "proxy-server*", 'conf')
|
||||
if conf_files:
|
||||
conf_file = conf_files[0]
|
||||
|
||||
_conf = ConfigParser()
|
||||
if conf_files and _conf.read(conf_file):
|
||||
if _conf.defaults().get("reseller_prefix", None):
|
||||
reseller_prefix = _conf.defaults().get("reseller_prefix")
|
||||
else:
|
||||
for key, value in _conf._sections.items():
|
||||
if value.get("reseller_prefix", None):
|
||||
reseller_prefix = value["reseller_prefix"]
|
||||
break
|
||||
|
||||
if not reseller_prefix.endswith('_'):
|
||||
reseller_prefix = reseller_prefix + '_'
|
||||
|
||||
|
||||
class Ring(ring.Ring):
|
||||
|
||||
def __init__(self, serialized_path, reload_time=15, ring_name=None):
|
||||
self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1',
|
||||
'id': 0, 'meta': '', 'device': 'volume_not_in_ring',
|
||||
'port': 6012}
|
||||
self.account_list = []
|
||||
|
||||
if ring_name:
|
||||
_serialized_path = os.path.join(serialized_path,
|
||||
ring_name + '.ring.gz')
|
||||
else:
|
||||
_serialized_path = os.path.join(serialized_path)
|
||||
|
||||
if not os.path.exists(_serialized_path):
|
||||
raise OSError(errno.ENOENT, 'No such file or directory',
|
||||
'%s ring file does not exists, aborting '
|
||||
'proxy-server start.' % _serialized_path)
|
||||
|
||||
ring.Ring.__init__(self, serialized_path, reload_time, ring_name)
|
||||
|
||||
def _get_part_nodes(self, part):
|
||||
seen_ids = set()
|
||||
|
||||
try:
|
||||
account = self.account_list[part]
|
||||
except IndexError:
|
||||
return [self.false_node]
|
||||
else:
|
||||
nodes = []
|
||||
for dev in self._devs:
|
||||
if dev['device'] == account:
|
||||
if dev['id'] not in seen_ids:
|
||||
seen_ids.add(dev['id'])
|
||||
nodes.append(dev)
|
||||
if not nodes:
|
||||
nodes = [self.false_node]
|
||||
return nodes
|
||||
|
||||
def get_part_nodes(self, part):
|
||||
"""
|
||||
Get the nodes that are responsible for the partition. If one
|
||||
node is responsible for more than one replica of the same
|
||||
partition, it will only appear in the output once.
|
||||
|
||||
:param part: partition to get nodes for
|
||||
:returns: list of node dicts
|
||||
|
||||
See :func:`get_nodes` for a description of the node dicts.
|
||||
"""
|
||||
return self._get_part_nodes(part)
|
||||
|
||||
def get_part(self, account, container=None, obj=None):
|
||||
"""
|
||||
Get the partition for an account/container/object.
|
||||
|
||||
:param account: account name
|
||||
:param container: container name
|
||||
:param obj: object name
|
||||
:returns: the partition number
|
||||
"""
|
||||
if account.startswith(reseller_prefix):
|
||||
account = account.replace(reseller_prefix, '', 1)
|
||||
|
||||
# Save the account name in the table
|
||||
# This makes part be the index of the location of the account
|
||||
# in the list
|
||||
try:
|
||||
part = self.account_list.index(account)
|
||||
except ValueError:
|
||||
self.account_list.append(account)
|
||||
part = self.account_list.index(account)
|
||||
|
||||
return part
|
||||
|
||||
def get_nodes(self, account, container=None, obj=None):
|
||||
"""
|
||||
Get the partition and nodes for an account/container/object.
|
||||
If a node is responsible for more than one replica, it will
|
||||
only appear in the output once.
|
||||
:param account: account name
|
||||
:param container: container name
|
||||
:param obj: object name
|
||||
:returns: a tuple of (partition, list of node dicts)
|
||||
|
||||
Each node dict will have at least the following keys:
|
||||
====== ===============================================================
|
||||
id unique integer identifier amongst devices
|
||||
weight a float of the relative weight of this device as compared to
|
||||
others; this indicates how many partitions the builder will try
|
||||
to assign to this device
|
||||
zone integer indicating which zone the device is in; a given
|
||||
partition will not be assigned to multiple devices within the
|
||||
same zone
|
||||
ip the ip address of the device
|
||||
port the tcp port of the device
|
||||
device the device's name on disk (sdb1, for example)
|
||||
meta general use 'extra' field; for example: the online date, the
|
||||
hardware description
|
||||
====== ===============================================================
|
||||
"""
|
||||
part = self.get_part(account, container, obj)
|
||||
return part, self._get_part_nodes(part)
|
||||
|
||||
def get_more_nodes(self, part):
|
||||
"""
|
||||
Generator to get extra nodes for a partition for hinted handoff.
|
||||
|
||||
:param part: partition to get handoff nodes for
|
||||
:returns: generator of node dicts
|
||||
|
||||
See :func:`get_nodes` for a description of the node dicts.
|
||||
Should never be called in the swift UFO environment, so yield nothing
|
||||
"""
|
||||
return []
|
@ -1,85 +0,0 @@
|
||||
# Copyright (c) 2012-2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Container Server for Gluster Swift UFO """
|
||||
|
||||
# Simply importing this monkey patches the constraint handling to fit our
|
||||
# needs
|
||||
import gluster.swift.common.constraints # noqa
|
||||
|
||||
from swift.container import server
|
||||
from gluster.swift.common.DiskDir import DiskDir
|
||||
from swift.common.utils import public, timing_stats
|
||||
from swift.common.exceptions import DiskFileNoSpace
|
||||
from swift.common.swob import HTTPInsufficientStorage
|
||||
|
||||
|
||||
class ContainerController(server.ContainerController):
|
||||
"""
|
||||
Subclass of the container server's ContainerController which replaces the
|
||||
_get_container_broker() method so that we can use Gluster's DiskDir
|
||||
duck-type of the container DatabaseBroker object, and make the
|
||||
account_update() method a no-op (information is simply stored on disk and
|
||||
already updated by virtue of performaing the file system operations
|
||||
directly).
|
||||
"""
|
||||
|
||||
def _get_container_broker(self, drive, part, account, container, **kwargs):
|
||||
"""
|
||||
Overriden to provide the GlusterFS specific broker that talks to
|
||||
Gluster for the information related to servicing a given request
|
||||
instead of talking to a database.
|
||||
|
||||
:param drive: drive that holds the container
|
||||
:param part: partition the container is in
|
||||
:param account: account name
|
||||
:param container: container name
|
||||
:returns: DiskDir object, a duck-type of DatabaseBroker
|
||||
"""
|
||||
return DiskDir(self.root, drive, account, container, self.logger,
|
||||
**kwargs)
|
||||
|
||||
def account_update(self, req, account, container, broker):
|
||||
"""
|
||||
Update the account server(s) with latest container info.
|
||||
|
||||
For Gluster, this is just a no-op, since an account is just the
|
||||
directory holding all the container directories.
|
||||
|
||||
:param req: swob.Request object
|
||||
:param account: account name
|
||||
:param container: container name
|
||||
:param broker: container DB broker object
|
||||
:returns: None.
|
||||
"""
|
||||
return None
|
||||
|
||||
@public
|
||||
@timing_stats()
|
||||
def PUT(self, req):
|
||||
try:
|
||||
return server.ContainerController.PUT(self, req)
|
||||
except DiskFileNoSpace:
|
||||
# As container=directory in gluster-swift, we might run out of
|
||||
# space or exceed quota when creating containers.
|
||||
drive = req.split_path(1, 1, True)
|
||||
return HTTPInsufficientStorage(drive=drive, request=req)
|
||||
|
||||
|
||||
def app_factory(global_conf, **local_conf):
|
||||
"""paste.deploy app factory for creating WSGI container server apps."""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
return ContainerController(conf)
|
@ -28,14 +28,6 @@ from swift.obj import server
|
||||
|
||||
from gluster.swift.obj.diskfile import OnDiskManager
|
||||
|
||||
import os
|
||||
from swift.common.exceptions import ConnectionTimeout
|
||||
from swift.common.bufferedhttp import http_connect
|
||||
from eventlet import Timeout
|
||||
from swift.common.http import is_success
|
||||
from gluster.swift.common.ring import Ring
|
||||
from swift import gettext_ as _
|
||||
|
||||
|
||||
class ObjectController(server.ObjectController):
|
||||
"""
|
||||
@ -79,92 +71,6 @@ class ObjectController(server.ObjectController):
|
||||
"""
|
||||
return
|
||||
|
||||
def get_object_ring(self):
|
||||
if hasattr(self, 'object_ring'):
|
||||
if not self.object_ring:
|
||||
self.object_ring = Ring(self.swift_dir, ring_name='object')
|
||||
else:
|
||||
self.object_ring = Ring(self.swift_dir, ring_name='object')
|
||||
return self.object_ring
|
||||
|
||||
def async_update(self, op, account, container, obj, host, partition,
|
||||
contdevice, headers_out, objdevice):
|
||||
"""
|
||||
In Openstack Swift, this method is called by:
|
||||
* container_update (a no-op in gluster-swift)
|
||||
* delete_at_update (to PUT objects into .expiring_objects account)
|
||||
|
||||
The Swift's version of async_update only sends the request to
|
||||
container-server to PUT the object. The container-server calls
|
||||
container_update method which makes an entry for the object in it's
|
||||
database. No actual object is created on disk.
|
||||
|
||||
But in gluster-swift container_update is a no-op, so we'll
|
||||
have to PUT an actual object. We override async_update to create a
|
||||
container first and then the corresponding "tracker object" which
|
||||
tracks expired objects scheduled for deletion.
|
||||
"""
|
||||
|
||||
headers_out['user-agent'] = 'obj-server %s' % os.getpid()
|
||||
if all([host, partition, contdevice]):
|
||||
# PUT the container. Send request directly to container-server
|
||||
container_path = '/%s/%s' % (account, container)
|
||||
try:
|
||||
with ConnectionTimeout(self.conn_timeout):
|
||||
ip, port = host.rsplit(':', 1)
|
||||
conn = http_connect(ip, port, contdevice, partition, op,
|
||||
container_path, headers_out)
|
||||
with Timeout(self.node_timeout):
|
||||
response = conn.getresponse()
|
||||
response.read()
|
||||
if not is_success(response.status):
|
||||
self.logger.error(_(
|
||||
'async_update : '
|
||||
'ERROR Container update failed :%(status)d '
|
||||
'response from %(ip)s:%(port)s/%(dev)s'),
|
||||
{'status': response.status, 'ip': ip, 'port': port,
|
||||
'dev': contdevice})
|
||||
return
|
||||
except (Exception, Timeout):
|
||||
self.logger.exception(_(
|
||||
'async_update : '
|
||||
'ERROR Container update failed :%(ip)s:%(port)s/%(dev)s'),
|
||||
{'ip': ip, 'port': port, 'dev': contdevice})
|
||||
|
||||
# PUT the tracker object. Send request directly to object-server
|
||||
object_path = '/%s/%s/%s' % (account, container, obj)
|
||||
headers_out['Content-Length'] = 0
|
||||
headers_out['Content-Type'] = 'text/plain'
|
||||
try:
|
||||
with ConnectionTimeout(self.conn_timeout):
|
||||
# FIXME: Assuming that get_nodes returns single node
|
||||
part, nodes = self.get_object_ring().get_nodes(account,
|
||||
container,
|
||||
obj)
|
||||
ip = nodes[0]['ip']
|
||||
port = nodes[0]['port']
|
||||
objdevice = nodes[0]['device']
|
||||
conn = http_connect(ip, port, objdevice, partition, op,
|
||||
object_path, headers_out)
|
||||
with Timeout(self.node_timeout):
|
||||
response = conn.getresponse()
|
||||
response.read()
|
||||
if is_success(response.status):
|
||||
return
|
||||
else:
|
||||
self.logger.error(_(
|
||||
'async_update : '
|
||||
'ERROR Object PUT failed : %(status)d '
|
||||
'response from %(ip)s:%(port)s/%(dev)s'),
|
||||
{'status': response.status, 'ip': ip, 'port': port,
|
||||
'dev': objdevice})
|
||||
except (Exception, Timeout):
|
||||
self.logger.exception(_(
|
||||
'async_update : '
|
||||
'ERROR Object PUT failed :%(ip)s:%(port)s/%(dev)s'),
|
||||
{'ip': ip, 'port': port, 'dev': objdevice})
|
||||
return
|
||||
|
||||
@public
|
||||
@timing_stats()
|
||||
def PUT(self, request):
|
||||
|
@ -1,31 +0,0 @@
|
||||
# Copyright (c) 2012-2013 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Simply importing this monkey patches the constraint handling to fit our
|
||||
# needs
|
||||
import gluster.swift.common.constraints # noqa
|
||||
|
||||
from swift.proxy.server import Application, mimetypes # noqa
|
||||
from swift.proxy.controllers import AccountController # noqa
|
||||
from swift.proxy.controllers import ObjectController # noqa
|
||||
from swift.proxy.controllers import ContainerController # noqa
|
||||
|
||||
|
||||
def app_factory(global_conf, **local_conf): # noqa
|
||||
"""paste.deploy app factory for creating WSGI proxy apps."""
|
||||
conf = global_conf.copy()
|
||||
conf.update(local_conf)
|
||||
return Application(conf)
|
@ -1 +0,0 @@
|
||||
Subproject commit f310006fae1af991097eee5929a1c73051eb1e00
|
20
setup.py
20
setup.py
@ -41,31 +41,11 @@ setup(
|
||||
],
|
||||
install_requires=[],
|
||||
scripts=[
|
||||
'bin/gluster-swift-gen-builders',
|
||||
'bin/gluster-swift-print-metadata',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-add-account',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-add-user',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-cleanup-tokens',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-delete-account',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-delete-user',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-list',
|
||||
'gluster/swift/common/middleware/gswauth/bin/gswauth-prep',
|
||||
'gluster/swift/common/middleware/gswauth/bin/'
|
||||
'gswauth-set-account-service',
|
||||
|
||||
],
|
||||
entry_points={
|
||||
'paste.app_factory': [
|
||||
'proxy=gluster.swift.proxy.server:app_factory',
|
||||
'object=gluster.swift.obj.server:app_factory',
|
||||
'container=gluster.swift.container.server:app_factory',
|
||||
'account=gluster.swift.account.server:app_factory',
|
||||
],
|
||||
'paste.filter_factory': [
|
||||
'gswauth=gluster.swift.common.middleware.gswauth.swauth.'
|
||||
'middleware:filter_factory',
|
||||
'kerbauth=gluster.swift.common.middleware.'
|
||||
'swiftkerbauth.kerbauth:filter_factory',
|
||||
],
|
||||
},
|
||||
)
|
||||
|
@ -1,32 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the account-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = true
|
||||
bind_port = 6012
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:gluster_swift#account
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the account server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
@ -1,35 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the container-server workers
|
||||
# start, you can *consider* setting this value to "false" to reduce the
|
||||
# per-request overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = true
|
||||
bind_port = 6011
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:gluster_swift#container
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the container server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
||||
|
||||
#enable object versioning for functional test
|
||||
allow_versions = on
|
@ -1,19 +0,0 @@
|
||||
[DEFAULT]
|
||||
#
|
||||
# IP address of a node in the GlusterFS server cluster hosting the
|
||||
# volumes to be served via Swift API.
|
||||
mount_ip = localhost
|
||||
|
||||
# Performance optimization parameter. When turned off, the filesystem will
|
||||
# see a reduced number of stat calls, resulting in substantially faster
|
||||
# response time for GET and HEAD container requests on containers with large
|
||||
# numbers of objects, at the expense of an accurate count of combined bytes
|
||||
# used by all objects in the container. For most installations "off" works
|
||||
# fine.
|
||||
#
|
||||
# *** Keep on for Functional Tests ***
|
||||
accurate_size_in_listing = on
|
||||
|
||||
# *** Keep on for Functional Tests ***
|
||||
container_update_object_count = on
|
||||
account_update_container_count = on
|
@ -1,17 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
[object-expirer]
|
||||
# auto_create_account_prefix = .
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
memcache_servers = 127.0.0.1:11211
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
@ -1,48 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the object-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = true
|
||||
bind_port = 6010
|
||||
#
|
||||
# Maximum number of clients one worker can process simultaneously (it will
|
||||
# actually accept N + 1). Setting this to one (1) will only handle one request
|
||||
# at a time, without accepting another request concurrently. By increasing the
|
||||
# number of workers to a much higher value, one can prevent slow file system
|
||||
# operations for one request from starving other requests.
|
||||
max_clients = 1024
|
||||
#
|
||||
# If not doing the above, setting this value initially to match the number of
|
||||
# CPUs is a good starting point for determining the right value.
|
||||
workers = 1
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = object-server
|
||||
|
||||
[app:object-server]
|
||||
use = egg:gluster_swift#object
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# For performance, after ensuring things are running in a stable manner, you
|
||||
# can turn off normal request logging for the object server to reduce the
|
||||
# per-request overhead and unclutter the log files. Warnings and errors will
|
||||
# still be logged.
|
||||
log_requests = off
|
||||
#
|
||||
# Adjust this value to match the stripe width of the underlying storage array
|
||||
# (not the stripe element size). This will provide a reasonable starting point
|
||||
# for tuning this value.
|
||||
disk_chunk_size = 65536
|
||||
#
|
||||
# Adjust this value match whatever is set for the disk_chunk_size initially.
|
||||
# This will provide a reasonable starting point for tuning this value.
|
||||
network_chunk_size = 65556
|
@ -1,78 +0,0 @@
|
||||
[DEFAULT]
|
||||
bind_port = 8080
|
||||
user = root
|
||||
# Consider using 1 worker per CPU
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck proxy-logging cache gswauth proxy-logging proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:gluster_swift#proxy
|
||||
log_facility = LOG_LOCAL1
|
||||
log_level = WARN
|
||||
# The API allows for account creation and deletion, but since Gluster/Swift
|
||||
# automounts a Gluster volume for a given account, there is no way to create
|
||||
# or delete an account. So leave this off.
|
||||
allow_account_management = false
|
||||
account_autocreate = true
|
||||
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
|
||||
# of the entire object given that all metadata is stored in the object
|
||||
# extended attributes (no .meta file used after creation) and no container
|
||||
# sync feature to present.
|
||||
object_post_as_copy = false
|
||||
# Only need to recheck the account exists once a day
|
||||
recheck_account_existence = 86400
|
||||
# May want to consider bumping this up if containers are created and destroyed
|
||||
# infrequently.
|
||||
recheck_container_existence = 60
|
||||
# Timeout clients that don't read or write to the proxy server after 5
|
||||
# seconds.
|
||||
client_timeout = 5
|
||||
# Give more time to connect to the object, container or account servers in
|
||||
# cases of high load.
|
||||
conn_timeout = 5
|
||||
# For high load situations, once connected to an object, container or account
|
||||
# server, allow for delays communicating with them.
|
||||
node_timeout = 60
|
||||
# May want to consider bumping up this value to 1 - 4 MB depending on how much
|
||||
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
|
||||
# stripe width (not stripe element size) of your storage volume is a good
|
||||
# starting point. See below for sizing information.
|
||||
object_chunk_size = 65536
|
||||
# If you do decide to increase the object_chunk_size, then consider lowering
|
||||
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
|
||||
# be queued to the object server for processing. Given one proxy server worker
|
||||
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
|
||||
# * 1,024 bytes of memory in the worse case (default values). Be sure the
|
||||
# amount of memory available on the system can accommodate increased values
|
||||
# for object_chunk_size.
|
||||
put_queue_depth = 10
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
||||
|
||||
[filter:proxy-logging]
|
||||
use = egg:swift#proxy_logging
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:tempauth]
|
||||
use = egg:swift#tempauth
|
||||
user_admin_admin = admin .admin .reseller_admin
|
||||
user_test_tester = testing .admin
|
||||
user_test2_tester2 = testing2 .admin
|
||||
user_test_tester3 = testing3
|
||||
|
||||
[filter:gswauth]
|
||||
use = egg:gluster_swift#gswauth
|
||||
set log_name = gswauth
|
||||
super_admin_key = gswauthkey
|
||||
metadata_volume = gsmetadata
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
# Update this line to contain a comma separated list of memcache servers
|
||||
# shared by all nodes running the proxy-server service.
|
||||
memcache_servers = localhost:11211
|
@ -1,85 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
|
||||
[swift-hash]
|
||||
# random unique string that can never change (DO NOT LOSE)
|
||||
swift_hash_path_suffix = gluster
|
||||
|
||||
|
||||
# The swift-constraints section sets the basic constraints on data
|
||||
# saved in the swift cluster.
|
||||
|
||||
[swift-constraints]
|
||||
|
||||
# max_file_size is the largest "normal" object that can be saved in
|
||||
# the cluster. This is also the limit on the size of each segment of
|
||||
# a "large" object when using the large object manifest support.
|
||||
# This value is set in bytes. Setting it to lower than 1MiB will cause
|
||||
# some tests to fail.
|
||||
# Default is 1 TiB = 2**30*1024
|
||||
max_file_size = 1099511627776
|
||||
|
||||
|
||||
# max_meta_name_length is the max number of bytes in the utf8 encoding
|
||||
# of the name portion of a metadata header.
|
||||
|
||||
#max_meta_name_length = 128
|
||||
|
||||
|
||||
# max_meta_value_length is the max number of bytes in the utf8 encoding
|
||||
# of a metadata value
|
||||
|
||||
#max_meta_value_length = 256
|
||||
|
||||
|
||||
# max_meta_count is the max number of metadata keys that can be stored
|
||||
# on a single account, container, or object
|
||||
|
||||
#max_meta_count = 90
|
||||
|
||||
|
||||
# max_meta_overall_size is the max number of bytes in the utf8 encoding
|
||||
# of the metadata (keys + values)
|
||||
|
||||
#max_meta_overall_size = 4096
|
||||
|
||||
|
||||
# max_object_name_length is the max number of bytes in the utf8 encoding of an
|
||||
# object name: Gluster FS can handle much longer file names, but the length
|
||||
# between the slashes of the URL is handled below. Remember that most web
|
||||
# clients can't handle anything greater than 2048, and those that do are
|
||||
# rather clumsy.
|
||||
|
||||
max_object_name_length = 2048
|
||||
|
||||
# max_object_name_component_length (GlusterFS) is the max number of bytes in
|
||||
# the utf8 encoding of an object name component (the part between the
|
||||
# slashes); this is a limit imposed by the underlying file system (for XFS it
|
||||
# is 255 bytes).
|
||||
|
||||
max_object_name_component_length = 255
|
||||
|
||||
# container_listing_limit is the default (and max) number of items
|
||||
# returned for a container listing request
|
||||
|
||||
#container_listing_limit = 10000
|
||||
|
||||
|
||||
# account_listing_limit is the default (and max) number of items returned
|
||||
# for an account listing request
|
||||
|
||||
#account_listing_limit = 10000
|
||||
|
||||
|
||||
# max_account_name_length is the max number of bytes in the utf8 encoding of
|
||||
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||
# size as max_object_name_component_length above.
|
||||
|
||||
max_account_name_length = 255
|
||||
|
||||
|
||||
# max_container_name_length is the max number of bytes in the utf8 encoding
|
||||
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||
# size as max_object_name_component_length above.
|
||||
|
||||
max_container_name_length = 255
|
@ -1,58 +0,0 @@
|
||||
[func_test]
|
||||
# sample config
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 8080
|
||||
auth_ssl = no
|
||||
auth_prefix = /auth/
|
||||
## sample config for Swift with Keystone
|
||||
#auth_version = 2
|
||||
#auth_host = localhost
|
||||
#auth_port = 5000
|
||||
#auth_ssl = no
|
||||
#auth_prefix = /v2.0/
|
||||
|
||||
# GSWauth internal admin user configuration information
|
||||
admin_key = gswauthkey
|
||||
admin_user = .super_admin
|
||||
|
||||
# Gluster setup information
|
||||
devices = /mnt/gluster-object
|
||||
gsmetadata_volume = gsmetadata
|
||||
|
||||
# Primary functional test account (needs admin access to the account)
|
||||
account = test
|
||||
username = tester
|
||||
password = testing
|
||||
|
||||
# User on a second account (needs admin access to the account)
|
||||
account2 = test2
|
||||
username2 = tester2
|
||||
password2 = testing2
|
||||
|
||||
# User on same account as first, but without admin access
|
||||
username3 = tester3
|
||||
password3 = testing3
|
||||
|
||||
# Default constraints if not defined here, the test runner will try
|
||||
# to set them from /etc/swift/swift.conf. If that file isn't found,
|
||||
# the test runner will skip tests that depend on these values.
|
||||
# Note that the cluster must have "sane" values for the test suite to pass.
|
||||
#max_file_size = 5368709122
|
||||
#max_meta_name_length = 128
|
||||
#max_meta_value_length = 256
|
||||
#max_meta_count = 90
|
||||
#max_meta_overall_size = 4096
|
||||
#max_object_name_length = 1024
|
||||
#container_listing_limit = 10000
|
||||
#account_listing_limit = 10000
|
||||
#max_account_name_length = 256
|
||||
#max_container_name_length = 256
|
||||
normalized_urls = True
|
||||
|
||||
collate = C
|
||||
|
||||
[unit_test]
|
||||
fake_syslog = False
|
||||
|
||||
[probe_test]
|
||||
# check_server_timeout = 30
|
@ -1,106 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (c) 2014 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This program expects to be run against a locally deployed swiftonfile
|
||||
# applicatoin. This tests also expects three glusterfs volumes to have
|
||||
# been created: 'test', 'test2', and 'gsmetadata'.
|
||||
|
||||
cleanup()
|
||||
{
|
||||
service memcached stop
|
||||
swift-init main stop
|
||||
if [ -x /etc/swift.bak ] ; then
|
||||
rm -rf /etc/swift > /dev/null 2>&1
|
||||
mv /etc/swift.bak /etc/swift > /dev/null 2>&1
|
||||
fi
|
||||
rm -rf /mnt/gluster-object/test{,2}/* > /dev/null 2>&1
|
||||
setfattr -x user.swift.metadata /mnt/gluster-object/test{,2} > /dev/null 2>&1
|
||||
gswauth_cleanup
|
||||
}
|
||||
|
||||
gswauth_cleanup()
|
||||
{
|
||||
rm -rf /mnt/gluster-object/gsmetadata/.* > /dev/null 2>&1
|
||||
rm -rf /mnt/gluster-object/gsmetadata/* > /dev/null 2>&1
|
||||
setfattr -x user.swift.metadata /mnt/gluster-object/gsmetadata > /dev/null 2>&1
|
||||
}
|
||||
|
||||
quit()
|
||||
{
|
||||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
|
||||
fail()
|
||||
{
|
||||
cleanup
|
||||
quit "$1"
|
||||
}
|
||||
|
||||
run_generic_tests()
|
||||
{
|
||||
# clean up gsmetadata dir
|
||||
gswauth_cleanup
|
||||
|
||||
#swauth-prep
|
||||
gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"
|
||||
gswauth-add-user -K gswauthkey -a test tester testing || fail "Unable to add user test"
|
||||
gswauth-add-user -K gswauthkey -a test2 tester2 testing2 || fail "Unable to add user test2"
|
||||
gswauth-add-user -K gswauthkey test tester3 testing3 || fail "Unable to add user test3"
|
||||
|
||||
nosetests -v --exe \
|
||||
--with-xunit \
|
||||
--xunit-file functional_tests/gluster-swift-gswauth-generic-functional-TC-report.xml \
|
||||
test/functional || fail "Functional tests failed"
|
||||
nosetests -v --exe \
|
||||
--with-xunit \
|
||||
--xunit-file functional_tests/gluster-swift-gswauth-functionalnosetests-TC-report.xml \
|
||||
test/functionalnosetests || fail "Functional-nose tests failed"
|
||||
}
|
||||
|
||||
### MAIN ###
|
||||
|
||||
# Backup the swift directory if it already exists
|
||||
if [ -x /etc/swift ] ; then
|
||||
mv /etc/swift /etc/swift.bak
|
||||
fi
|
||||
|
||||
export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf
|
||||
|
||||
# Install the configuration files
|
||||
mkdir /etc/swift > /dev/null 2>&1
|
||||
cp -r test/deploy/glusterfs/conf/* /etc/swift || fail "Unable to copy configuration files to /etc/swift"
|
||||
gluster-swift-gen-builders test test2 gsmetadata || fail "Unable to create ring files"
|
||||
|
||||
# Start the services
|
||||
service memcached start || fail "Unable to start memcached"
|
||||
swift-init main start || fail "Unable to start swift"
|
||||
|
||||
#swauth-prep
|
||||
gswauth-prep -K gswauthkey || fail "Unable to prep gswauth"
|
||||
|
||||
mkdir functional_tests > /dev/null 2>&1
|
||||
nosetests -v --exe \
|
||||
--with-xunit \
|
||||
--xunit-file functional_tests/gluster-swift-gswauth-functional-TC-report.xml \
|
||||
test/functional_auth/gswauth || fail "Functional gswauth test failed"
|
||||
|
||||
run_generic_tests
|
||||
|
||||
cleanup
|
||||
exit 0
|
@ -1,771 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2010-2012 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
import json
|
||||
from uuid import uuid4
|
||||
from nose import SkipTest
|
||||
from string import letters
|
||||
|
||||
from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
|
||||
MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH
|
||||
from swift.common.middleware.acl import format_acl
|
||||
from swift_testing import (check_response, retry, skip, skip2, skip3,
|
||||
web_front_end, requires_acls)
|
||||
import swift_testing
|
||||
from test.functional.tests import load_constraint
|
||||
|
||||
|
||||
class TestAccount(unittest.TestCase):
|
||||
|
||||
def test_metadata(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, value):
|
||||
conn.request('POST', parsed.path, '',
|
||||
{'X-Auth-Token': token, 'X-Account-Meta-Test': value})
|
||||
return check_response(conn)
|
||||
|
||||
def head(url, token, parsed, conn):
|
||||
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
resp = retry(post, '')
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-test'), None)
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-test'), None)
|
||||
resp = retry(post, 'Value')
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-test'), 'Value')
|
||||
|
||||
def test_invalid_acls(self):
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# needs to be an acceptable header size
|
||||
num_keys = 8
|
||||
max_key_size = load_constraint('max_header_size') / num_keys
|
||||
acl = {'admin': [c * max_key_size for c in letters[:num_keys]]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
# and again a touch smaller
|
||||
acl = {'admin': [c * max_key_size for c in letters[:num_keys - 1]]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
@requires_acls
|
||||
def test_invalid_acl_keys(self):
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# needs to be json
|
||||
resp = retry(post, headers={'X-Account-Access-Control': 'invalid'},
|
||||
use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
acl_user = swift_testing.swift_test_user[1]
|
||||
acl = {'admin': [acl_user], 'invalid_key': 'invalid_value'}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
|
||||
resp = retry(post, headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
@requires_acls
|
||||
def test_invalid_acl_values(self):
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
acl = {'admin': 'invalid_value'}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
@requires_acls
|
||||
def test_read_only_acl(self):
|
||||
if skip3:
|
||||
raise SkipTest
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# cannot read account
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assertEquals(resp.status, 403)
|
||||
|
||||
# grant read access
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'read-only': [acl_user]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# read-only can read account headers
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204))
|
||||
# but not acls
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# read-only can not write metadata
|
||||
headers = {'x-account-meta-test': 'value'}
|
||||
resp = retry(post, headers=headers, use_account=3)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 403)
|
||||
|
||||
# but they can read it
|
||||
headers = {'x-account-meta-test': 'value'}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204))
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Test'), 'value')
|
||||
|
||||
@requires_acls
|
||||
def test_read_write_acl(self):
|
||||
if skip3:
|
||||
raise SkipTest
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# cannot read account
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assertEquals(resp.status, 403)
|
||||
|
||||
# grant read-write access
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'read-write': [acl_user]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# read-write can read account headers
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204))
|
||||
# but not acls
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# read-write can not write account metadata
|
||||
headers = {'x-account-meta-test': 'value'}
|
||||
resp = retry(post, headers=headers, use_account=3)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 403)
|
||||
|
||||
@requires_acls
|
||||
def test_admin_acl(self):
|
||||
if skip3:
|
||||
raise SkipTest
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# cannot read account
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assertEquals(resp.status, 403)
|
||||
|
||||
# grant admin access
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'admin': [acl_user]}
|
||||
acl_json_str = format_acl(version=2, acl_dict=acl)
|
||||
headers = {'x-account-access-control': acl_json_str}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# admin can read account headers
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204))
|
||||
# including acls
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'),
|
||||
acl_json_str)
|
||||
|
||||
# admin can write account metadata
|
||||
value = str(uuid4())
|
||||
headers = {'x-account-meta-test': value}
|
||||
resp = retry(post, headers=headers, use_account=3)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204))
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
|
||||
|
||||
# admin can even revoke their own access
|
||||
headers = {'x-account-access-control': '{}'}
|
||||
resp = retry(post, headers=headers, use_account=3)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# and again, cannot read account
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assertEquals(resp.status, 403)
|
||||
|
||||
@requires_acls
|
||||
def test_protected_tempurl(self):
|
||||
if skip3:
|
||||
raise SkipTest
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
# add a account metadata, and temp-url-key to account
|
||||
value = str(uuid4())
|
||||
headers = {
|
||||
'x-account-meta-temp-url-key': 'secret',
|
||||
'x-account-meta-test': value,
|
||||
}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# grant read-only access to tester3
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'read-only': [acl_user]}
|
||||
acl_json_str = format_acl(version=2, acl_dict=acl)
|
||||
headers = {'x-account-access-control': acl_json_str}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# read-only tester3 can read account metadata
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204),
|
||||
'Expected status in (200, 204), got %s' % resp.status)
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
|
||||
# but not temp-url-key
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)
|
||||
|
||||
# grant read-write access to tester3
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'read-write': [acl_user]}
|
||||
acl_json_str = format_acl(version=2, acl_dict=acl)
|
||||
headers = {'x-account-access-control': acl_json_str}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# read-write tester3 can read account metadata
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204),
|
||||
'Expected status in (200, 204), got %s' % resp.status)
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
|
||||
# but not temp-url-key
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'), None)
|
||||
|
||||
# grant admin access to tester3
|
||||
acl_user = swift_testing.swift_test_user[2]
|
||||
acl = {'admin': [acl_user]}
|
||||
acl_json_str = format_acl(version=2, acl_dict=acl)
|
||||
headers = {'x-account-access-control': acl_json_str}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# admin tester3 can read account metadata
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204),
|
||||
'Expected status in (200, 204), got %s' % resp.status)
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Test'), value)
|
||||
# including temp-url-key
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'),
|
||||
'secret')
|
||||
|
||||
# admin tester3 can even change temp-url-key
|
||||
secret = str(uuid4())
|
||||
headers = {
|
||||
'x-account-meta-temp-url-key': secret,
|
||||
}
|
||||
resp = retry(post, headers=headers, use_account=3)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(get, use_account=3)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204),
|
||||
'Expected status in (200, 204), got %s' % resp.status)
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-Temp-Url-Key'),
|
||||
secret)
|
||||
|
||||
@requires_acls
|
||||
def test_account_acls(self):
|
||||
if skip2:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
def put(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('PUT', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
def delete(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('DELETE', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
def head(url, token, parsed, conn):
|
||||
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
try:
|
||||
# User1 can POST to their own account (and reset the ACLs)
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'},
|
||||
use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User1 can GET their own empty account
|
||||
resp = retry(get, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User2 can't GET User1's account
|
||||
resp = retry(get, use_account=2, url_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 403)
|
||||
|
||||
# User1 is swift_owner of their own account, so they can POST an
|
||||
# ACL -- let's do this and make User2 (test_user[1]) an admin
|
||||
acl_user = swift_testing.swift_test_user[1]
|
||||
acl = {'admin': [acl_user]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# User1 can see the new header
|
||||
resp = retry(get, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
data_from_headers = resp.getheader('x-account-access-control')
|
||||
expected = json.dumps(acl, separators=(',', ':'))
|
||||
self.assertEqual(data_from_headers, expected)
|
||||
|
||||
# Now User2 should be able to GET the account and see the ACL
|
||||
resp = retry(head, use_account=2, url_account=1)
|
||||
resp.read()
|
||||
data_from_headers = resp.getheader('x-account-access-control')
|
||||
self.assertEqual(data_from_headers, expected)
|
||||
|
||||
# Revoke User2's admin access, grant User2 read-write access
|
||||
acl = {'read-write': [acl_user]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# User2 can still GET the account, but not see the ACL
|
||||
# (since it's privileged data)
|
||||
resp = retry(head, use_account=2, url_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
self.assertEqual(resp.getheader('x-account-access-control'), None)
|
||||
|
||||
# User2 can PUT and DELETE a container
|
||||
resp = retry(put, use_account=2, url_account=1,
|
||||
resource='%(storage_url)s/mycontainer', headers={})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 201)
|
||||
resp = retry(delete, use_account=2, url_account=1,
|
||||
resource='%(storage_url)s/mycontainer', headers={})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# Revoke User2's read-write access, grant User2 read-only access
|
||||
acl = {'read-only': [acl_user]}
|
||||
headers = {'x-account-access-control': format_acl(
|
||||
version=2, acl_dict=acl)}
|
||||
resp = retry(post, headers=headers, use_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# User2 can still GET the account, but not see the ACL
|
||||
# (since it's privileged data)
|
||||
resp = retry(head, use_account=2, url_account=1)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
self.assertEqual(resp.getheader('x-account-access-control'), None)
|
||||
|
||||
# User2 can't PUT a container
|
||||
resp = retry(put, use_account=2, url_account=1,
|
||||
resource='%(storage_url)s/mycontainer', headers={})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 403)
|
||||
|
||||
finally:
|
||||
# Make sure to clean up even if tests fail -- User2 should not
|
||||
# have access to User1's account in other functional tests!
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'},
|
||||
use_account=1)
|
||||
resp.read()
|
||||
|
||||
@requires_acls
|
||||
def test_swift_account_acls(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
def head(url, token, parsed, conn):
|
||||
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
try:
|
||||
# User1 can POST to their own account
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User1 can GET their own empty account
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User1 can POST non-empty data
|
||||
acl_json = '{"admin":["bob"]}'
|
||||
resp = retry(post, headers={'X-Account-Access-Control': acl_json})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
|
||||
# User1 can GET the non-empty data
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'),
|
||||
acl_json)
|
||||
|
||||
# POST non-JSON ACL should fail
|
||||
resp = retry(post, headers={'X-Account-Access-Control': 'yuck'})
|
||||
resp.read()
|
||||
# resp.status will be 400 if tempauth or some other ACL-aware
|
||||
# auth middleware rejects it, or 200 (but silently swallowed by
|
||||
# core Swift) if ACL-unaware auth middleware approves it.
|
||||
|
||||
# A subsequent GET should show the old, valid data, not the garbage
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'),
|
||||
acl_json)
|
||||
|
||||
finally:
|
||||
# Make sure to clean up even if tests fail -- User2 should not
|
||||
# have access to User1's account in other functional tests!
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'})
|
||||
resp.read()
|
||||
|
||||
def test_swift_prohibits_garbage_account_acls(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, headers):
|
||||
new_headers = dict({'X-Auth-Token': token}, **headers)
|
||||
conn.request('POST', parsed.path, '', new_headers)
|
||||
return check_response(conn)
|
||||
|
||||
def get(url, token, parsed, conn):
|
||||
conn.request('GET', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
try:
|
||||
# User1 can POST to their own account
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User1 can GET their own empty account
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertEqual(resp.getheader('X-Account-Access-Control'), None)
|
||||
|
||||
# User1 can POST non-empty data
|
||||
acl_json = '{"admin":["bob"]}'
|
||||
resp = retry(post, headers={'X-Account-Access-Control': acl_json})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
# If this request is handled by ACL-aware auth middleware, then the
|
||||
# ACL will be persisted. If it is handled by ACL-unaware auth
|
||||
# middleware, then the header will be thrown out. But the request
|
||||
# should return successfully in any case.
|
||||
|
||||
# User1 can GET the non-empty data
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
# ACL will be set if some ACL-aware auth middleware (e.g. tempauth)
|
||||
# propagates it to sysmeta; if no ACL-aware auth middleware does,
|
||||
# then X-Account-Access-Control will still be empty.
|
||||
|
||||
# POST non-JSON ACL should fail
|
||||
resp = retry(post, headers={'X-Account-Access-Control': 'yuck'})
|
||||
resp.read()
|
||||
# resp.status will be 400 if tempauth or some other ACL-aware
|
||||
# auth middleware rejects it, or 200 (but silently swallowed by
|
||||
# core Swift) if ACL-unaware auth middleware approves it.
|
||||
|
||||
# A subsequent GET should either show the old, valid data (if
|
||||
# ACL-aware auth middleware is propagating it) or show nothing
|
||||
# (if no auth middleware in the pipeline is ACL-aware), but should
|
||||
# never return the garbage ACL.
|
||||
resp = retry(get)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status // 100, 2)
|
||||
self.assertNotEqual(resp.getheader('X-Account-Access-Control'),
|
||||
'yuck')
|
||||
|
||||
finally:
|
||||
# Make sure to clean up even if tests fail -- User2 should not
|
||||
# have access to User1's account in other functional tests!
|
||||
resp = retry(post, headers={'X-Account-Access-Control': '{}'})
|
||||
resp.read()
|
||||
|
||||
def test_unicode_metadata(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, name, value):
|
||||
conn.request('POST', parsed.path, '',
|
||||
{'X-Auth-Token': token, name: value})
|
||||
return check_response(conn)
|
||||
|
||||
def head(url, token, parsed, conn):
|
||||
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
uni_key = u'X-Account-Meta-uni\u0E12'
|
||||
uni_value = u'uni\u0E12'
|
||||
if (web_front_end == 'integral'):
|
||||
resp = retry(post, uni_key, '1')
|
||||
resp.read()
|
||||
self.assertTrue(resp.status in (201, 204))
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader(uni_key.encode('utf-8')), '1')
|
||||
resp = retry(post, 'X-Account-Meta-uni', uni_value)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('X-Account-Meta-uni'),
|
||||
uni_value.encode('utf-8'))
|
||||
if (web_front_end == 'integral'):
|
||||
resp = retry(post, uni_key, uni_value)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader(uni_key.encode('utf-8')),
|
||||
uni_value.encode('utf-8'))
|
||||
|
||||
def test_multi_metadata(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, name, value):
|
||||
conn.request('POST', parsed.path, '',
|
||||
{'X-Auth-Token': token, name: value})
|
||||
return check_response(conn)
|
||||
|
||||
def head(url, token, parsed, conn):
|
||||
conn.request('HEAD', parsed.path, '', {'X-Auth-Token': token})
|
||||
return check_response(conn)
|
||||
|
||||
resp = retry(post, 'X-Account-Meta-One', '1')
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-one'), '1')
|
||||
resp = retry(post, 'X-Account-Meta-Two', '2')
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(head)
|
||||
resp.read()
|
||||
self.assert_(resp.status in (200, 204), resp.status)
|
||||
self.assertEqual(resp.getheader('x-account-meta-one'), '1')
|
||||
self.assertEqual(resp.getheader('x-account-meta-two'), '2')
|
||||
|
||||
def test_bad_metadata(self):
|
||||
if skip:
|
||||
raise SkipTest
|
||||
|
||||
def post(url, token, parsed, conn, extra_headers):
|
||||
headers = {'X-Auth-Token': token}
|
||||
headers.update(extra_headers)
|
||||
conn.request('POST', parsed.path, '', headers)
|
||||
return check_response(conn)
|
||||
|
||||
resp = retry(post,
|
||||
{'X-Account-Meta-' + ('k' * MAX_META_NAME_LENGTH): 'v'})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(
|
||||
post,
|
||||
{'X-Account-Meta-' + ('k' * (MAX_META_NAME_LENGTH + 1)): 'v'})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
resp = retry(post,
|
||||
{'X-Account-Meta-Too-Long': 'k' * MAX_META_VALUE_LENGTH})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
resp = retry(
|
||||
post,
|
||||
{'X-Account-Meta-Too-Long': 'k' * (MAX_META_VALUE_LENGTH + 1)})
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
headers = {}
|
||||
for x in xrange(MAX_META_COUNT):
|
||||
headers['X-Account-Meta-%d' % x] = 'v'
|
||||
resp = retry(post, headers)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
headers = {}
|
||||
for x in xrange(MAX_META_COUNT + 1):
|
||||
headers['X-Account-Meta-%d' % x] = 'v'
|
||||
resp = retry(post, headers)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
headers = {}
|
||||
header_value = 'k' * MAX_META_VALUE_LENGTH
|
||||
size = 0
|
||||
x = 0
|
||||
while size < MAX_META_OVERALL_SIZE - 4 - MAX_META_VALUE_LENGTH:
|
||||
size += 4 + MAX_META_VALUE_LENGTH
|
||||
headers['X-Account-Meta-%04d' % x] = header_value
|
||||
x += 1
|
||||
if MAX_META_OVERALL_SIZE - size > 1:
|
||||
headers['X-Account-Meta-k'] = \
|
||||
'v' * (MAX_META_OVERALL_SIZE - size - 1)
|
||||
resp = retry(post, headers)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 204)
|
||||
headers['X-Account-Meta-k'] = \
|
||||
'v' * (MAX_META_OVERALL_SIZE - size)
|
||||
resp = retry(post, headers)
|
||||
resp.read()
|
||||
self.assertEqual(resp.status, 400)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
File diff suppressed because it is too large
Load Diff
@ -1,32 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the account-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = false
|
||||
bind_port = 6012
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:gluster_swift#account
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the account server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
@ -1,35 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the container-server workers
|
||||
# start, you can *consider* setting this value to "false" to reduce the
|
||||
# per-request overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = false
|
||||
bind_port = 6011
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:gluster_swift#container
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the container server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
||||
|
||||
#enable object versioning for functional test
|
||||
allow_versions = on
|
@ -1,19 +0,0 @@
|
||||
[DEFAULT]
|
||||
#
|
||||
# IP address of a node in the GlusterFS server cluster hosting the
|
||||
# volumes to be served via Swift API.
|
||||
mount_ip = localhost
|
||||
|
||||
# Performance optimization parameter. When turned off, the filesystem will
|
||||
# see a reduced number of stat calls, resulting in substantially faster
|
||||
# response time for GET and HEAD container requests on containers with large
|
||||
# numbers of objects, at the expense of an accurate count of combined bytes
|
||||
# used by all objects in the container. For most installations "off" works
|
||||
# fine.
|
||||
#
|
||||
# *** Keep on for Functional Tests ***
|
||||
accurate_size_in_listing = on
|
||||
|
||||
# *** Keep on for Functional Tests ***
|
||||
container_update_object_count = on
|
||||
account_update_container_count = on
|
@ -1,17 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
[object-expirer]
|
||||
# auto_create_account_prefix = .
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors cache proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
memcache_servers = 127.0.0.1:11211
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
@ -1,48 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the object-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = false
|
||||
bind_port = 6010
|
||||
#
|
||||
# Maximum number of clients one worker can process simultaneously (it will
|
||||
# actually accept N + 1). Setting this to one (1) will only handle one request
|
||||
# at a time, without accepting another request concurrently. By increasing the
|
||||
# number of workers to a much higher value, one can prevent slow file system
|
||||
# operations for one request from starving other requests.
|
||||
max_clients = 1024
|
||||
#
|
||||
# If not doing the above, setting this value initially to match the number of
|
||||
# CPUs is a good starting point for determining the right value.
|
||||
workers = 1
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = object-server
|
||||
|
||||
[app:object-server]
|
||||
use = egg:gluster_swift#object
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# For performance, after ensuring things are running in a stable manner, you
|
||||
# can turn off normal request logging for the object server to reduce the
|
||||
# per-request overhead and unclutter the log files. Warnings and errors will
|
||||
# still be logged.
|
||||
log_requests = off
|
||||
#
|
||||
# Adjust this value to match the stripe width of the underlying storage array
|
||||
# (not the stripe element size). This will provide a reasonable starting point
|
||||
# for tuning this value.
|
||||
disk_chunk_size = 65536
|
||||
#
|
||||
# Adjust this value match whatever is set for the disk_chunk_size initially.
|
||||
# This will provide a reasonable starting point for tuning this value.
|
||||
network_chunk_size = 65556
|
@ -1,81 +0,0 @@
|
||||
[DEFAULT]
|
||||
bind_port = 8080
|
||||
user = root
|
||||
# Consider using 1 worker per CPU
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = catch_errors healthcheck proxy-logging cache tempurl gswauth proxy-logging proxy-server
|
||||
|
||||
[app:proxy-server]
|
||||
use = egg:gluster_swift#proxy
|
||||
log_facility = LOG_LOCAL1
|
||||
log_level = WARN
|
||||
# The API allows for account creation and deletion, but since Gluster/Swift
|
||||
# automounts a Gluster volume for a given account, there is no way to create
|
||||
# or delete an account. So leave this off.
|
||||
allow_account_management = false
|
||||
account_autocreate = true
|
||||
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
|
||||
# of the entire object given that all metadata is stored in the object
|
||||
# extended attributes (no .meta file used after creation) and no container
|
||||
# sync feature to present.
|
||||
object_post_as_copy = false
|
||||
# Only need to recheck the account exists once a day
|
||||
recheck_account_existence = 86400
|
||||
# May want to consider bumping this up if containers are created and destroyed
|
||||
# infrequently.
|
||||
recheck_container_existence = 60
|
||||
# Timeout clients that don't read or write to the proxy server after 5
|
||||
# seconds.
|
||||
client_timeout = 5
|
||||
# Give more time to connect to the object, container or account servers in
|
||||
# cases of high load.
|
||||
conn_timeout = 5
|
||||
# For high load situations, once connected to an object, container or account
|
||||
# server, allow for delays communicating with them.
|
||||
node_timeout = 60
|
||||
# May want to consider bumping up this value to 1 - 4 MB depending on how much
|
||||
# traffic is for multi-megabyte or gigabyte requests; perhaps matching the
|
||||
# stripe width (not stripe element size) of your storage volume is a good
|
||||
# starting point. See below for sizing information.
|
||||
object_chunk_size = 65536
|
||||
# If you do decide to increase the object_chunk_size, then consider lowering
|
||||
# this value to one. Up to "put_queue_length" object_chunk_size'd buffers can
|
||||
# be queued to the object server for processing. Given one proxy server worker
|
||||
# can handle up to 1,024 connections, by default, it will consume 10 * 65,536
|
||||
# * 1,024 bytes of memory in the worse case (default values). Be sure the
|
||||
# amount of memory available on the system can accommodate increased values
|
||||
# for object_chunk_size.
|
||||
put_queue_depth = 10
|
||||
|
||||
[filter:catch_errors]
|
||||
use = egg:swift#catch_errors
|
||||
|
||||
[filter:proxy-logging]
|
||||
use = egg:swift#proxy_logging
|
||||
|
||||
[filter:healthcheck]
|
||||
use = egg:swift#healthcheck
|
||||
|
||||
[filter:tempauth]
|
||||
use = egg:swift#tempauth
|
||||
user_admin_admin = admin .admin .reseller_admin
|
||||
user_test_tester = testing .admin
|
||||
user_test2_tester2 = testing2 .admin
|
||||
user_test_tester3 = testing3
|
||||
|
||||
[filter:gswauth]
|
||||
use = egg:gluster_swift#gswauth
|
||||
set log_name = gswauth
|
||||
super_admin_key = gswauthkey
|
||||
metadata_volume = gsmetadata
|
||||
|
||||
[filter:cache]
|
||||
use = egg:swift#memcache
|
||||
# Update this line to contain a comma separated list of memcache servers
|
||||
# shared by all nodes running the proxy-server service.
|
||||
memcache_servers = localhost:11211
|
||||
|
||||
[filter:tempurl]
|
||||
use = egg:swift#tempurl
|
@ -1,85 +0,0 @@
|
||||
[DEFAULT]
|
||||
|
||||
|
||||
[swift-hash]
|
||||
# random unique string that can never change (DO NOT LOSE)
|
||||
swift_hash_path_suffix = gluster
|
||||
|
||||
|
||||
# The swift-constraints section sets the basic constraints on data
|
||||
# saved in the swift cluster.
|
||||
|
||||
[swift-constraints]
|
||||
|
||||
# max_file_size is the largest "normal" object that can be saved in
|
||||
# the cluster. This is also the limit on the size of each segment of
|
||||
# a "large" object when using the large object manifest support.
|
||||
# This value is set in bytes. Setting it to lower than 1MiB will cause
|
||||
# some tests to fail.
|
||||
# Default is 1 TiB = 2**30*1024
|
||||
max_file_size = 1099511627776
|
||||
|
||||
|
||||
# max_meta_name_length is the max number of bytes in the utf8 encoding
|
||||
# of the name portion of a metadata header.
|
||||
|
||||
#max_meta_name_length = 128
|
||||
|
||||
|
||||
# max_meta_value_length is the max number of bytes in the utf8 encoding
|
||||
# of a metadata value
|
||||
|
||||
#max_meta_value_length = 256
|
||||
|
||||
|
||||
# max_meta_count is the max number of metadata keys that can be stored
|
||||
# on a single account, container, or object
|
||||
|
||||
#max_meta_count = 90
|
||||
|
||||
|
||||
# max_meta_overall_size is the max number of bytes in the utf8 encoding
|
||||
# of the metadata (keys + values)
|
||||
|
||||
#max_meta_overall_size = 4096
|
||||
|
||||
|
||||
# max_object_name_length is the max number of bytes in the utf8 encoding of an
|
||||
# object name: Gluster FS can handle much longer file names, but the length
|
||||
# between the slashes of the URL is handled below. Remember that most web
|
||||
# clients can't handle anything greater than 2048, and those that do are
|
||||
# rather clumsy.
|
||||
|
||||
max_object_name_length = 2048
|
||||
|
||||
# max_object_name_component_length (GlusterFS) is the max number of bytes in
|
||||
# the utf8 encoding of an object name component (the part between the
|
||||
# slashes); this is a limit imposed by the underlying file system (for XFS it
|
||||
# is 255 bytes).
|
||||
|
||||
max_object_name_component_length = 255
|
||||
|
||||
# container_listing_limit is the default (and max) number of items
|
||||
# returned for a container listing request
|
||||
|
||||
#container_listing_limit = 10000
|
||||
|
||||
|
||||
# account_listing_limit is the default (and max) number of items returned
|
||||
# for an account listing request
|
||||
|
||||
#account_listing_limit = 10000
|
||||
|
||||
|
||||
# max_account_name_length is the max number of bytes in the utf8 encoding of
|
||||
# an account name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||
# size as max_object_name_component_length above.
|
||||
|
||||
max_account_name_length = 255
|
||||
|
||||
|
||||
# max_container_name_length is the max number of bytes in the utf8 encoding
|
||||
# of a container name: Gluster FS Filename limit (XFS limit?), must be the same
|
||||
# size as max_object_name_component_length above.
|
||||
|
||||
max_container_name_length = 255
|
@ -1,58 +0,0 @@
|
||||
[func_test]
|
||||
# sample config
|
||||
auth_host = 127.0.0.1
|
||||
auth_port = 8080
|
||||
auth_ssl = no
|
||||
auth_prefix = /auth/
|
||||
## sample config for Swift with Keystone
|
||||
#auth_version = 2
|
||||
#auth_host = localhost
|
||||
#auth_port = 5000
|
||||
#auth_ssl = no
|
||||
#auth_prefix = /v2.0/
|
||||
|
||||
# GSWauth internal admin user configuration information
|
||||
admin_key = gswauthkey
|
||||
admin_user = .super_admin
|
||||
|
||||
# Gluster setup information
|
||||
devices = /mnt/gluster-object
|
||||
gsmetadata_volume = gsmetadata
|
||||
|
||||
# Primary functional test account (needs admin access to the account)
|
||||
account = test
|
||||
username = tester
|
||||
password = testing
|
||||
|
||||
# User on a second account (needs admin access to the account)
|
||||
account2 = test2
|
||||
username2 = tester2
|
||||
password2 = testing2
|
||||
|
||||
# User on same account as first, but without admin access
|
||||
username3 = tester3
|
||||
password3 = testing3
|
||||
|
||||
# Default constraints if not defined here, the test runner will try
|
||||
# to set them from /etc/swift/swift.conf. If that file isn't found,
|
||||
# the test runner will skip tests that depend on these values.
|
||||
# Note that the cluster must have "sane" values for the test suite to pass.
|
||||
#max_file_size = 5368709122
|
||||
#max_meta_name_length = 128
|
||||
#max_meta_value_length = 256
|
||||
#max_meta_count = 90
|
||||
#max_meta_overall_size = 4096
|
||||
#max_object_name_length = 1024
|
||||
#container_listing_limit = 10000
|
||||
#account_listing_limit = 10000
|
||||
#max_account_name_length = 256
|
||||
#max_container_name_length = 256
|
||||
normalized_urls = True
|
||||
|
||||
collate = C
|
||||
|
||||
[unit_test]
|
||||
fake_syslog = False
|
||||
|
||||
[probe_test]
|
||||
# check_server_timeout = 30
|
@ -1,252 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2010-2012 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
import unittest
|
||||
from nose import SkipTest
|
||||
from swift.common.bufferedhttp import http_connect_raw as http_connect
|
||||
from test import get_config
|
||||
|
||||
config = get_config('func_test')
|
||||
|
||||
class TestGSWauth(unittest.TestCase):
|
||||
|
||||
def _get_admin_headers(self):
|
||||
return {'X-Auth-Admin-User': config['admin_user'],
|
||||
'X-Auth-Admin-Key': config['admin_key']}
|
||||
|
||||
def _check_test_account_is_not_registered(self):
|
||||
# check account exists
|
||||
path = '%sv2/%s' % (config['auth_prefix'], config['account'])
|
||||
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'Content-Length': '0'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'GET',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 404)
|
||||
|
||||
def _register_test_account(self):
|
||||
# create account in swauth (not a swift account)
|
||||
# This current version only supports one account per volume
|
||||
# and the account name is the same as the volume name
|
||||
# still an account must be created with swauth to map
|
||||
# swauth accounts with swift accounts
|
||||
path = '%sv2/%s' % (config['auth_prefix'], config['account'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'Content-Length': '0'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 201)
|
||||
|
||||
def _deregister_test_account(self):
|
||||
# delete account in swauth (not a swift account)
|
||||
# @see _register_test_account
|
||||
path = '%sv2/%s' % (config['auth_prefix'], config['account'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'Content-Length': '0'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'DELETE', path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 204)
|
||||
|
||||
def test_register_account(self):
|
||||
# check and register account
|
||||
self._check_test_account_is_not_registered()
|
||||
self._register_test_account()
|
||||
|
||||
try:
|
||||
# list account
|
||||
path = '%sv2/%s' % (config['auth_prefix'], config['account'])
|
||||
headers = self._get_admin_headers()
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'GET', path, headers)
|
||||
resp = conn.getresponse()
|
||||
body = resp.read()
|
||||
info = json.loads(body)
|
||||
self.assertEqual(info['account_id'], 'AUTH_test')
|
||||
self.assertTrue(resp.status == 200)
|
||||
|
||||
finally:
|
||||
# de-register account
|
||||
self._deregister_test_account()
|
||||
|
||||
def test_add_user(self):
|
||||
# check and register account
|
||||
self._check_test_account_is_not_registered()
|
||||
self._register_test_account()
|
||||
|
||||
# create user
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
config['username'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'X-Auth-User-Key': config['password'],
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 201)
|
||||
|
||||
try:
|
||||
# list user
|
||||
headers = self._get_admin_headers()
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'GET', path, headers)
|
||||
resp = conn.getresponse()
|
||||
body = resp.read()
|
||||
self.assertEqual(body, '{"groups": [{"name": "test:tester"}, {"name":'
|
||||
' "test"}, {"name": ".admin"}], "auth": "plaintext:testing"}')
|
||||
self.assertTrue(resp.status == 200)
|
||||
|
||||
finally:
|
||||
try:
|
||||
# delete user
|
||||
headers = self._get_admin_headers()
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'DELETE', path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 204)
|
||||
|
||||
finally:
|
||||
# de-register account
|
||||
self._deregister_test_account()
|
||||
|
||||
def test_register_invalid_account(self):
|
||||
# invalid account
|
||||
path = '%sv2/%s' % (config['auth_prefix'], '.test')
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'Content-Length': '0'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 400)
|
||||
|
||||
def test_add_invalid_user(self):
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
'.invaliduser')
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'X-Auth-User-Key': config['password'],
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 400)
|
||||
|
||||
def test_register_account_without_admin_rights(self):
|
||||
path = '%sv2/%s' % (config['auth_prefix'], config['account'])
|
||||
headers = {'X-Auth-Admin-User': config['admin_user']}
|
||||
headers.update({'Content-Length': '0'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 401)
|
||||
|
||||
def test_change_user_password(self):
|
||||
# check and register account
|
||||
self._check_test_account_is_not_registered()
|
||||
self._register_test_account()
|
||||
|
||||
try:
|
||||
# create user
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
config['username'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'X-Auth-User-Key': config['password'],
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
print "resp creating user %s" % resp.status
|
||||
self.assertTrue(resp.status == 201)
|
||||
|
||||
# change password
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
config['username'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'X-Auth-User-Key': 'newpassword',
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
print "resp changing password %s" % resp.status
|
||||
self.assertTrue(resp.status == 201)
|
||||
finally:
|
||||
try:
|
||||
# delete user
|
||||
headers = self._get_admin_headers()
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'DELETE', path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 204)
|
||||
|
||||
finally:
|
||||
# de-register account
|
||||
self._deregister_test_account()
|
||||
|
||||
def test_change_user_password_without_admin_rights(self):
|
||||
# check and register account
|
||||
self._check_test_account_is_not_registered()
|
||||
self._register_test_account()
|
||||
|
||||
try:
|
||||
# create user
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
config['username'])
|
||||
headers = self._get_admin_headers()
|
||||
headers.update({'X-Auth-User-Key': config['password'],
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
print "resp creating user %s" % resp.status
|
||||
self.assertTrue(resp.status == 201)
|
||||
|
||||
# attempt to change password
|
||||
path = '%sv2/%s/%s' % (config['auth_prefix'], config['account'],
|
||||
config['username'])
|
||||
headers = {'X-Auth-Admin-User':
|
||||
config['account'] + ':' + config['username'],
|
||||
'X-Auth-Admin-Key': config['password']}
|
||||
headers.update({'X-Auth-User-Key': 'newpassword',
|
||||
'Content-Length': '0',
|
||||
'X-Auth-User-Admin': 'true'})
|
||||
conn = http_connect(config['auth_host'], config['auth_port'], 'PUT',
|
||||
path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 201)
|
||||
|
||||
finally:
|
||||
try:
|
||||
# delete user
|
||||
headers = self._get_admin_headers()
|
||||
conn = http_connect(config['auth_host'], config['auth_port'],
|
||||
'DELETE', path, headers)
|
||||
resp = conn.getresponse()
|
||||
self.assertTrue(resp.status == 204)
|
||||
|
||||
finally:
|
||||
# de-register account
|
||||
self._deregister_test_account()
|
@ -1,826 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2010-2012 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import unittest
|
||||
from nose import SkipTest
|
||||
import commands
|
||||
import os
|
||||
from test import get_config
|
||||
|
||||
config = get_config('func_test')
|
||||
|
||||
class Utils:
|
||||
|
||||
@classmethod
|
||||
def swauthPrep(self,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
return commands.getstatusoutput('gswauth-prep -A %s -U %s -K %s' % (authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def addAccount(self,account_name,suffix=None, authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if suffix is not None:
|
||||
return commands.getstatusoutput('gswauth-add-account %s -s %s -A %s -U %s -K %s' % (account_name, suffix, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-add-account %s -A %s -U %s -K %s' % (account_name, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def deleteAccount(self,account_name,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
return commands.getstatusoutput('gswauth-delete-account %s -A %s -U %s -K %s' % (account_name, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def listAccounts(self,listtype=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if listtype is not None:
|
||||
return commands.getstatusoutput('gswauth-list %s -A %s -U %s -K %s' % (listtype, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-list -A %s -U %s -K %s' % (authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def listUsers(self,account_name,listtype=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if listtype is not None:
|
||||
return commands.getstatusoutput('gswauth-list %s %s -A %s -U %s -K %s'% (account_name, listtype, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-list %s -A %s -U %s -K %s'% (account_name, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def addAdminUser(self,account_name,username,password,suffix=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if suffix is not None:
|
||||
return commands.getstatusoutput('gswauth-add-user -a %s %s %s -s %s -A %s -U %s -K %s'% (account_name, username, password, suffix, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-add-user -a %s %s %s -A %s -U %s -K %s'% (account_name, username, password, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def addUser(self,account_name,username,password,suffix=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if suffix is not None:
|
||||
return commands.getstatusoutput('gswauth-add-user %s %s %s -s %s -A %s -U %s -K %s'% (account_name, username, password, suffix, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-add-user %s %s %s -A %s -U %s -K %s'% (account_name, username, password, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def addResellerAdminUser(self,account_name,username,password,suffix=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if suffix is not None:
|
||||
return commands.getstatusoutput('gswauth-add-user -r %s %s %s -s %s -A %s -U %s -K %s'% (account_name, username, password, suffix, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-add-user -r %s %s %s -A %s -U %s -K %s'% (account_name, username, password, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def deleteUser(self,account_name,username,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
return commands.getstatusoutput('gswauth-delete-user %s %s -A %s -U %s -K %s'% (account_name, username, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def listUserGroups(self,account_name,username,listtype=None,authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
if listtype is not None:
|
||||
return commands.getstatusoutput('gswauth-list %s %s %s -A %s -U %s -K %s'% (account_name, username, listtype, authurl, user, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-list %s %s %s -A %s -U %s -K %s'% (account_name, username, listtype, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def cleanToken(self,option=None,value=None,authurl='http://127.0.0.1:8080/auth/', key=config['admin_key']):
|
||||
if option is None and value is None:
|
||||
return commands.getstatusoutput('gswauth-cleanup-tokens -A %s -K %s'% (authurl, key))
|
||||
elif option is not None and value is None:
|
||||
return commands.getstatusoutput('gswauth-cleanup-tokens --%s -A %s -K %s'% (option, authurl, key))
|
||||
else:
|
||||
return commands.getstatusoutput('gswauth-cleanup-tokens --%s %s -A %s -K %s'% (option, value, authurl, key))
|
||||
|
||||
@classmethod
|
||||
def setAccountService(self, account, service, name, value, authurl='http://127.0.0.1:8080/auth/',user=config['admin_user'],key=config['admin_key']):
|
||||
return commands.getstatusoutput('gswauth-set-account-service %s %s %s %s -A %s -U %s -K %s'% (account, service, name, value, authurl, user, key))
|
||||
|
||||
@classmethod
|
||||
def cleanAll(self):
|
||||
commands.getstatusoutput('sudo rm -rf '+os.path.join(config['devices'], config['gsmetadata_volume'], '*'))
|
||||
return commands.getstatusoutput('sudo rm -rf '+os.path.join(config['devices'], config['gsmetadata_volume'], '.*'))
|
||||
|
||||
|
||||
class TestSwauthPrep(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
def tearDown(self):
|
||||
Utils.cleanAll()
|
||||
|
||||
def testSwauthPrep(self):
|
||||
(status,output)=Utils.swauthPrep()
|
||||
self.assertEqual(status, 0, 'swauth prep failed with valid credentials'+output)
|
||||
|
||||
(status,output)=Utils.swauthPrep(key='')
|
||||
self.assertEqual('Usage' in output,True, 'Invalid swauth-prep request accepted(no key provided): '+output)
|
||||
|
||||
(status,output)=Utils.swauthPrep(key='notavalidkey')
|
||||
self.assertNotEqual(status, 0, 'Invalid swauth-prep request accepted(wrong key provided):'+output)
|
||||
self.assertEqual('gswauth preparation failed: 401 Unauthorized: Invalid user/key provided' \
|
||||
in output,True, 'Invalid swauth-prep request accepted: '+output)
|
||||
|
||||
(status,output)=Utils.swauthPrep(authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid swauth-prep request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.swauthPrep(authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid swauth-prep request accepted(wrong admin-url provided): %s' % output)
|
||||
#TODO:More cases for invalid url and admin user
|
||||
|
||||
def testAddAccountWithoutSwauthPrep(self):
|
||||
#Try to add account without running gswauth-prep
|
||||
Utils.cleanAll()
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertNotEqual(status, 0, 'account added without running gswauth-prep '+output)
|
||||
self.assertEqual('Account creation failed: 500 Server Error' \
|
||||
in output,True, 'account added without running gswauth-prep '+output)
|
||||
|
||||
|
||||
class TestAccount(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
(status,output)=Utils.swauthPrep()
|
||||
self.assertEqual(status, 0, 'setup swauth-prep failed'+output)
|
||||
|
||||
def tearDown(self):
|
||||
Utils.cleanAll()
|
||||
|
||||
def setTestAccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test','admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test','tester','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
|
||||
def setTest2AccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test2')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test2','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test2','admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test2','tester','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
|
||||
def testAddAccount(self):
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'account creation failed'+output)
|
||||
|
||||
(status,output)=Utils.addAccount('accountvolumedoesnotexist')
|
||||
self.assertEqual(status, 0, 'account creation failed std err was: '+output)
|
||||
|
||||
(status,output)=Utils.addAccount('testnokey',key='')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid account creation request accepted : '+output)
|
||||
|
||||
(status,output)=Utils.addAccount('testinvalidkey',key='invalidkey')
|
||||
self.assertEqual('Account creation failed: 401 Unauthorized: Invalid user/key provided' \
|
||||
in output,True, 'Invalid account creation request accepted: '+output)
|
||||
|
||||
(status,output)=Utils.addAccount('test2', authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid account creation request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.addAccount('test2', authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid account creation request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
def testAddAccountNonSuperAdminUsers(self):
|
||||
#set test account with all types of user
|
||||
self.setTestAccUserEnv()
|
||||
#try to add another account with all type of users
|
||||
(status,output)=Utils.addAccount('accbyreselleradmin',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'account creation failed with re_admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addAccount('accbyadmin',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account creation success with admin user: '+output)
|
||||
self.assertEqual('Account creation failed: 403 Forbidden: Insufficient privileges' in output,True, 'account creation success with admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addAccount('accbyuser',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account creation success with regular user: '+output)
|
||||
self.assertEqual('Account creation failed: 403 Forbidden: Insufficient privileges' \
|
||||
in output,True, 'account creation success with regular user: '+output)
|
||||
|
||||
def testDeleteAccount(self):
|
||||
#add test account with no users
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'account creation failed for test account'+output)
|
||||
|
||||
#set test2 account with all type of users
|
||||
self.setTest2AccUserEnv()
|
||||
|
||||
#valid request to delete an account with no users
|
||||
(status,output)=Utils.deleteAccount('test')
|
||||
self.assertEqual(status, 0, 'account deletion failed for test account'+output)
|
||||
|
||||
#Invalid request to delete an account with users
|
||||
(status,output)=Utils.deleteAccount('test2')
|
||||
self.assertNotEqual(status, 0, 'account deletion succeeded for acc with active users'+output)
|
||||
self.assertEqual('Delete account failed: 409 Conflict: Account test2 contains active users. Delete all users first.' \
|
||||
in output,True, 'account deletion failed for test account'+output)
|
||||
|
||||
#delete all users in above account and then try again
|
||||
(status,output) = Utils.deleteUser('test2','tester')
|
||||
self.assertEqual(status, 0, 'setTestDeleteAccountEnv'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test2','admin')
|
||||
self.assertEqual(status, 0, 'setTestDeleteAccountEnv'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test2','re_admin')
|
||||
self.assertEqual(status, 0, 'setTestDeleteAccountEnv'+output)
|
||||
|
||||
(status,output)=Utils.deleteAccount('test2')
|
||||
self.assertEqual(status, 0, 'account deletion failed for test2 account'+output)
|
||||
|
||||
(status,output)=Utils.deleteAccount('accountdoesnotexist')
|
||||
self.assertNotEqual(status, 0, 'account deletion failed for accountdoesnotexist'+output)
|
||||
self.assertEqual('Delete account failed: 404 Not Found: Account accountdoesnotexist does not exist' in output,True, 'account deletion failed for test account'+output)
|
||||
|
||||
(status,output)=Utils.deleteAccount('test3', authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid deletion request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.deleteAccount('test3', authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid deletion request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
def testDeleteAccountNonSuperAdminUsers(self):
|
||||
#set test account with all types of user
|
||||
self.setTestAccUserEnv()
|
||||
|
||||
#try to add another account with all type of users
|
||||
Utils.addAccount('accbysuperadminforreadmin')
|
||||
(status,output)=Utils.deleteAccount('accbysuperadminforreadmin',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'account deletion failed with re_admin user: '+output)
|
||||
|
||||
Utils.addAccount('accbysuperadminforadmin')
|
||||
(status,output)=Utils.deleteAccount('accbysuperadminforadmin',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account deletion success with admin user: '+output)
|
||||
self.assertEqual('Delete account failed: 403 Forbidden: Insufficient privileges' \
|
||||
in output,True, 'account deletion success with admin user: '+output)
|
||||
|
||||
Utils.addAccount('accbysuperadminforuser')
|
||||
(status,output)=Utils.deleteAccount('accbysuperadminforuser',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account creation success with regular user: '+output)
|
||||
self.assertEqual('Delete account failed: 403 Forbidden: Insufficient privileges' \
|
||||
in output,True, 'account deletion success with regular user: '+output)
|
||||
|
||||
def testListAcounts(self):
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'account creation failed'+output)
|
||||
|
||||
(status,output)=Utils.listAccounts()
|
||||
self.assertEqual(output,
|
||||
'+----------+\n| Accounts |\n+----------+\n| test |\n+----------+',
|
||||
'swauth-list failed:\n%s' % output)
|
||||
|
||||
(status,output)=Utils.listAccounts(authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid list request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.listAccounts(authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid list request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.listAccounts('-j')
|
||||
self.assertEqual(output,
|
||||
'{"accounts": [{"name": "test"}]}',
|
||||
'swauth-list failed for json option:\n%s' % output)
|
||||
|
||||
(status,output)=Utils.listAccounts('-p')
|
||||
self.assertEqual(output,
|
||||
'test',
|
||||
'swauth-list failed for plain-text option:\n%s' % output)
|
||||
|
||||
def testListAcountsNonSuperAdminUsers(self):
|
||||
#set test acc with all type of users
|
||||
self.setTestAccUserEnv()
|
||||
|
||||
(status,output)=Utils.listAccounts(user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'account listing failed with re_admin user: '+output)
|
||||
self.assertEqual(output,
|
||||
'+----------+\n| Accounts |\n+----------+\n| test |\n+----------+',
|
||||
'swauth-list failed:\n%s' % output)
|
||||
|
||||
(status,output)=Utils.listAccounts(user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account listing success with admin user: '+output)
|
||||
self.assertEqual('List failed: 403 Forbidden: Insufficient privileges' \
|
||||
in output,True, 'account listing success with admin user: '+output)
|
||||
|
||||
(status,output)=Utils.listAccounts(user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'account listing success with regular user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'account listing success with regular user: '+output)
|
||||
|
||||
class TestUser(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
(status,output)=Utils.swauthPrep()
|
||||
self.assertEqual(status, 0, 'setup swauth-prep failed'+output)
|
||||
|
||||
def tearDown(self):
|
||||
Utils.cleanAll()
|
||||
|
||||
def setTestAccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test','admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test','tester','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
|
||||
def setTest2AccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test2')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test2','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test2','admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test2','tester','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
|
||||
def testaddUser(self):
|
||||
#add test acc
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'setTestaddAdminUserEnv (add test account) failed'+output)
|
||||
|
||||
(status,output) = Utils.addAdminUser('test','testadminuser','testadminuser')
|
||||
self.assertEqual(status, 0, 'user addition failed'+output)
|
||||
|
||||
(status,output) = Utils.addUser('test','testuser','testuser')
|
||||
self.assertEqual(status, 0, 'user addition failed'+output)
|
||||
|
||||
(status,output) = Utils.addResellerAdminUser('test','testreselleradminuser','testreselleradminuser')
|
||||
self.assertEqual(status, 0, 'user addition failed'+output)
|
||||
|
||||
(status,output) = Utils.addAdminUser('test', '', '')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid user creation request accepted: '+output)
|
||||
|
||||
(status,output) = Utils.addAdminUser('test', 'testcli', '')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid user creation request accepted'+output)
|
||||
|
||||
(status,output) = Utils.addAdminUser('test', '', 'testcli')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid user creation request accepted'+output)
|
||||
|
||||
(status,output) = Utils.addAdminUser('accountdoesnotexist', 'testcli', 'testcli')
|
||||
self.assertEqual(status, 0, 'User creation request failed, where accountdoesnotexist: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test', 'admin2', 'adminpwd', authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid add user request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test', 'admin2', 'adminpwd', authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid add user request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
def testAddUserNonSuperAdminUsers (self):
|
||||
#setup test,testr accounts with all user types
|
||||
self.setTestAccUserEnv()
|
||||
self.setTest2AccUserEnv()
|
||||
|
||||
#try to add another reseller_admin users with all type of users
|
||||
#decision has been made to return 401 in place of 403 due to
|
||||
#performance related reasons, in scenarios tested below
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test', 're_adminwithreadmin', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with re_admin user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with re_admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test', 're_adminwithadmin', 'testing', user='test:admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with admin user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test', 're_adminwithuser', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with regular user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with regular user: '+output)
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test2', 're_adminwithreadmin', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with re_admin user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with re_admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test2', 're_adminwithadmin', 'testing', user='test:admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with admin user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addResellerAdminUser('test2', 're_adminwithuser', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin creation succeeded with regular user: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin creation succeeded with regular user: '+output)
|
||||
|
||||
#update the password with own credential
|
||||
(status,output)=Utils.addResellerAdminUser('test', 're_adminwithreadmin', 'testingupdated', user='test:re_admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin update password succeeded with own credentials: '+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 're_admin update password succeeded with own credentials: '+output)
|
||||
|
||||
#try to add another admin users with all type of users
|
||||
(status,output)=Utils.addAdminUser('test', 'adminwithreadmin', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertEqual(status, 0, 'admin creation failed with re_admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test', 'adminwithreadmin', 'testing', user='test:admin', key='testing')
|
||||
self.assertEqual(status, 0, 'admin creation failed with admin user: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test', 'adminwithuser', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin creation succeeded with regular user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin creation succeeded with regular user: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test2', 'adminwithreadminofotheraccount', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertEqual(status, 0, 'admin creation failed with re_admin user of other account: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test2', 'adminwithadminofotheraccount', 'testing', user='test:admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin creation succeeded with admin user of other acc: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin creation succeeded with admin user of other acc: '+output)
|
||||
|
||||
(status,output)=Utils.addAdminUser('test2', 'adminwithuserfotheraccount', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin creation succeeded with user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin creation succeeded with user of other account: '+output)
|
||||
|
||||
#update password of own admin account
|
||||
(status,output)=Utils.addAdminUser('test', 'admin', 'testingupdated', user='test:admin', key='testing')
|
||||
self.assertEqual(status, 0, 'admin password update failed with own credentials: '+output)
|
||||
#undo above password change
|
||||
(status,output)=Utils.addAdminUser('test', 'admin', 'testing', user='test:admin', key='testingupdated')
|
||||
self.assertEqual(status, 0, 'admin password update failed with own credentials: '+output)
|
||||
|
||||
#try to add another regular users with all type of users
|
||||
(status,output)=Utils.addUser('test', 'adduserwithre_admin', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertEqual(status, 0, 'regular user creation with re_admin credentials failed: '+output)
|
||||
|
||||
(status,output)=Utils.addUser('test', 'adduserwithadmin', 'testing', user='test:admin', key='testing')
|
||||
self.assertEqual(status, 0, 'regular user creation with admin credentials failed: '+output)
|
||||
|
||||
(status,output)=Utils.addUser('test', 'adduserwithuser', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 'regular user creation with regular user credentials succeded: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'regular user creation with regular user credentials succeded: '+output)
|
||||
|
||||
(status,output)=Utils.addUser('test2', 'adduserwithreadminofotheraccount', 'testing', user='test:re_admin', key='testing')
|
||||
self.assertEqual(status, 0, 'user creation failed with re_admin user of other account: '+output)
|
||||
|
||||
(status,output)=Utils.addUser('test2', 'adduserwithadminofotheraccount', 'testing', user='test:admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 'user creation succeeded with admin user of other acc: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'user creation succeeded with admin user of other acc: '+output)
|
||||
|
||||
(status,output)=Utils.addUser('test2', 'adminwithuserfotheraccount', 'testing', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 'user creation succeeded with user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'user creation succeeded with user of other account: '+output)
|
||||
|
||||
def testDeleteUser(self):
|
||||
#set test acc
|
||||
self.setTestAccUserEnv()
|
||||
|
||||
(status,output) = Utils.deleteUser('test','admin')
|
||||
self.assertEqual(status, 0, 'valid user deletion failed:'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test','tester')
|
||||
self.assertEqual(status, 0, 'valid user deletion failed:'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test','re_admin')
|
||||
self.assertEqual(status, 0, 'valid user deletion failed:'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test', '')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid user deletion request accepted : '+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('','testcli')
|
||||
self.assertEqual('Usage:' in output, True, 'Invalid user deletion request accepted : '+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('test', 'userdoesnotexist')
|
||||
self.assertNotEqual(status, 0, 'Invalid user deletion request accepted,userdoesnotexist:'+output)
|
||||
|
||||
(status,output) = Utils.deleteUser('accountisnothere', 'testcli')
|
||||
self.assertNotEqual(status, 0, 'Invalid user deletion request accepted, accountdoesnotexist:'+output)
|
||||
#TODO:more testcases?
|
||||
(status,output)=Utils.deleteUser('test', 'admin2', authurl='http://127.0.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid delete user request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
(status,output)=Utils.deleteUser('test', 'admin2', authurl='http://127.0.1:80/auth/')
|
||||
self.assertEqual('Check that the admin_url is valid' in output, True,
|
||||
'Invalid delete user request accepted(wrong admin-url provided): %s' % output)
|
||||
|
||||
def testDeleteUserNonSuperAdminUsers(self):
|
||||
#set test, test2 acc with all type of users
|
||||
self.setTestAccUserEnv()
|
||||
self.setTest2AccUserEnv()
|
||||
#try to delete reseller_admin users with all type of users
|
||||
Utils.addResellerAdminUser('test', 're_admintobedeletedbyotherusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 're_admintobedeletedbyotherusers1',user='test:re_admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with re_admin user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with re_admin user: '+output)
|
||||
|
||||
Utils.addResellerAdminUser('test', 're_admintobedeletedbyotherusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 're_admintobedeletedbyotherusers2',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with admin user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with admin user: '+output)
|
||||
|
||||
Utils.addResellerAdminUser('test', 're_admintobedeletedbyotherusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 're_admintobedeletedbyotherusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with regular user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with user: '+output)
|
||||
|
||||
Utils.addResellerAdminUser('test2', 're_admintobedeletedbyotheraccountusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 're_admintobedeletedbyotheraccountusers1',user='test:re_admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with re_admin user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with re_admin user of other account: '+output)
|
||||
|
||||
Utils.addResellerAdminUser('test2', 're_admintobedeletedbyotheraccountusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 're_admintobedeletedbyotheraccountusers2',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with admin user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with admin user of other account: '+output)
|
||||
|
||||
Utils.addResellerAdminUser('test2', 're_admintobedeletedbyotheraccountusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 're_admintobedeletedbyotheraccountusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 're_admin deletion succeeded with regular user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 're_admin deletion succeeded with user of other account: '+output)
|
||||
|
||||
#delete/de-active own re_admin account
|
||||
Utils.addAdminUser('test', 're_admintobedeletedbyitself', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 're_admintobedeletedbyitself',user='test:re_admintobedeletedbyitself',key='testing')
|
||||
self.assertEqual(status, 0, 're_admin deletion failed with own credentials : '+output)
|
||||
|
||||
#try to delete admin users with all type of users
|
||||
Utils.addAdminUser('test', 'admintobedeletedbyotherusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'admintobedeletedbyotherusers1',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'admin deletion failed with re_admin user: '+output)
|
||||
|
||||
Utils.addAdminUser('test', 'admintobedeletedbyotherusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'admintobedeletedbyotherusers2',user='test:admin',key='testing')
|
||||
self.assertEqual(status, 0, 'admin deletion failed with admin user: '+output)
|
||||
|
||||
Utils.addAdminUser('test', 'admintobedeletedbyotherusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'admintobedeletedbyotherusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin deletion succeeded with regular user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin deletion succeeded with regular user: '+output)
|
||||
|
||||
Utils.addAdminUser('test2', 'admintobedeletedbyotheraccountusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'admintobedeletedbyotheraccountusers1',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'admin deletion failed with re_admin user of other account: '+output)
|
||||
|
||||
Utils.addAdminUser('test2', 'admintobedeletedbyotheraccountusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'admintobedeletedbyotheraccountusers2',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin deletion succeeded with admin user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin deletion succeeded with admin user of other account: '+output)
|
||||
|
||||
Utils.addAdminUser('test2', 'admintobedeletedbyotheraccountusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'admintobedeletedbyotheraccountusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'admin deletion succeeded with regular user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'admin deletion succeeded with regular user of other account: '+output)
|
||||
|
||||
#delete/de-active own admin account
|
||||
Utils.addAdminUser('test', 'admintobedeletedbyitself', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'admintobedeletedbyitself',user='test:admintobedeletedbyitself',key='testing')
|
||||
self.assertEqual(status, 0, 'admin deletion failed with own credentials : '+output)
|
||||
|
||||
#try to delete another regular users with all type of users
|
||||
Utils.addUser('test', 'usertobedeletedbyotherusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'usertobedeletedbyotherusers1',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'user deletion failed with re_admin user: '+output)
|
||||
|
||||
Utils.addUser('test', 'usertobedeletedbyotherusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'usertobedeletedbyotherusers2',user='test:admin',key='testing')
|
||||
self.assertEqual(status, 0, 'user deletion failed with admin user: '+output)
|
||||
|
||||
Utils.addUser('test', 'usertobedeletedbyotherusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'usertobedeletedbyotherusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'user deletion succeeded with regular user: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'user deletion succeeded with regular user: '+output)
|
||||
|
||||
Utils.addUser('test2', 'usertobedeletedbyotheraccountusers1', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'usertobedeletedbyotheraccountusers1',user='test:re_admin',key='testing')
|
||||
self.assertEqual(status, 0, 'user deletion failed with re_admin user of other account: '+output)
|
||||
|
||||
Utils.addUser('test2', 'usertobedeletedbyotheraccountusers2', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'usertobedeletedbyotheraccountusers2',user='test:admin',key='testing')
|
||||
self.assertNotEqual(status, 0, 'user deletion succeeded with admin user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'user deletion succeeded with admin user of other account: '+output)
|
||||
|
||||
Utils.addUser('test2', 'usertobedeletedbyotheraccountusers3', 'testing')
|
||||
(status,output) = Utils.deleteUser('test2', 'usertobedeletedbyotheraccountusers3',user='test:tester',key='testing')
|
||||
self.assertNotEqual(status, 0, 'user deletion succeeded with regular user of other account: '+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'user deletion succeeded with regular user of other account: '+output)
|
||||
|
||||
#delete/de-active own admin account
|
||||
Utils.addAdminUser('test', 'usertobedeletedbyitself', 'testing')
|
||||
(status,output) = Utils.deleteUser('test', 'usertobedeletedbyitself',user='test:usertobedeletedbyitself',key='testing')
|
||||
self.assertEqual(status, 0, 'user deletion failed with own credentials : '+output)
|
||||
|
||||
def testChangeKey(self):
|
||||
# Create account and users
|
||||
(status, output) = Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'Account creation failed: ' + output)
|
||||
|
||||
(status, output) = Utils.addAdminUser('test', 'admin', 'password')
|
||||
self.assertEqual(status, 0, 'User addition failed: ' + output)
|
||||
|
||||
(status, output) = Utils.addUser('test', 'user', 'password')
|
||||
self.assertEqual(status, 0, 'User addition failed: ' + output)
|
||||
|
||||
(status, output) = Utils.addResellerAdminUser('test', 'radmin', 'password')
|
||||
self.assertEqual(status, 0, 'User addition failed: ' + output)
|
||||
|
||||
# Change acccount admin password/key
|
||||
(status, output) = Utils.addAdminUser('test', 'admin', 'new_password', user='test:admin', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Change regular user password/key
|
||||
(status, output) = Utils.addUser('test', 'user', 'new_password', user='test:user', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Change reseller admin password/key
|
||||
(status, output) = Utils.addResellerAdminUser('test', 'radmin', 'new_password', user='test:radmin', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# To verify that password was changed for real, re-run the above commands, but with the new password
|
||||
# Change acccount admin password/key using the new password
|
||||
(status, output) = Utils.addAdminUser('test', 'admin', 'password', user='test:admin', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Change regular user password/key using the new password
|
||||
(status, output) = Utils.addUser('test', 'user', 'password', user='test:user', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Change reseller admin password/key using the new password
|
||||
(status, output) = Utils.addResellerAdminUser('test', 'radmin', 'password', user='test:radmin', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Make sure that regular user cannot upgrade to admin
|
||||
(status, output) = Utils.addAdminUser('test', 'user', 'password', user='test:user', key='password')
|
||||
self.assertEqual('User creation failed' in output, True, 'Update key failed: ' + output)
|
||||
|
||||
# Make sure that regular user cannot upgrade to reseller_admin
|
||||
(status, output) = Utils.addResellerAdminUser('test', 'user', 'password', user='test:user', key='password')
|
||||
self.assertEqual('User creation failed' in output, True, 'Update key failed: ' + output)
|
||||
|
||||
# Make sure admin cannot update himself to reseller_admin
|
||||
(status, output) = Utils.addResellerAdminUser('test', 'admin', 'password', user='test:admin', key='password')
|
||||
self.assertEqual('User creation failed' in output, True, 'Update key failed: ' + output)
|
||||
|
||||
# Account admin changing regular user password/key
|
||||
(status, output) = Utils.addUser('test', 'user', 'new_password', user='test:admin', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
# Verify by running the command with new password
|
||||
(status, output) = Utils.addUser('test', 'user', 'password', user='test:user', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Reseller admin changing regular user password/key
|
||||
(status, output) = Utils.addUser('test', 'user', 'new_password', user='test:radmin', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
# Verify by running the command with new password
|
||||
(status, output) = Utils.addUser('test', 'user', 'password', user='test:user', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
# Reseller admin changing account admin password/key
|
||||
(status, output) = Utils.addAdminUser('test', 'admin', 'new_password', user='test:radmin', key='password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
# Verify by running the command with new password
|
||||
(status, output) = Utils.addAdminUser('test', 'admin', 'password', user='test:admin', key='new_password')
|
||||
self.assertEqual(status, 0, 'Update key failed: ' + output)
|
||||
|
||||
|
||||
class TestCleanUPToken(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
(status,output)=Utils.swauthPrep()
|
||||
self.assertEqual(status, 0, 'setup swauth-prep failed'+output)
|
||||
|
||||
def tearDown(self):
|
||||
Utils.cleanAll()
|
||||
|
||||
def setTestAccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test','admin','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test','tester','testing')
|
||||
self.assertEqual(status, 0, 'test accUser creation failed env'+output)
|
||||
|
||||
def setTest2AccUserEnv(self):
|
||||
(status,output)=Utils.addAccount('test2')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addResellerAdminUser('test2','re_admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addAdminUser('test2','admin','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
(status,output)=Utils.addUser('test2','tester','testing')
|
||||
self.assertEqual(status, 0, 'test2 accUser creation failed env'+output)
|
||||
|
||||
def testCleanUPToken(self):
|
||||
self.setTestAccUserEnv()
|
||||
self.setTest2AccUserEnv()
|
||||
|
||||
#cleanup various validation
|
||||
(status,output)=Utils.cleanToken(key='')
|
||||
self.assertNotEqual(status, 0, 'clean up success without key'+output)
|
||||
self.assertEqual('Usage:' in output,True, 'clean up success without key: '+output)
|
||||
|
||||
#validate the admin-user option is not working here
|
||||
(status,output)=Utils.cleanToken(option='admin-user', value='.super_admin')
|
||||
self.assertNotEqual(status, 0, 'clean up success with a username'+output)
|
||||
self.assertEqual('Usage:' in output,True, 'clean up success with a username: '+output)
|
||||
|
||||
(status,output)=Utils.cleanToken(key='noavalidsuperadminkey')
|
||||
self.assertNotEqual(status, 0, 'clean up success with wrong super_admin key'+output)
|
||||
self.assertEqual('401 Unauthorized' in output,True, 'clean up success with wrong super_admin key: '+output)
|
||||
|
||||
#cleanup token with no options
|
||||
(status,output)=Utils.cleanToken()
|
||||
self.assertEqual(status, 0, 'clean up failed with no option'+output)
|
||||
|
||||
#cleanup token with purge option
|
||||
(status,output)=Utils.cleanToken(option='purge', value='test')
|
||||
self.assertEqual(status, 0, 'clean up failed with purge option'+output)
|
||||
|
||||
#cleanup token with purge option no valid account name
|
||||
#TODO:review following https://bugs.launchpad.net/gluster-swift/+bug/1271555
|
||||
(status,output)=Utils.cleanToken(option='purge', value='accountnotvalid')
|
||||
self.assertNotEqual(status, 0, 'clean up failed with purge option'+output)
|
||||
|
||||
#cleanup token with purge-all option
|
||||
(status,output)=Utils.cleanToken(option='purge-all')
|
||||
self.assertEqual(status, 0, 'clean up failed with purge-all option'+output)
|
||||
|
||||
#cleanup token with -v option
|
||||
(status,output)=Utils.cleanToken(option='verbose')
|
||||
self.assertEqual(status, 0, 'clean up failed with verbose option'+output)
|
||||
self.assertEqual('GET .token_0' in output and 'GET .token_f' in output,True,\
|
||||
'clean up success without key: '+output)
|
||||
|
||||
#cleanup token with token-life option
|
||||
(status,output)=Utils.cleanToken(option='token-life', value='500')
|
||||
self.assertEqual(status, 0, 'clean up failed with token-life option'+output)
|
||||
|
||||
#cleanup token with sleep option
|
||||
(status,output)=Utils.cleanToken(option='sleep', value='500')
|
||||
self.assertEqual(status, 0, 'clean up failed with sleep option'+output)
|
||||
|
||||
#TODO:revisit below two cases after fix for
|
||||
#https://bugs.launchpad.net/gluster-swift/+bug/1271550
|
||||
#cleanup token with token-life option non numeric value
|
||||
(status,output)=Utils.cleanToken(option='token-life', value='notanumaric')
|
||||
self.assertEqual('Usage:' in output, True, 'clean up success with token-life option non numeric value'+output)
|
||||
|
||||
#cleanup token with sleep option non numeric value
|
||||
(status,output)=Utils.cleanToken(option='sleep', value='notanumeric')
|
||||
self.assertEqual('Usage:' in output, True, 'clean up success with sleep option non numeric value'+output)
|
||||
|
||||
def testSetAccountService(self):
|
||||
self.setTestAccUserEnv()
|
||||
self.setTest2AccUserEnv()
|
||||
|
||||
#set-account-service asset all valid value
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'local', 'http://localhost:8080/v1/AUTH_test')
|
||||
self.assertEqual(status, 0, 'set account service fails with valid input'+output)
|
||||
(status,output)=Utils.listUsers('test', listtype='--json')
|
||||
self.assertEqual('{"services": {"storage": {"default": "local", "local": "http://localhost:8080/v1/AUTH_test"}}' in output,True, \
|
||||
'set account service success with valid input'+output)
|
||||
|
||||
#invalid account
|
||||
(status,output)=Utils.setAccountService('accountdoesnotexist', 'storage', 'local', 'http://localhost:8080/v1/AUTH_test')
|
||||
self.assertNotEqual(status, 0, 'set account service success with invalid accountname'+output)
|
||||
self.assertEqual('Service set failed: 404 Not Found' in output,True, 'set account service success with invalid accountname'+output)
|
||||
|
||||
#service name other than storage
|
||||
(status,output)=Utils.setAccountService('test', 'st', 'local', 'http://localhost:8080/v1/AUTH_test')
|
||||
self.assertEqual(status, 0, 'set account service success with service name other than storage'+output)
|
||||
(status,output)=Utils.listUsers('test', listtype='--json')
|
||||
self.assertEqual('"st": {"local": "http://localhost:8080/v1/AUTH_test"}}' in output,True, \
|
||||
'set account service success with service name other than storage'+output)
|
||||
|
||||
#name other than local
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'notlocal', 'http://localhost:8080/v1/AUTH_test')
|
||||
self.assertEqual(status, 0, 'set account service with name other than local failed'+output)
|
||||
(status,output)=Utils.listUsers('test', listtype='--json')
|
||||
self.assertEqual(' "notlocal": "http://localhost:8080/v1/AUTH_test"}' in output,True, \
|
||||
'set account service with name other than local failed'+output)
|
||||
|
||||
#set default to point notlocal
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'default', 'notlocal')
|
||||
self.assertEqual(status, 0, 'set account service set default to local failed'+output)
|
||||
(status,output)=Utils.listUsers('test', listtype='--json')
|
||||
self.assertEqual(' {"default": "notlocal", "notlocal": "http://localhost:8080/v1/AUTH_test"' in output,True, \
|
||||
'set account service set default to local failed'+output)
|
||||
|
||||
#try to set account service with users other than .super_admin
|
||||
#reseller_admin
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'local', 'http://localhost:8080/v1/AUTH_test', user='test:re_admin', key='testing')
|
||||
self.assertEqual(status, 0, 'set account service fails re_admin user cred'+output)
|
||||
|
||||
#admin user
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'local', 'http://localhost:8080/v1/AUTH_test', user='test:admin', key='testing')
|
||||
self.assertNotEqual(status, 0, 'set account service success with admin user cred'+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'set account service success with admin user cred'+output)
|
||||
|
||||
#regular user
|
||||
(status,output)=Utils.setAccountService('test', 'storage', 'local', 'http://localhost:8080/v1/AUTH_test', user='test:tester', key='testing')
|
||||
self.assertNotEqual(status, 0, 'set account service success with regular user cred'+output)
|
||||
self.assertEqual('403 Forbidden' in output,True, 'set account service success with admin user cred'+output)
|
||||
|
@ -1,32 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the account-server workers start,
|
||||
# you can *consider* setting this value to "false" to reduce the per-request
|
||||
# overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = false
|
||||
bind_port = 6012
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = account-server
|
||||
|
||||
[app:account-server]
|
||||
use = egg:gluster_swift#account
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the account server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
@ -1,35 +0,0 @@
|
||||
[DEFAULT]
|
||||
devices = /mnt/gluster-object
|
||||
#
|
||||
# Once you are confident that your startup processes will always have your
|
||||
# gluster volumes properly mounted *before* the container-server workers
|
||||
# start, you can *consider* setting this value to "false" to reduce the
|
||||
# per-request overhead it can incur.
|
||||
#
|
||||
# *** Keep false for Functional Tests ***
|
||||
mount_check = false
|
||||
bind_port = 6011
|
||||
#
|
||||
# Override swift's default behaviour for fallocate.
|
||||
disable_fallocate = true
|
||||
#
|
||||
# One or two workers should be sufficient for almost any installation of
|
||||
# Gluster.
|
||||
workers = 1
|
||||
|
||||
[pipeline:main]
|
||||
pipeline = container-server
|
||||
|
||||
[app:container-server]
|
||||
use = egg:gluster_swift#container
|
||||
user = root
|
||||
log_facility = LOG_LOCAL2
|
||||
log_level = WARN
|
||||
#
|
||||
# After ensuring things are running in a stable manner, you can turn off
|
||||
# normal request logging for the container server to unclutter the log
|
||||
# files. Warnings and errors will still be logged.
|
||||
log_requests = off
|
||||
|
||||
#enable object versioning for functional test
|
||||
allow_versions = on
|
@ -1,19 +0,0 @@
|
||||
[DEFAULT]
|
||||
#
|
||||
# IP address of a node in the GlusterFS server cluster hosting the
|
||||
# volumes to be served via Swift API.
|
||||
mount_ip = localhost
|
||||
|
||||
# Performance optimization parameter. When turned off, the filesystem will
|
||||
# see a reduced number of stat calls, resulting in substantially faster
|
||||
# response time for GET and HEAD container requests on containers with large
|
||||
# numbers of objects, at the expense of an accurate count of combined bytes
|
||||
# used by all objects in the container. For most installations "off" works
|
||||
# fine.
|
||||
#
|
||||
# *** Keep on for Functional Tests ***
|
||||
accurate_size_in_listing = on
|
||||
|
||||
# *** Keep on for Functional Tests ***
|
||||
container_update_object_count = on
|
||||
account_update_container_count = on
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user