Bring st_stream up to date.

commit 25449c4b59
@@ -5,3 +5,4 @@ dist
ChangeLog
.coverage
swift.egg-info
+.DS_Store
AUTHORS (1 line changed)
@@ -23,6 +23,7 @@ Soren Hansen
Paul Jimenez
Brian K. Jones
Ed Leafe
Stephen Milton
Andrew Clay Shafer
Monty Taylor
Caleb Tennis
@@ -8,7 +8,7 @@ document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.
</script>
<script type="text/javascript">
try {
-var pageTracker = _gat._getTracker("UA-17511903-1");
+var pageTracker = _gat._getTracker("UA-17511903-4");
pageTracker._setDomainName("none");
pageTracker._setAllowLinker(true);
pageTracker._trackPageview();
@@ -60,7 +60,7 @@ master_doc = 'index'

# General information about the project.
project = u'Swift'
-copyright = u'2010, OpenStack, LLC.'
+copyright = u'2010, OpenStack, LLC'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -6,7 +6,7 @@ Getting Started
System Requirements
-------------------

-Swift development currently targets Unbuntu Server 10.04, but should work on
+Swift development currently targets Ubuntu Server 10.04, but should work on
most Linux platforms with the following software:

* Python 2.6
@@ -36,5 +36,6 @@ following docs will be useful:
Production
----------

-We do not have documentation yet on how to set up and configure Swift for a
-production cluster, but hope to begin work on those soon.
+If you want to set up and configure Swift for a production cluster, the following doc should be useful:
+
+* :doc:`Multiple Server Swift Installation <howto_installmultinode>`
doc/source/howto_installmultinode.rst (new file, 391 lines)
@@ -0,0 +1,391 @@
==============================================================
Instructions for a Multiple Server Swift Installation (Ubuntu)
==============================================================

Prerequisites
-------------
* Ubuntu Server 10.04 LTS installation media

.. note::
   Swift can run with other distros, but for this document we will focus
   on installing on Ubuntu Server, ypmv (your packaging may vary).

Basic architecture and terms
----------------------------
- *node* - a host machine running one or more Swift services
- *Proxy node* - node that runs Proxy services
- *Auth node* - node that runs the Auth service
- *Storage node* - node that runs Account, Container, and Object services
- *ring* - a set of mappings of Swift data to physical devices

This document shows a cluster using the following types of nodes:

- one Proxy node

  - Runs the swift-proxy-server processes which proxy requests to the
    appropriate Storage nodes.

- one Auth node

  - Runs the swift-auth-server which controls authentication and
    authorization for all requests. This can be on the same node as a
    Proxy node.

- five Storage nodes

  - Run the swift-account-server, swift-container-server, and
    swift-object-server processes which control storage of the account
    databases, the container databases, as well as the actual stored
    objects.

.. note::
   Fewer Storage nodes can be used initially, but a minimum of 5 is
   recommended for a production cluster.

This document describes each Storage node as a separate zone in the ring.
It is recommended to have a minimum of 5 zones. A zone is a group of nodes
that is as isolated as possible from other nodes (separate servers, network,
power, even geography). The ring guarantees that every replica is stored
in a separate zone. For more information about the ring and zones, see: :doc:`The Rings <overview_ring>`.

Network Setup Notes
-------------------

This document refers to two networks: an external network for connecting to the Proxy server, and a storage network that is not accessible from outside the cluster, to which all of the nodes are connected. All of the Swift services, as well as the rsync daemon on the Storage nodes, are configured to listen on their STORAGE_LOCAL_NET IP addresses.

General OS configuration and partitioning for each node
-------------------------------------------------------

#. Install the baseline Ubuntu Server 10.04 LTS on all nodes.

#. Install common Swift software prereqs::

       apt-get install python-software-properties
       add-apt-repository ppa:swift-core/ppa
       apt-get update
       apt-get install swift openssh-server

#. Create and populate configuration directories::

       mkdir -p /etc/swift
       chown -R swift:swift /etc/swift/

#. Create /etc/swift/swift.conf::

       [swift-hash]
       # random unique string that can never change (DO NOT LOSE)
       swift_hash_path_suffix = changeme

.. note::
   The suffix in /etc/swift/swift.conf should be set to some random string of
   text to be used as a salt when hashing to determine mappings in the ring.
   This file should be the same on every node in the cluster!
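One way to produce such a value (an illustrative Python sketch, not part of
the Swift tooling; any hard-to-guess string that never changes afterwards will
do)::

    import os
    from binascii import hexlify

    # 32 random bytes rendered as 64 hex characters
    print(hexlify(os.urandom(32)))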
Configure the Proxy node
------------------------

.. note::
   It is assumed that all commands are run as the root user.

#. Install swift-proxy service::

       apt-get install swift-proxy memcached

#. Create self-signed cert for SSL::

       cd /etc/swift
       openssl req -new -x509 -nodes -out cert.crt -keyout cert.key

#. Modify memcached to listen on the default interfaces. Preferably this
   should be on a local, non-public network. Edit the following line in
   /etc/memcached.conf, changing::

       -l 127.0.0.1

   to::

       -l <PROXY_LOCAL_NET_IP>

#. Restart the memcached server::

       service memcached restart

#. Create /etc/swift/proxy-server.conf::

       [DEFAULT]
       cert_file = /etc/swift/cert.crt
       key_file = /etc/swift/cert.key
       bind_port = 8080
       workers = 8
       user = swift

       [pipeline:main]
       pipeline = healthcheck cache auth proxy-server

       [app:proxy-server]
       use = egg:swift#proxy

       [filter:auth]
       use = egg:swift#auth
       ssl = true

       [filter:healthcheck]
       use = egg:swift#healthcheck

       [filter:cache]
       use = egg:swift#memcache
       memcache_servers = <PROXY_LOCAL_NET_IP>:11211

   .. note::
      If you run multiple memcache servers, put the multiple IP:port listings
      in the [filter:cache] section of the proxy-server.conf file like:
      `10.1.2.3:11211,10.1.2.4:11211`. Only the proxy server uses memcache.

#. Create the account, container and object rings::

       cd /etc/swift
       swift-ring-builder account.builder create 18 3 1
       swift-ring-builder container.builder create 18 3 1
       swift-ring-builder object.builder create 18 3 1

   .. note::
      For more information on building rings, see :doc:`overview_ring`.

#. For every storage device on each node add entries to each ring::

       swift-ring-builder account.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6002/<DEVICE> 100
       swift-ring-builder container.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6001/<DEVICE> 100
       swift-ring-builder object.builder add z<ZONE>-<STORAGE_LOCAL_NET_IP>:6000/<DEVICE> 100

   .. note::
      Assuming there are 5 zones with 1 node per zone, ZONE should start at
      1 and increment by one for each additional node. (A Python sketch that
      expands these placeholders for a five zone example appears at the end
      of this section.)

#. Verify the ring contents for each ring::

       swift-ring-builder account.builder
       swift-ring-builder container.builder
       swift-ring-builder object.builder

#. Rebalance the rings::

       swift-ring-builder account.builder rebalance
       swift-ring-builder container.builder rebalance
       swift-ring-builder object.builder rebalance

   .. note::
      Rebalancing rings can take some time.

#. Copy the account.ring.gz, container.ring.gz, and object.ring.gz files
   to each of the Proxy and Storage nodes in /etc/swift.

#. Make sure all the config files are owned by the swift user::

       chown -R swift:swift /etc/swift

#. Start Proxy services::

       swift-init proxy start
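In the ``create`` commands above, 18, 3, and 1 are the partition power (the
rings will have 2**18 partitions), the replica count, and the minimum number
of hours between moves of a given partition. The placeholders in the ``add``
commands expand to one line per device; the sketch below (a Python
illustration only; the zone numbers, IPs, and device name are assumed example
values for a five node, one-device-per-node layout) prints the full set of
``add`` commands::

    # assumed layout: one node per zone, each with a single device named sdb1
    nodes = ['10.1.2.11', '10.1.2.12', '10.1.2.13', '10.1.2.14', '10.1.2.15']
    ports = {'account': 6002, 'container': 6001, 'object': 6000}

    for zone, ip in enumerate(nodes, 1):
        for ring, port in sorted(ports.items()):
            print('swift-ring-builder %s.builder add z%d-%s:%d/sdb1 100'
                  % (ring, zone, ip, port))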
Configure the Auth node
-----------------------

#. If this node is not running on the same node as a proxy, create a
   self-signed cert as you did for the Proxy node.

#. Install swift-auth service::

       apt-get install swift-auth

#. Create /etc/swift/auth-server.conf::

       [DEFAULT]
       cert_file = /etc/swift/cert.crt
       key_file = /etc/swift/cert.key
       user = swift

       [pipeline:main]
       pipeline = auth-server

       [app:auth-server]
       use = egg:swift#auth
       default_cluster_url = https://<PROXY_HOSTNAME>:8080/v1
       # Highly recommended to change this key to something else!
       super_admin_key = devauth

#. Start Auth services::

       swift-init auth start
       chown swift:swift /etc/swift/auth.db
       swift-init auth restart # 1.1.0 workaround because swift creates auth.db owned as root
Configure the Storage nodes
---------------------------

.. note::
   Swift *should* work on any modern filesystem that supports
   Extended Attributes (XATTRS). We currently recommend XFS as it
   demonstrated the best overall performance for the swift use case after
   considerable testing and benchmarking at Rackspace. It is also the
   only filesystem that has been thoroughly tested.
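Once a device is mounted under /srv/node (see the steps below), a quick sanity
check that the filesystem really supports user extended attributes can be done
from Python with the ``xattr`` module Swift itself depends on (an illustrative
sketch; the mount point ``/srv/node/sdb1`` is the example device used below)::

    import tempfile
    import xattr

    # write and read back a user xattr on a scratch file on the XFS volume
    with tempfile.NamedTemporaryFile(dir='/srv/node/sdb1') as f:
        xattr.setxattr(f.name, 'user.swift.test', 'ok')
        print(xattr.getxattr(f.name, 'user.swift.test'))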
#. Install Storage node packages::

       apt-get install swift-account swift-container swift-object xfsprogs

#. For every device on the node, setup the XFS volume (/dev/sdb is used
   as an example)::

       fdisk /dev/sdb (set up a single partition)
       mkfs.xfs -i size=1024 /dev/sdb1
       echo "/dev/sdb1 /srv/node/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
       mkdir -p /srv/node/sdb1
       mount /srv/node/sdb1
       chown -R swift:swift /srv/node

#. Create /etc/rsyncd.conf::

       uid = swift
       gid = swift
       log file = /var/log/rsyncd.log
       pid file = /var/run/rsyncd.pid
       address = <STORAGE_LOCAL_NET_IP>

       [account]
       max connections = 2
       path = /srv/node/
       read only = false
       lock file = /var/lock/account.lock

       [container]
       max connections = 2
       path = /srv/node/
       read only = false
       lock file = /var/lock/container.lock

       [object]
       max connections = 2
       path = /srv/node/
       read only = false
       lock file = /var/lock/object.lock

#. Edit the following line in /etc/default/rsync::

       RSYNC_ENABLE=true

#. Start rsync daemon::

       service rsync start

   .. note::
      The rsync daemon requires no authentication, so it should be run on
      a local, private network.

#. Create /etc/swift/account-server.conf::

       [DEFAULT]
       bind_ip = <STORAGE_LOCAL_NET_IP>
       workers = 2

       [pipeline:main]
       pipeline = account-server

       [app:account-server]
       use = egg:swift#account

       [account-replicator]

       [account-auditor]

       [account-reaper]

#. Create /etc/swift/container-server.conf::

       [DEFAULT]
       bind_ip = <STORAGE_LOCAL_NET_IP>
       workers = 2

       [pipeline:main]
       pipeline = container-server

       [app:container-server]
       use = egg:swift#container

       [container-replicator]

       [container-updater]

       [container-auditor]

#. Create /etc/swift/object-server.conf::

       [DEFAULT]
       bind_ip = <STORAGE_LOCAL_NET_IP>
       workers = 2

       [pipeline:main]
       pipeline = object-server

       [app:object-server]
       use = egg:swift#object

       [object-replicator]

       [object-updater]

       [object-auditor]

#. Start the storage services::

       swift-init object-server start
       swift-init object-replicator start
       swift-init object-updater start
       swift-init object-auditor start
       swift-init container-server start
       swift-init container-replicator start
       swift-init container-updater start
       swift-init container-auditor start
       swift-init account-server start
       swift-init account-replicator start
       swift-init account-auditor start
Create Swift admin account and test (run commands from Auth node)
-----------------------------------------------------------------

#. Create a user with administrative privileges (account = system,
   username = root, password = testpass). Make sure to replace
   ``devauth`` with whatever super_admin key you assigned in the
   auth-server.conf file above. *Note: None of the values of
   account, username, or password are special - they can be anything.*::

       swift-auth-add-user -K devauth -a system root testpass

#. Get an X-Storage-Url and X-Auth-Token (a Python equivalent of this request
   is sketched after these steps)::

       curl -k -v -H 'X-Storage-User: system:root' -H 'X-Storage-Pass: testpass' https://<AUTH_HOSTNAME>:11000/v1.0

#. Check that you can HEAD the account::

       curl -k -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>

#. Check that ``st`` works::

       st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass stat

#. Use ``st`` to upload a few files named 'bigfile[1-2].tgz' to a container named 'myfiles'::

       st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass upload myfiles bigfile1.tgz
       st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass upload myfiles bigfile2.tgz

#. Use ``st`` to download all files from the 'myfiles' container::

       st -A https://<AUTH_HOSTNAME>:11000/v1.0 -U system:root -K testpass download myfiles
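The token request can also be made from Python; a minimal Python 2 sketch
equivalent to the ``curl`` call in step 2 (``<AUTH_HOSTNAME>`` and the
credentials are the same example placeholders used above)::

    import httplib

    conn = httplib.HTTPSConnection('<AUTH_HOSTNAME>', 11000)
    conn.request('GET', '/v1.0', '',
                 {'X-Storage-User': 'system:root',
                  'X-Storage-Pass': 'testpass'})
    resp = conn.getresponse()
    # the storage URL and auth token come back as response headers
    print(resp.getheader('x-storage-url'))
    print(resp.getheader('x-auth-token'))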
Troubleshooting Notes
---------------------
If you see problems, look in /var/log/syslog (or messages on some distros).

Also, at Rackspace we have seen hints at drive failures by looking at error messages in /var/log/kern.log.

There are more debugging hints and tips in the :doc:`admin_guide`.
@@ -41,6 +41,7 @@ Deployment:
.. toctree::
    :maxdepth: 1

+   howto_installmultinode
    deployment_guide
    admin_guide
    debian_package_guide
@@ -85,3 +85,8 @@ use = egg:swift#domain_remap
[filter:catch_errors]
use = egg:swift#catch_errors

+[filter:cname_lookup]
+# Note: this middleware requires python-dnspython
+use = egg:swift#cname_lookup
+# storage_domain = example.com
+# lookup_depth = 1
setup.py (1 line changed)
@@ -93,6 +93,7 @@ setup(
 'healthcheck=swift.common.middleware.healthcheck:filter_factory',
 'memcache=swift.common.middleware.memcache:filter_factory',
 'ratelimit=swift.common.middleware.ratelimit:filter_factory',
+'cname_lookup=swift.common.middleware.cname_lookup:filter_factory',
 'catch_errors=swift.common.middleware.catch_errors:filter_factory',
 'domain_remap=swift.common.middleware.domain_remap:filter_factory',
 ],
swift/common/middleware/cname_lookup.py (new file, 120 lines)
@@ -0,0 +1,120 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from webob import Request
from webob.exc import HTTPBadRequest
import dns.resolver
from dns.exception import DNSException

from swift.common.utils import cache_from_env, get_logger


def lookup_cname(domain):  # pragma: no cover
    """
    Given a domain, returns its DNS CNAME mapping and DNS ttl.

    :param domain: domain to query on
    :returns: (ttl, result)
    """
    try:
        answer = dns.resolver.query(domain, 'CNAME').rrset
        ttl = answer.ttl
        result = answer.items[0].to_text()
        result = result.rstrip('.')
        return ttl, result
    except DNSException:
        return 0, None


class CNAMELookupMiddleware(object):
    """
    Middleware that translates an unknown domain in the host header to
    something that ends with the configured storage_domain by looking up
    the given domain's CNAME record in DNS.
    """

    def __init__(self, app, conf):
        self.app = app
        self.storage_domain = conf.get('storage_domain', 'example.com')
        if self.storage_domain and self.storage_domain[0] != '.':
            self.storage_domain = '.' + self.storage_domain
        self.lookup_depth = int(conf.get('lookup_depth', '1'))
        self.memcache = None
        self.logger = get_logger(conf)

    def __call__(self, env, start_response):
        if not self.storage_domain:
            return self.app(env, start_response)
        given_domain = env['HTTP_HOST']
        port = ''
        if ':' in given_domain:
            given_domain, port = given_domain.rsplit(':', 1)
        if given_domain == self.storage_domain[1:]:  # strip initial '.'
            return self.app(env, start_response)
        a_domain = given_domain
        if not a_domain.endswith(self.storage_domain):
            if self.memcache is None:
                self.memcache = cache_from_env(env)
            error = True
            for tries in xrange(self.lookup_depth):
                found_domain = None
                if self.memcache:
                    memcache_key = ''.join(['cname-', a_domain])
                    found_domain = self.memcache.get(memcache_key)
                if not found_domain:
                    ttl, found_domain = lookup_cname(a_domain)
                    if self.memcache:
                        memcache_key = ''.join(['cname-', given_domain])
                        self.memcache.set(memcache_key, found_domain,
                                          timeout=ttl)
                if found_domain is None or found_domain == a_domain:
                    # no CNAME records or we're at the last lookup
                    error = True
                    found_domain = None
                    break
                elif found_domain.endswith(self.storage_domain):
                    # Found it!
                    self.logger.info('Mapped %s to %s' % (given_domain,
                                                          found_domain))
                    if port:
                        env['HTTP_HOST'] = ':'.join([found_domain, port])
                    else:
                        env['HTTP_HOST'] = found_domain
                    error = False
                    break
                else:
                    # try one more deep in the chain
                    self.logger.debug('Following CNAME chain for %s to %s' %
                                      (given_domain, found_domain))
                    a_domain = found_domain
            if error:
                if found_domain:
                    msg = 'CNAME lookup failed after %d tries' % \
                          self.lookup_depth
                else:
                    msg = 'CNAME lookup failed to resolve to a valid domain'
                resp = HTTPBadRequest(request=Request(env), body=msg,
                                      content_type='text/plain')
                return resp(env, start_response)
        return self.app(env, start_response)


def filter_factory(global_conf, **local_conf):  # pragma: no cover
    conf = global_conf.copy()
    conf.update(local_conf)

    def cname_filter(app):
        return CNAMELookupMiddleware(app, conf)
    return cname_filter
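A quick usage sketch of the new middleware (illustrative only, not part of the commit; it mirrors how the unit tests below construct the filter). A Host that already ends in the configured storage_domain passes straight through without any DNS lookup:

    from webob import Request

    from swift.common.middleware import cname_lookup


    def simple_app(env, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello']

    # other hosts would be resolved via CNAME until they end in
    # .example.com or lookup_depth is exhausted
    app = cname_lookup.CNAMELookupMiddleware(
        simple_app, {'storage_domain': 'example.com', 'lookup_depth': 2})

    req = Request.blank('/', headers={'Host': 'foo.example.com'})
    print(app(req.environ, lambda *args: None))  # -> ['hello']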
@@ -629,7 +629,8 @@ class ObjectController(Controller):
                return HTTPPreconditionFailed(request=req,
                    body='X-Copy-From header must be of the form'
                         '<container name>/<object name>')
-            source_req = Request.blank(source_header)
+            source_req = req.copy_get()
+            source_req.path_info = source_header
            orig_obj_name = self.object_name
            orig_container_name = self.container_name
            self.object_name = src_obj_name
@@ -774,6 +775,8 @@ class ObjectController(Controller):
            for k, v in req.headers.items():
                if k.lower().startswith('x-object-meta-'):
                    resp.headers[k] = v
+            # reset the bytes, since the user didn't actually send anything
+            req.bytes_transferred = 0
            resp.last_modified = float(req.headers['X-Timestamp'])
            return resp

@@ -820,6 +823,7 @@ class ObjectController(Controller):
                              'Object DELETE')

    @public
+    @delay_denial
    def COPY(self, req):
        """HTTP COPY request handler."""
        dest = req.headers.get('Destination')
@@ -845,8 +849,8 @@ class ObjectController(Controller):
        new_headers['Content-Length'] = 0
        del new_headers['Destination']
        new_path = '/' + self.account_name + dest
-        new_req = Request.blank(new_path,
-            environ={'REQUEST_METHOD': 'PUT'}, headers=new_headers)
+        new_req = Request.blank(new_path, environ=req.environ,
+            headers=new_headers)
        return self.PUT(new_req)
@@ -73,8 +73,7 @@ class LogProcessor(object):
            self._internal_proxy = InternalProxy(proxy_server_conf,
                                                 self.logger,
                                                 retries=3)
-        else:
-            return self._internal_proxy
+        return self._internal_proxy

    def process_one_file(self, plugin_name, account, container, object_name):
        self.logger.info('Processing %s/%s/%s with plugin "%s"' % (account,
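The hunk above fixes lazy initialization of LogProcessor's internal proxy: with the old else branch, the freshly constructed InternalProxy was never returned on first access. A stripped-down sketch of the pattern (an assumed simplification for illustration, not the actual LogProcessor code):

    class LazyExample(object):
        def __init__(self):
            self._internal_proxy = None

        @property
        def internal_proxy(self):
            if self._internal_proxy is None:
                # stands in for InternalProxy(proxy_server_conf, logger, retries=3)
                self._internal_proxy = object()
            # always return the cached instance, including on first access
            return self._internal_proxy

    lazy = LazyExample()
    assert lazy.internal_proxy is lazy._internal_proxy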
@@ -7,7 +7,7 @@ from uuid import uuid4
from swift.common.constraints import MAX_META_COUNT, MAX_META_NAME_LENGTH, \
    MAX_META_OVERALL_SIZE, MAX_META_VALUE_LENGTH

-from swift_testing import check_response, retry, skip
+from swift_testing import check_response, retry, skip, skip3, swift_test_user


class TestObject(unittest.TestCase):
@@ -86,6 +86,93 @@ class TestObject(unittest.TestCase):
        except Exception, err:
            self.assert_(str(err).startswith('No result after '))

    def test_private_object(self):
        if skip or skip3:
            raise SkipTest
        # Ensure we can't access the object with the third account
        def get(url, token, parsed, conn):
            conn.request('GET', '%s/%s/%s' % (parsed.path, self.container,
                         self.obj), '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(get, use_account=3)
        resp.read()
        self.assertEquals(resp.status, 403)
        # create a shared container writable by account3
        shared_container = uuid4().hex
        def put(url, token, parsed, conn):
            conn.request('PUT', '%s/%s' % (parsed.path,
                         shared_container), '',
                         {'X-Auth-Token': token,
                          'X-Container-Read': swift_test_user[2],
                          'X-Container-Write': swift_test_user[2]})
            return check_response(conn)
        resp = retry(put)
        resp.read()
        self.assertEquals(resp.status, 201)
        # verify third account can not copy from private container
        def copy(url, token, parsed, conn):
            conn.request('PUT', '%s/%s/%s' % (parsed.path,
                         shared_container,
                         'private_object'),
                         '', {'X-Auth-Token': token,
                              'Content-Length': '0',
                              'X-Copy-From': '%s/%s' % (self.container,
                                                        self.obj)})
            return check_response(conn)
        resp = retry(copy, use_account=3)
        resp.read()
        self.assertEquals(resp.status, 403)
        # verify third account can write "obj1" to shared container
        def put(url, token, parsed, conn):
            conn.request('PUT', '%s/%s/%s' % (parsed.path, shared_container,
                         'obj1'), 'test', {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(put, use_account=3)
        resp.read()
        self.assertEquals(resp.status, 201)
        # verify third account can copy "obj1" to shared container
        def copy2(url, token, parsed, conn):
            conn.request('COPY', '%s/%s/%s' % (parsed.path,
                         shared_container,
                         'obj1'),
                         '', {'X-Auth-Token': token,
                              'Destination': '%s/%s' % (shared_container,
                                                        'obj1')})
            return check_response(conn)
        resp = retry(copy2, use_account=3)
        resp.read()
        self.assertEquals(resp.status, 201)
        # verify third account STILL can not copy from private container
        def copy3(url, token, parsed, conn):
            conn.request('COPY', '%s/%s/%s' % (parsed.path,
                         self.container,
                         self.obj),
                         '', {'X-Auth-Token': token,
                              'Destination': '%s/%s' % (shared_container,
                                                        'private_object')})
            return check_response(conn)
        resp = retry(copy3, use_account=3)
        resp.read()
        self.assertEquals(resp.status, 403)
        # clean up "obj1"
        def delete(url, token, parsed, conn):
            conn.request('DELETE', '%s/%s/%s' % (parsed.path, shared_container,
                         'obj1'), '', {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(delete)
        resp.read()
        self.assertEquals(resp.status, 204)
        # clean up shared_container
        def delete(url, token, parsed, conn):
            conn.request('DELETE',
                         parsed.path + '/' + shared_container, '',
                         {'X-Auth-Token': token})
            return check_response(conn)
        resp = retry(delete)
        resp.read()
        self.assertEquals(resp.status, 204)


if __name__ == '__main__':
    unittest.main()
test/unit/common/middleware/test_cname_lookup.py (new file, 164 lines)
@@ -0,0 +1,164 @@
# Copyright (c) 2010 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from nose import SkipTest

from webob import Request

try:
    # this test requires the dnspython package to be installed
    from swift.common.middleware import cname_lookup
    skip = False
except ImportError:
    skip = True


class FakeApp(object):

    def __call__(self, env, start_response):
        return "FAKE APP"


def start_response(*args):
    pass


class TestCNAMELookup(unittest.TestCase):

    def setUp(self):
        if skip:
            raise SkipTest
        self.app = cname_lookup.CNAMELookupMiddleware(FakeApp(),
                                                      {'lookup_depth': 2})

    def test_passthrough(self):

        def my_lookup(d):
            return 0, d
        cname_lookup.lookup_cname = my_lookup

        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'foo.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'foo.example.com:8080'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')

    def test_good_lookup(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'mysite.com'})

        def my_lookup(d):
            return 0, '%s.example.com' % d
        cname_lookup.lookup_cname = my_lookup

        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'mysite.com:8080'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')

    def test_lookup_chain_too_long(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'mysite.com'})

        def my_lookup(d):
            if d == 'mysite.com':
                site = 'level1.foo.com'
            elif d == 'level1.foo.com':
                site = 'level2.foo.com'
            elif d == 'level2.foo.com':
                site = 'bar.example.com'
            return 0, site
        cname_lookup.lookup_cname = my_lookup

        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, ['CNAME lookup failed after 2 tries'])

    def test_lookup_chain_bad_target(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'mysite.com'})

        def my_lookup(d):
            return 0, 'some.invalid.site.com'
        cname_lookup.lookup_cname = my_lookup

        resp = self.app(req.environ, start_response)
        self.assertEquals(resp,
                          ['CNAME lookup failed to resolve to a valid domain'])

    def test_something_weird(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'mysite.com'})

        def my_lookup(d):
            return 0, None
        cname_lookup.lookup_cname = my_lookup

        resp = self.app(req.environ, start_response)
        self.assertEquals(resp,
                          ['CNAME lookup failed to resolve to a valid domain'])

    def test_with_memcache(self):
        def my_lookup(d):
            return 0, '%s.example.com' % d
        cname_lookup.lookup_cname = my_lookup
        class memcache_stub(object):
            def __init__(self):
                self.cache = {}
            def get(self, key):
                return self.cache.get(key, None)
            def set(self, key, value, *a, **kw):
                self.cache[key] = value
        memcache = memcache_stub()
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'swift.cache': memcache},
                            headers={'Host': 'mysite.com'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'swift.cache': memcache},
                            headers={'Host': 'mysite.com'})
        resp = self.app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')

    def test_cname_matching_ending_not_domain(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'foo.com'})

        def my_lookup(d):
            return 0, 'c.aexample.com'
        cname_lookup.lookup_cname = my_lookup

        resp = self.app(req.environ, start_response)
        self.assertEquals(resp,
                          ['CNAME lookup failed to resolve to a valid domain'])

    def test_cname_configured_with_empty_storage_domain(self):
        app = cname_lookup.CNAMELookupMiddleware(FakeApp(),
                                                 {'storage_domain': '',
                                                  'lookup_depth': 2})
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.a.example.com'})

        def my_lookup(d):
            return 0, None
        cname_lookup.lookup_cname = my_lookup

        resp = app(req.environ, start_response)
        self.assertEquals(resp, 'FAKE APP')
@@ -1782,6 +1782,21 @@ class TestObjectController(unittest.TestCase):
            self.assert_(hasattr(req, 'bytes_transferred'))
            self.assertEquals(req.bytes_transferred, 10)

    def test_copy_zero_bytes_transferred_attr(self):
        with save_globals():
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 201, 201, 201,
                                  body='1234567890')
            controller = proxy_server.ObjectController(self.app, 'account',
                                                       'container', 'object')
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'PUT'},
                                headers={'X-Copy-From': 'c/o2',
                                         'Content-Length': '0'})
            self.app.update_request(req)
            res = controller.PUT(req)
            self.assert_(hasattr(req, 'bytes_transferred'))
            self.assertEquals(req.bytes_transferred, 0)

    def test_response_bytes_transferred_attr(self):
        with save_globals():
            proxy_server.http_connect = \
@@ -1904,6 +1919,23 @@ class TestObjectController(unittest.TestCase):
            res = controller.PUT(req)
            self.assert_(called[0])

    def test_COPY_calls_authorize(self):
        called = [False]

        def authorize(req):
            called[0] = True
            return HTTPUnauthorized(request=req)
        with save_globals():
            proxy_server.http_connect = \
                fake_http_connect(200, 200, 200, 200, 200, 201, 201, 201)
            controller = proxy_server.ObjectController(self.app, 'account',
                                                       'container', 'object')
            req = Request.blank('/a/c/o', environ={'REQUEST_METHOD': 'COPY'},
                                headers={'Destination': 'c/o'})
            req.environ['swift.authorize'] = authorize
            self.app.update_request(req)
            res = controller.COPY(req)
            self.assert_(called[0])


class TestContainerController(unittest.TestCase):
    "Test swift.proxy_server.ContainerController"
@@ -14,9 +14,30 @@
# limitations under the License.

import unittest
import os
from contextlib import contextmanager
from tempfile import NamedTemporaryFile

from swift.common import internal_proxy
from swift.stats import log_processor


@contextmanager
def tmpfile(content):
    with NamedTemporaryFile('w', delete=False) as f:
        file_name = f.name
        f.write(str(content))
    try:
        yield file_name
    finally:
        os.unlink(file_name)


class FakeUploadApp(object):
    def __init__(self, *args, **kwargs):
        pass


class DumbLogger(object):
    def __getattr__(self, n):
        return self.foo
@@ -63,6 +84,27 @@ class TestLogProcessor(unittest.TestCase):
            }
        }

    def test_lazy_load_internal_proxy(self):
        # stub out internal_proxy's upload_app
        internal_proxy.BaseApplication = FakeUploadApp
        dummy_proxy_config = """[app:proxy-server]
use = egg:swift#proxy
"""
        with tmpfile(dummy_proxy_config) as proxy_config_file:
            conf = {'log-processor': {
                        'proxy_server_conf': proxy_config_file,
                   }}
            p = log_processor.LogProcessor(conf, DumbLogger())
            self.assert_(isinstance(p._internal_proxy,
                                    None.__class__))
            self.assert_(isinstance(p.internal_proxy,
                                    log_processor.InternalProxy))
            self.assertEquals(p.internal_proxy, p._internal_proxy)

        # reset FakeUploadApp
        reload(internal_proxy)

    def test_access_log_line_parser(self):
        access_proxy_config = self.proxy_config.copy()
        access_proxy_config.update({