All the configuration files were missing settings used when gluster-swift is installed

Change-Id: I4ff877241a5965f2b0359e0549629614596045bb
Signed-off-by: Luis Pabon <lpabon@redhat.com>
Reviewed-on: http://review.gluster.org/6430
Reviewed-by: Thiago Da Silva <thiago@redhat.com>
Reviewed-by: Prashanth Pai <ppai@redhat.com>
Reviewed-by: pushpesh sharma <psharma@redhat.com>
Tested-by: pushpesh sharma <psharma@redhat.com>
[DEFAULT]
bind_port = 8080
user = root
# Consider using 1 worker per CPU
workers = 1

[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache gswauth proxy-logging proxy-server
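# Note: only gswauth is wired into this pipeline as the auth middleware; the
# [filter:tempauth] section further down is an alternative and has no effect
# unless "tempauth" replaces "gswauth" on the line above.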

[app:proxy-server]
use = egg:gluster_swift#proxy
log_facility = LOG_LOCAL1
log_level = WARN
# The API allows for account creation and deletion, but since Gluster/Swift
# automounts a Gluster volume for a given account, there is no way to create
# or delete an account. So leave this off.
allow_account_management = false
account_autocreate = true
# Ensure the proxy server uses fast-POSTs since we don't need to make a copy
# of the entire object given that all metadata is stored in the object's
# extended attributes (no .meta file used after creation) and no container
# sync feature is present.
object_post_as_copy = false
# Only need to recheck that the account exists once a day.
recheck_account_existence = 86400
# May want to consider bumping this up if containers are created and destroyed
# infrequently.
recheck_container_existence = 60
# Time out clients that don't read or write to the proxy server after 5
# seconds.
client_timeout = 5
# Give more time to connect to the object, container or account servers in
# cases of high load.
conn_timeout = 5
# For high load situations, once connected to an object, container or account
# server, allow for delays communicating with them.
node_timeout = 60
# May want to consider bumping up this value to 1 - 4 MB depending on how much
# traffic consists of multi-megabyte or gigabyte requests; perhaps matching the
# stripe width (not stripe element size) of your storage volume is a good
# starting point. See below for sizing information.
object_chunk_size = 65536
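# Illustrative example only (assumed volume layout, not a recommendation): on a
# striped volume with a 1 MiB stripe width, a matching setting would be
#   object_chunk_size = 1048576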
# If you do decide to increase the object_chunk_size, then consider lowering
# this value to one. Up to "put_queue_depth" object_chunk_size'd buffers can
# be queued to the object server for processing. Given one proxy server worker
# can handle up to 1,024 connections by default, it will consume 10 * 65,536
# * 1,024 bytes of memory in the worst case (default values). Be sure the
# amount of memory available on the system can accommodate increased values
# for object_chunk_size.
put_queue_depth = 10
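# Worked out with the defaults above: 10 (put_queue_depth) * 65,536
# (object_chunk_size) * 1,024 (connections) = 671,088,640 bytes, i.e. about
# 640 MiB per worker in the worst case.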

[filter:catch_errors]
use = egg:swift#catch_errors

[filter:proxy-logging]
use = egg:swift#proxy_logging

[filter:healthcheck]
use = egg:swift#healthcheck

[filter:tempauth]
use = egg:swift#tempauth
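# The entries below follow the standard Swift tempauth form:
#   user_<account>_<username> = <password> [group] [group] [...]
# where groups such as .admin and .reseller_admin grant elevated privileges.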
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

[filter:gswauth]
use = egg:gluster_swift#gswauth
set log_name = gswauth
super_admin_key = gswauthkey
metadata_volume = gsmetadata
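# Change super_admin_key from the sample value above before deploying, and make
# sure the GlusterFS volume named by metadata_volume (here "gsmetadata") exists,
# since gswauth stores its account metadata there. If the gswauth command-line
# tools are installed, a typical first step is to initialize that volume, e.g.:
#   gswauth-prep -K <your_super_admin_key>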

[filter:cache]
use = egg:swift#memcache
# Update this line to contain a comma separated list of memcache servers
# shared by all nodes running the proxy-server service.
memcache_servers = localhost:11211
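# For example (hypothetical addresses), a three-node deployment might use:
#   memcache_servers = 192.168.1.20:11211,192.168.1.21:11211,192.168.1.22:11211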