documentation clarification and pep8 fixes

This commit is contained in:
John Dickinson 2010-09-30 15:48:56 -05:00
parent b44296d146
commit 880f5af2ff
4 changed files with 44 additions and 23 deletions

View File

@ -4,7 +4,7 @@ Swift stats system
The swift stats system is composed of three parts: log creation, log The swift stats system is composed of three parts: log creation, log
uploading, and log processing. The system handles two types of logs (access uploading, and log processing. The system handles two types of logs (access
and storage stats), but it can be extended to handle other types of logs. and account stats), but it can be extended to handle other types of logs.
--------- ---------
Log Types Log Types
@ -20,21 +20,21 @@ is made on August 4, 2010 at 12:37 gets logged in a file named 2010080412.
This allows easy log rotation and easy per-hour log processing. This allows easy log rotation and easy per-hour log processing.
****************** ******************
Storage stats logs Account stats logs
****************** ******************
Storage logs (also referred to as stats logs) are generated by a stats system Account stats logs are generated by a stats system process.
process. swift-account-stats-logger runs on each account server (via cron) and swift-account-stats-logger runs on each account server (via cron) and walks
walks the filesystem looking for account databases. When an account database the filesystem looking for account databases. When an account database is
is found, the logger selects the account hash, bytes_used, container_count, found, the logger selects the account hash, bytes_used, container_count, and
and object_count. These values are then written out as one line in a csv file. object_count. These values are then written out as one line in a csv file. One
One csv file is produced for every run of swift-account-stats-logger. This csv file is produced for every run of swift-account-stats-logger. This means
means that, system wide, one csv file is produced for every storage node. that, system wide, one csv file is produced for every storage node. Rackspace
Rackspace runs the account stats logger every hour. Therefore, in a cluster of runs the account stats logger every hour. Therefore, in a cluster of ten
ten account servers, ten csv files are produced every hour. Also, every account servers, ten csv files are produced every hour. Also, every account
account will have one entry for every replica in the system. On average, there will have one entry for every replica in the system. On average, there will be
will be three copies of each account in the aggregate of all account stat csv three copies of each account in the aggregate of all account stat csv files
files created in one system-wide run. created in one system-wide run.
---------------------- ----------------------
Log Processing plugins Log Processing plugins

View File

@ -16,6 +16,7 @@
import zlib import zlib
import struct import struct
class CompressingFileReader(object): class CompressingFileReader(object):
''' '''
Wraps a file object and provides a read method that returns gzip'd data. Wraps a file object and provides a read method that returns gzip'd data.
@ -35,6 +36,7 @@ class CompressingFileReader(object):
:param file_obj: File object to read from :param file_obj: File object to read from
:param compresslevel: compression level :param compresslevel: compression level
''' '''
def __init__(self, file_obj, compresslevel=9): def __init__(self, file_obj, compresslevel=9):
self._f = file_obj self._f = file_obj
self._compressor = zlib.compressobj(compresslevel, self._compressor = zlib.compressobj(compresslevel,

View File

@ -20,13 +20,27 @@ from json import loads as json_loads
from swift.common.compressing_file_reader import CompressingFileReader from swift.common.compressing_file_reader import CompressingFileReader
from swift.proxy.server import BaseApplication from swift.proxy.server import BaseApplication
class MemcacheStub(object): class MemcacheStub(object):
def get(self, *a, **kw): return None
def set(self, *a, **kw): return None def get(self, *a, **kw):
def incr(self, *a, **kw): return 0 return None
def delete(self, *a, **kw): return None
def set_multi(self, *a, **kw): return None def set(self, *a, **kw):
def get_multi(self, *a, **kw): return [] return None
def incr(self, *a, **kw):
return 0
def delete(self, *a, **kw):
return None
def set_multi(self, *a, **kw):
return None
def get_multi(self, *a, **kw):
return []
class InternalProxy(object): class InternalProxy(object):
""" """
@ -38,6 +52,7 @@ class InternalProxy(object):
:param logger: logger to log requests to :param logger: logger to log requests to
:param retries: number of times to retry each request :param retries: number of times to retry each request
""" """
def __init__(self, proxy_server_conf=None, logger=None, retries=0): def __init__(self, proxy_server_conf=None, logger=None, retries=0):
self.upload_app = BaseApplication(proxy_server_conf, self.upload_app = BaseApplication(proxy_server_conf,
memcache=MemcacheStub(), memcache=MemcacheStub(),
@ -56,6 +71,7 @@ class InternalProxy(object):
:param object_name: name of object being uploaded :param object_name: name of object being uploaded
:param compress: if True, compresses object as it is uploaded :param compress: if True, compresses object as it is uploaded
:param content_type: content-type of object :param content_type: content-type of object
:param etag: etag for object to check successful upload
:returns: True if successful, False otherwise :returns: True if successful, False otherwise
""" """
log_create_pattern = '/v1/%s/%s/%s' % (account, container, object_name) log_create_pattern = '/v1/%s/%s/%s' % (account, container, object_name)
@ -72,7 +88,8 @@ class InternalProxy(object):
if hasattr(source_file, 'read'): if hasattr(source_file, 'read'):
compressed_file = CompressingFileReader(source_file) compressed_file = CompressingFileReader(source_file)
else: else:
compressed_file = CompressingFileReader(open(source_file, 'rb')) compressed_file = CompressingFileReader(
open(source_file, 'rb'))
req.body_file = compressed_file req.body_file = compressed_file
else: else:
if not hasattr(source_file, 'read'): if not hasattr(source_file, 'read'):

View File

@ -552,6 +552,7 @@ def cache_from_env(env):
""" """
return item_from_env(env, 'swift.cache') return item_from_env(env, 'swift.cache')
def readconf(conf, section_name, log_name=None): def readconf(conf, section_name, log_name=None):
""" """
Read config file and return config items as a dict Read config file and return config items as a dict
@ -571,7 +572,8 @@ def readconf(conf, section_name, log_name=None):
if c.has_section(section_name): if c.has_section(section_name):
conf = dict(c.items(section_name)) conf = dict(c.items(section_name))
else: else:
print "Unable to find %s config section in %s" % (section_name, conf) print "Unable to find %s config section in %s" % (section_name,
conf)
sys.exit(1) sys.exit(1)
if "log_name" not in conf: if "log_name" not in conf:
if log_name is not None: if log_name is not None:
@ -581,7 +583,7 @@ def readconf(conf, section_name, log_name=None):
else: else:
conf = {} conf = {}
for s in c.sections(): for s in c.sections():
conf.update({s:dict(c.items(s))}) conf.update({s: dict(c.items(s))})
if 'log_name' not in conf: if 'log_name' not in conf:
conf['log_name'] = log_name conf['log_name'] = log_name
return conf return conf