diff --git a/bin/st b/bin/st index 58285423bd..4e6024f84f 100755 --- a/bin/st +++ b/bin/st @@ -1723,7 +1723,7 @@ Example: error_thread.abort = True while error_thread.isAlive(): error_thread.join(0.01) - except Exception: + except (SystemExit, Exception): for thread in threading_enumerate(): thread.abort = True raise diff --git a/bin/swauth-add-account b/bin/swauth-add-account index 32aceffc7b..fe18b5a72d 100755 --- a/bin/swauth-add-account +++ b/bin/swauth-add-account @@ -18,9 +18,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-add-user b/bin/swauth-add-user index a844ed2a37..045dc0a766 100755 --- a/bin/swauth-add-user +++ b/bin/swauth-add-user @@ -18,9 +18,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-delete-account b/bin/swauth-delete-account index c46e5e3b91..3d98f6ec4e 100755 --- a/bin/swauth-delete-account +++ b/bin/swauth-delete-account @@ -18,9 +18,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-delete-user b/bin/swauth-delete-user index 5ee162437c..ede076dd5b 100755 --- a/bin/swauth-delete-user +++ b/bin/swauth-delete-user @@ -18,9 +18,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-list b/bin/swauth-list index 7433e3ddfd..85a7633966 100755 --- a/bin/swauth-list +++ b/bin/swauth-list @@ -22,9 +22,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-prep b/bin/swauth-prep index 5a931ae1d0..3d2cb7d3eb 100755 --- a/bin/swauth-prep +++ b/bin/swauth-prep @@ -18,9 +18,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swauth-set-account-service b/bin/swauth-set-account-service index 32eb06dc6b..054e4cfc4b 100755 --- a/bin/swauth-set-account-service +++ b/bin/swauth-set-account-service @@ -22,9 +22,9 @@ import gettext from optparse import OptionParser from os.path import basename from sys import argv, exit -from urlparse import urlparse from swift.common.bufferedhttp import http_connect_raw as http_connect +from swift.common.utils import urlparse if __name__ == '__main__': diff --git a/bin/swift-account-stats-logger b/bin/swift-account-stats-logger index 7b95b20249..6256b690b5 100755 --- 
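
The swauth scripts above (and several modules later in this patch) stop importing urlparse from the standard library and instead take it from swift.common.utils; the utils.py hunk further down adds "from urlparse import urlparse as stdlib_urlparse, ParseResult". A minimal sketch of what such a wrapper could look like, assuming its purpose is netloc parsing that also copes with the bracketed IPv6 literals this patch adds to the ring tools (class and attribute names below are illustrative, not taken from the patch)::

    # Sketch only: an urlparse() replacement whose hostname/port properties
    # understand netlocs such as [::1]:6010 as well as 1.2.3.4:6010.
    from urlparse import urlparse as stdlib_urlparse, ParseResult

    class ModifiedParseResult(ParseResult):

        @property
        def hostname(self):
            netloc = self.netloc.split('@', 1)[-1]
            if netloc.startswith('['):
                return netloc[1:].split(']')[0]    # strip the brackets
            elif ':' in netloc:
                return netloc.rsplit(':', 1)[0]
            return netloc

        @property
        def port(self):
            netloc = self.netloc.split('@', 1)[-1]
            if ']' in netloc:
                netloc = netloc.rsplit(']', 1)[1]
            if ':' in netloc:
                return int(netloc.rsplit(':', 1)[1])
            return None

    def urlparse(url):
        return ModifiedParseResult(*stdlib_urlparse(url))
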
a/bin/swift-account-stats-logger +++ b/bin/swift-account-stats-logger @@ -23,4 +23,4 @@ if __name__ == '__main__': # currently AccountStat only supports run_once options['once'] = True run_daemon(AccountStat, conf_file, section_name='log-processor-stats', - **options) + log_name="account-stats", **options) diff --git a/bin/swift-auth-to-swauth b/bin/swift-auth-to-swauth index 93cb4fe199..e1010c315a 100755 --- a/bin/swift-auth-to-swauth +++ b/bin/swift-auth-to-swauth @@ -23,16 +23,18 @@ import sqlite3 if __name__ == '__main__': gettext.install('swift', unicode=1) - if len(argv) != 4 or argv[1] != '-K': - exit('Syntax: %s -K ' % argv[0]) - _junk, _junk, super_admin_key, auth_db = argv - # This version will not attempt to prep swauth - # call(['swauth-prep', '-K', super_admin_key]) + if len(argv) != 2: + exit('Syntax: %s ' % argv[0]) + _junk, auth_db = argv conn = sqlite3.connect(auth_db) - for account, cfaccount, user, password, admin, reseller_admin in \ - conn.execute('SELECT account, cfaccount, user, password, admin, ' - 'reseller_admin FROM account'): - cmd = ['swauth-add-user', '-K', super_admin_key, '-s', + try: + listing = conn.execute('SELECT account, cfaccount, user, password, ' + 'admin, reseller_admin FROM account') + except sqlite3.OperationalError, err: + listing = conn.execute('SELECT account, cfaccount, user, password, ' + '"f", "f" FROM account') + for account, cfaccount, user, password, admin, reseller_admin in listing: + cmd = ['swauth-add-user', '-K', '', '-s', cfaccount.split('_', 1)[1]] if admin == 't': cmd.append('-a') @@ -40,9 +42,3 @@ if __name__ == '__main__': cmd.append('-r') cmd.extend([account, user, password]) print ' '.join(cmd) - # For this version, the script will only print out the commands - # call(cmd) - print '----------------------------------------------------------------' - print ' Assuming the above worked perfectly, you should copy and paste ' - print ' those lines into your ~/bin/recreateaccounts script.' 
- print '----------------------------------------------------------------' diff --git a/bin/swift-bench b/bin/swift-bench index 3c167ee06f..0554782a06 100755 --- a/bin/swift-bench +++ b/bin/swift-bench @@ -22,7 +22,7 @@ import uuid from optparse import OptionParser from swift.common.bench import BenchController -from swift.common.utils import readconf, LogAdapter, NamedFormatter +from swift.common.utils import readconf, LogAdapter # The defaults should be sufficient to run swift-bench on a SAIO CONF_DEFAULTS = { @@ -125,9 +125,9 @@ if __name__ == '__main__': options.log_level.lower(), logging.INFO)) loghandler = logging.StreamHandler() logger.addHandler(loghandler) - logger = LogAdapter(logger) - logformat = NamedFormatter('swift-bench', logger, - fmt='%(server)s %(asctime)s %(levelname)s %(message)s') + logger = LogAdapter(logger, 'swift-bench') + logformat = logging.Formatter('%(server)s %(asctime)s %(levelname)s ' + '%(message)s') loghandler.setFormatter(logformat) controller = BenchController(logger, options) diff --git a/bin/swift-drive-audit b/bin/swift-drive-audit index e92d1e3c12..77912e720e 100755 --- a/bin/swift-drive-audit +++ b/bin/swift-drive-audit @@ -99,7 +99,8 @@ if __name__ == '__main__': device_dir = conf.get('device_dir', '/srv/node') minutes = int(conf.get('minutes', 60)) error_limit = int(conf.get('error_limit', 1)) - logger = get_logger(conf, 'drive-audit') + conf['log_name'] = conf.get('log_name', 'drive-audit') + logger = get_logger(conf, log_route='drive-audit') devices = get_devices(device_dir, logger) logger.debug("Devices found: %s" % str(devices)) if not devices: diff --git a/bin/swift-init b/bin/swift-init old mode 100755 new mode 100644 index cdbde28d4d..96ed1f63f3 --- a/bin/swift-init +++ b/bin/swift-init @@ -14,180 +14,60 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import with_statement -import errno -import glob -import os -import resource -import signal import sys -import time +from optparse import OptionParser -ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor', - 'container-replicator', 'container-server', 'container-updater', - 'object-auditor', 'object-server', 'object-replicator', 'object-updater', - 'proxy-server', 'account-replicator', 'auth-server', 'account-reaper'] -GRACEFUL_SHUTDOWN_SERVERS = ['account-server', 'container-server', - 'object-server', 'proxy-server', 'auth-server'] -MAX_DESCRIPTORS = 32768 -MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB +from swift.common.manager import Server, Manager, UnknownCommandError -_junk, server, command = sys.argv -if server == 'all': - servers = ALL_SERVERS -else: - if '-' not in server: - server = '%s-server' % server - servers = [server] -command = command.lower() +USAGE = """%prog [ ...] 
[options] -def pid_files(server): - if os.path.exists('/var/run/swift/%s.pid' % server): - pid_files = ['/var/run/swift/%s.pid' % server] - else: - pid_files = glob.glob('/var/run/swift/%s/*.pid' % server) - for pid_file in pid_files: - pid = int(open(pid_file).read().strip()) - yield pid_file, pid +Commands: +""" + '\n'.join(["%16s: %s" % x for x in Manager.list_commands()]) -def do_start(server, once=False): - server_type = '-'.join(server.split('-')[:-1]) - for pid_file, pid in pid_files(server): - if os.path.exists('/proc/%s' % pid): - print "%s appears to already be running: %s" % (server, pid_file) - return - else: - print "Removing stale pid file %s" % pid_file - os.unlink(pid_file) +def main(): + parser = OptionParser(USAGE) + parser.add_option('-v', '--verbose', action="store_true", + default=False, help="display verbose output") + parser.add_option('-w', '--no-wait', action="store_false", dest="wait", + default=True, help="won't wait for server to start " + "before returning") + parser.add_option('-o', '--once', action="store_true", + default=False, help="only run one pass of daemon") + # this is a negative option, default is options.daemon = True + parser.add_option('-n', '--no-daemon', action="store_false", dest="daemon", + default=True, help="start server interactively") + parser.add_option('-g', '--graceful', action="store_true", + default=False, help="send SIGHUP to supporting servers") + parser.add_option('-c', '--config-num', metavar="N", type="int", + dest="number", default=0, + help="send command to the Nth server only") + options, args = parser.parse_args() + if len(args) < 2: + parser.print_help() + print 'ERROR: specify server(s) and command' + return 1 + + command = args[-1] + servers = args[:-1] + + # this is just a silly swap for me cause I always try to "start main" + commands = dict(Manager.list_commands()).keys() + if command not in commands and servers[0] in commands: + servers.append(command) + command = servers.pop(0) + + manager = Manager(servers) try: - resource.setrlimit(resource.RLIMIT_NOFILE, - (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) - resource.setrlimit(resource.RLIMIT_DATA, - (MAX_MEMORY, MAX_MEMORY)) - except ValueError: - print "Unable to increase file descriptor limit. Running as non-root?" - os.environ['PYTHON_EGG_CACHE'] = '/tmp' + status = manager.run_command(command, **options.__dict__) + except UnknownCommandError: + parser.print_help() + print 'ERROR: unknown command, %s' % command + status = 1 - def write_pid_file(pid_file, pid): - dir, file = os.path.split(pid_file) - if not os.path.exists(dir): - try: - os.makedirs(dir) - except OSError, err: - if err.errno == errno.EACCES: - sys.exit('Unable to create %s. Running as non-root?' 
% dir) - fp = open(pid_file, 'w') - fp.write('%d\n' % pid) - fp.close() + return 1 if status else 0 - def launch(ini_file, pid_file): - cmd = 'swift-%s' % server - args = [server, ini_file] - if once: - print 'Running %s once' % server - args.append('once') - else: - print 'Starting %s' % server - pid = os.fork() - if pid == 0: - os.setsid() - with open(os.devnull, 'r+b') as nullfile: - for desc in (0, 1, 2): # close stdio - try: - os.dup2(nullfile.fileno(), desc) - except OSError: - pass - try: - if once: - os.execlp('swift-%s' % server, server, - ini_file, 'once') - else: - os.execlp('swift-%s' % server, server, ini_file) - except OSError: - print 'unable to launch %s' % server - sys.exit(0) - else: - write_pid_file(pid_file, pid) - - ini_file = '/etc/swift/%s-server.conf' % server_type - if os.path.exists(ini_file): - # single config file over-rides config dirs - pid_file = '/var/run/swift/%s.pid' % server - launch_args = [(ini_file, pid_file)] - elif os.path.exists('/etc/swift/%s-server/' % server_type): - # found config directory, searching for config file(s) - launch_args = [] - for num, ini_file in enumerate(glob.glob('/etc/swift/%s-server/*.conf' \ - % server_type)): - pid_file = '/var/run/swift/%s/%d.pid' % (server, num) - # start a server for each ini_file found - launch_args.append((ini_file, pid_file)) - else: - # maybe there's a config file(s) out there, but I couldn't find it! - print 'Unable to locate config file for %s. %s does not exist?' % \ - (server, ini_file) - return - - # start all servers - for ini_file, pid_file in launch_args: - launch(ini_file, pid_file) - -def do_stop(server, graceful=False): - if graceful and server in GRACEFUL_SHUTDOWN_SERVERS: - sig = signal.SIGHUP - else: - sig = signal.SIGTERM - - did_anything = False - pfiles = pid_files(server) - for pid_file, pid in pfiles: - did_anything = True - try: - print 'Stopping %s pid: %s signal: %s' % (server, pid, sig) - os.kill(pid, sig) - except OSError: - print "Process %d not running" % pid - try: - os.unlink(pid_file) - except OSError: - pass - for pid_file, pid in pfiles: - for _junk in xrange(150): # 15 seconds - if not os.path.exists('/proc/%s' % pid): - break - time.sleep(0.1) - else: - print 'Waited 15 seconds for pid %s (%s) to die; giving up' % \ - (pid, pid_file) - if not did_anything: - print 'No %s running' % server - -if command == 'start': - for server in servers: - do_start(server) - -if command == 'stop': - for server in servers: - do_stop(server) - -if command == 'shutdown': - for server in servers: - do_stop(server, graceful=True) - -if command == 'restart': - for server in servers: - do_stop(server) - for server in servers: - do_start(server) - -if command == 'reload' or command == 'force-reload': - for server in servers: - do_stop(server, graceful=True) - do_start(server) - -if command == 'once': - for server in servers: - do_start(server, once=True) +if __name__ == "__main__": + sys.exit(main()) diff --git a/bin/swift-log-uploader b/bin/swift-log-uploader index 9d0e27836c..7c36e2c2cc 100755 --- a/bin/swift-log-uploader +++ b/bin/swift-log-uploader @@ -34,7 +34,7 @@ if __name__ == '__main__': uploader_conf.update(plugin_conf) # pre-configure logger - logger = utils.get_logger(uploader_conf, plugin, + logger = utils.get_logger(uploader_conf, log_route='log-uploader', log_to_console=options.get('verbose', False)) # currently LogUploader only supports run_once options['once'] = True diff --git a/bin/swift-ring-builder b/bin/swift-ring-builder index c448bea5ca..fd24a1d93f 100755 --- 
a/bin/swift-ring-builder +++ b/bin/swift-ring-builder @@ -19,7 +19,7 @@ from errno import EEXIST from gzip import GzipFile from os import mkdir from os.path import basename, dirname, exists, join as pathjoin -from sys import argv, exit +from sys import argv, exit, modules from textwrap import wrap from time import time @@ -48,6 +48,8 @@ The can be of the form: /sdb1 Matches devices with the device name sdb1 _shiny Matches devices with shiny in the meta data _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data + [::1] Matches devices in any zone with the ip ::1 + z1-[::1]:5678 Matches devices in zone 1 with the ip ::1 and port 5678 Most specific example: d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8" Nerd explanation: @@ -76,6 +78,13 @@ The can be of the form: i += 1 match.append(('ip', search_value[:i])) search_value = search_value[i:] + elif len(search_value) and search_value[0] == '[': + i = 1 + while i < len(search_value) and search_value[i] != ']': + i += 1 + i += 1 + match.append(('ip', search_value[:i].lstrip('[').rstrip(']'))) + search_value = search_value[i:] if search_value.startswith(':'): i = 1 while i < len(search_value) and search_value[i].isdigit(): @@ -110,6 +119,16 @@ The can be of the form: return devs +def format_device(dev): + """ + Format a device for display. + """ + if ':' in dev['ip']: + return 'd%(id)sz%(zone)s-[%(ip)s]:%(port)s/%(device)s_"%(meta)s"' % dev + else: + return 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev + + class Commands: def unknown(): @@ -134,9 +153,9 @@ swift-ring-builder create except OSError, err: if err.errno != EEXIST: raise - pickle.dump(builder, open(pathjoin(backup_dir, + pickle.dump(builder.to_dict(), open(pathjoin(backup_dir, '%d.' % time() + basename(argv[1])), 'wb'), protocol=2) - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_CHANGED) def default(): @@ -235,10 +254,18 @@ swift-ring-builder add z-:/_ print 'Invalid add value: %s' % argv[3] exit(EXIT_ERROR) i = 1 - while i < len(rest) and rest[i] in '0123456789.': + if rest[i] == '[': i += 1 - ip = rest[1:i] - rest = rest[i:] + while i < len(rest) and rest[i] != ']': + i += 1 + i += 1 + ip = rest[1:i].lstrip('[').rstrip(']') + rest = rest[i:] + else: + while i < len(rest) and rest[i] in '0123456789.': + i += 1 + ip = rest[1:i] + rest = rest[i:] if not rest.startswith(':'): print 'Invalid add value: %s' % argv[3] @@ -279,9 +306,13 @@ swift-ring-builder add z-:/_ builder.add_dev({'id': next_dev_id, 'zone': zone, 'ip': ip, 'port': port, 'device': device_name, 'weight': weight, 'meta': meta}) - print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \ - (zone, ip, port, device_name, meta, weight, next_dev_id) - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + if ':' in ip: + print 'Device z%s-[%s]:%s/%s_"%s" with %s weight got id %s' % \ + (zone, ip, port, device_name, meta, weight, next_dev_id) + else: + print 'Device z%s-%s:%s/%s_"%s" with %s weight got id %s' % \ + (zone, ip, port, device_name, meta, weight, next_dev_id) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) def set_weight(): @@ -314,7 +345,7 @@ swift-ring-builder set_weight builder.set_dev_weight(dev['id'], weight) print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \ 'weight set to %(weight)s' % dev - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) def 
set_info(): @@ -342,6 +373,13 @@ swift-ring-builder set_info i += 1 change.append(('ip', change_value[:i])) change_value = change_value[i:] + elif len(change_value) and change_value[0] == '[': + i = 1 + while i < len(change_value) and change_value[i] != ']': + i += 1 + i += 1 + change.append(('ip', change_value[:i].lstrip('[').rstrip(']'))) + change_value = change_value[i:] if change_value.startswith(':'): i = 1 while i < len(change_value) and change_value[i].isdigit(): @@ -366,15 +404,13 @@ swift-ring-builder set_info if len(devs) > 1: print 'Matched more than one device:' for dev in devs: - print ' d%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_' \ - '"%(meta)s"' % dev + print ' %s' % format_device(dev) if raw_input('Are you sure you want to update the info for ' 'these %s devices? (y/N) ' % len(devs)) != 'y': print 'Aborting device modifications' exit(EXIT_ERROR) for dev in devs: - orig_dev_string = \ - 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev + orig_dev_string = format_device(dev) test_dev = dict(dev) for key, value in change: test_dev[key] = value @@ -390,10 +426,8 @@ swift-ring-builder set_info exit(EXIT_ERROR) for key, value in change: dev[key] = value - new_dev_string = \ - 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s"' % dev - print 'Device %s is now %s' % (orig_dev_string, new_dev_string) - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + print 'Device %s is now %s' % (orig_dev_string, format_device(dev)) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) def remove(): @@ -429,7 +463,7 @@ swift-ring-builder remove print 'd%(id)sz%(zone)s-%(ip)s:%(port)s/%(device)s_"%(meta)s" ' \ 'marked for removal and will be removed next rebalance.' \ % dev - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) def rebalance(): @@ -461,13 +495,14 @@ swift-ring-builder rebalance % builder.min_part_hours print '-' * 79 ts = time() - pickle.dump(builder.get_ring(), + pickle.dump(builder.get_ring().to_dict(), GzipFile(pathjoin(backup_dir, '%d.' % ts + basename(ring_file)), 'wb'), protocol=2) - pickle.dump(builder, open(pathjoin(backup_dir, + pickle.dump(builder.to_dict(), open(pathjoin(backup_dir, '%d.' % ts + basename(argv[1])), 'wb'), protocol=2) - pickle.dump(builder.get_ring(), GzipFile(ring_file, 'wb'), protocol=2) - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.get_ring().to_dict(), GzipFile(ring_file, 'wb'), + protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_CHANGED) def validate(): @@ -494,15 +529,15 @@ swift-ring-builder write_ring '"rebalance"?' else: print 'Warning: Writing an empty ring' - pickle.dump(ring_data, + pickle.dump(ring_data.to_dict(), GzipFile(pathjoin(backup_dir, '%d.' 
% time() + basename(ring_file)), 'wb'), protocol=2) - pickle.dump(ring_data, GzipFile(ring_file, 'wb'), protocol=2) + pickle.dump(ring_data.to_dict(), GzipFile(ring_file, 'wb'), protocol=2) exit(EXIT_RING_CHANGED) def pretend_min_part_hours_passed(): builder.pretend_min_part_hours_passed() - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) def set_min_part_hours(): @@ -518,7 +553,7 @@ swift-ring-builder set_min_part_hours builder.change_min_part_hours(int(argv[3])) print 'The minimum number of hours before a partition can be ' \ 'reassigned is now set to %s' % argv[3] - pickle.dump(builder, open(argv[1], 'wb'), protocol=2) + pickle.dump(builder.to_dict(), open(argv[1], 'wb'), protocol=2) exit(EXIT_RING_UNCHANGED) @@ -544,7 +579,17 @@ if __name__ == '__main__': exit(EXIT_RING_UNCHANGED) if exists(argv[1]): - builder = pickle.load(open(argv[1], 'rb')) + try: + builder = pickle.load(open(argv[1], 'rb')) + if not hasattr(builder, 'devs'): + builder_dict = builder + builder = RingBuilder(1, 1, 1) + builder.copy_from(builder_dict) + except ImportError: # Happens with really old builder pickles + modules['swift.ring_builder'] = \ + modules['swift.common.ring.builder'] + builder = RingBuilder(1, 1, 1) + builder.copy_from(pickle.load(open(argv[1], 'rb'))) for dev in builder.devs: if dev and 'meta' not in dev: dev['meta'] = '' diff --git a/doc/source/debian_package_guide.rst b/doc/source/debian_package_guide.rst index 4f82f97858..e8086adc16 100644 --- a/doc/source/debian_package_guide.rst +++ b/doc/source/debian_package_guide.rst @@ -107,6 +107,7 @@ Instructions for Deploying Debian Packages for Swift apt-get install rsync python-openssl python-setuptools python-webob python-simplejson python-xattr python-greenlet python-eventlet + python-netifaces #. Install base packages:: diff --git a/doc/source/development_saio.rst b/doc/source/development_saio.rst index bd0753794e..38c0475975 100644 --- a/doc/source/development_saio.rst +++ b/doc/source/development_saio.rst @@ -531,7 +531,6 @@ Setting up scripts for running Swift #!/bin/bash swift-init all stop - sleep 5 sudo umount /mnt/sdb1 sudo mkfs.xfs -f -i size=1024 /dev/sdb1 sudo mount /mnt/sdb1 @@ -573,12 +572,9 @@ Setting up scripts for running Swift #!/bin/bash + swift-init main start # The auth-server line is only needed for DevAuth: swift-init auth-server start - swift-init proxy-server start - swift-init account-server start - swift-init container-server start - swift-init object-server start #. For Swauth (not needed for DevAuth), create `~/bin/recreateaccounts`:: @@ -600,15 +596,7 @@ Setting up scripts for running Swift # /etc/swift/auth-server.conf). This swift-auth-recreate-accounts line # is only needed for DevAuth: swift-auth-recreate-accounts -K devauth - swift-init object-updater start - swift-init container-updater start - swift-init object-replicator start - swift-init container-replicator start - swift-init account-replicator start - swift-init object-auditor start - swift-init container-auditor start - swift-init account-auditor start - swift-init account-reaper start + swift-init rest start #. `chmod +x ~/bin/*` #. `remakerings` diff --git a/doc/source/misc.rst b/doc/source/misc.rst index 6d6ae04dbd..db77e464f3 100644 --- a/doc/source/misc.rst +++ b/doc/source/misc.rst @@ -116,6 +116,13 @@ MemCacheD :members: :show-inheritance: +Manager +========= + +.. 
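
The ring-builder changes above pickle the builder and ring files as plain dicts (to_dict) instead of class instances, and read them back through RingBuilder.copy_from, which accepts either an old-style instance or the new dict form. A small sketch of the round trip, using only calls that appear in this patch (builder parameters and device values are illustrative)::

    import cPickle as pickle
    from swift.common.ring.builder import RingBuilder

    builder = RingBuilder(18, 3, 1)   # part_power, replicas, min_part_hours
    builder.add_dev({'id': 0, 'zone': 1, 'ip': '::1', 'port': 6010,
                     'device': 'sdb1', 'weight': 100.0, 'meta': ''})
    # written as a dict, the way swift-ring-builder now saves it
    pickle.dump(builder.to_dict(), open('object.builder', 'wb'), protocol=2)

    # loading goes through copy_from(), so both old and new pickles work
    builder = RingBuilder(1, 1, 1)
    builder.copy_from(pickle.load(open('object.builder', 'rb')))
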
automodule:: swift.common.manager + :members: + :show-inheritance: + Ratelimit ========= diff --git a/doc/source/overview_stats.rst b/doc/source/overview_stats.rst index 6364de4611..111e1f8df0 100644 --- a/doc/source/overview_stats.rst +++ b/doc/source/overview_stats.rst @@ -181,4 +181,4 @@ earlier. This file will have one entry per account per hour for each account with activity in that hour. One .csv file should be produced per hour. Note that the stats will be delayed by at least two hours by default. This can be changed with the new_log_cutoff variable in the config file. See -`log-processing.conf-sample` for more details. \ No newline at end of file +`log-processor.conf-sample` for more details. diff --git a/etc/log-processing.conf-sample b/etc/log-processor.conf-sample similarity index 100% rename from etc/log-processing.conf-sample rename to etc/log-processor.conf-sample diff --git a/etc/proxy-server.conf-sample b/etc/proxy-server.conf-sample index d5aa14c246..eb2e70e869 100644 --- a/etc/proxy-server.conf-sample +++ b/etc/proxy-server.conf-sample @@ -24,6 +24,9 @@ use = egg:swift#proxy # set log_name = proxy-server # set log_facility = LOG_LOCAL0 # set log_level = INFO +# set access_log_name = proxy-server +# set access_log_facility = LOG_LOCAL0 +# set access_log_level = INFO # set log_headers = False # recheck_account_existence = 60 # recheck_container_existence = 60 diff --git a/etc/rsyncd.conf-sample b/etc/rsyncd.conf-sample index 2f0c9a84e2..c3b9952b16 100644 --- a/etc/rsyncd.conf-sample +++ b/etc/rsyncd.conf-sample @@ -7,13 +7,16 @@ pid file = /var/run/rsyncd.pid max connections = 2 path = /srv/node read only = false +lock file = /var/lock/account.lock [container] max connections = 4 path = /srv/node read only = false +lock file = /var/lock/container.lock [object] max connections = 8 path = /srv/node read only = false +lock file = /var/lock/object.lock diff --git a/swift/__init__.py b/swift/__init__.py index 316208f929..25a1c6b8c7 100644 --- a/swift/__init__.py +++ b/swift/__init__.py @@ -1,5 +1,5 @@ import gettext -__version__ = '1.2-gamma' +__version__ = '1.3-dev' gettext.install('swift') diff --git a/swift/account/auditor.py b/swift/account/auditor.py index 1f24f93acc..63551354d8 100644 --- a/swift/account/auditor.py +++ b/swift/account/auditor.py @@ -28,7 +28,7 @@ class AccountAuditor(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf, 'account-auditor') + self.logger = get_logger(conf, log_route='account-auditor') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/account/reaper.py b/swift/account/reaper.py index dd0d4b3890..ba78db8d98 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -53,7 +53,7 @@ class AccountReaper(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='account-reaper') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/account/server.py b/swift/account/server.py index 2c83f51cc6..f15ac38c11 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -42,7 +42,7 @@ class AccountController(object): """WSGI controller for the account server.""" def __init__(self, conf): - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='account-server') self.root = 
conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/auth/server.py b/swift/auth/server.py index f9cd56dd0e..4f5ae4b21c 100644 --- a/swift/auth/server.py +++ b/swift/auth/server.py @@ -20,7 +20,6 @@ from contextlib import contextmanager from time import gmtime, strftime, time from urllib import unquote, quote from uuid import uuid4 -from urlparse import urlparse from hashlib import md5, sha1 import hmac import base64 @@ -32,7 +31,7 @@ from webob.exc import HTTPBadRequest, HTTPConflict, HTTPForbidden, \ from swift.common.bufferedhttp import http_connect_raw as http_connect from swift.common.db import get_db_connection -from swift.common.utils import get_logger, split_path +from swift.common.utils import get_logger, split_path, urlparse class AuthController(object): @@ -90,7 +89,7 @@ class AuthController(object): """ def __init__(self, conf): - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='auth-server') self.super_admin_key = conf.get('super_admin_key') if not self.super_admin_key: msg = _('No super_admin_key set in conf file! Exiting.') diff --git a/swift/common/bench.py b/swift/common/bench.py index 943f9ebe4d..482c2d77aa 100644 --- a/swift/common/bench.py +++ b/swift/common/bench.py @@ -16,13 +16,12 @@ import uuid import time import random -from urlparse import urlparse from contextlib import contextmanager import eventlet.pools from eventlet.green.httplib import CannotSendRequest -from swift.common.utils import TRUE_VALUES +from swift.common.utils import TRUE_VALUES, urlparse from swift.common import client from swift.common import direct_client diff --git a/swift/common/client.py b/swift/common/client.py index bf402adb76..1fffaa493d 100644 --- a/swift/common/client.py +++ b/swift/common/client.py @@ -222,7 +222,7 @@ def get_account(url, token, marker=None, limit=None, prefix=None, listing = \ get_account(url, token, marker, limit, prefix, http_conn)[1] if listing: - rv.extend(listing) + rv[1].extend(listing) return rv parsed, conn = http_conn qs = 'format=json' diff --git a/swift/common/daemon.py b/swift/common/daemon.py index eee3428679..9f4f004508 100644 --- a/swift/common/daemon.py +++ b/swift/common/daemon.py @@ -26,7 +26,7 @@ class Daemon(object): def __init__(self, conf): self.conf = conf - self.logger = utils.get_logger(conf, 'swift-daemon') + self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self): """Override this to run the script once""" @@ -39,8 +39,8 @@ class Daemon(object): def run(self, once=False, **kwargs): """Run the daemon""" utils.validate_configuration() - utils.capture_stdio(self.logger, **kwargs) utils.drop_privileges(self.conf.get('user', 'swift')) + utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): signal.signal(signal.SIGTERM, signal.SIG_IGN) @@ -84,7 +84,7 @@ def run_daemon(klass, conf_file, section_name='', logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), - log_to_console=kwargs.pop('verbose', False)) + log_to_console=kwargs.pop('verbose', False), log_route=section_name) try: klass(conf).run(once=once, **kwargs) except KeyboardInterrupt: diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 49756f1f7b..3c3731d45a 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -92,7 +92,7 @@ class Replicator(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf) + 
self.logger = get_logger(conf, log_route='replicator') self.root = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/common/manager.py b/swift/common/manager.py new file mode 100644 index 0000000000..b5b126a822 --- /dev/null +++ b/swift/common/manager.py @@ -0,0 +1,605 @@ +# Copyright (c) 2010-2011 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import with_statement +import functools +import errno +import os +import resource +import signal +import sys +import time +import subprocess +import re + +from swift.common.utils import search_tree, remove_file, write_file + +SWIFT_DIR = '/etc/swift' +RUN_DIR = '/var/run/swift' + +# auth-server has been removed from ALL_SERVERS, start it explicitly +ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor', + 'container-replicator', 'container-server', 'container-updater', + 'object-auditor', 'object-server', 'object-replicator', 'object-updater', + 'proxy-server', 'account-replicator', 'account-reaper'] +MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server', + 'object-server'] +REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS] +GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server'] +START_ONCE_SERVERS = REST_SERVERS + +KILL_WAIT = 15 # seconds to wait for servers to die + +MAX_DESCRIPTORS = 32768 +MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB + + +def setup_env(): + """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp + """ + try: + resource.setrlimit(resource.RLIMIT_NOFILE, + (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) + resource.setrlimit(resource.RLIMIT_DATA, + (MAX_MEMORY, MAX_MEMORY)) + except ValueError: + print _("WARNING: Unable to increase file descriptor limit. " + "Running as non-root?") + + os.environ['PYTHON_EGG_CACHE'] = '/tmp' + + +def command(func): + """ + Decorator to declare which methods are accessible as commands, commands + always return 1 or 0, where 0 should indicate success. + + :param func: function to make public + """ + func.publicly_accessible = True + + @functools.wraps(func) + def wrapped(*a, **kw): + rv = func(*a, **kw) + return 1 if rv else 0 + return wrapped + + +def watch_server_pids(server_pids, interval=1, **kwargs): + """Monitor a collection of server pids yeilding back those pids that + aren't responding to signals. + + :param server_pids: a dict, lists of pids [int,...] keyed on + Server objects + """ + status = {} + start = time.time() + end = start + interval + server_pids = dict(server_pids) # make a copy + while True: + for server, pids in server_pids.items(): + for pid in pids: + try: + # let pid stop if it wants to + os.waitpid(pid, os.WNOHANG) + except OSError, e: + if e.errno not in (errno.ECHILD, errno.ESRCH): + raise # else no such child/process + # check running pids for server + status[server] = server.get_running_pids(**kwargs) + for pid in pids: + # original pids no longer in running pids! 
+ if pid not in status[server]: + yield server, pid + # update active pids list using running_pids + server_pids[server] = status[server] + if not [p for server, pids in status.items() for p in pids]: + # no more running pids + break + if time.time() > end: + break + else: + time.sleep(0.1) + + +class UnknownCommandError(Exception): + pass + + +class Manager(): + """Main class for performing commands on groups of servers. + + :param servers: list of server names as strings + + """ + + def __init__(self, servers): + server_names = set() + for server in servers: + if server == 'all': + server_names.update(ALL_SERVERS) + elif server == 'main': + server_names.update(MAIN_SERVERS) + elif server == 'rest': + server_names.update(REST_SERVERS) + elif '*' in server: + # convert glob to regex + server_names.update([s for s in ALL_SERVERS if + re.match(server.replace('*', '.*'), s)]) + else: + server_names.add(server) + + self.servers = set() + for name in server_names: + self.servers.add(Server(name)) + + @command + def status(self, **kwargs): + """display status of tracked pids for server + """ + status = 0 + for server in self.servers: + status += server.status(**kwargs) + return status + + @command + def start(self, **kwargs): + """starts a server + """ + setup_env() + status = 0 + + for server in self.servers: + server.launch(**kwargs) + if not kwargs.get('daemon', True): + for server in self.servers: + try: + status += server.interact(**kwargs) + except KeyboardInterrupt: + print _('\nuser quit') + self.stop(**kwargs) + break + elif kwargs.get('wait', True): + for server in self.servers: + status += server.wait(**kwargs) + return status + + @command + def no_wait(self, **kwargs): + """spawn server and return immediately + """ + kwargs['wait'] = False + return self.start(**kwargs) + + @command + def no_daemon(self, **kwargs): + """start a server interactively + """ + kwargs['daemon'] = False + return self.start(**kwargs) + + @command + def once(self, **kwargs): + """start server and run one pass on supporting daemons + """ + kwargs['once'] = True + return self.start(**kwargs) + + @command + def stop(self, **kwargs): + """stops a server + """ + server_pids = {} + for server in self.servers: + signaled_pids = server.stop(**kwargs) + if not signaled_pids: + print _('No %s running') % server + else: + server_pids[server] = signaled_pids + + # all signaled_pids, i.e. 
list(itertools.chain(*server_pids.values())) + signaled_pids = [p for server, pids in server_pids.items() + for p in pids] + # keep track of the pids yeiled back as killed for all servers + killed_pids = set() + for server, killed_pid in watch_server_pids(server_pids, + interval=KILL_WAIT, **kwargs): + print _("%s (%s) appears to have stopped") % (server, killed_pid) + killed_pids.add(killed_pid) + if not killed_pids.symmetric_difference(signaled_pids): + # all proccesses have been stopped + return 0 + + # reached interval n watch_pids w/o killing all servers + for server, pids in server_pids.items(): + if not killed_pids.issuperset(pids): + # some pids of this server were not killed + print _('Waited %s seconds for %s to die; giving up') % ( + KILL_WAIT, server) + return 1 + + @command + def shutdown(self, **kwargs): + """allow current requests to finish on supporting servers + """ + kwargs['graceful'] = True + status = 0 + status += self.stop(**kwargs) + return status + + @command + def restart(self, **kwargs): + """stops then restarts server + """ + status = 0 + status += self.stop(**kwargs) + status += self.start(**kwargs) + return status + + @command + def reload(self, **kwargs): + """graceful shutdown then restart on supporting servers + """ + kwargs['graceful'] = True + status = 0 + for server in self.servers: + m = Manager([server.server]) + status += m.stop(**kwargs) + status += m.start(**kwargs) + return status + + @command + def force_reload(self, **kwargs): + """alias for reload + """ + return self.reload(**kwargs) + + def get_command(self, cmd): + """Find and return the decorated method named like cmd + + :param cmd: the command to get, a string, if not found raises + UnknownCommandError + + """ + cmd = cmd.lower().replace('-', '_') + try: + f = getattr(self, cmd) + except AttributeError: + raise UnknownCommandError(cmd) + if not hasattr(f, 'publicly_accessible'): + raise UnknownCommandError(cmd) + return f + + @classmethod + def list_commands(cls): + """Get all publicly accessible commands + + :returns: a list of string tuples (cmd, help), the method names who are + decorated as commands + """ + get_method = lambda cmd: getattr(cls, cmd) + return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip()) + for x in dir(cls) if + getattr(get_method(x), 'publicly_accessible', False)]) + + def run_command(self, cmd, **kwargs): + """Find the named command and run it + + :param cmd: the command name to run + + """ + f = self.get_command(cmd) + return f(**kwargs) + + +class Server(): + """Manage operations on a server or group of servers of similar type + + :param server: name of server + """ + + def __init__(self, server): + if '-' not in server: + server = '%s-server' % server + self.server = server.lower() + self.type = server.rsplit('-', 1)[0] + self.cmd = 'swift-%s' % server + self.procs = [] + + def __str__(self): + return self.server + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(str(self))) + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other): + try: + return self.server == other.server + except AttributeError: + return False + + def get_pid_file_name(self, conf_file): + """Translate conf_file to a corresponding pid_file + + :param conf_file: an conf_file for this server, a string + + :returns: the pid_file for this conf_file + + """ + return conf_file.replace( + os.path.normpath(SWIFT_DIR), RUN_DIR, 1).replace( + '%s-server' % self.type, self.server, 1).rsplit( + '.conf', 1)[0] + '.pid' + + def get_conf_file_name(self, 
pid_file): + """Translate pid_file to a corresponding conf_file + + :param pid_file: a pid_file for this server, a string + + :returns: the conf_file for this pid_file + + """ + return pid_file.replace( + os.path.normpath(RUN_DIR), SWIFT_DIR, 1).replace( + self.server, '%s-server' % self.type, 1).rsplit( + '.pid', 1)[0] + '.conf' + + def conf_files(self, **kwargs): + """Get conf files for this server + + :param: number, if supplied will only lookup the nth server + + :returns: list of conf files + """ + found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type, + '.conf') + number = kwargs.get('number') + if number: + try: + conf_files = [found_conf_files[number - 1]] + except IndexError: + conf_files = [] + else: + conf_files = found_conf_files + if not conf_files: + # maybe there's a config file(s) out there, but I couldn't find it! + if not kwargs.get('quiet'): + print _('Unable to locate config %sfor %s') % ( + ('number %s ' % number if number else ''), self.server) + if kwargs.get('verbose') and not kwargs.get('quiet'): + if found_conf_files: + print _('Found configs:') + for i, conf_file in enumerate(found_conf_files): + print ' %d) %s' % (i + 1, conf_file) + + return conf_files + + def pid_files(self, **kwargs): + """Get pid files for this server + + :param: number, if supplied will only lookup the nth server + + :returns: list of pid files + """ + pid_files = search_tree(RUN_DIR, '%s*' % self.server, '.pid') + if kwargs.get('number', 0): + conf_files = self.conf_files(**kwargs) + # filter pid_files to match the index of numbered conf_file + pid_files = [pid_file for pid_file in pid_files if + self.get_conf_file_name(pid_file) in conf_files] + return pid_files + + def iter_pid_files(self, **kwargs): + """Generator, yields (pid_file, pids) + """ + for pid_file in self.pid_files(**kwargs): + yield pid_file, int(open(pid_file).read().strip()) + + def signal_pids(self, sig, **kwargs): + """Send a signal to pids for this server + + :param sig: signal to send + + :returns: a dict mapping pids (ints) to pid_files (paths) + + """ + pids = {} + for pid_file, pid in self.iter_pid_files(**kwargs): + try: + if sig != signal.SIG_DFL: + print _('Signal %s pid: %s signal: %s') % (self.server, + pid, sig) + os.kill(pid, sig) + except OSError, e: + if e.errno == errno.ESRCH: + # pid does not exist + if kwargs.get('verbose'): + print _("Removing stale pid file %s") % pid_file + remove_file(pid_file) + else: + # process exists + pids[pid] = pid_file + return pids + + def get_running_pids(self, **kwargs): + """Get running pids + + :returns: a dict mapping pids (ints) to pid_files (paths) + + """ + return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop + + def kill_running_pids(self, **kwargs): + """Kill running pids + + :param graceful: if True, attempt SIGHUP on supporting servers + + :returns: a dict mapping pids (ints) to pid_files (paths) + + """ + graceful = kwargs.get('graceful') + if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS: + sig = signal.SIGHUP + else: + sig = signal.SIGTERM + return self.signal_pids(sig, **kwargs) + + def status(self, pids=None, **kwargs): + """Display status of server + + :param: pids, if not supplied pids will be populated automatically + :param: number, if supplied will only lookup the nth server + + :returns: 1 if server is not running, 0 otherwise + """ + if pids is None: + pids = self.get_running_pids(**kwargs) + if not pids: + number = kwargs.get('number', 0) + if number: + kwargs['quiet'] = True + conf_files = self.conf_files(**kwargs) + 
if conf_files: + print _("%s #%d not running (%s)") % (self.server, number, + conf_files[0]) + else: + print _("No %s running") % self.server + return 1 + for pid, pid_file in pids.items(): + conf_file = self.get_conf_file_name(pid_file) + print _("%s running (%s - %s)") % (self.server, pid, conf_file) + return 0 + + def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs): + """Launch a subprocess for this server. + + :param conf_file: path to conf_file to use as first arg + :param once: boolean, add once argument to command + :param wait: boolean, if true capture stdout with a pipe + :param daemon: boolean, if true ask server to log to console + + :returns : the pid of the spawned process + """ + args = [self.cmd, conf_file] + if once: + args.append('once') + if not daemon: + # ask the server to log to console + args.append('verbose') + + # figure out what we're going to do with stdio + if not daemon: + # do nothing, this process is open until the spawns close anyway + re_out = None + re_err = None + else: + re_err = subprocess.STDOUT + if wait: + # we're going to need to block on this... + re_out = subprocess.PIPE + else: + re_out = open(os.devnull, 'w+b') + proc = subprocess.Popen(args, stdout=re_out, stderr=re_err) + pid_file = self.get_pid_file_name(conf_file) + write_file(pid_file, proc.pid) + self.procs.append(proc) + return proc.pid + + def wait(self, **kwargs): + """ + wait on spawned procs to start + """ + status = 0 + for proc in self.procs: + # wait for process to close it's stdout + output = proc.stdout.read() + if output: + print output + proc.communicate() + if proc.returncode: + status += 1 + return status + + def interact(self, **kwargs): + """ + wait on spawned procs to terminate + """ + status = 0 + for proc in self.procs: + # wait for process to terminate + proc.communicate() + if proc.returncode: + status += 1 + return status + + def launch(self, **kwargs): + """ + Collect conf files and attempt to spawn the processes for this server + """ + conf_files = self.conf_files(**kwargs) + if not conf_files: + return [] + + pids = self.get_running_pids(**kwargs) + + already_started = False + for pid, pid_file in pids.items(): + conf_file = self.get_conf_file_name(pid_file) + # for legacy compat you can't start other servers if one server is + # already running (unless -n specifies which one you want), this + # restriction could potentially be lifted, and launch could start + # any unstarted instances + if conf_file in conf_files: + already_started = True + print _("%s running (%s - %s)") % (self.server, pid, conf_file) + elif not kwargs.get('number', 0): + already_started = True + print _("%s running (%s - %s)") % (self.server, pid, pid_file) + + if already_started: + print _("%s already started...") % self.server + return [] + + if self.server not in START_ONCE_SERVERS: + kwargs['once'] = False + + pids = {} + for conf_file in conf_files: + if kwargs.get('once'): + msg = _('Running %s once') % self.server + else: + msg = _('Starting %s') % self.server + print '%s...(%s)' % (msg, conf_file) + try: + pid = self.spawn(conf_file, **kwargs) + except OSError, e: + if e.errno == errno.ENOENT: + # TODO: should I check if self.cmd exists earlier? 
+ print _("%s does not exist") % self.cmd + break + pids[pid] = conf_file + + return pids + + def stop(self, **kwargs): + """Send stop signals to pids for this server + + :returns: a dict mapping pids (ints) to pid_files (paths) + + """ + return self.kill_running_pids(**kwargs) diff --git a/swift/common/middleware/acl.py b/swift/common/middleware/acl.py index f6784953ac..f08780eedb 100644 --- a/swift/common/middleware/acl.py +++ b/swift/common/middleware/acl.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from urlparse import urlparse +from swift.common.utils import urlparse def clean_acl(name, value): diff --git a/swift/common/middleware/catch_errors.py b/swift/common/middleware/catch_errors.py index 10d8614194..716bda4da1 100644 --- a/swift/common/middleware/catch_errors.py +++ b/swift/common/middleware/catch_errors.py @@ -26,11 +26,7 @@ class CatchErrorMiddleware(object): def __init__(self, app, conf): self.app = app - # if the application already has a logger we should use that one - self.logger = getattr(app, 'logger', None) - if not self.logger: - # and only call get_logger if we have to - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='catch-errors') def __call__(self, env, start_response): try: diff --git a/swift/common/middleware/cname_lookup.py b/swift/common/middleware/cname_lookup.py index f13155c1fe..8ea9f88071 100644 --- a/swift/common/middleware/cname_lookup.py +++ b/swift/common/middleware/cname_lookup.py @@ -53,7 +53,7 @@ class CNAMELookupMiddleware(object): self.storage_domain = '.' + self.storage_domain self.lookup_depth = int(conf.get('lookup_depth', '1')) self.memcache = None - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='cname-lookup') def __call__(self, env, start_response): if not self.storage_domain: diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 4657b6abcd..485b1db26e 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -39,7 +39,7 @@ class RateLimitMiddleware(object): if logger: self.logger = logger else: - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='ratelimit') self.account_ratelimit = float(conf.get('account_ratelimit', 0)) self.max_sleep_time_seconds = \ float(conf.get('max_sleep_time_seconds', 60)) diff --git a/swift/common/middleware/swauth.py b/swift/common/middleware/swauth.py index 3399fd06a4..68b0d7afaf 100644 --- a/swift/common/middleware/swauth.py +++ b/swift/common/middleware/swauth.py @@ -21,7 +21,6 @@ from httplib import HTTPConnection, HTTPSConnection from time import gmtime, strftime, time from traceback import format_exc from urllib import quote, unquote -from urlparse import urlparse from uuid import uuid4 from hashlib import md5, sha1 import hmac @@ -36,7 +35,7 @@ from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPConflict, \ from swift.common.bufferedhttp import http_connect_raw as http_connect from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed -from swift.common.utils import cache_from_env, get_logger, split_path +from swift.common.utils import cache_from_env, get_logger, split_path, urlparse class Swauth(object): @@ -51,7 +50,7 @@ class Swauth(object): def __init__(self, app, conf): self.app = app self.conf = conf - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='swauth') self.log_headers = conf.get('log_headers') == 'True' 
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH').strip() if self.reseller_prefix and self.reseller_prefix[-1] != '_': @@ -269,7 +268,7 @@ class Swauth(object): user_groups = (req.remote_user or '').split(',') if '.reseller_admin' in user_groups and \ account != self.reseller_prefix and \ - account[len(self.reseller_prefix)].isalnum(): + account[len(self.reseller_prefix)] != '.': return None if account in user_groups and \ (req.method not in ('DELETE', 'PUT') or container): @@ -475,7 +474,7 @@ class Swauth(object): explained above. """ account = req.path_info_pop() - if req.path_info or not account.isalnum(): + if req.path_info or not account or account[0] == '.': return HTTPBadRequest(request=req) if not self.is_account_admin(req, account): return HTTPForbidden(request=req) @@ -551,7 +550,7 @@ class Swauth(object): if not self.is_reseller_admin(req): return HTTPForbidden(request=req) account = req.path_info_pop() - if req.path_info != '/.services' or not account.isalnum(): + if req.path_info != '/.services' or not account or account[0] == '.': return HTTPBadRequest(request=req) try: new_services = json.loads(req.body) @@ -597,7 +596,7 @@ class Swauth(object): if not self.is_reseller_admin(req): return HTTPForbidden(request=req) account = req.path_info_pop() - if req.path_info or not account.isalnum(): + if req.path_info or not account or account[0] == '.': return HTTPBadRequest(request=req) # Ensure the container in the main auth account exists (this # container represents the new account) @@ -679,7 +678,7 @@ class Swauth(object): if not self.is_reseller_admin(req): return HTTPForbidden(request=req) account = req.path_info_pop() - if req.path_info or not account.isalnum(): + if req.path_info or not account or account[0] == '.': return HTTPBadRequest(request=req) # Make sure the account has no users and get the account_id marker = '' @@ -799,8 +798,8 @@ class Swauth(object): """ account = req.path_info_pop() user = req.path_info_pop() - if req.path_info or not account.isalnum() or \ - (not user.isalnum() and user != '.groups'): + if req.path_info or not account or account[0] == '.' or not user or \ + (user[0] == '.' and user != '.groups'): return HTTPBadRequest(request=req) if not self.is_account_admin(req, account): return HTTPForbidden(request=req) @@ -874,8 +873,8 @@ class Swauth(object): req.headers.get('x-auth-user-reseller-admin') == 'true' if reseller_admin: admin = True - if req.path_info or not account.isalnum() or not user.isalnum() or \ - not key: + if req.path_info or not account or account[0] == '.' or not user or \ + user[0] == '.' or not key: return HTTPBadRequest(request=req) if reseller_admin: if not self.is_super_admin(req): @@ -923,7 +922,8 @@ class Swauth(object): # Validate path info account = req.path_info_pop() user = req.path_info_pop() - if req.path_info or not account.isalnum() or not user.isalnum(): + if req.path_info or not account or account[0] == '.' 
or not user or \ + user[0] == '.': return HTTPBadRequest(request=req) if not self.is_account_admin(req, account): return HTTPForbidden(request=req) @@ -1321,6 +1321,8 @@ class Swauth(object): return False def posthooklogger(self, env, req): + if not req.path.startswith(self.auth_prefix): + return response = getattr(req, 'response', None) if not response: return diff --git a/swift/common/ring/builder.py b/swift/common/ring/builder.py index 3f728e307a..86e6cce287 100644 --- a/swift/common/ring/builder.py +++ b/swift/common/ring/builder.py @@ -69,6 +69,49 @@ class RingBuilder(object): self._remove_devs = [] self._ring = None + def copy_from(self, builder): + if hasattr(builder, 'devs'): + self.part_power = builder.part_power + self.replicas = builder.replicas + self.min_part_hours = builder.min_part_hours + self.parts = builder.parts + self.devs = builder.devs + self.devs_changed = builder.devs_changed + self.version = builder.version + self._replica2part2dev = builder._replica2part2dev + self._last_part_moves_epoch = builder._last_part_moves_epoch + self._last_part_moves = builder._last_part_moves + self._last_part_gather_start = builder._last_part_gather_start + self._remove_devs = builder._remove_devs + else: + self.part_power = builder['part_power'] + self.replicas = builder['replicas'] + self.min_part_hours = builder['min_part_hours'] + self.parts = builder['parts'] + self.devs = builder['devs'] + self.devs_changed = builder['devs_changed'] + self.version = builder['version'] + self._replica2part2dev = builder['_replica2part2dev'] + self._last_part_moves_epoch = builder['_last_part_moves_epoch'] + self._last_part_moves = builder['_last_part_moves'] + self._last_part_gather_start = builder['_last_part_gather_start'] + self._remove_devs = builder['_remove_devs'] + self._ring = None + + def to_dict(self): + return {'part_power': self.part_power, + 'replicas': self.replicas, + 'min_part_hours': self.min_part_hours, + 'parts': self.parts, + 'devs': self.devs, + 'devs_changed': self.devs_changed, + 'version': self.version, + '_replica2part2dev': self._replica2part2dev, + '_last_part_moves_epoch': self._last_part_moves_epoch, + '_last_part_moves': self._last_part_moves, + '_last_part_gather_start': self._last_part_gather_start, + '_remove_devs': self._remove_devs} + def change_min_part_hours(self, min_part_hours): """ Changes the value used to decide if a given partition can be moved diff --git a/swift/common/ring/ring.py b/swift/common/ring/ring.py index 45ab407563..d95f16b59b 100644 --- a/swift/common/ring/ring.py +++ b/swift/common/ring/ring.py @@ -18,7 +18,7 @@ from gzip import GzipFile from os.path import getmtime from struct import unpack_from from time import time -from swift.common.utils import hash_path +from swift.common.utils import hash_path, validate_configuration class RingData(object): @@ -29,6 +29,11 @@ class RingData(object): self._replica2part2dev_id = replica2part2dev_id self._part_shift = part_shift + def to_dict(self): + return {'devs': self.devs, + 'replica2part2dev_id': self._replica2part2dev_id, + 'part_shift': self._part_shift} + class Ring(object): """ @@ -39,6 +44,8 @@ class Ring(object): """ def __init__(self, pickle_gz_path, reload_time=15): + # can't use the ring unless HASH_PATH_SUFFIX is set + validate_configuration() self.pickle_gz_path = pickle_gz_path self.reload_time = reload_time self._reload(force=True) @@ -47,6 +54,9 @@ class Ring(object): self._rtime = time() + self.reload_time if force or self.has_changed(): ring_data = 
pickle.load(GzipFile(self.pickle_gz_path, 'rb')) + if not hasattr(ring_data, 'devs'): + ring_data = RingData(ring_data['replica2part2dev_id'], + ring_data['devs'], ring_data['part_shift']) self._mtime = getmtime(self.pickle_gz_path) self.devs = ring_data.devs self.zone2devs = {} @@ -139,4 +149,12 @@ class Ring(object): zones.remove(self.devs[part2dev_id[part]]['zone']) while zones: zone = zones.pop(part % len(zones)) - yield self.zone2devs[zone][part % len(self.zone2devs[zone])] + weighted_node = None + for i in xrange(len(self.zone2devs[zone])): + node = self.zone2devs[zone][(part + i) % + len(self.zone2devs[zone])] + if node.get('weight'): + weighted_node = node + break + if weighted_node: + yield weighted_node diff --git a/swift/common/utils.py b/swift/common/utils.py index 5962c282bb..0da88c6036 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -34,6 +34,8 @@ from ConfigParser import ConfigParser, NoSectionError, NoOptionError from optparse import OptionParser from tempfile import mkstemp import cPickle as pickle +import glob +from urlparse import urlparse as stdlib_urlparse, ParseResult import eventlet from eventlet import greenio, GreenPool, sleep, Timeout, listen @@ -48,6 +50,10 @@ import logging logging.thread = eventlet.green.thread logging.threading = eventlet.green.threading logging._lock = logging.threading.RLock() +# setup notice level logging +NOTICE = 25 +logging._levelNames[NOTICE] = 'NOTICE' +SysLogHandler.priority_map['NOTICE'] = 'notice' # These are lazily pulled from libc elsewhere _sys_fallocate = None @@ -284,7 +290,8 @@ class LoggerFileObject(object): return self -class LogAdapter(object): +# double inheritance to support property with setter +class LogAdapter(logging.LoggerAdapter, object): """ A Logger like object which performs some reformatting on calls to :meth:`exception`. Can be used to store a threadlocal transaction id. @@ -292,11 +299,10 @@ class LogAdapter(object): _txn_id = threading.local() - def __init__(self, logger): - self.logger = logger - for proxied_method in ('debug', 'log', 'warn', 'warning', 'error', - 'critical', 'info'): - setattr(self, proxied_method, getattr(logger, proxied_method)) + def __init__(self, logger, server): + logging.LoggerAdapter.__init__(self, logger, {}) + self.server = server + setattr(self, 'warn', self.warning) @property def txn_id(self): @@ -310,15 +316,34 @@ class LogAdapter(object): def getEffectiveLevel(self): return self.logger.getEffectiveLevel() - def exception(self, msg, *args): + def process(self, msg, kwargs): + """ + Add extra info to message + """ + kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id} + return msg, kwargs + + def notice(self, msg, *args, **kwargs): + """ + Convenience function for syslog priority LOG_NOTICE. The python + logging lvl is set to 25, just above info. SysLogHandler is + monkey patched to map this log lvl to the LOG_NOTICE syslog + priority. 
+ """ + self.log(NOTICE, msg, *args, **kwargs) + + def _exception(self, msg, *args, **kwargs): + logging.LoggerAdapter.exception(self, msg, *args, **kwargs) + + def exception(self, msg, *args, **kwargs): _junk, exc, _junk = sys.exc_info() - call = self.logger.error + call = self.error emsg = '' if isinstance(exc, OSError): if exc.errno in (errno.EIO, errno.ENOSPC): emsg = str(exc) else: - call = self.logger.exception + call = self._exception elif isinstance(exc, socket.error): if exc.errno == errno.ECONNREFUSED: emsg = _('Connection refused') @@ -327,7 +352,7 @@ class LogAdapter(object): elif exc.errno == errno.ETIMEDOUT: emsg = _('Connection timeout') else: - call = self.logger.exception + call = self._exception elif isinstance(exc, eventlet.Timeout): emsg = exc.__class__.__name__ if hasattr(exc, 'seconds'): @@ -336,53 +361,25 @@ class LogAdapter(object): if exc.msg: emsg += ' %s' % exc.msg else: - call = self.logger.exception - call('%s: %s' % (msg, emsg), *args) + call = self._exception + call('%s: %s' % (msg, emsg), *args, **kwargs) -class NamedFormatter(logging.Formatter): +class TxnFormatter(logging.Formatter): """ - NamedFormatter is used to add additional information to log messages. - Normally it will simply add the server name as an attribute on the - LogRecord and the default format string will include it at the - begining of the log message. Additionally, if the transaction id is - available and not already included in the message, NamedFormatter will - add it. - - NamedFormatter may be initialized with a format string which makes use - of the standard LogRecord attributes. In addition the format string - may include the following mapping key: - - +----------------+---------------------------------------------+ - | Format | Description | - +================+=============================================+ - | %(server)s | Name of the swift server doing logging | - +----------------+---------------------------------------------+ - - :param server: the swift server name, a string. - :param logger: a Logger or :class:`LogAdapter` instance, additional - context may be pulled from attributes on this logger if - available. - :param fmt: the format string used to construct the message, if none is - supplied it defaults to ``"%(server)s %(message)s"`` + Custom logging.Formatter will append txn_id to a log message if the record + has one and the message does not. """ - - def __init__(self, server, logger, - fmt="%(server)s %(message)s"): - logging.Formatter.__init__(self, fmt) - self.server = server - self.logger = logger - def format(self, record): - record.server = self.server msg = logging.Formatter.format(self, record) - if self.logger.txn_id and (record.levelno != logging.INFO or - self.logger.txn_id not in msg): - msg = "%s (txn: %s)" % (msg, self.logger.txn_id) + if (record.txn_id and record.levelno != logging.INFO and + record.txn_id not in msg): + msg = "%s (txn: %s)" % (msg, record.txn_id) return msg -def get_logger(conf, name=None, log_to_console=False, log_route=None): +def get_logger(conf, name=None, log_to_console=False, log_route=None, + fmt="%(server)s %(message)s"): """ Get the current system logger using config settings. 
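# --- usage sketch (illustrative annotation, not lines from this patch) -----
# A minimal example of the reworked logging path introduced in the hunks
# above, assuming the patched swift.common.utils is importable and /dev/log
# is available for SysLogHandler; the conf values and the 'proxy-server'
# name/route below are made up for illustration only.
from swift.common.utils import get_logger

conf = {'log_facility': 'LOG_LOCAL0', 'log_level': 'DEBUG'}
# one SysLogHandler per log_route; 'name' becomes %(server)s in the formatter
logger = get_logger(conf, name='proxy-server', log_to_console=True,
                    log_route='proxy-server')
logger.txn_id = 'tx0123'          # threadlocal txn id added by process()
logger.notice('child starting')   # new NOTICE (25) level -> syslog LOG_NOTICE
logger.warn('slow request')       # non-INFO record gets " (txn: tx0123)"
# ---------------------------------------------------------------------------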
@@ -395,44 +392,52 @@ def get_logger(conf, name=None, log_to_console=False, log_route=None): :param conf: Configuration dict to read settings from :param name: Name of the logger :param log_to_console: Add handler which writes to console on stderr + :param log_route: Route for the logging, not emitted to the log, just used + to separate logging configurations + :param fmt: Override log format """ if not conf: conf = {} - if not hasattr(get_logger, 'root_logger_configured'): - get_logger.root_logger_configured = True - get_logger(conf, name, log_to_console, log_route='root') if name is None: name = conf.get('log_name', 'swift') if not log_route: log_route = name - if log_route == 'root': - logger = logging.getLogger() - else: - logger = logging.getLogger(log_route) - if not hasattr(get_logger, 'handlers'): - get_logger.handlers = {} + logger = logging.getLogger(log_route) + logger.propagate = False + # all new handlers will get the same formatter + formatter = TxnFormatter(fmt) + + # get_logger will only ever add one SysLog Handler to a logger + if not hasattr(get_logger, 'handler4logger'): + get_logger.handler4logger = {} + if logger in get_logger.handler4logger: + logger.removeHandler(get_logger.handler4logger[logger]) + + # facility for this logger will be set by last call wins facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'), SysLogHandler.LOG_LOCAL0) - if facility in get_logger.handlers: - logger.removeHandler(get_logger.handlers[facility]) - get_logger.handlers[facility].close() - del get_logger.handlers[facility] - if log_to_console: - # check if a previous call to get_logger already added a console logger - if hasattr(get_logger, 'console') and get_logger.console: - logger.removeHandler(get_logger.console) - get_logger.console = logging.StreamHandler(sys.__stderr__) - logger.addHandler(get_logger.console) - get_logger.handlers[facility] = \ - SysLogHandler(address='/dev/log', facility=facility) - logger.addHandler(get_logger.handlers[facility]) + handler = SysLogHandler(address='/dev/log', facility=facility) + handler.setFormatter(formatter) + logger.addHandler(handler) + get_logger.handler4logger[logger] = handler + + # setup console logging + if log_to_console or hasattr(get_logger, 'console_handler4logger'): + # remove pre-existing console handler for this logger + if not hasattr(get_logger, 'console_handler4logger'): + get_logger.console_handler4logger = {} + if logger in get_logger.console_handler4logger: + logger.removeHandler(get_logger.console_handler4logger[logger]) + + console_handler = logging.StreamHandler(sys.__stderr__) + console_handler.setFormatter(formatter) + logger.addHandler(console_handler) + get_logger.console_handler4logger[logger] = console_handler + + # set the level for the logger logger.setLevel( getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO)) - adapted_logger = LogAdapter(logger) - formatter = NamedFormatter(name, adapted_logger) - get_logger.handlers[facility].setFormatter(formatter) - if hasattr(get_logger, 'console'): - get_logger.console.setFormatter(formatter) + adapted_logger = LogAdapter(logger, name) return adapted_logger @@ -465,8 +470,12 @@ def capture_stdio(logger, **kwargs): # collect stdio file desc not in use for logging stdio_fds = [0, 1, 2] - if hasattr(get_logger, 'console'): - stdio_fds.remove(get_logger.console.stream.fileno()) + for _junk, handler in getattr(get_logger, + 'console_handler4logger', {}).items(): + try: + stdio_fds.remove(handler.stream.fileno()) + except ValueError: + pass # 
fd not in list with open(os.devnull, 'r+b') as nullfile: # close stdio (excludes fds open for logging) @@ -781,6 +790,60 @@ def write_pickle(obj, dest, tmp): renamer(tmppath, dest) +def search_tree(root, glob_match, ext): + """Look in root, for any files/dirs matching glob, recurively traversing + any found directories looking for files ending with ext + + :param root: start of search path + :param glob_match: glob to match in root, matching dirs are traversed with + os.walk + :param ext: only files that end in ext will be returned + + :returns: list of full paths to matching files, sorted + + """ + found_files = [] + for path in glob.glob(os.path.join(root, glob_match)): + if path.endswith(ext): + found_files.append(path) + else: + for root, dirs, files in os.walk(path): + for file in files: + if file.endswith(ext): + found_files.append(os.path.join(root, file)) + return sorted(found_files) + + +def write_file(path, contents): + """Write contents to file at path + + :param path: any path, subdirs will be created as needed + :param contents: data to write to file, will be converted to string + + """ + dirname, name = os.path.split(path) + if not os.path.exists(dirname): + try: + os.makedirs(dirname) + except OSError, err: + if err.errno == errno.EACCES: + sys.exit('Unable to create %s. Running as ' + 'non-root?' % dirname) + with open(path, 'w') as f: + f.write('%s' % contents) + + +def remove_file(path): + """Quiet wrapper for os.unlink, OSErrors are suppressed + + :param path: first and only argument passed to os.unlink + """ + try: + os.unlink(path) + except OSError: + pass + + def audit_location_generator(devices, datadir, mount_check=True, logger=None): ''' Given a devices path and a data directory, yield (path, device, @@ -868,3 +931,35 @@ class ContextPool(GreenPool): def __exit__(self, type, value, traceback): for coro in list(self.coroutines_running): coro.kill() + + +class ModifiedParseResult(ParseResult): + "Parse results class for urlparse." + + @property + def hostname(self): + netloc = self.netloc.split('@', 1)[-1] + if netloc.startswith('['): + return netloc[1:].split(']')[0] + elif ':' in netloc: + return netloc.rsplit(':')[0] + return netloc + + @property + def port(self): + netloc = self.netloc.split('@', 1)[-1] + if netloc.startswith('['): + netloc = netloc.rsplit(']')[1] + if ':' in netloc: + return int(netloc.rsplit(':')[1]) + return None + + +def urlparse(url): + """ + urlparse augmentation. + This is necessary because urlparse can't handle RFC 2732 URLs. + + :param url: URL to parse. 
+ """ + return ModifiedParseResult(*stdlib_urlparse(url)) diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index 9450bcf439..5f4494b736 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -68,11 +68,15 @@ def get_socket(conf, default_port=8080): """ bind_addr = (conf.get('bind_ip', '0.0.0.0'), int(conf.get('bind_port', default_port))) + address_family = [addr[0] for addr in socket.getaddrinfo(bind_addr[0], + bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) + if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] sock = None retry_until = time.time() + 30 while not sock and time.time() < retry_until: try: - sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096))) + sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)), + family=address_family) if 'cert_file' in conf: sock = ssl.wrap_socket(sock, certfile=conf['cert_file'], keyfile=conf['key_file']) @@ -113,10 +117,8 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): logger = kwargs.pop('logger') else: logger = get_logger(conf, log_name, - log_to_console=kwargs.pop('verbose', False)) + log_to_console=kwargs.pop('verbose', False), log_route='wsgi') - # redirect errors to logger and close stdio - capture_stdio(logger) # bind to address and port sock = get_socket(conf, default_port=kwargs.get('default_port', 8080)) # remaining tasks should not require elevated privileges @@ -125,6 +127,9 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): # finally after binding to ports and privilege drop, run app __init__ code app = loadapp('config:%s' % conf_file, global_conf={'log_name': log_name}) + # redirect errors to logger and close stdio + capture_stdio(logger) + def run_server(): wsgi.HttpProtocol.default_request_version = "HTTP/1.0" eventlet.hubs.use_hub('poll') @@ -168,10 +173,10 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) run_server() - logger.info('Child %d exiting normally' % os.getpid()) + logger.notice('Child %d exiting normally' % os.getpid()) return else: - logger.info('Started child %s' % pid) + logger.notice('Started child %s' % pid) children.append(pid) try: pid, status = os.wait() @@ -182,8 +187,8 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): if err.errno not in (errno.EINTR, errno.ECHILD): raise except KeyboardInterrupt: - logger.info('User quit') + logger.notice('User quit') break greenio.shutdown_safe(sock) sock.close() - logger.info('Exited') + logger.notice('Exited') diff --git a/swift/container/auditor.py b/swift/container/auditor.py index d1ceb4f98a..0b1c10e03e 100644 --- a/swift/container/auditor.py +++ b/swift/container/auditor.py @@ -28,7 +28,7 @@ class ContainerAuditor(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf, 'container-auditor') + self.logger = get_logger(conf, log_route='container-auditor') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/container/server.py b/swift/container/server.py index fab93bf927..3601594afc 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -49,7 +49,7 @@ class ContainerController(object): save_headers = ['x-container-read', 'x-container-write'] def __init__(self, conf): - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='container-server') self.root = conf.get('devices', '/srv/node/') self.mount_check = conf.get('mount_check', 
'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') @@ -89,7 +89,7 @@ class ContainerController(object): account_partition = req.headers.get('X-Account-Partition') account_device = req.headers.get('X-Account-Device') if all([account_host, account_partition, account_device]): - account_ip, account_port = account_host.split(':') + account_ip, account_port = account_host.rsplit(':', 1) new_path = '/' + '/'.join([account, container]) info = broker.get_info() account_headers = {'x-put-timestamp': info['put_timestamp'], diff --git a/swift/container/updater.py b/swift/container/updater.py index 883dd17101..0bd000f3f2 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -37,7 +37,7 @@ class ContainerUpdater(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf, 'container-updater') + self.logger = get_logger(conf, log_route='container-updater') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index 09fdd77774..8ed05049f3 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -31,7 +31,7 @@ class ObjectAuditor(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf, 'object-auditor') + self.logger = get_logger(conf, log_route='object-auditor') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index dcfcb926f9..8dec8aa801 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -207,7 +207,7 @@ class ObjectReplicator(Daemon): :param logger: logging object """ self.conf = conf - self.logger = get_logger(conf, 'object-replicator') + self.logger = get_logger(conf, log_route='object-replicator') self.devices_dir = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/obj/server.py b/swift/obj/server.py index f2e2b31314..9e95ec7c6f 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -266,7 +266,7 @@ class ObjectController(object): /etc/object-server.conf-sample or /etc/swift/object-server.conf-sample. 
""" - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='object-server') self.devices = conf.get('devices', '/srv/node/') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') @@ -301,7 +301,7 @@ class ObjectController(object): full_path = '/%s/%s/%s' % (account, container, obj) try: with ConnectionTimeout(self.conn_timeout): - ip, port = host.split(':') + ip, port = host.rsplit(':', 1) conn = http_connect(ip, port, contdevice, partition, op, full_path, headers_out) with Timeout(self.node_timeout): diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 2b28ff08c5..356be64da4 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -35,7 +35,7 @@ class ObjectUpdater(Daemon): def __init__(self, conf): self.conf = conf - self.logger = get_logger(conf, 'object-updater') + self.logger = get_logger(conf, log_route='object-updater') self.devices = conf.get('devices', '/srv/node') self.mount_check = conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') diff --git a/swift/proxy/server.py b/swift/proxy/server.py index f34a982a50..d2100af010 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -634,7 +634,7 @@ class Controller(object): res.bytes_transferred += len(chunk) except GeneratorExit: res.client_disconnect = True - self.app.logger.info(_('Client disconnected on read')) + self.app.logger.warn(_('Client disconnected on read')) except (Exception, TimeoutError): self.exception_occurred(node, _('Object'), _('Trying to read during GET of %s') % req.path) @@ -1028,7 +1028,7 @@ class ObjectController(Controller): conn.queue.join() conns = [conn for conn in conns if not conn.failed] except ChunkReadTimeout, err: - self.app.logger.info( + self.app.logger.warn( _('ERROR Client read timeout (%ss)'), err.seconds) return HTTPRequestTimeout(request=req) except Exception: @@ -1038,7 +1038,7 @@ class ObjectController(Controller): return Response(status='499 Client Disconnect') if req.content_length and req.bytes_transferred < req.content_length: req.client_disconnect = True - self.app.logger.info( + self.app.logger.warn( _('Client disconnected without sending enough data')) return Response(status='499 Client Disconnect') statuses = [] @@ -1361,12 +1361,20 @@ class BaseApplication(object): def __init__(self, conf, memcache=None, logger=None, account_ring=None, container_ring=None, object_ring=None): - if logger is None: - self.logger = get_logger(conf) - else: - self.logger = logger if conf is None: conf = {} + if logger is None: + self.logger = get_logger(conf, log_route='proxy-server') + access_log_conf = {} + for key in ('log_facility', 'log_name', 'log_level'): + value = conf.get('access_' + key, conf.get(key, None)) + if value: + access_log_conf[key] = value + self.access_logger = get_logger(access_log_conf, + log_route='proxy-access') + else: + self.logger = self.access_logger = logger + swift_dir = conf.get('swift_dir', '/etc/swift') self.node_timeout = int(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) @@ -1546,7 +1554,7 @@ class Application(BaseApplication): if getattr(req, 'client_disconnect', False) or \ getattr(response, 'client_disconnect', False): status_int = 499 - self.logger.info(' '.join(quote(str(x)) for x in ( + self.access_logger.info(' '.join(quote(str(x)) for x in ( client or '-', req.remote_addr or '-', time.strftime('%d/%b/%Y/%H/%M/%S', time.gmtime()), diff --git a/swift/stats/access_processor.py 
b/swift/stats/access_processor.py index 2aee505415..6965ef2b4a 100644 --- a/swift/stats/access_processor.py +++ b/swift/stats/access_processor.py @@ -34,7 +34,7 @@ class AccessLogProcessor(object): conf.get('service_ips', '').split(',')\ if x.strip()] self.warn_percent = float(conf.get('warn_percent', '0.8')) - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='access-processor') def log_line_parser(self, raw_log): '''given a raw access log line, return a dict of the good parts''' diff --git a/swift/stats/account_stats.py b/swift/stats/account_stats.py index 6a9688831f..34b024d2c2 100644 --- a/swift/stats/account_stats.py +++ b/swift/stats/account_stats.py @@ -21,7 +21,6 @@ import hashlib from swift.account.server import DATADIR as account_server_data_dir from swift.common.db import AccountBroker -from swift.common.internal_proxy import InternalProxy from swift.common.utils import renamer, get_logger, readconf, mkdirs from swift.common.constraints import check_mount from swift.common.daemon import Daemon @@ -49,7 +48,8 @@ class AccountStat(Daemon): self.devices = server_conf.get('devices', '/srv/node') self.mount_check = server_conf.get('mount_check', 'true').lower() in \ ('true', 't', '1', 'on', 'yes', 'y') - self.logger = get_logger(stats_conf, 'swift-account-stats-logger') + self.logger = \ + get_logger(stats_conf, log_route='account-stats') def run_once(self): self.logger.info(_("Gathering account stats")) diff --git a/swift/stats/log_processor.py b/swift/stats/log_processor.py index 5dbc92afbe..727e687f38 100644 --- a/swift/stats/log_processor.py +++ b/swift/stats/log_processor.py @@ -40,7 +40,7 @@ class LogProcessor(object): def __init__(self, conf, logger): if isinstance(logger, tuple): - self.logger = get_logger(*logger) + self.logger = get_logger(*logger, log_route='log-processor') else: self.logger = logger @@ -226,7 +226,7 @@ class LogProcessorDaemon(Daemon): c = conf.get('log-processor') super(LogProcessorDaemon, self).__init__(c) self.total_conf = conf - self.logger = get_logger(c) + self.logger = get_logger(c, log_route='log-processor') self.log_processor = LogProcessor(conf, self.logger) self.lookback_hours = int(c.get('lookback_hours', '120')) self.lookback_window = int(c.get('lookback_window', diff --git a/swift/stats/log_uploader.py b/swift/stats/log_uploader.py index b425738938..c88eeb9c0a 100644 --- a/swift/stats/log_uploader.py +++ b/swift/stats/log_uploader.py @@ -64,8 +64,9 @@ class LogUploader(Daemon): self.container_name = container_name self.filename_format = source_filename_format self.internal_proxy = InternalProxy(proxy_server_conf) - log_name = 'swift-log-uploader-%s' % plugin_name - self.logger = utils.get_logger(uploader_conf, plugin_name) + log_name = '%s-log-uploader' % plugin_name + self.logger = utils.get_logger(uploader_conf, log_name, + log_route=plugin_name) def run_once(self): self.logger.info(_("Uploading logs")) @@ -78,7 +79,7 @@ class LogUploader(Daemon): i = [(self.filename_format.index(c), c) for c in '%Y %m %d %H'.split()] i.sort() year_offset = month_offset = day_offset = hour_offset = None - base_offset = len(self.log_dir) + base_offset = len(self.log_dir.rstrip('/')) + 1 for start, c in i: offset = base_offset + start if c == '%Y': diff --git a/swift/stats/stats_processor.py b/swift/stats/stats_processor.py index 95dba7604c..f9496c1df9 100644 --- a/swift/stats/stats_processor.py +++ b/swift/stats/stats_processor.py @@ -20,7 +20,7 @@ class StatsLogProcessor(object): """Transform account storage stat logs""" def 
__init__(self, conf): - self.logger = get_logger(conf) + self.logger = get_logger(conf, log_route='stats-processor') def process(self, obj_stream, data_object_account, data_object_container, data_object_name): diff --git a/test/unit/__init__.py b/test/unit/__init__.py index 50b06766de..005aabd3db 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -4,6 +4,8 @@ import os from contextlib import contextmanager from tempfile import NamedTemporaryFile from eventlet.green import socket +from tempfile import mkdtemp +from shutil import rmtree def readuntil2crlfs(fd): @@ -68,6 +70,27 @@ xattr.setxattr = _setxattr xattr.getxattr = _getxattr +@contextmanager +def temptree(files, contents=''): + # generate enough contents to fill the files + c = len(files) + contents = (list(contents) + [''] * c)[:c] + tempdir = mkdtemp() + for path, content in zip(files, contents): + if os.path.isabs(path): + path = '.' + path + new_path = os.path.join(tempdir, path) + subdir = os.path.dirname(new_path) + if not os.path.exists(subdir): + os.makedirs(subdir) + with open(new_path, 'w') as f: + f.write(str(content)) + try: + yield tempdir + finally: + rmtree(tempdir) + + class MockTrue(object): """ Instances of MockTrue evaluate like True diff --git a/test/unit/auth/test_server.py b/test/unit/auth/test_server.py index 4060766d65..d58556ab22 100644 --- a/test/unit/auth/test_server.py +++ b/test/unit/auth/test_server.py @@ -456,7 +456,7 @@ class TestAuthServer(unittest.TestCase): def test_basic_logging(self): log = StringIO() log_handler = StreamHandler(log) - logger = get_logger(self.conf, 'auth') + logger = get_logger(self.conf, 'auth-server', log_route='auth-server') logger.logger.addHandler(log_handler) try: auth_server.http_connect = fake_http_connect(201) @@ -534,7 +534,7 @@ class TestAuthServer(unittest.TestCase): orig_Request = auth_server.Request log = StringIO() log_handler = StreamHandler(log) - logger = get_logger(self.conf, 'auth') + logger = get_logger(self.conf, 'auth-server', log_route='auth-server') logger.logger.addHandler(log_handler) try: auth_server.Request = request_causing_exception diff --git a/test/unit/common/middleware/test_swauth.py b/test/unit/common/middleware/test_swauth.py index ce3681ac06..eeda4f0cbf 100644 --- a/test/unit/common/middleware/test_swauth.py +++ b/test/unit/common/middleware/test_swauth.py @@ -2576,6 +2576,23 @@ class TestAuth(unittest.TestCase): {"groups": [{"name": "act:usr"}, {"name": "act"}], "auth": "plaintext:key"}) + def test_put_user_special_chars_success(self): + self.test_auth.app = FakeApp(iter([ + ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), + # PUT of user object + ('201 Created', {}, '')])) + resp = Request.blank('/auth/v2/act/u_s-r', + environ={'REQUEST_METHOD': 'PUT'}, + headers={'X-Auth-Admin-User': '.super_admin', + 'X-Auth-Admin-Key': 'supertest', + 'X-Auth-User-Key': 'key'} + ).get_response(self.test_auth) + self.assertEquals(resp.status_int, 201) + self.assertEquals(self.test_auth.app.calls, 2) + self.assertEquals(json.loads(self.test_auth.app.request.body), + {"groups": [{"name": "act:u_s-r"}, {"name": "act"}], + "auth": "plaintext:key"}) + def test_put_user_account_admin_success(self): self.test_auth.app = FakeApp(iter([ ('200 Ok', {'X-Container-Meta-Account-Id': 'AUTH_cfa'}, ''), diff --git a/test/unit/common/ring/test_ring.py b/test/unit/common/ring/test_ring.py index ad72a4c990..1d459ad919 100644 --- a/test/unit/common/ring/test_ring.py +++ b/test/unit/common/ring/test_ring.py @@ -50,7 +50,8 @@ class 
TestRing(unittest.TestCase): os.mkdir(self.testdir) self.testgz = os.path.join(self.testdir, 'ring.gz') self.intended_replica2part2dev_id = [[0, 2, 0, 2], [2, 0, 2, 0]] - self.intended_devs = [{'id': 0, 'zone': 0}, None, {'id': 2, 'zone': 2}] + self.intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0}, None, + {'id': 2, 'zone': 2, 'weight': 1.0}] self.intended_part_shift = 30 self.intended_reload_time = 15 pickle.dump(ring.RingData(self.intended_replica2part2dev_id, @@ -69,10 +70,17 @@ class TestRing(unittest.TestCase): self.assertEquals(self.ring.devs, self.intended_devs) self.assertEquals(self.ring.reload_time, self.intended_reload_time) self.assertEquals(self.ring.pickle_gz_path, self.testgz) + # test invalid endcap + _orig_hash_path_suffix = utils.HASH_PATH_SUFFIX + try: + utils.HASH_PATH_SUFFIX = '' + self.assertRaises(SystemExit, ring.Ring, self.testgz) + finally: + utils.HASH_PATH_SUFFIX = _orig_hash_path_suffix def test_has_changed(self): self.assertEquals(self.ring.has_changed(), False) - os.utime(self.testgz, (time()+60, time()+60)) + os.utime(self.testgz, (time() + 60, time() + 60)) self.assertEquals(self.ring.has_changed(), True) def test_reload(self): @@ -80,7 +88,7 @@ class TestRing(unittest.TestCase): self.ring = ring.Ring(self.testgz, reload_time=0.001) orig_mtime = self.ring._mtime self.assertEquals(len(self.ring.devs), 3) - self.intended_devs.append({'id': 3, 'zone': 3}) + self.intended_devs.append({'id': 3, 'zone': 3, 'weight': 1.0}) pickle.dump(ring.RingData(self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift), GzipFile(self.testgz, 'wb')) @@ -93,7 +101,7 @@ class TestRing(unittest.TestCase): self.ring = ring.Ring(self.testgz, reload_time=0.001) orig_mtime = self.ring._mtime self.assertEquals(len(self.ring.devs), 4) - self.intended_devs.append({'id': 4, 'zone': 4}) + self.intended_devs.append({'id': 4, 'zone': 4, 'weight': 1.0}) pickle.dump(ring.RingData(self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift), GzipFile(self.testgz, 'wb')) @@ -108,7 +116,7 @@ class TestRing(unittest.TestCase): orig_mtime = self.ring._mtime part, nodes = self.ring.get_nodes('a') self.assertEquals(len(self.ring.devs), 5) - self.intended_devs.append({'id': 5, 'zone': 5}) + self.intended_devs.append({'id': 5, 'zone': 5, 'weight': 1.0}) pickle.dump(ring.RingData(self.intended_replica2part2dev_id, self.intended_devs, self.intended_part_shift), GzipFile(self.testgz, 'wb')) @@ -127,57 +135,71 @@ class TestRing(unittest.TestCase): self.assertRaises(TypeError, self.ring.get_nodes) part, nodes = self.ring.get_nodes('a') self.assertEquals(part, 0) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a1') self.assertEquals(part, 0) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a4') self.assertEquals(part, 1) - self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}]) + self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0}, + {'id': 0, 'zone': 0, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('aa') self.assertEquals(part, 1) - self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}]) + self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0}, + {'id': 0, 'zone': 0, 'weight': 1.0}]) 
part, nodes = self.ring.get_nodes('a', 'c1') self.assertEquals(part, 0) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c0') self.assertEquals(part, 3) - self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}]) + self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0}, + {'id': 0, 'zone': 0, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c3') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c', 'o1') self.assertEquals(part, 1) - self.assertEquals(nodes, [{'id': 2, 'zone': 2}, {'id': 0, 'zone': 0}]) + self.assertEquals(nodes, [{'id': 2, 'zone': 2, 'weight': 1.0}, + {'id': 0, 'zone': 0, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c', 'o5') self.assertEquals(part, 0) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c', 'o0') self.assertEquals(part, 0) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) def test_get_more_nodes(self): # Yes, these tests are deliberately very fragile. We want to make sure # that if someone changes the results the ring produces, they know it. 
part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) nodes = list(self.ring.get_more_nodes(part)) self.assertEquals(nodes, []) - self.ring.devs.append({'id': 3, 'zone': 0}) + self.ring.devs.append({'id': 3, 'zone': 0, 'weight': 1.0}) self.ring.zone2devs[0].append(self.ring.devs[3]) part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) nodes = list(self.ring.get_more_nodes(part)) self.assertEquals(nodes, []) @@ -186,18 +208,36 @@ class TestRing(unittest.TestCase): self.ring.zone2devs[3] = [self.ring.devs[3]] part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) nodes = list(self.ring.get_more_nodes(part)) - self.assertEquals(nodes, [{'id': 3, 'zone': 3}]) + self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0}]) self.ring.devs.append(None) - self.ring.devs.append({'id': 5, 'zone': 5}) + self.ring.devs.append({'id': 5, 'zone': 5, 'weight': 1.0}) self.ring.zone2devs[5] = [self.ring.devs[5]] part, nodes = self.ring.get_nodes('a', 'c', 'o2') self.assertEquals(part, 2) - self.assertEquals(nodes, [{'id': 0, 'zone': 0}, {'id': 2, 'zone': 2}]) + self.assertEquals(nodes, [{'id': 0, 'zone': 0, 'weight': 1.0}, + {'id': 2, 'zone': 2, 'weight': 1.0}]) nodes = list(self.ring.get_more_nodes(part)) - self.assertEquals(nodes, [{'id': 3, 'zone': 3}, {'id': 5, 'zone': 5}]) + self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0}, + {'id': 5, 'zone': 5, 'weight': 1.0}]) + + self.ring.devs.append({'id': 6, 'zone': 5, 'weight': 1.0}) + self.ring.zone2devs[5].append(self.ring.devs[6]) + nodes = list(self.ring.get_more_nodes(part)) + self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0}, + {'id': 5, 'zone': 5, 'weight': 1.0}]) + self.ring.devs[5]['weight'] = 0 + nodes = list(self.ring.get_more_nodes(part)) + self.assertEquals(nodes, [{'id': 3, 'zone': 3, 'weight': 1.0}, + {'id': 6, 'zone': 5, 'weight': 1.0}]) + self.ring.devs[3]['weight'] = 0 + self.ring.devs.append({'id': 7, 'zone': 6, 'weight': 0.0}) + self.ring.zone2devs[6] = [self.ring.devs[7]] + nodes = list(self.ring.get_more_nodes(part)) + self.assertEquals(nodes, [{'id': 6, 'zone': 5, 'weight': 1.0}]) if __name__ == '__main__': diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py index 015928f670..1d54e78c3e 100644 --- a/test/unit/common/test_daemon.py +++ b/test/unit/common/test_daemon.py @@ -28,7 +28,7 @@ class MyDaemon(daemon.Daemon): def __init__(self, conf): self.conf = conf - self.logger = utils.get_logger(None) + self.logger = utils.get_logger(None, 'server', log_route='server') MyDaemon.forever_called = False MyDaemon.once_called = False @@ -97,9 +97,9 @@ user = %s # test user quit MyDaemon.run_forever = MyDaemon.run_quit sio = StringIO() - logger = logging.getLogger() + logger = logging.getLogger('server') logger.addHandler(logging.StreamHandler(sio)) - logger = utils.get_logger(None, 'server') + logger = utils.get_logger(None, 'server', log_route='server') daemon.run_daemon(MyDaemon, conf_file, 
logger=logger) self.assert_('user quit' in sio.getvalue().lower()) diff --git a/test/unit/common/test_manager.py b/test/unit/common/test_manager.py new file mode 100644 index 0000000000..47a50c6a24 --- /dev/null +++ b/test/unit/common/test_manager.py @@ -0,0 +1,1636 @@ +# Copyright (c) 2010-2011 OpenStack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from nose import SkipTest +from test.unit import temptree + +import os +import sys +import resource +import signal +import errno +from contextlib import contextmanager +from collections import defaultdict +from threading import Thread +from time import sleep, time + +from swift.common import manager + +DUMMY_SIG = 1 + + +class MockOs(): + + def __init__(self, pids): + self.running_pids = pids + self.pid_sigs = defaultdict(list) + self.closed_fds = [] + self.child_pid = 9999 # fork defaults to test parent process path + self.execlp_called = False + + def kill(self, pid, sig): + if pid not in self.running_pids: + raise OSError(3, 'No such process') + self.pid_sigs[pid].append(sig) + + def __getattr__(self, name): + # I only over-ride portions of the os module + try: + return object.__getattr__(self, name) + except AttributeError: + return getattr(os, name) + + +def pop_stream(f): + """read everything out of file from the top and clear it out + """ + f.flush() + f.seek(0) + output = f.read() + f.seek(0) + f.truncate() + #print >> sys.stderr, output + return output + + +class TestManagerModule(unittest.TestCase): + + def test_servers(self): + main_plus_rest = set(manager.MAIN_SERVERS + manager.REST_SERVERS) + self.assertEquals(set(manager.ALL_SERVERS), main_plus_rest) + # make sure there's no server listed in both + self.assertEquals(len(main_plus_rest), len(manager.MAIN_SERVERS) + + len(manager.REST_SERVERS)) + + def test_setup_env(self): + class MockResource(): + def __init__(self, error=None): + self.error = error + self.called_with_args = [] + + def setrlimit(self, resource, limits): + if self.error: + raise self.error + self.called_with_args.append((resource, limits)) + + def __getattr__(self, name): + # I only over-ride portions of the resource module + try: + return object.__getattr__(self, name) + except AttributeError: + return getattr(resource, name) + + _orig_resource = manager.resource + _orig_environ = os.environ + try: + manager.resource = MockResource() + manager.os.environ = {} + manager.setup_env() + expected = [ + (resource.RLIMIT_NOFILE, (manager.MAX_DESCRIPTORS, + manager.MAX_DESCRIPTORS)), + (resource.RLIMIT_DATA, (manager.MAX_MEMORY, + manager.MAX_MEMORY)), + ] + self.assertEquals(manager.resource.called_with_args, expected) + self.assertEquals(manager.os.environ['PYTHON_EGG_CACHE'], '/tmp') + + # test error condition + manager.resource = MockResource(error=ValueError()) + manager.os.environ = {} + manager.setup_env() + self.assertEquals(manager.resource.called_with_args, []) + self.assertEquals(manager.os.environ['PYTHON_EGG_CACHE'], '/tmp') + + manager.resource = MockResource(error=OSError()) + 
manager.os.environ = {} + self.assertRaises(OSError, manager.setup_env) + self.assertEquals(manager.os.environ.get('PYTHON_EGG_CACHE'), None) + finally: + manager.resource = _orig_resource + os.environ = _orig_environ + + def test_command_wrapper(self): + @manager.command + def myfunc(arg1): + """test doc + """ + return arg1 + + self.assertEquals(myfunc.__doc__.strip(), 'test doc') + self.assertEquals(myfunc(1), 1) + self.assertEquals(myfunc(0), 0) + self.assertEquals(myfunc(True), 1) + self.assertEquals(myfunc(False), 0) + self.assert_(hasattr(myfunc, 'publicly_accessible')) + self.assert_(myfunc.publicly_accessible) + + def test_watch_server_pids(self): + class MockOs(): + WNOHANG = os.WNOHANG + + def __init__(self, pid_map={}): + self.pid_map = {} + for pid, v in pid_map.items(): + self.pid_map[pid] = (x for x in v) + + def waitpid(self, pid, options): + try: + rv = self.pid_map[pid].next() + except StopIteration: + raise OSError(errno.ECHILD, os.strerror(errno.ECHILD)) + except KeyError: + raise OSError(errno.ESRCH, os.strerror(errno.ESRCH)) + if isinstance(rv, Exception): + raise rv + else: + return rv + + class MockTime(): + def __init__(self, ticks=None): + self.tock = time() + if not ticks: + ticks = [] + + self.ticks = (t for t in ticks) + + def time(self): + try: + self.tock += self.ticks.next() + except StopIteration: + self.tock += 1 + return self.tock + + def sleep(*args): + return + + class MockServer(): + + def __init__(self, pids, zombie=0): + self.heartbeat = (pids for _ in range(zombie)) + + def get_running_pids(self): + try: + rv = self.heartbeat.next() + return rv + except StopIteration: + return {} + + _orig_os = manager.os + _orig_time = manager.time + _orig_server = manager.Server + try: + manager.time = MockTime() + manager.os = MockOs() + # this server always says it's dead when you ask for running pids + server = MockServer([1]) + # list of pids keyed on servers to watch + server_pids = { + server: [1], + } + # basic test, server dies + gen = manager.watch_server_pids(server_pids) + expected = [(server, 1)] + self.assertEquals([x for x in gen], expected) + # start long running server and short interval + server = MockServer([1], zombie=15) + server_pids = { + server: [1], + } + gen = manager.watch_server_pids(server_pids) + self.assertEquals([x for x in gen], []) + # wait a little longer + gen = manager.watch_server_pids(server_pids, interval=15) + self.assertEquals([x for x in gen], [(server, 1)]) + # zombie process + server = MockServer([1], zombie=200) + server_pids = { + server: [1], + } + # test weird os error + manager.os = MockOs({1: [OSError()]}) + gen = manager.watch_server_pids(server_pids) + self.assertRaises(OSError, lambda: [x for x in gen]) + # test multi-server + server1 = MockServer([1, 10], zombie=200) + server2 = MockServer([2, 20], zombie=8) + server_pids = { + server1: [1, 10], + server2: [2, 20], + } + pid_map = { + 1: [None for _ in range(10)], + 2: [None for _ in range(8)], + 20: [None for _ in range(4)], + } + manager.os = MockOs(pid_map) + gen = manager.watch_server_pids(server_pids, + interval=manager.KILL_WAIT) + expected = [ + (server2, 2), + (server2, 20), + ] + self.assertEquals([x for x in gen], expected) + + finally: + manager.os = _orig_os + manager.time = _orig_time + manager.Server = _orig_server + + def test_exc(self): + self.assert_(issubclass(manager.UnknownCommandError, Exception)) + + +class TestServer(unittest.TestCase): + + def tearDown(self): + reload(manager) + + def join_swift_dir(self, path): + return 
os.path.join(manager.SWIFT_DIR, path) + + def join_run_dir(self, path): + return os.path.join(manager.RUN_DIR, path) + + def test_create_server(self): + server = manager.Server('proxy') + self.assertEquals(server.server, 'proxy-server') + self.assertEquals(server.type, 'proxy') + self.assertEquals(server.cmd, 'swift-proxy-server') + server = manager.Server('object-replicator') + self.assertEquals(server.server, 'object-replicator') + self.assertEquals(server.type, 'object') + self.assertEquals(server.cmd, 'swift-object-replicator') + + def test_server_to_string(self): + server = manager.Server('Proxy') + self.assertEquals(str(server), 'proxy-server') + server = manager.Server('object-replicator') + self.assertEquals(str(server), 'object-replicator') + + def test_server_repr(self): + server = manager.Server('proxy') + self.assert_(server.__class__.__name__ in repr(server)) + self.assert_(str(server) in repr(server)) + + def test_server_equality(self): + server1 = manager.Server('Proxy') + server2 = manager.Server('proxy-server') + self.assertEquals(server1, server2) + # it is NOT a string + self.assertNotEquals(server1, 'proxy-server') + + def test_get_pid_file_name(self): + server = manager.Server('proxy') + conf_file = self.join_swift_dir('proxy-server.conf') + pid_file = self.join_run_dir('proxy-server.pid') + self.assertEquals(pid_file, server.get_pid_file_name(conf_file)) + server = manager.Server('object-replicator') + conf_file = self.join_swift_dir('object-server/1.conf') + pid_file = self.join_run_dir('object-replicator/1.pid') + self.assertEquals(pid_file, server.get_pid_file_name(conf_file)) + server = manager.Server('container-auditor') + conf_file = self.join_swift_dir( + 'container-server/1/container-auditor.conf') + pid_file = self.join_run_dir( + 'container-auditor/1/container-auditor.pid') + self.assertEquals(pid_file, server.get_pid_file_name(conf_file)) + + def test_get_conf_file_name(self): + server = manager.Server('proxy') + conf_file = self.join_swift_dir('proxy-server.conf') + pid_file = self.join_run_dir('proxy-server.pid') + self.assertEquals(conf_file, server.get_conf_file_name(pid_file)) + server = manager.Server('object-replicator') + conf_file = self.join_swift_dir('object-server/1.conf') + pid_file = self.join_run_dir('object-replicator/1.pid') + self.assertEquals(conf_file, server.get_conf_file_name(pid_file)) + server = manager.Server('container-auditor') + conf_file = self.join_swift_dir( + 'container-server/1/container-auditor.conf') + pid_file = self.join_run_dir( + 'container-auditor/1/container-auditor.pid') + self.assertEquals(conf_file, server.get_conf_file_name(pid_file)) + + def test_conf_files(self): + # test get single conf file + conf_files = ( + 'proxy-server.conf', + 'proxy-server.ini', + 'auth-server.conf', + ) + with temptree(conf_files) as t: + manager.SWIFT_DIR = t + server = manager.Server('proxy') + conf_files = server.conf_files() + self.assertEquals(len(conf_files), 1) + conf_file = conf_files[0] + proxy_conf = self.join_swift_dir('proxy-server.conf') + self.assertEquals(conf_file, proxy_conf) + + # test multi server conf files & grouping of server-type config + conf_files = ( + 'object-server1.conf', + 'object-server/2.conf', + 'object-server/object3.conf', + 'object-server/conf/server4.conf', + 'object-server.txt', + 'proxy-server.conf', + ) + with temptree(conf_files) as t: + manager.SWIFT_DIR = t + server = manager.Server('object-replicator') + conf_files = server.conf_files() + self.assertEquals(len(conf_files), 4) + c1 = 
self.join_swift_dir('object-server1.conf') + c2 = self.join_swift_dir('object-server/2.conf') + c3 = self.join_swift_dir('object-server/object3.conf') + c4 = self.join_swift_dir('object-server/conf/server4.conf') + for c in [c1, c2, c3, c4]: + self.assert_(c in conf_files) + # test configs returned sorted + sorted_confs = sorted([c1, c2, c3, c4]) + self.assertEquals(conf_files, sorted_confs) + + # test get single numbered conf + conf_files = ( + 'account-server/1.conf', + 'account-server/2.conf', + 'account-server/3.conf', + 'account-server/4.conf', + ) + with temptree(conf_files) as t: + manager.SWIFT_DIR = t + server = manager.Server('account') + conf_files = server.conf_files(number=2) + self.assertEquals(len(conf_files), 1) + conf_file = conf_files[0] + self.assertEquals(conf_file, + self.join_swift_dir('account-server/2.conf')) + # test missing config number + conf_files = server.conf_files(number=5) + self.assertFalse(conf_files) + + # test verbose & quiet + conf_files = ( + 'auth-server.ini', + 'container-server/1.conf', + ) + with temptree(conf_files) as t: + manager.SWIFT_DIR = t + old_stdout = sys.stdout + try: + with open(os.path.join(t, 'output'), 'w+') as f: + sys.stdout = f + server = manager.Server('auth') + # check warn "unable to locate" + conf_files = server.conf_files() + self.assertFalse(conf_files) + self.assert_('unable to locate' in pop_stream(f).lower()) + # check quiet will silence warning + conf_files = server.conf_files(verbose=True, quiet=True) + self.assertEquals(pop_stream(f), '') + # check found config no warning + server = manager.Server('container-auditor') + conf_files = server.conf_files() + self.assertEquals(pop_stream(f), '') + # check missing config number warn "unable to locate" + conf_files = server.conf_files(number=2) + self.assert_('unable to locate' in pop_stream(f).lower()) + # check verbose lists configs + conf_files = server.conf_files(number=2, verbose=True) + c1 = self.join_swift_dir('container-server/1.conf') + self.assert_(c1 in pop_stream(f)) + finally: + sys.stdout = old_stdout + + def test_iter_pid_files(self): + """ + Server.iter_pid_files is kinda boring, test the + Server.pid_files stuff here as well + """ + pid_files = ( + ('proxy-server.pid', 1), + ('auth-server.pid', 'blah'), + ('object-replicator/1.pid', 11), + ('object-replicator/2.pid', 12), + ) + files, contents = zip(*pid_files) + with temptree(files, contents) as t: + manager.RUN_DIR = t + server = manager.Server('proxy') + # test get one file + iter = server.iter_pid_files() + pid_file, pid = iter.next() + self.assertEquals(pid_file, self.join_run_dir('proxy-server.pid')) + self.assertEquals(pid, 1) + # ... 
and only one file + self.assertRaises(StopIteration, iter.next) + # test invalid value in pid file + server = manager.Server('auth') + self.assertRaises(ValueError, server.iter_pid_files().next) + # test object-server doesn't steal pids from object-replicator + server = manager.Server('object') + self.assertRaises(StopIteration, server.iter_pid_files().next) + # test multi-pid iter + server = manager.Server('object-replicator') + real_map = { + 11: self.join_run_dir('object-replicator/1.pid'), + 12: self.join_run_dir('object-replicator/2.pid'), + } + pid_map = {} + for pid_file, pid in server.iter_pid_files(): + pid_map[pid] = pid_file + self.assertEquals(pid_map, real_map) + + # test get pid_files by number + conf_files = ( + 'object-server/1.conf', + 'object-server/2.conf', + 'object-server/3.conf', + 'object-server/4.conf', + ) + + pid_files = ( + ('object-server/1.pid', 1), + ('object-server/2.pid', 2), + ('object-server/5.pid', 5), + ) + + with temptree(conf_files) as swift_dir: + manager.SWIFT_DIR = swift_dir + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + server = manager.Server('object') + # test get all pid files + real_map = { + 1: self.join_run_dir('object-server/1.pid'), + 2: self.join_run_dir('object-server/2.pid'), + 5: self.join_run_dir('object-server/5.pid'), + } + pid_map = {} + for pid_file, pid in server.iter_pid_files(): + pid_map[pid] = pid_file + self.assertEquals(pid_map, real_map) + # test get pid with matching conf + pids = list(server.iter_pid_files(number=2)) + self.assertEquals(len(pids), 1) + pid_file, pid = pids[0] + self.assertEquals(pid, 2) + pid_two = self.join_run_dir('object-server/2.pid') + self.assertEquals(pid_file, pid_two) + # try to iter on a pid number with a matching conf but no pid + pids = list(server.iter_pid_files(number=3)) + self.assertFalse(pids) + # test get pids w/o matching conf + pids = list(server.iter_pid_files(number=5)) + self.assertFalse(pids) + + def test_signal_pids(self): + pid_files = ( + ('proxy-server.pid', 1), + ('auth-server.pid', 2), + ) + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + # mock os with both pids running + manager.os = MockOs([1, 2]) + server = manager.Server('proxy') + pids = server.signal_pids(DUMMY_SIG) + self.assertEquals(len(pids), 1) + self.assert_(1 in pids) + self.assertEquals(manager.os.pid_sigs[1], [DUMMY_SIG]) + # make sure other process not signaled + self.assertFalse(2 in pids) + self.assertFalse(2 in manager.os.pid_sigs) + # capture stdio + old_stdout = sys.stdout + try: + with open(os.path.join(t, 'output'), 'w+') as f: + sys.stdout = f + #test print details + pids = server.signal_pids(DUMMY_SIG) + output = pop_stream(f) + self.assert_('pid: %s' % 1 in output) + self.assert_('signal: %s' % DUMMY_SIG in output) + # test no details on signal.SIG_DFL + pids = server.signal_pids(signal.SIG_DFL) + self.assertEquals(pop_stream(f), '') + # reset mock os so only the other server is running + manager.os = MockOs([2]) + # test pid not running + pids = server.signal_pids(signal.SIG_DFL) + self.assert_(1 not in pids) + self.assert_(1 not in manager.os.pid_sigs) + # test remove stale pid file + self.assertFalse(os.path.exists( + self.join_run_dir('proxy-server.pid'))) + # reset mock os with no running pids + manager.os = MockOs([]) + server = manager.Server('auth') + # test verbose warns on removing pid file + pids = server.signal_pids(signal.SIG_DFL, verbose=True) + output = pop_stream(f) + self.assert_('stale pid' in 
output.lower()) + auth_pid = self.join_run_dir('auth-server.pid') + self.assert_(auth_pid in output) + finally: + sys.stdout = old_stdout + + def test_get_running_pids(self): + # test only gets running pids + pid_files = ( + ('test-server1.pid', 1), + ('test-server2.pid', 2), + ) + with temptree(*zip(*pid_files)) as t: + manager.RUN_DIR = t + server = manager.Server('test-server') + # mock os, only pid '1' is running + manager.os = MockOs([1]) + running_pids = server.get_running_pids() + self.assertEquals(len(running_pids), 1) + self.assert_(1 in running_pids) + self.assert_(2 not in running_pids) + # test persistant running pid files + self.assert_(os.path.exists(os.path.join(t, 'test-server1.pid'))) + # test clean up stale pids + pid_two = self.join_swift_dir('test-server2.pid') + self.assertFalse(os.path.exists(pid_two)) + # reset mock os, no pids running + manager.os = MockOs([]) + running_pids = server.get_running_pids() + self.assertFalse(running_pids) + # and now all pid files are cleaned out + pid_one = self.join_run_dir('test-server1.pid') + self.assertFalse(os.path.exists(pid_one)) + all_pids = os.listdir(t) + self.assertEquals(len(all_pids), 0) + + # test only get pids for right server + pid_files = ( + ('thing-doer.pid', 1), + ('thing-sayer.pid', 2), + ('other-doer.pid', 3), + ('other-sayer.pid', 4), + ) + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + # all pids are running + manager.os = MockOs(pids) + server = manager.Server('thing-doer') + running_pids = server.get_running_pids() + # only thing-doer.pid, 1 + self.assertEquals(len(running_pids), 1) + self.assert_(1 in running_pids) + # no other pids returned + for n in (2, 3, 4): + self.assert_(n not in running_pids) + # assert stale pids for other servers ignored + manager.os = MockOs([1]) # only thing-doer is running + running_pids = server.get_running_pids() + for f in ('thing-sayer.pid', 'other-doer.pid', 'other-sayer.pid'): + # other server pid files persist + self.assert_(os.path.exists, os.path.join(t, f)) + # verify that servers are in fact not running + for server_name in ('thing-sayer', 'other-doer', 'other-sayer'): + server = manager.Server(server_name) + running_pids = server.get_running_pids() + self.assertFalse(running_pids) + # and now all OTHER pid files are cleaned out + all_pids = os.listdir(t) + self.assertEquals(len(all_pids), 1) + self.assert_(os.path.exists(os.path.join(t, 'thing-doer.pid'))) + + def test_kill_running_pids(self): + pid_files = ( + ('object-server.pid', 1), + ('object-replicator1.pid', 11), + ('object-replicator2.pid', 12), + ) + files, running_pids = zip(*pid_files) + with temptree(files, running_pids) as t: + manager.RUN_DIR = t + server = manager.Server('object') + # test no servers running + manager.os = MockOs([]) + pids = server.kill_running_pids() + self.assertFalse(pids, pids) + files, running_pids = zip(*pid_files) + with temptree(files, running_pids) as t: + manager.RUN_DIR = t + # start up pid + manager.os = MockOs([1]) + # test kill one pid + pids = server.kill_running_pids() + self.assertEquals(len(pids), 1) + self.assert_(1 in pids) + self.assertEquals(manager.os.pid_sigs[1], [signal.SIGTERM]) + # reset os mock + manager.os = MockOs([1]) + # test shutdown + self.assert_('object-server' in + manager.GRACEFUL_SHUTDOWN_SERVERS) + pids = server.kill_running_pids(graceful=True) + self.assertEquals(len(pids), 1) + self.assert_(1 in pids) + self.assertEquals(manager.os.pid_sigs[1], [signal.SIGHUP]) + # start up other servers + manager.os = 
MockOs([11, 12]) + # test multi server kill & ignore graceful on unsupported server + self.assertFalse('object-replicator' in + manager.GRACEFUL_SHUTDOWN_SERVERS) + server = manager.Server('object-replicator') + pids = server.kill_running_pids(graceful=True) + self.assertEquals(len(pids), 2) + for pid in (11, 12): + self.assert_(pid in pids) + self.assertEquals(manager.os.pid_sigs[pid], + [signal.SIGTERM]) + # and the other pid is of course not signaled + self.assert_(1 not in manager.os.pid_sigs) + + def test_status(self): + conf_files = ( + 'test-server/1.conf', + 'test-server/2.conf', + 'test-server/3.conf', + 'test-server/4.conf', + ) + + pid_files = ( + ('test-server/1.pid', 1), + ('test-server/2.pid', 2), + ('test-server/3.pid', 3), + ('test-server/4.pid', 4), + ) + + with temptree(conf_files) as swift_dir: + manager.SWIFT_DIR = swift_dir + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + # setup running servers + server = manager.Server('test') + # capture stdio + old_stdout = sys.stdout + try: + with open(os.path.join(t, 'output'), 'w+') as f: + sys.stdout = f + # test status for all running + manager.os = MockOs(pids) + self.assertEquals(server.status(), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 4) + for line in output: + self.assert_('test-server running' in line) + # test get single server by number + self.assertEquals(server.status(number=4), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 1) + line = output[0] + self.assert_('test-server running' in line) + conf_four = self.join_swift_dir(conf_files[3]) + self.assert_('4 - %s' % conf_four in line) + # test some servers not running + manager.os = MockOs([1, 2, 3]) + self.assertEquals(server.status(), 0) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 3) + for line in output: + self.assert_('test-server running' in line) + # test single server not running + manager.os = MockOs([1, 2]) + self.assertEquals(server.status(number=3), 1) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 1) + line = output[0] + self.assert_('not running' in line) + conf_three = self.join_swift_dir(conf_files[2]) + self.assert_(conf_three in line) + # test no running pids + manager.os = MockOs([]) + self.assertEquals(server.status(), 1) + output = pop_stream(f).lower() + self.assert_('no test-server running' in output) + # test use provided pids + pids = { + 1: '1.pid', + 2: '2.pid', + } + # shouldn't call get_running_pids + called = [] + + def mock(*args, **kwargs): + called.append(True) + server.get_running_pids = mock + status = server.status(pids=pids) + self.assertEquals(status, 0) + self.assertFalse(called) + output = pop_stream(f).strip().splitlines() + self.assertEquals(len(output), 2) + for line in output: + self.assert_('test-server running' in line) + finally: + sys.stdout = old_stdout + + def test_spawn(self): + + # mocks + class MockProcess(): + + NOTHING = 'default besides None' + STDOUT = 'stdout' + PIPE = 'pipe' + + def __init__(self, pids=None): + if pids is None: + pids = [] + self.pids = (p for p in pids) + + def Popen(self, args, **kwargs): + return MockProc(self.pids.next(), args, **kwargs) + + class MockProc(): + + def __init__(self, pid, args, stdout=MockProcess.NOTHING, + stderr=MockProcess.NOTHING): + self.pid = pid + self.args = args + self.stdout = stdout + if stderr == MockProcess.STDOUT: + self.stderr = self.stdout + else: + self.stderr = stderr + + # 
setup running servers + server = manager.Server('test') + + with temptree(['test-server.conf']) as swift_dir: + manager.SWIFT_DIR = swift_dir + with temptree([]) as t: + manager.RUN_DIR = t + old_subprocess = manager.subprocess + try: + # test single server process calls spawn once + manager.subprocess = MockProcess([1]) + conf_file = self.join_swift_dir('test-server.conf') + # spawn server no kwargs + server.spawn(conf_file) + # test pid file + pid_file = self.join_run_dir('test-server.pid') + self.assert_(os.path.exists(pid_file)) + pid_on_disk = int(open(pid_file).read().strip()) + self.assertEquals(pid_on_disk, 1) + # assert procs args + self.assert_(server.procs) + self.assertEquals(len(server.procs), 1) + proc = server.procs[0] + expected_args = [ + 'swift-test-server', + conf_file, + ] + self.assertEquals(proc.args, expected_args) + # assert stdout is piped + self.assertEquals(proc.stdout, MockProcess.PIPE) + self.assertEquals(proc.stderr, proc.stdout) + # test multi server process calls spawn multiple times + manager.subprocess = MockProcess([11, 12, 13, 14]) + conf1 = self.join_swift_dir('test-server/1.conf') + conf2 = self.join_swift_dir('test-server/2.conf') + conf3 = self.join_swift_dir('test-server/3.conf') + conf4 = self.join_swift_dir('test-server/4.conf') + server = manager.Server('test') + # test server run once + server.spawn(conf1, once=True) + self.assert_(server.procs) + self.assertEquals(len(server.procs), 1) + proc = server.procs[0] + expected_args = ['swift-test-server', conf1, 'once'] + self.assertEquals(proc.args, expected_args) + # assert stdout is piped + self.assertEquals(proc.stdout, MockProcess.PIPE) + self.assertEquals(proc.stderr, proc.stdout) + # test server not daemon + server.spawn(conf2, daemon=False) + self.assert_(server.procs) + self.assertEquals(len(server.procs), 2) + proc = server.procs[1] + expected_args = ['swift-test-server', conf2, 'verbose'] + self.assertEquals(proc.args, expected_args) + # assert stdout is not changed + self.assertEquals(proc.stdout, None) + self.assertEquals(proc.stderr, None) + # test server wait + server.spawn(conf3, wait=False) + self.assert_(server.procs) + self.assertEquals(len(server.procs), 3) + proc = server.procs[2] + # assert stdout is /dev/null + self.assert_(isinstance(proc.stdout, file)) + self.assertEquals(proc.stdout.name, os.devnull) + self.assertEquals(proc.stdout.mode, 'w+b') + self.assertEquals(proc.stderr, proc.stdout) + # test not daemon overrides wait + server.spawn(conf4, wait=False, daemon=False, once=True) + self.assert_(server.procs) + self.assertEquals(len(server.procs), 4) + proc = server.procs[3] + expected_args = ['swift-test-server', conf4, 'once', + 'verbose'] + self.assertEquals(proc.args, expected_args) + # daemon behavior should trump wait, once shouldn't matter + self.assertEquals(proc.stdout, None) + self.assertEquals(proc.stderr, None) + # assert pids + for i, proc in enumerate(server.procs): + pid_file = self.join_run_dir('test-server/%d.pid' % + (i + 1)) + pid_on_disk = int(open(pid_file).read().strip()) + self.assertEquals(pid_on_disk, proc.pid) + finally: + manager.subprocess = old_subprocess + + def test_wait(self): + server = manager.Server('test') + self.assertEquals(server.wait(), 0) + + class MockProcess(Thread): + def __init__(self, delay=0.1, fail_to_start=False): + Thread.__init__(self) + # setup pipe + rfd, wfd = os.pipe() + # subprocess connection to read stdout + self.stdout = os.fdopen(rfd) + # real process connection to write stdout + self._stdout = os.fdopen(wfd, 'w') + self.delay = delay + self.finished = False
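The test_spawn assertions above pin down a small argv/stdio contract for Server.spawn: the command is "swift-<server> <conf>" plus optional 'once' and 'verbose' flags, stdout is piped when waiting, sent to /dev/null when not, and left untouched when running in the foreground. A minimal sketch of that contract, where spawn_args_sketch is an illustrative name rather than anything in swift.common.manager:

    import os
    import subprocess

    def spawn_args_sketch(server, conf_file, once=False, wait=True,
                          daemon=True):
        # argv mirrors what the assertions above expect
        args = ['swift-%s' % server, conf_file]
        if once:
            args.append('once')
        if not daemon:
            args.append('verbose')
        popen_kwargs = {}
        if daemon:
            if wait:
                # capture output so wait() can relay startup messages
                popen_kwargs['stdout'] = subprocess.PIPE
                popen_kwargs['stderr'] = subprocess.STDOUT
            else:
                # fire-and-forget: discard output
                devnull = open(os.devnull, 'w+b')
                popen_kwargs['stdout'] = devnull
                popen_kwargs['stderr'] = devnull
        # daemon=False leaves stdout/stderr inherited from the caller
        return args, popen_kwargs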
+ self.returncode = None + if fail_to_start: + self._returncode = 1 + self.run = self.fail + else: + self._returncode = 0 + + def __enter__(self): + self.start() + return self + + def __exit__(self, *args): + if self.isAlive(): + self.join() + + def close_stdout(self): + self._stdout.flush() + with open(os.devnull, 'wb') as nullfile: + try: + os.dup2(nullfile.fileno(), self._stdout.fileno()) + except OSError: + pass + + def fail(self): + print >>self._stdout, 'mock process started' + sleep(self.delay) # perform setup processing + print >>self._stdout, 'mock process failed to start' + self.close_stdout() + + def communicate(self): + self.returncode = self._returncode + + def run(self): + print >>self._stdout, 'mock process started' + sleep(self.delay) # perform setup processing + print >>self._stdout, 'setup complete!' + self.close_stdout() + sleep(self.delay) # do some more processing + print >>self._stdout, 'mock process finished' + self.finished = True + + with temptree([]) as t: + old_stdout = sys.stdout + try: + with open(os.path.join(t, 'output'), 'w+') as f: + # actually capture the read stdout (for prints) + sys.stdout = f + # test closing pipe in subprocess unblocks read + with MockProcess() as proc: + server.procs = [proc] + status = server.wait() + self.assertEquals(status, 0) + # wait should return as soon as stdout is closed + self.assert_(proc.isAlive()) + self.assertFalse(proc.finished) + self.assert_(proc.finished) # make sure it did finish... + # test output kwarg prints subprocess output + with MockProcess() as proc: + server.procs = [proc] + status = server.wait(output=True) + output = pop_stream(f) + self.assert_('mock process started' in output) + self.assert_('setup complete' in output) + # make sure we don't get prints after stdout was closed + self.assert_('mock process finished' not in output) + # test process which fails to start + with MockProcess(fail_to_start=True) as proc: + server.procs = [proc] + status = server.wait() + self.assertEquals(status, 1) + self.assert_('failed' in pop_stream(f)) + # test multiple procs + procs = [MockProcess() for i in range(3)] + for proc in procs: + proc.start() + server.procs = procs + status = server.wait() + self.assertEquals(status, 0) + for proc in procs: + self.assert_(proc.isAlive()) + for proc in procs: + proc.join() + finally: + sys.stdout = old_stdout + + def test_interact(self): + class MockProcess(): + + def __init__(self, fail=False): + self.returncode = None + if fail: + self._returncode = 1 + else: + self._returncode = 0 + + def communicate(self): + self.returncode = self._returncode + return '', '' + + server = manager.Server('test') + server.procs = [MockProcess()] + self.assertEquals(server.interact(), 0) + server.procs = [MockProcess(fail=True)] + self.assertEquals(server.interact(), 1) + procs = [] + for fail in (False, True, True): + procs.append(MockProcess(fail=fail)) + server.procs = procs + self.assert_(server.interact() > 0) + + def test_launch(self): + # stubs + conf_files = ( + 'proxy-server.conf', + 'auth-server.conf', + 'object-server/1.conf', + 'object-server/2.conf', + 'object-server/3.conf', + 'object-server/4.conf', + ) + pid_files = ( + ('proxy-server.pid', 1), + ('proxy-server/2.pid', 2), + ) + + # mocks + class MockSpawn(): + + def __init__(self, pids=None): + self.conf_files = [] + self.kwargs = [] + if not pids: + def one_forever(): + while True: + yield 1 + self.pids = one_forever() + else: + self.pids = (x for x in pids) + + def __call__(self, conf_file, **kwargs): +
self.conf_files.append(conf_file) + self.kwargs.append(kwargs) + rv = self.pids.next() + if isinstance(rv, Exception): + raise rv + else: + return rv + + with temptree(conf_files) as swift_dir: + manager.SWIFT_DIR = swift_dir + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + old_stdout = sys.stdout + try: + with open(os.path.join(t, 'output'), 'w+') as f: + sys.stdout = f + # can't start server w/o an conf + server = manager.Server('test') + self.assertFalse(server.launch()) + # start mock os running all pids + manager.os = MockOs(pids) + server = manager.Server('proxy') + # can't start server if it's already running + self.assertFalse(server.launch()) + output = pop_stream(f) + self.assert_('running' in output) + conf_file = self.join_swift_dir('proxy-server.conf') + self.assert_(conf_file in output) + pid_file = self.join_run_dir('proxy-server/2.pid') + self.assert_(pid_file in output) + self.assert_('already started' in output) + # no running pids + manager.os = MockOs([]) + # test ignore once for non-start-once server + mock_spawn = MockSpawn([1]) + server.spawn = mock_spawn + conf_file = self.join_swift_dir('proxy-server.conf') + expected = { + 1: conf_file, + } + self.assertEquals(server.launch(once=True), expected) + self.assertEquals(mock_spawn.conf_files, [conf_file]) + expected = { + 'once': False, + } + self.assertEquals(mock_spawn.kwargs, [expected]) + output = pop_stream(f) + self.assert_('Starting' in output) + self.assert_('once' not in output) + # test multi-server kwarg once + server = manager.Server('object-replicator') + mock_spawn = MockSpawn([1, 2, 3, 4]) + server.spawn = mock_spawn + conf1 = self.join_swift_dir('object-server/1.conf') + conf2 = self.join_swift_dir('object-server/2.conf') + conf3 = self.join_swift_dir('object-server/3.conf') + conf4 = self.join_swift_dir('object-server/4.conf') + expected = { + 1: conf1, + 2: conf2, + 3: conf3, + 4: conf4, + } + self.assertEquals(server.launch(once=True), expected) + self.assertEquals(mock_spawn.conf_files, [conf1, conf2, + conf3, conf4]) + expected = { + 'once': True, + } + self.assertEquals(len(mock_spawn.kwargs), 4) + for kwargs in mock_spawn.kwargs: + self.assertEquals(kwargs, expected) + # test number kwarg + mock_spawn = MockSpawn([4]) + server.spawn = mock_spawn + expected = { + 4: conf4, + } + self.assertEquals(server.launch(number=4), expected) + self.assertEquals(mock_spawn.conf_files, [conf4]) + expected = { + 'number': 4 + } + self.assertEquals(mock_spawn.kwargs, [expected]) + # test cmd does not exist + server = manager.Server('auth') + mock_spawn = MockSpawn([OSError(errno.ENOENT, 'blah')]) + server.spawn = mock_spawn + self.assertEquals(server.launch(), {}) + self.assert_('swift-auth-server does not exist' in + pop_stream(f)) + finally: + sys.stdout = old_stdout + + def test_stop(self): + conf_files = ( + 'account-server/1.conf', + 'account-server/2.conf', + 'account-server/3.conf', + 'account-server/4.conf', + ) + pid_files = ( + ('account-reaper/1.pid', 1), + ('account-reaper/2.pid', 2), + ('account-reaper/3.pid', 3), + ('account-reaper/4.pid', 4), + ) + + with temptree(conf_files) as swift_dir: + manager.SWIFT_DIR = swift_dir + files, pids = zip(*pid_files) + with temptree(files, pids) as t: + manager.RUN_DIR = t + # start all pids in mock os + manager.os = MockOs(pids) + server = manager.Server('account-reaper') + # test kill all running pids + pids = server.stop() + self.assertEquals(len(pids), 4) + for pid in (1, 2, 3, 4): + self.assert_(pid in pids) + 
self.assertEquals(manager.os.pid_sigs[pid], + [signal.SIGTERM]) + conf1 = self.join_swift_dir('account-reaper/1.conf') + conf2 = self.join_swift_dir('account-reaper/2.conf') + conf3 = self.join_swift_dir('account-reaper/3.conf') + conf4 = self.join_swift_dir('account-reaper/4.conf') + # reset mock os with only 2 running pids + manager.os = MockOs([3, 4]) + pids = server.stop() + self.assertEquals(len(pids), 2) + for pid in (3, 4): + self.assert_(pid in pids) + self.assertEquals(manager.os.pid_sigs[pid], + [signal.SIGTERM]) + self.assertFalse(os.path.exists(conf1)) + self.assertFalse(os.path.exists(conf2)) + # test number kwarg + manager.os = MockOs([3, 4]) + pids = server.stop(number=3) + self.assertEquals(len(pids), 1) + expected = { + 3: conf3, + } + self.assert_(pids, expected) + self.assertEquals(manager.os.pid_sigs[3], [signal.SIGTERM]) + self.assertFalse(os.path.exists(conf4)) + self.assertFalse(os.path.exists(conf3)) + + +class TestManager(unittest.TestCase): + + def test_create(self): + m = manager.Manager(['test']) + self.assertEquals(len(m.servers), 1) + server = m.servers.pop() + self.assert_(isinstance(server, manager.Server)) + self.assertEquals(server.server, 'test-server') + # test multi-server and simple dedupe + servers = ['object-replicator', 'object-auditor', 'object-replicator'] + m = manager.Manager(servers) + self.assertEquals(len(m.servers), 2) + for server in m.servers: + self.assert_(server.server in servers) + # test all + m = manager.Manager(['all']) + self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) + for server in m.servers: + self.assert_(server.server in manager.ALL_SERVERS) + # test main + m = manager.Manager(['main']) + self.assertEquals(len(m.servers), len(manager.MAIN_SERVERS)) + for server in m.servers: + self.assert_(server.server in manager.MAIN_SERVERS) + # test rest + m = manager.Manager(['rest']) + self.assertEquals(len(m.servers), len(manager.REST_SERVERS)) + for server in m.servers: + self.assert_(server.server in manager.REST_SERVERS) + # test main + rest == all + m = manager.Manager(['main', 'rest']) + self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) + for server in m.servers: + self.assert_(server.server in manager.ALL_SERVERS) + # test dedupe + m = manager.Manager(['main', 'rest', 'proxy', 'object', + 'container', 'account']) + self.assertEquals(len(m.servers), len(manager.ALL_SERVERS)) + for server in m.servers: + self.assert_(server.server in manager.ALL_SERVERS) + # test glob + m = manager.Manager(['object-*']) + object_servers = [s for s in manager.ALL_SERVERS if + s.startswith('object')] + self.assertEquals(len(m.servers), len(object_servers)) + for s in m.servers: + self.assert_(str(s) in object_servers) + m = manager.Manager(['*-replicator']) + replicators = [s for s in manager.ALL_SERVERS if + s.endswith('replicator')] + for s in m.servers: + self.assert_(str(s) in replicators) + + def test_status(self): + class MockServer(): + + def __init__(self, server): + self.server = server + self.called_kwargs = [] + + def status(self, **kwargs): + self.called_kwargs.append(kwargs) + if 'error' in self.server: + return 1 + else: + return 0 + + old_server_class = manager.Server + try: + manager.Server = MockServer + m = manager.Manager(['test']) + status = m.status() + self.assertEquals(status, 0) + m = manager.Manager(['error']) + status = m.status() + self.assertEquals(status, 1) + # test multi-server + m = manager.Manager(['test', 'error']) + kwargs = {'key': 'value'} + status = m.status(**kwargs) + 
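What TestManager.test_status relies on is only that Manager.status fans its kwargs out to every Server and reports failure if any one of them does. A sketch of that aggregation under those assumptions (status_sketch is illustrative, not the shipped implementation):

    def status_sketch(servers, **kwargs):
        # any non-zero per-server status makes the overall status 1
        failures = sum(server.status(**kwargs) for server in servers)
        return 1 if failures else 0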
self.assertEquals(status, 1) + for server in m.servers: + self.assertEquals(server.called_kwargs, [kwargs]) + finally: + manager.Server = old_server_class + + def test_start(self): + def mock_setup_env(): + getattr(mock_setup_env, 'called', []).append(True) + + class MockServer(): + def __init__(self, server): + self.server = server + self.called = defaultdict(list) + + def launch(self, **kwargs): + self.called['launch'].append(kwargs) + + def wait(self, **kwargs): + self.called['wait'].append(kwargs) + return int('error' in self.server) + + def stop(self, **kwargs): + self.called['stop'].append(kwargs) + + def interact(self, **kwargs): + self.called['interact'].append(kwargs) + if 'raise' in self.server: + raise KeyboardInterrupt + elif 'error' in self.server: + return 1 + else: + return 0 + + old_setup_env = manager.setup_env + old_swift_server = manager.Server + try: + manager.setup_env = mock_setup_env + manager.Server = MockServer + + # test no errors on launch + m = manager.Manager(['proxy']) + status = m.start() + self.assertEquals(status, 0) + for server in m.servers: + self.assertEquals(server.called['launch'], [{}]) + + # test error on launch + m = manager.Manager(['proxy', 'error']) + status = m.start() + self.assertEquals(status, 1) + for server in m.servers: + self.assertEquals(server.called['launch'], [{}]) + self.assertEquals(server.called['wait'], [{}]) + + # test interact + m = manager.Manager(['proxy', 'error']) + kwargs = {'daemon': False} + status = m.start(**kwargs) + self.assertEquals(status, 1) + for server in m.servers: + self.assertEquals(server.called['launch'], [kwargs]) + self.assertEquals(server.called['interact'], [kwargs]) + m = manager.Manager(['raise']) + kwargs = {'daemon': False} + status = m.start(**kwargs) + + finally: + manager.setup_env = old_setup_env + manager.Server = old_swift_server + + def test_no_wait(self): + class MockServer(): + def __init__(self, server): + self.server = server + self.called = defaultdict(list) + + def launch(self, **kwargs): + self.called['launch'].append(kwargs) + + def wait(self, **kwargs): + self.called['wait'].append(kwargs) + return int('error' in self.server) + + orig_swift_server = manager.Server + try: + manager.Server = MockServer + # test success + init = manager.Manager(['proxy']) + status = init.no_wait() + self.assertEquals(status, 0) + for server in init.servers: + self.assertEquals(len(server.called['launch']), 1) + called_kwargs = server.called['launch'][0] + self.assertFalse(called_kwargs['wait']) + self.assertFalse(server.called['wait']) + # test no error code status even on error + init = manager.Manager(['error']) + status = init.no_wait() + self.assertEquals(status, 0) + for server in init.servers: + self.assertEquals(len(server.called['launch']), 1) + called_kwargs = server.called['launch'][0] + self.assert_('wait' in called_kwargs) + self.assertFalse(called_kwargs['wait']) + self.assertFalse(server.called['wait']) + # test wait with once option + init = manager.Manager(['updater', 'replicator-error']) + status = init.no_wait(once=True) + self.assertEquals(status, 0) + for server in init.servers: + self.assertEquals(len(server.called['launch']), 1) + called_kwargs = server.called['launch'][0] + self.assert_('wait' in called_kwargs) + self.assertFalse(called_kwargs['wait']) + self.assert_('once' in called_kwargs) + self.assert_(called_kwargs['once']) + self.assertFalse(server.called['wait']) + finally: + manager.Server = orig_swift_server + + def test_no_daemon(self): + class MockServer(): + + def
__init__(self, server): + self.server = server + self.called = defaultdict(list) + + def launch(self, **kwargs): + self.called['launch'].append(kwargs) + + def interact(self, **kwargs): + self.called['interact'].append(kwargs) + return int('error' in self.server) + + orig_swift_server = manager.Server + try: + manager.Server = MockServer + # test success + init = manager.Manager(['proxy']) + stats = init.no_daemon() + self.assertEquals(stats, 0) + # test error + init = manager.Manager(['proxy', 'object-error']) + stats = init.no_daemon() + self.assertEquals(stats, 1) + # test once + init = manager.Manager(['proxy', 'object-error']) + stats = init.no_daemon() + for server in init.servers: + self.assertEquals(len(server.called['launch']), 1) + self.assertEquals(len(server.called['wait']), 0) + self.assertEquals(len(server.called['interact']), 1) + finally: + manager.Server = orig_swift_server + + def test_once(self): + class MockServer(): + + def __init__(self, server): + self.server = server + self.called = defaultdict(list) + + def wait(self, **kwargs): + self.called['wait'].append(kwargs) + if 'error' in self.server: + return 1 + else: + return 0 + + def launch(self, **kwargs): + return self.called['launch'].append(kwargs) + + orig_swift_server = manager.Server + try: + manager.Server = MockServer + # test no errors + init = manager.Manager(['account-reaper']) + status = init.once() + self.assertEquals(status, 0) + # test error code on error + init = manager.Manager(['error-reaper']) + status = init.once() + self.assertEquals(status, 1) + for server in init.servers: + self.assertEquals(len(server.called['launch']), 1) + called_kwargs = server.called['launch'][0] + self.assertEquals(called_kwargs, {'once': True}) + self.assertEquals(len(server.called['wait']), 1) + self.assertEquals(len(server.called['interact']), 0) + finally: + manager.Server = orig_swift_server + + def test_stop(self): + class MockServerFactory(): + class MockServer(): + def __init__(self, pids): + self.pids = pids + + def stop(self, **kwargs): + return self.pids + + def __init__(self, server_pids): + self.server_pids = server_pids + + def __call__(self, server): + return MockServerFactory.MockServer(self.server_pids[server]) + + def mock_watch_server_pids(server_pids, **kwargs): + for server, pids in server_pids.items(): + for pid in pids: + if pid is None: + continue + yield server, pid + + _orig_server = manager.Server + _orig_watch_server_pids = manager.watch_server_pids + try: + manager.watch_server_pids = mock_watch_server_pids + # test stop one server + server_pids = { + 'test': [1] + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + status = m.stop() + self.assertEquals(status, 0) + # test not running + server_pids = { + 'test': [] + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + status = m.stop() + self.assertEquals(status, 1) + # test won't die + server_pids = { + 'test': [None] + } + manager.Server = MockServerFactory(server_pids) + m = manager.Manager(['test']) + status = m.stop() + self.assertEquals(status, 1) + + finally: + manager.Server = _orig_server + manager.watch_server_pids = _orig_watch_server_pids + + # TODO: more tests + def test_shutdown(self): + m = manager.Manager(['test']) + m.stop_was_called = False + + def mock_stop(*args, **kwargs): + m.stop_was_called = True + expected = {'graceful': True} + self.assertEquals(kwargs, expected) + return 0 + m.stop = mock_stop + status = m.shutdown() + self.assertEquals(status, 0) + 
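The stop() expectations exercised above (0 when the signalled pids die, 1 when nothing was running or a pid refuses to exit) can be read as a small watch loop. A rough sketch under those assumptions, with stop_sketch and the timeout value purely illustrative:

    def stop_sketch(servers, watch_server_pids, kill_wait=15):
        # signal every server and remember which pids we expect to die
        signaled = dict((server, server.stop()) for server in servers)
        if not any(signaled.values()):
            # nothing was running to begin with
            return 1
        killed = set(watch_server_pids(signaled, timeout=kill_wait))
        for server, pids in signaled.items():
            for pid in pids:
                if (server, pid) not in killed:
                    # something we signalled is still hanging around
                    return 1
        return 0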
self.assertEquals(m.stop_was_called, True) + + def test_restart(self): + m = manager.Manager(['test']) + m.stop_was_called = False + + def mock_stop(*args, **kwargs): + m.stop_was_called = True + return 0 + m.start_was_called = False + + def mock_start(*args, **kwargs): + m.start_was_called = True + return 0 + m.stop = mock_stop + m.start = mock_start + status = m.restart() + self.assertEquals(status, 0) + self.assertEquals(m.stop_was_called, True) + self.assertEquals(m.start_was_called, True) + + def test_reload(self): + class MockManager(): + called = defaultdict(list) + + def __init__(self, servers): + pass + + @classmethod + def reset_called(cls): + cls.called = defaultdict(list) + + def stop(self, **kwargs): + MockManager.called['stop'].append(kwargs) + return 0 + + def start(self, **kwargs): + MockManager.called['start'].append(kwargs) + return 0 + + _orig_manager = manager.Manager + try: + m = _orig_manager(['auth']) + for server in m.servers: + self.assert_(server.server in + manager.GRACEFUL_SHUTDOWN_SERVERS) + manager.Manager = MockManager + status = m.reload() + self.assertEquals(status, 0) + expected = { + 'start': [{'graceful': True}], + 'stop': [{'graceful': True}], + } + self.assertEquals(MockManager.called, expected) + # test force graceful + MockManager.reset_called() + m = _orig_manager(['*-server']) + self.assert_(len(m.servers), 4) + for server in m.servers: + self.assert_(server.server in + manager.GRACEFUL_SHUTDOWN_SERVERS) + manager.Manager = MockManager + status = m.reload(graceful=False) + self.assertEquals(status, 0) + expected = { + 'start': [{'graceful': True}] * 4, + 'stop': [{'graceful': True}] * 4, + } + self.assertEquals(MockManager.called, expected) + + finally: + manager.Manager = _orig_manager + + def test_force_reload(self): + m = manager.Manager(['test']) + m.reload_was_called = False + + def mock_reload(*args, **kwargs): + m.reload_was_called = True + return 0 + m.reload = mock_reload + status = m.force_reload() + self.assertEquals(status, 0) + self.assertEquals(m.reload_was_called, True) + + def test_get_command(self): + m = manager.Manager(['test']) + self.assertEquals(m.start, m.get_command('start')) + self.assertEquals(m.force_reload, m.get_command('force-reload')) + self.assertEquals(m.get_command('force-reload'), + m.get_command('force_reload')) + self.assertRaises(manager.UnknownCommandError, m.get_command, + 'no_command') + self.assertRaises(manager.UnknownCommandError, m.get_command, + '__init__') + + def test_list_commands(self): + for cmd, help in manager.Manager.list_commands(): + method = getattr(manager.Manager, cmd.replace('-', '_'), None) + self.assert_(method, '%s is not a command' % cmd) + self.assert_(getattr(method, 'publicly_accessible', False)) + self.assertEquals(method.__doc__.strip(), help) + + def test_run_command(self): + m = manager.Manager(['test']) + m.cmd_was_called = False + + def mock_cmd(*args, **kwargs): + m.cmd_was_called = True + expected = {'kw1': True, 'kw2': False} + self.assertEquals(kwargs, expected) + return 0 + mock_cmd.publicly_accessible = True + m.mock_cmd = mock_cmd + kwargs = {'kw1': True, 'kw2': False} + status = m.run_command('mock_cmd', **kwargs) + self.assertEquals(status, 0) + self.assertEquals(m.cmd_was_called, True) + +if __name__ == '__main__': + unittest.main() diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 94257415f1..9dd375590e 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -16,9 +16,11 @@ """ Tests for swift.common.utils 
""" from __future__ import with_statement +from test.unit import temptree import logging import mimetools import os +import errno import socket import sys import time @@ -31,6 +33,8 @@ from tempfile import NamedTemporaryFile from eventlet import sleep +from swift.common.exceptions import TimeoutError, MessageTimeout, \ + ConnectionTimeout from swift.common import utils @@ -76,6 +80,17 @@ class MockSys(): __stderr__ = sys.__stderr__ +def reset_loggers(): + if hasattr(utils.get_logger, 'handler4logger'): + for logger, handler in utils.get_logger.handler4logger.items(): + logger.removeHandler(handler) + delattr(utils.get_logger, 'handler4logger') + if hasattr(utils.get_logger, 'console_handler4logger'): + for logger, h in utils.get_logger.console_handler4logger.items(): + logger.removeHandler(h) + delattr(utils.get_logger, 'console_handler4logger') + + class TestUtils(unittest.TestCase): """ Tests for swift.common.utils """ @@ -287,25 +302,154 @@ Error: unable to locate %s def test_get_logger(self): sio = StringIO() - logger = logging.getLogger() + logger = logging.getLogger('server') logger.addHandler(logging.StreamHandler(sio)) - logger = utils.get_logger(None, 'server') + logger = utils.get_logger(None, 'server', log_route='server') logger.warn('test1') self.assertEquals(sio.getvalue(), 'test1\n') logger.debug('test2') self.assertEquals(sio.getvalue(), 'test1\n') - logger = utils.get_logger({'log_level': 'DEBUG'}, 'server') + logger = utils.get_logger({'log_level': 'DEBUG'}, 'server', + log_route='server') logger.debug('test3') self.assertEquals(sio.getvalue(), 'test1\ntest3\n') # Doesn't really test that the log facility is truly being used all the # way to syslog; but exercises the code. - logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server') + logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server', + log_route='server') logger.warn('test4') self.assertEquals(sio.getvalue(), 'test1\ntest3\ntest4\n') + # make sure debug doesn't log by default logger.debug('test5') self.assertEquals(sio.getvalue(), 'test1\ntest3\ntest4\n') + # make sure notice lvl logs by default + logger.notice('test6') + + def test_clean_logger_exception(self): + # setup stream logging + sio = StringIO() + logger = utils.get_logger(None) + handler = logging.StreamHandler(sio) + logger.logger.addHandler(handler) + + def strip_value(sio): + v = sio.getvalue() + sio.truncate(0) + return v + + def log_exception(exc): + try: + raise exc + except (Exception, TimeoutError): + logger.exception('blah') + try: + # establish base case + self.assertEquals(strip_value(sio), '') + logger.info('test') + self.assertEquals(strip_value(sio), 'test\n') + self.assertEquals(strip_value(sio), '') + logger.info('test') + logger.info('test') + self.assertEquals(strip_value(sio), 'test\ntest\n') + self.assertEquals(strip_value(sio), '') + + # test OSError + for en in (errno.EIO, errno.ENOSPC): + log_exception(OSError(en, 'my %s error message' % en)) + log_msg = strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('my %s error message' % en in log_msg) + # unfiltered + log_exception(OSError()) + self.assert_('Traceback' in strip_value(sio)) + + # test socket.error + log_exception(socket.error(errno.ECONNREFUSED, + 'my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('errno.ECONNREFUSED message test' not in log_msg) + self.assert_('Connection refused' in log_msg) + log_exception(socket.error(errno.EHOSTUNREACH, + 'my error message')) + log_msg = 
strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('my error message' not in log_msg) + self.assert_('Host unreachable' in log_msg) + log_exception(socket.error(errno.ETIMEDOUT, 'my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('my error message' not in log_msg) + self.assert_('Connection timeout' in log_msg) + # unfiltered + log_exception(socket.error(0, 'my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' in log_msg) + self.assert_('my error message' in log_msg) + + # test eventlet.Timeout + log_exception(ConnectionTimeout(42, 'my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('ConnectionTimeout' in log_msg) + self.assert_('(42s)' in log_msg) + self.assert_('my error message' not in log_msg) + log_exception(MessageTimeout(42, 'my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' not in log_msg) + self.assert_('MessageTimeout' in log_msg) + self.assert_('(42s)' in log_msg) + self.assert_('my error message' in log_msg) + + # test unhandled + log_exception(Exception('my error message')) + log_msg = strip_value(sio) + self.assert_('Traceback' in log_msg) + self.assert_('my error message' in log_msg) + + finally: + logger.logger.removeHandler(handler) + reset_loggers() + + def test_txn_formatter(self): + # setup stream logging + sio = StringIO() + logger = utils.get_logger(None) + handler = logging.StreamHandler(sio) + handler.setFormatter(utils.TxnFormatter()) + logger.logger.addHandler(handler) + + def strip_value(sio): + v = sio.getvalue() + sio.truncate(0) + return v + + try: + self.assertFalse(logger.txn_id) + logger.error('my error message') + log_msg = strip_value(sio) + self.assert_('my error message' in log_msg) + self.assert_('txn' not in log_msg) + logger.txn_id = '12345' + logger.error('test') + log_msg = strip_value(sio) + self.assert_('txn' in log_msg) + self.assert_('12345' in log_msg) + # test no txn on info message + self.assertEquals(logger.txn_id, '12345') + logger.info('test') + log_msg = strip_value(sio) + self.assert_('txn' not in log_msg) + self.assert_('12345' not in log_msg) + # test txn already in message + self.assertEquals(logger.txn_id, '12345') + logger.warn('test 12345 test') + self.assertEquals(strip_value(sio), 'test 12345 test\n') + finally: + logger.logger.removeHandler(handler) + reset_loggers() def test_storage_directory(self): self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'), @@ -391,56 +535,71 @@ log_name = yarr''' logger = utils.get_logger(None, 'dummy') # mock utils system modules - utils.sys = MockSys() - utils.os = MockOs() + _orig_sys = utils.sys + _orig_os = utils.os + try: + utils.sys = MockSys() + utils.os = MockOs() - # basic test - utils.capture_stdio(logger) - self.assert_(utils.sys.excepthook is not None) - self.assertEquals(utils.os.closed_fds, [0, 1, 2]) - self.assert_(utils.sys.stdout is not None) - self.assert_(utils.sys.stderr is not None) + # basic test + utils.capture_stdio(logger) + self.assert_(utils.sys.excepthook is not None) + self.assertEquals(utils.os.closed_fds, [0, 1, 2]) + self.assert_(utils.sys.stdout is not None) + self.assert_(utils.sys.stderr is not None) - # reset; test same args, but exc when trying to close stdio - utils.os = MockOs(raise_funcs=('dup2',)) - utils.sys = MockSys() + # reset; test same args, but exc when trying to close stdio + utils.os = MockOs(raise_funcs=('dup2',)) + utils.sys = MockSys() - # test 
unable to close stdio - utils.capture_stdio(logger) - self.assert_(utils.sys.excepthook is not None) - self.assertEquals(utils.os.closed_fds, []) - self.assert_(utils.sys.stdout is not None) - self.assert_(utils.sys.stderr is not None) + # test unable to close stdio + utils.capture_stdio(logger) + self.assert_(utils.sys.excepthook is not None) + self.assertEquals(utils.os.closed_fds, []) + self.assert_(utils.sys.stdout is not None) + self.assert_(utils.sys.stderr is not None) - # reset; test some other args - logger = utils.get_logger(None, log_to_console=True) - utils.os = MockOs() - utils.sys = MockSys() + # reset; test some other args + logger = utils.get_logger(None, log_to_console=True) + utils.os = MockOs() + utils.sys = MockSys() - # test console log - utils.capture_stdio(logger, capture_stdout=False, - capture_stderr=False) - self.assert_(utils.sys.excepthook is not None) - # when logging to console, stderr remains open - self.assertEquals(utils.os.closed_fds, [0, 1]) - logger.logger.removeHandler(utils.get_logger.console) - # stdio not captured - self.assertFalse(hasattr(utils.sys, 'stdout')) - self.assertFalse(hasattr(utils.sys, 'stderr')) + # test console log + utils.capture_stdio(logger, capture_stdout=False, + capture_stderr=False) + self.assert_(utils.sys.excepthook is not None) + # when logging to console, stderr remains open + self.assertEquals(utils.os.closed_fds, [0, 1]) + reset_loggers() + + # stdio not captured + self.assertFalse(hasattr(utils.sys, 'stdout')) + self.assertFalse(hasattr(utils.sys, 'stderr')) + reset_loggers() + finally: + utils.sys = _orig_sys + utils.os = _orig_os def test_get_logger_console(self): - reload(utils) # reset get_logger attrs + reset_loggers() logger = utils.get_logger(None) - self.assertFalse(hasattr(utils.get_logger, 'console')) + console_handlers = [h for h in logger.logger.handlers if + isinstance(h, logging.StreamHandler)] + self.assertFalse(console_handlers) logger = utils.get_logger(None, log_to_console=True) - self.assert_(hasattr(utils.get_logger, 'console')) - self.assert_(isinstance(utils.get_logger.console, - logging.StreamHandler)) + console_handlers = [h for h in logger.logger.handlers if + isinstance(h, logging.StreamHandler)] + self.assert_(console_handlers) # make sure you can't have two console handlers - old_handler = utils.get_logger.console + self.assertEquals(len(console_handlers), 1) + old_handler = console_handlers[0] logger = utils.get_logger(None, log_to_console=True) - self.assertNotEquals(utils.get_logger.console, old_handler) - logger.logger.removeHandler(utils.get_logger.console) + console_handlers = [h for h in logger.logger.handlers if + isinstance(h, logging.StreamHandler)] + self.assertEquals(len(console_handlers), 1) + new_handler = console_handlers[0] + self.assertNotEquals(new_handler, old_handler) + reset_loggers() def test_ratelimit_sleep(self): running_time = 0 @@ -468,6 +627,28 @@ log_name = yarr''' total += i self.assertTrue(abs(50 - (time.time() - start) * 100) < 10) + def test_urlparse(self): + parsed = utils.urlparse('http://127.0.0.1/') + self.assertEquals(parsed.scheme, 'http') + self.assertEquals(parsed.hostname, '127.0.0.1') + self.assertEquals(parsed.path, '/') + + parsed = utils.urlparse('http://127.0.0.1:8080/') + self.assertEquals(parsed.port, 8080) + + parsed = utils.urlparse('https://127.0.0.1/') + self.assertEquals(parsed.scheme, 'https') + + parsed = utils.urlparse('http://[::1]/') + self.assertEquals(parsed.hostname, '::1') + + parsed = utils.urlparse('http://[::1]:8080/') + 
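The urlparse assertions here and just below expect more than the stdlib gives on its own: bracketed RFC 2732 hosts must come back without the brackets, and a missing netloc should yield '' rather than None for the hostname. A self-contained sketch of a wrapper with that behaviour (ParsedUrl is an illustrative stand-in, not the helper in swift.common.utils):

    try:
        from urllib.parse import urlparse as stdlib_urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse as stdlib_urlparse      # Python 2

    class ParsedUrl(object):
        def __init__(self, url):
            parsed = stdlib_urlparse(url)
            self.scheme = parsed.scheme
            self.path = parsed.path
            # drop any userinfo, keep host[:port]
            self.netloc = parsed.netloc.rsplit('@', 1)[-1]

        @property
        def hostname(self):
            if self.netloc.startswith('['):
                # RFC 2732 literal, e.g. http://[::1]:8080/
                return self.netloc[1:].split(']', 1)[0]
            return self.netloc.rsplit(':', 1)[0]

        @property
        def port(self):
            rest = self.netloc.split(']', 1)[-1]
            if ':' in rest:
                return int(rest.rsplit(':', 1)[1])
            return None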
self.assertEquals(parsed.hostname, '::1') + self.assertEquals(parsed.port, 8080) + + parsed = utils.urlparse('www.example.com') + self.assertEquals(parsed.hostname, '') + def test_ratelimit_sleep_with_sleep(self): running_time = 0 start = time.time() @@ -480,5 +661,86 @@ log_name = yarr''' self.assertTrue(abs(100 - (time.time() - start) * 100) < 10) + def test_search_tree(self): + # file match & ext miss + with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t: + asdf = utils.search_tree(t, 'a*', '.conf') + self.assertEquals(len(asdf), 1) + self.assertEquals(asdf[0], + os.path.join(t, 'asdf.conf')) + + # multi-file match & glob miss & sort + with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t: + app_bins = utils.search_tree(t, 'app*', 'bin') + self.assertEquals(len(app_bins), 2) + self.assertEquals(app_bins[0], + os.path.join(t, 'apple.bin')) + self.assertEquals(app_bins[1], + os.path.join(t, 'application.bin')) + + # test file in folder & ext miss & glob miss + files = ( + 'sub/file1.ini', + 'sub/file2.conf', + 'sub.bin', + 'bus.ini', + 'bus/file3.ini', + ) + with temptree(files) as t: + sub_ini = utils.search_tree(t, 'sub*', '.ini') + self.assertEquals(len(sub_ini), 1) + self.assertEquals(sub_ini[0], + os.path.join(t, 'sub/file1.ini')) + + # test multi-file in folder & sub-folder & ext miss & glob miss + files = ( + 'folder_file.txt', + 'folder/1.txt', + 'folder/sub/2.txt', + 'folder2/3.txt', + 'Folder3/4.txt' + 'folder.rc', + ) + with temptree(files) as t: + folder_texts = utils.search_tree(t, 'folder*', '.txt') + self.assertEquals(len(folder_texts), 4) + f1 = os.path.join(t, 'folder_file.txt') + f2 = os.path.join(t, 'folder/1.txt') + f3 = os.path.join(t, 'folder/sub/2.txt') + f4 = os.path.join(t, 'folder2/3.txt') + for f in [f1, f2, f3, f4]: + self.assert_(f in folder_texts) + + def test_write_file(self): + with temptree([]) as t: + file_name = os.path.join(t, 'test') + utils.write_file(file_name, 'test') + with open(file_name, 'r') as f: + contents = f.read() + self.assertEquals(contents, 'test') + # and also subdirs + file_name = os.path.join(t, 'subdir/test2') + utils.write_file(file_name, 'test2') + with open(file_name, 'r') as f: + contents = f.read() + self.assertEquals(contents, 'test2') + # but can't over-write files + file_name = os.path.join(t, 'subdir/test2/test3') + self.assertRaises(IOError, utils.write_file, file_name, + 'test3') + + def test_remove_file(self): + with temptree([]) as t: + file_name = os.path.join(t, 'blah.pid') + # assert no raise + self.assertEquals(os.path.exists(file_name), False) + self.assertEquals(utils.remove_file(file_name), None) + with open(file_name, 'w') as f: + f.write('1') + self.assert_(os.path.exists(file_name)) + self.assertEquals(utils.remove_file(file_name), None) + self.assertFalse(os.path.exists(file_name)) + + if __name__ == '__main__': unittest.main() diff --git a/test/unit/obj/test_auditor.py b/test/unit/obj/test_auditor.py index 66540a3693..14d58480dd 100644 --- a/test/unit/obj/test_auditor.py +++ b/test/unit/obj/test_auditor.py @@ -14,7 +14,7 @@ # limitations under the License. 
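For reference while reading the test_search_tree and test_write_file cases added above: the behaviour they pin down amounts to globbing for the pattern under the root, recursing into any matching directory for files with the extension, and creating missing parent directories before writing. A sketch under those assumptions (these helpers are illustrative, not the bodies shipped in swift.common.utils):

    import glob
    import os

    def search_tree_sketch(root, glob_match, ext):
        found = []
        for path in glob.glob(os.path.join(root, glob_match)):
            if path.endswith(ext):
                found.append(path)
            elif os.path.isdir(path):
                # recurse into matching directories for *ext files
                for dirpath, _dirs, names in os.walk(path):
                    found.extend(os.path.join(dirpath, name)
                                 for name in names if name.endswith(ext))
        return sorted(found)

    def write_file_sketch(path, contents):
        dirname = os.path.dirname(path)
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        # opening below a regular file raises the IOError the test expects
        with open(path, 'w') as f:
            f.write(contents)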
# TODO: Tests -from test import unit as _setup_mocks +from test import unit import unittest import tempfile import os @@ -57,6 +57,7 @@ class TestAuditor(unittest.TestCase): def tearDown(self): rmtree(os.path.dirname(self.testdir), ignore_errors=1) + unit.xattr_data = {} def test_object_audit_extra_data(self): self.auditor = auditor.ObjectAuditor(self.conf) diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index fda3281ab9..babcf99eca 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -16,6 +16,7 @@ from __future__ import with_statement import cPickle as pickle import logging +from logging.handlers import SysLogHandler import os import sys import unittest @@ -468,8 +469,138 @@ class TestController(unittest.TestCase): test(404, 507, 503) test(503, 503, 503) + class TestProxyServer(unittest.TestCase): + def test_access_log(self): + + class MyApp(proxy_server.Application): + + def handle_request(self, req): + resp = Response(request=req) + req.response = resp + req.start_time = time() + return resp + + def start_response(*args): + pass + + class MockLogger(): + + def __init__(self): + self.buffer = StringIO() + + def info(self, msg, args=None): + if args: + msg = msg % args + self.buffer.write(msg) + + def strip_value(self): + rv = self.buffer.getvalue() + self.buffer.truncate(0) + return rv + + class SnarfStream(object): + # i can't seem to subclass cStringIO + + def __init__(self, *args, **kwargs): + self.sio = StringIO() + + def strip_value(self): + rv = self.getvalue().strip() + self.truncate(0) + return rv + + def __getattr__(self, name): + try: + return object.__getattr__(self, name) + except AttributeError: + try: + return getattr(self.sio, name) + except AttributeError: + return self.__getattribute__(name) + + snarf = SnarfStream() + _orig_get_logger = proxy_server.get_logger + + def mock_get_logger(*args, **kwargs): + if kwargs.get('log_route') != 'proxy-access': + return _orig_get_logger(*args, **kwargs) + kwargs['log_route'] = 'snarf' + logger = _orig_get_logger(*args, **kwargs) + if [h for h in logger.logger.handlers if + isinstance(h, logging.StreamHandler) and h.stream is snarf]: + # snarf handler already setup! 
+ return logger + formatter = logger.logger.handlers[0].formatter + formatter._fmt += ' %(levelname)s' + snarf_handler = logging.StreamHandler(snarf) + snarf_handler.setFormatter(formatter) + logger.logger.addHandler(snarf_handler) + return logger + + def test_conf(conf): + app = MyApp(conf, memcache=FakeMemcache(), account_ring=FakeRing(), + container_ring=FakeRing(), object_ring=FakeRing()) + req = Request.blank('') + app(req.environ, start_response) + + try: + proxy_server.get_logger = mock_get_logger + test_conf({}) + line = snarf.strip_value() + print line + self.assert_(line.startswith('swift')) + self.assert_(line.endswith('INFO')) + test_conf({'log_name': 'snarf-test'}) + line = snarf.strip_value() + print line + self.assert_(line.startswith('snarf-test')) + self.assert_(line.endswith('INFO')) + test_conf({'log_name': 'snarf-test', 'log_level': 'ERROR'}) + line = snarf.strip_value() + print line + self.assertFalse(line) + test_conf({'log_name': 'snarf-test', 'log_level': 'ERROR', + 'access_log_name': 'access-test', + 'access_log_level': 'INFO'}) + line = snarf.strip_value() + print line + self.assert_(line.startswith('access-test')) + self.assert_(line.endswith('INFO')) + + # test facility + def get_facility(logger): + h = [h for h in logger.logger.handlers if + isinstance(h, SysLogHandler)][0] + return h.facility + + conf = {'log_facility': 'LOG_LOCAL0'} + app = MyApp(conf, memcache=FakeMemcache(), account_ring=FakeRing(), + container_ring=FakeRing(), object_ring=FakeRing()) + self.assertEquals(get_facility(app.logger), + SysLogHandler.LOG_LOCAL0) + self.assertEquals(get_facility(app.access_logger), + SysLogHandler.LOG_LOCAL0) + conf = {'log_facility': 'LOG_LOCAL0', + 'access_log_facility': 'LOG_LOCAL1'} + app = MyApp(conf, memcache=FakeMemcache(), account_ring=FakeRing(), + container_ring=FakeRing(), object_ring=FakeRing()) + self.assertEquals(get_facility(app.logger), + SysLogHandler.LOG_LOCAL0) + self.assertEquals(get_facility(app.access_logger), + SysLogHandler.LOG_LOCAL1) + conf = {'access_log_facility': 'LOG_LOCAL1'} + app = MyApp(conf, memcache=FakeMemcache(), account_ring=FakeRing(), + container_ring=FakeRing(), object_ring=FakeRing()) + self.assertEquals(get_facility(app.logger), + SysLogHandler.LOG_LOCAL0) + self.assertEquals(get_facility(app.access_logger), + SysLogHandler.LOG_LOCAL1) + + finally: + proxy_server.get_logger = _orig_get_logger + def test_unhandled_exception(self): class MyApp(proxy_server.Application): @@ -1808,8 +1939,8 @@ class TestObjectController(unittest.TestCase): def info(self, msg): self.msg = msg - orig_logger = prosrv.logger - prosrv.logger = Logger() + orig_logger, orig_access_logger = prosrv.logger, prosrv.access_logger + prosrv.logger = prosrv.access_logger = Logger() sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() fd.write( @@ -1825,11 +1956,9 @@ class TestObjectController(unittest.TestCase): prosrv.logger.msg) exp = 'host1' self.assertEquals(prosrv.logger.msg[:len(exp)], exp) - prosrv.logger = orig_logger # Turn on header logging. 
- orig_logger = prosrv.logger - prosrv.logger = Logger() + prosrv.logger = prosrv.access_logger = Logger() prosrv.log_headers = True sock = connect_tcp(('localhost', prolis.getsockname()[1])) fd = sock.makefile() @@ -1843,7 +1972,7 @@ class TestObjectController(unittest.TestCase): self.assert_('Goofy-Header%3A%20True' in prosrv.logger.msg, prosrv.logger.msg) prosrv.log_headers = False - prosrv.logger = orig_logger + prosrv.logger, prosrv.access_logger = orig_logger, orig_access_logger def test_chunked_put_utf8_all_the_way_down(self): # Test UTF-8 Unicode all the way through the system diff --git a/test/unit/stats/test_log_uploader.py b/test/unit/stats/test_log_uploader.py index 3585111750..b82e0ce02c 100644 --- a/test/unit/stats/test_log_uploader.py +++ b/test/unit/stats/test_log_uploader.py @@ -13,16 +13,154 @@ # See the License for the specific language governing permissions and # limitations under the License. -# TODO: Tests +# TODO: More tests import unittest +import os +from datetime import datetime +from tempfile import mkdtemp +from shutil import rmtree + from swift.stats import log_uploader +import logging +logging.basicConfig(level=logging.DEBUG) +LOGGER = logging.getLogger() + +DEFAULT_GLOB = '%Y%m%d%H' + class TestLogUploader(unittest.TestCase): - def test_placeholder(self): - pass + def test_upload_all_logs(self): + + class MockInternalProxy(): + + def create_container(self, *args, **kwargs): + pass + + class MonkeyLogUploader(log_uploader.LogUploader): + + def __init__(self, conf, logger=LOGGER): + self.log_dir = conf['log_dir'] + self.filename_format = conf.get('filename_format', + DEFAULT_GLOB) + self.new_log_cutoff = 0 + self.logger = logger + self.internal_proxy = MockInternalProxy() + self.swift_account = '' + self.container_name = '' + + self.uploaded_files = [] + + def upload_one_log(self, filename, year, month, day, hour): + d = {'year': year, 'month': month, 'day': day, 'hour': hour} + self.uploaded_files.append((filename, d)) + + tmpdir = mkdtemp() + try: + today = datetime.now() + year = today.year + month = today.month + day = today.day + + today_str = today.strftime('%Y%m%d') + time_strs = [] + for i in range(24): + time_strs.append('%s%0.2d' % (today_str, i)) + for ts in time_strs: + open(os.path.join(tmpdir, ts), 'w').close() + + conf = {'log_dir': tmpdir} + uploader = MonkeyLogUploader(conf) + uploader.upload_all_logs() + self.assertEquals(len(uploader.uploaded_files), 24) + for i, file_date in enumerate(sorted(uploader.uploaded_files)): + d = {'year': year, 'month': month, 'day': day, 'hour': i} + for k, v in d.items(): + d[k] = '%0.2d' % v + expected = (os.path.join(tmpdir, '%s%0.2d' % + (today_str, i)), d) + self.assertEquals(file_date, expected) + finally: + rmtree(tmpdir) + + tmpdir = mkdtemp() + try: + today = datetime.now() + year = today.year + month = today.month + day = today.day + + today_str = today.strftime('%Y%m%d') + time_strs = [] + for i in range(24): + time_strs.append('%s-%0.2d00' % (today_str, i)) + for ts in time_strs: + open(os.path.join(tmpdir, 'swift-blah_98764.%s-2400.tar.gz' % + ts), 'w').close() + + open(os.path.join(tmpdir, 'swift.blah_98764.%s-2400.tar.gz' % ts), + 'w').close() + open(os.path.join(tmpdir, 'swift-blah_98764.%s-2400.tar.g' % ts), + 'w').close() + open(os.path.join(tmpdir, + 'swift-blah_201102160100.%s-2400.tar.gz' % + '201102160100'), 'w').close() + + conf = { + 'log_dir': '%s/' % tmpdir, + 'filename_format': 'swift-blah_98764.%Y%m%d-%H*.tar.gz', + } + uploader = MonkeyLogUploader(conf) + uploader.upload_all_logs() + 
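These upload_all_logs cases assume the configured filename_format (a strftime-style pattern, optionally containing shell wildcards) is what ties a file on disk back to a year/month/day/hour. One way to picture that mapping, as a sketch rather than the uploader's actual parsing code:

    import re

    def pattern_sketch(filename_format):
        """Turn a '%Y%m%d%H'-style format into (glob, compiled regex)."""
        glob_pattern = filename_format
        regex_pattern = re.escape(filename_format)
        for fmt, width, group in (('%Y', 4, 'year'), ('%m', 2, 'month'),
                                  ('%d', 2, 'day'), ('%H', 2, 'hour')):
            glob_pattern = glob_pattern.replace(fmt, '?' * width)
            regex_pattern = regex_pattern.replace(
                re.escape(fmt), '(?P<%s>\\d{%d})' % (group, width))
        # literal wildcards in the format stay wildcards in both forms
        regex_pattern = regex_pattern.replace(re.escape('*'), '.*')
        return glob_pattern, re.compile(regex_pattern)

    # e.g. for the second case above (the filename is illustrative):
    # glob_pat, regex = pattern_sketch('swift-blah_98764.%Y%m%d-%H*.tar.gz')
    # regex.match('swift-blah_98764.20110216-2300-2400.tar.gz').groupdict()
    # -> {'year': '2011', 'month': '02', 'day': '16', 'hour': '23'}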
self.assertEquals(len(uploader.uploaded_files), 24) + for i, file_date in enumerate(sorted(uploader.uploaded_files)): + filename, date_dict = file_date + filename = os.path.basename(filename) + self.assert_(today_str in filename, filename) + self.assert_(filename.startswith('swift'), filename) + self.assert_(filename.endswith('tar.gz'), filename) + d = {'year': year, 'month': month, 'day': day, 'hour': i} + for k, v in d.items(): + d[k] = '%0.2d' % v + self.assertEquals(d, date_dict) + finally: + rmtree(tmpdir) + + tmpdir = mkdtemp() + try: + today = datetime.now() + year = today.year + month = today.month + day = today.day + + today_str = today.strftime('%Y%m%d') + time_strs = [] + for i in range(24): + time_strs.append('%s%0.2d' % (today_str, i)) + for i, ts in enumerate(time_strs): + open(os.path.join(tmpdir, '%s.%s.log' % (i, ts)), 'w').close() + + conf = { + 'log_dir': tmpdir, + 'filename_format': '*.%Y%m%d%H.log', + } + uploader = MonkeyLogUploader(conf) + uploader.upload_all_logs() + self.assertEquals(len(uploader.uploaded_files), 24) + for i, file_date in enumerate(sorted(uploader.uploaded_files)): + d = {'year': year, 'month': month, 'day': day, 'hour': i} + for k, v in d.items(): + d[k] = '%0.2d' % v + expected = (os.path.join(tmpdir, '%s.%s%0.2d.log' % + (i, today_str, i)), d) + # TODO: support wildcards before the date pattern + # (i.e. relative offsets) + #print file_date + #self.assertEquals(file_date, expected) + finally: + rmtree(tmpdir) if __name__ == '__main__':