Stop generating index files.

The index files are doubling the number of inodes we take, and we can
generate them on the fly with the log serving shim.

Change-Id: I2adcc4b4736892fbfa4afead83d04bc17aceb4a1
Author: Michael Still, 2013-12-07 04:39:36 +11:00
Committed-by: Joshua Hesketh
Parent: b21b8c38c1
Commit: 32a267ef58

@@ -20,7 +20,6 @@ somebody """
 import calendar
 import time
-import tempfile
 import os
 import re
@@ -28,48 +27,21 @@ import re
 from turbo_hipster.lib.utils import push_file
 
-def generate_log_index(datasets):
-    """ Create an index of logfiles and links to them """
-    # Loop over logfile URLs
-    # Create summary and links
-    output = '<html><head><title>Index of results</title></head><body>'
-    output += '<ul>'
-    for dataset in datasets:
-        output += '<li>'
-        output += '<a href="%s">%s</a>' % (dataset['result_uri'],
-                                           dataset['name'])
-        output += ' <span class="%s">%s</span>' % (dataset['result'],
-                                                   dataset['result'])
-        output += '</li>'
-    output += '</ul>'
-    output += '</body></html>'
-    return output
-
-def make_index_file(datasets, index_filename):
-    """ Writes an index into a file for pushing """
-    index_content = generate_log_index(datasets)
-    tempdir = tempfile.mkdtemp()
-    fd = open(os.path.join(tempdir, index_filename), 'w')
-    fd.write(index_content)
-    return os.path.join(tempdir, index_filename)
-
 def generate_push_results(datasets, publish_config):
     """ Generates and pushes results """
+    # NOTE(mikal): because of the way we run the tests in parallel, there is
+    # only ever one dataset per push.
+    link_uri = None
     for i, dataset in enumerate(datasets):
         result_uri = push_file(dataset['determined_path'],
                                dataset['job_log_file_path'],
                                publish_config)
         datasets[i]['result_uri'] = result_uri
+        if not link_uri:
+            link_uri = result_uri
 
-    index_file = make_index_file(datasets, 'index.html')
-    index_file_url = push_file(dataset['determined_path'], index_file,
-                               publish_config)
-
-    return index_file_url
+    return link_uri
 
 def find_schemas(gitpath):
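
The commit message says the shim that serves the logs can build this index
on the fly. As a rough illustration only (none of this code is part of the
commit; the LOG_ROOT path and the render_index/application names are
hypothetical), a WSGI-style shim could regenerate the same HTML listing per
request, so no index.html ever has to be written to disk:

import os

LOG_ROOT = '/var/lib/turbo-hipster/logs'  # assumed log root, not from the commit

def render_index(dirpath, relpath):
    """Build the HTML index at request time. Mirrors the markup of the
    removed generate_log_index(), but lists files straight from disk."""
    output = '<html><head><title>Index of results</title></head><body>'
    output += '<ul>'
    for name in sorted(os.listdir(dirpath)):
        output += '<li><a href="/%s">%s</a></li>' % (
            os.path.join(relpath, name), name)
    output += '</ul>'
    output += '</body></html>'
    return output

def application(environ, start_response):
    """WSGI entry point: answer directory requests with a generated index.
    A real shim would also sanitise PATH_INFO against path traversal."""
    relpath = environ.get('PATH_INFO', '/').strip('/')
    dirpath = os.path.join(LOG_ROOT, relpath)
    if os.path.isdir(dirpath):
        body = render_index(dirpath, relpath).encode('utf-8')
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [body]
    start_response('404 Not Found', [('Content-Type', 'text/plain')])
    return [b'not found']

Trading the per-push index.html for a listing computed per request is what
removes the extra inode per job directory; the cost is one os.listdir() on
each index hit, which is cheap for per-job log directories.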