Check the return code for the test script.

Rework the error reporting in the dataset log analysis to also
account for global failures, and report success only when both
global success and dataset success have been achieved.

Fixes bug: 1263828

Change-Id: I53aa5ab949293a02f93f9981c0f98246a77d825f
This commit is contained in:
Michael Still 2013-12-24 17:47:59 +11:00
parent 61daba46bb
commit 5231d4c50b
2 changed files with 25 additions and 9 deletions

View File

@ -184,6 +184,7 @@ def execute_to_log(cmd, logfile, timeout=-1,
logger.removeHandler(log_handler) logger.removeHandler(log_handler)
log_handler.flush() log_handler.flush()
log_handler.close() log_handler.close()
return p.returncode
def push_file(job_log_dir, file_path, publish_config): def push_file(job_log_dir, file_path, publish_config):

View File

@ -66,6 +66,9 @@ class Runner(object):
def start_job(self, job): def start_job(self, job):
self.job = job self.job = job
self.success = True
self.messages = []
if self.job is not None: if self.job is not None:
try: try:
self.job_arguments = \ self.job_arguments = \
@ -88,7 +91,10 @@ class Runner(object):
# Step 3: Run migrations on datasets # Step 3: Run migrations on datasets
self._do_next_step() self._do_next_step()
self._execute_migrations() if self._execute_migrations() > 0:
self.success = False
self.messages.append('Return code from test script was '
'non-zero')
# Step 4: Analyse logs for errors # Step 4: Analyse logs for errors
self._do_next_step() self._do_next_step()
@ -123,22 +129,30 @@ class Runner(object):
def _check_all_dataset_logs_for_errors(self): def _check_all_dataset_logs_for_errors(self):
self.log.debug("Check logs for errors") self.log.debug("Check logs for errors")
success = True
messages = []
for i, dataset in enumerate(self.job_datasets): for i, dataset in enumerate(self.job_datasets):
# Look for the beginning of the migration start # Look for the beginning of the migration start
dataset_success, message = \ dataset_success, message = \
handle_results.check_log_for_errors( handle_results.check_log_for_errors(
dataset['job_log_file_path'], self.git_path, dataset['job_log_file_path'], self.git_path,
dataset['config']) dataset['config'])
self.job_datasets[i]['result'] = message
messages.append(message)
success = False if not dataset_success else success
if success: if self.success:
if dataset_success:
self.job_datasets[i]['result'] = 'SUCCESS'
else:
self.success = False
self.job_datasets[i]['result'] = message
self.messages.append(message)
else:
self.job_datasets[i]['result'] = self.messages[0]
if not dataset_success:
self.messages.append(message)
if self.success:
self.work_data['result'] = "SUCCESS" self.work_data['result'] = "SUCCESS"
else: else:
self.work_data['result'] = "\n".join(messages) self.work_data['result'] = "\n".join(self.messages)
def _get_datasets(self): def _get_datasets(self):
self.log.debug("Get configured datasets to run tests against") self.log.debug("Get configured datasets to run tests against")
@ -255,7 +269,7 @@ class Runner(object):
if 'sqlerr' in self.global_config['logs']: if 'sqlerr' in self.global_config['logs']:
sqlerr = self.global_config['logs']['sqlerr'] sqlerr = self.global_config['logs']['sqlerr']
utils.execute_to_log( rc = utils.execute_to_log(
cmd, cmd,
dataset['job_log_file_path'], dataset['job_log_file_path'],
watch_logs=[ watch_logs=[
@ -264,6 +278,7 @@ class Runner(object):
('[sqlerr]', sqlerr) ('[sqlerr]', sqlerr)
], ],
) )
return rc
def _grab_patchset(self, project_name, zuul_ref): def _grab_patchset(self, project_name, zuul_ref):
""" Checkout the reference into config['git_working_dir'] """ """ Checkout the reference into config['git_working_dir'] """