Tests: handle keyboard interrupts during benchmarks more gracefully
Leave the current test result unchanged and stop executing immediately, so the run can be continued later.
parent eb96f0cf06
commit 42215d7cb8
@@ -98,6 +98,8 @@ class TestEnvironment:
         try:
             self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
             self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+        except KeyboardInterrupt as e:
+            raise e
         except:
             return False
 
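The ordering of the two handlers in this first hunk is what makes it work: a bare `except:` catches BaseException, which includes KeyboardInterrupt, so without the new handler a Ctrl-C during the build would be swallowed and reported as an ordinary build failure. A minimal standalone sketch of the pattern (the `build` helper and its arguments are illustrative, not the repository's actual code):

import subprocess

def build(cmake_executable: str, build_dir: str) -> bool:
    try:
        subprocess.run([cmake_executable, '.'], cwd=build_dir, check=True)
    except KeyboardInterrupt:
        # Re-raise so Ctrl-C stops the whole benchmark run immediately.
        raise
    except Exception:
        # Any other error is reported as a failed build.
        return False
    return True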
@@ -193,17 +195,13 @@ class TestEnvironment:
                     lines.append(line_str)
                     if f:
                         f.write(line_str)
-        except KeyboardInterrupt:
+        except KeyboardInterrupt as e:
             # Avoid processes that keep running when interrupting.
             proc.terminate()
+            raise e
 
-        if f:
-            f.close()
-
-        # Print command output on error
+        # Raise error on failure
         if proc.returncode != 0 and not silent:
-            for line in lines:
-                print(line.rstrip())
             raise Exception("Error executing command")
 
         return lines
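Besides re-raising, this handler terminates the child process: without `proc.terminate()` the benchmarked process could keep running after the interrupt arrives while the parent is blocked reading its output. A self-contained sketch of the same pattern, with illustrative names:

import subprocess

def run_command(args, cwd):
    proc = subprocess.Popen(args, cwd=cwd,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        for line in proc.stdout:
            pass  # Collect or log output here.
    except KeyboardInterrupt:
        proc.terminate()  # Stop the child so it does not outlive Ctrl-C.
        raise
    return proc.wait()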
@@ -42,7 +42,7 @@ class TestGraph:
 
         # Generate one graph for every device x category x result key combination.
         for category, category_entries in categories.items():
-            entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test, entry.date))
+            entries = sorted(category_entries, key=lambda entry: (entry.date, entry.revision, entry.test))
 
             outputs = set()
             for entry in entries:
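The sort-key change makes the date the primary ordering: Python compares tuples element by element, left to right, so graph entries now come out chronologically, with revision and test only breaking ties between entries that share a date. For example:

# Tuples sort lexicographically, so the earlier date wins regardless
# of how the revisions compare.
entries = [('2021-03-01', 'rev-b', 'render'), ('2021-02-01', 'rev-a', 'render')]
print(sorted(entries))  # The February entry now comes first.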
@@ -141,6 +141,8 @@ def run_entry(env: api.TestEnvironment,
         if not entry.output:
             raise Exception("Test produced no output")
         entry.status = 'done'
+    except KeyboardInterrupt as e:
+        raise e
     except Exception as e:
         entry.status = 'failed'
         entry.error_msg = str(e)
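Worth noting for this hunk: KeyboardInterrupt derives from BaseException, not Exception, so the generic `except Exception` below it would not have caught a Ctrl-C anyway. The explicit re-raise documents the intent and guards against the handler being broadened later; either way the interrupted entry's status is never rewritten to 'failed', which is what keeps the result unchanged for a later resume. The hierarchy can be checked directly:

# KeyboardInterrupt sits outside the Exception branch of the hierarchy:
#   BaseException -> Exception -> ...
#   BaseException -> KeyboardInterrupt
assert not issubclass(KeyboardInterrupt, Exception)
assert issubclass(KeyboardInterrupt, BaseException)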
@@ -236,17 +238,26 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     configs = env.get_configs(args.config)
     for config in configs:
         updated = False
+        cancel = False
         print_header(config)
         for row in config.queue.rows(use_revision_columns(config)):
             if match_entry(row[0], args):
                 for entry in row:
-                    if run_entry(env, config, row, entry, update_only):
-                        updated = True
-                        # Write queue every time in case running gets interrupted,
-                        # so it can be resumed.
-                        config.queue.write()
+                    try:
+                        if run_entry(env, config, row, entry, update_only):
+                            updated = True
+                            # Write queue every time in case running gets interrupted,
+                            # so it can be resumed.
+                            config.queue.write()
+                    except KeyboardInterrupt as e:
+                        cancel = True
+                        break
+
             print_row(config, row)
 
+            if cancel:
+                break
+
         if updated:
             # Generate graph if test were run.
             json_filepath = config.base_dir / "results.json"
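The `cancel` flag in this last hunk is the usual Python workaround for the lack of a labeled break: the inner loop catches the interrupt and breaks, and the outer row loop checks the flag and breaks in turn. Because `config.queue.write()` already runs after every completed entry, an interrupted run resumes from the first entry that never finished. A self-contained sketch of the pattern (`rows` and `process` are stand-ins, not the script's real names):

def run_all(rows, process):
    # `rows` is an iterable of rows of entries; `process` does the
    # per-entry work and may be interrupted by Ctrl-C.
    cancel = False
    for row in rows:
        for entry in row:
            try:
                process(entry)
            except KeyboardInterrupt:
                cancel = True
                break  # Leave the inner loop...
        if cancel:
            break  # ...then unwind the outer one.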