Tests: updates for performance benchmarking

* Make "run" command (re-)run all tests, add "update" command to only
  run queued and outdated tests equivalent to the old "run" command.
* Support specifying environment variables for revisions, to easily
  compare multiple parameter values.
* Better sorting of revisions in graph.
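
For illustration, a revisions section exercising the new environment
variable support could look like the sketch below. The plain-string and
(commit, environment-dict) shapes are the two forms accepted by the new
_split_environment_variables helper in this commit; the variable name
MY_SAMPLES and the revision names are made-up placeholders:

    # Hypothetical config sketch; only the two value shapes come from the commit.
    revisions = {
        'master':      'master',                          # plain string: no overrides
        'samples-64':  ('master', {'MY_SAMPLES': '64'}),  # (commit, {VAR: value})
        'samples-128': ('master', {'MY_SAMPLES': '128'}),
    }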
commit 6fc94d1848
parent 6bc6ffc35c
Author: Brecht Van Lommel
Date:   2021-09-08 15:56:50 +02:00

4 changed files with 47 additions and 17 deletions

View File

@@ -25,6 +25,7 @@ class TestEntry:
     category: str = ''
     revision: str = ''
     git_hash: str = ''
+    environment: Dict = field(default_factory=dict)
     executable: str = ''
     date: int = 0
     device_type: str = 'CPU'
@@ -191,9 +192,10 @@ class TestConfig:

         # Get entries for specified commits, tags and branches.
         for revision_name, revision_commit in self.revisions.items():
+            revision_commit, environment = self._split_environment_variables(revision_commit)
             git_hash = env.resolve_git_hash(revision_commit)
             date = env.git_hash_date(git_hash)
-            entries += self._get_entries(revision_name, git_hash, '', date)
+            entries += self._get_entries(revision_name, git_hash, '', environment, date)

         # Optimization to avoid rebuilds.
         revisions_to_build = set()
@@ -204,6 +206,7 @@

         # Get entries for revisions based on existing builds.
         for revision_name, executable in self.builds.items():
+            executable, environment = self._split_environment_variables(executable)
             executable_path = env._blender_executable_from_path(pathlib.Path(executable))
             if not executable_path:
                 sys.stderr.write(f'Error: build {executable} not found\n')
@@ -214,7 +217,7 @@
             env.set_default_blender_executable()

             mtime = executable_path.stat().st_mtime
-            entries += self._get_entries(revision_name, git_hash, executable, mtime)
+            entries += self._get_entries(revision_name, git_hash, executable, environment, mtime)

         # Detect number of categories for more compact printing.
         categories = set()
@@ -229,6 +232,7 @@
                      revision_name: str,
                      git_hash: str,
                      executable: pathlib.Path,
+                     environment: str,
                      date: int) -> None:
         entries = []
         for test in self.tests.tests:
@@ -241,10 +245,12 @@
                     # Test if revision hash or executable changed.
                     if entry.git_hash != git_hash or \
                        entry.executable != executable or \
+                       entry.environment != environment or \
                        entry.benchmark_type != self.benchmark_type or \
                        entry.date != date:
                         # Update existing entry.
                         entry.git_hash = git_hash
+                        entry.environment = environment
                         entry.executable = executable
                         entry.benchmark_type = self.benchmark_type
                         entry.date = date
@@ -256,6 +262,7 @@
                         revision=revision_name,
                         git_hash=git_hash,
                         executable=executable,
+                        environment=environment,
                         date=date,
                         test=test_name,
                         category=test_category,
@@ -266,3 +273,9 @@
                 entries.append(entry)

         return entries
+
+    def _split_environment_variables(self, revision):
+        if isinstance(revision, str):
+            return revision, {}
+        else:
+            return revision[0], revision[1]
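
The new _split_environment_variables helper is what makes both config
shapes work; its behavior, sketched directly from the code above:

    self._split_environment_variables('abc123')                # -> ('abc123', {})
    self._split_environment_variables(('abc123', {'X': '1'}))  # -> ('abc123', {'X': '1'})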

View File

@@ -104,9 +104,10 @@ class TestEnvironment:
         self._init_default_blender_executable()
         return True

-    def set_blender_executable(self, executable_path: pathlib.Path) -> None:
+    def set_blender_executable(self, executable_path: pathlib.Path, environment: Dict = {}) -> None:
         # Run all Blender commands with this executable.
         self.blender_executable = executable_path
+        self.blender_executable_environment = environment

     def _blender_executable_name(self) -> pathlib.Path:
         if platform.system() == "Windows":
@@ -150,6 +151,7 @@

     def set_default_blender_executable(self) -> None:
         self.blender_executable = self.default_blender_executable
+        self.blender_executable_environment = {}

     def set_log_file(self, filepath: pathlib.Path, clear=True) -> None:
         # Log all commands and output to this file.
@@ -161,7 +163,7 @@

     def unset_log_file(self) -> None:
         self.log_file = None

-    def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
+    def call(self, args: List[str], cwd: pathlib.Path, silent: bool=False, environment: Dict={}) -> List[str]:
         # Execute command with arguments in specified directory,
         # and return combined stdout and stderr output.
@@ -173,7 +175,13 @@
             f = open(self.log_file, 'a')
             f.write('\n' + ' '.join([str(arg) for arg in args]) + '\n\n')

-        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        env = os.environ
+        if len(environment):
+            env = env.copy()
+            for key, value in environment.items():
+                env[key] = value
+
+        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)

         # Read line by line
         lines = []
@@ -208,7 +216,8 @@
         else:
             common_args += ['--background']

-        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
+        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir,
+                         environment=self.blender_executable_environment)

     def run_in_blender(self,
                        function: Callable[[Dict], Dict],
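
The change to call() copies os.environ before applying per-revision
overrides, so repeated runs cannot leak variables from one revision into
the next. A self-contained sketch of the same pattern (call_with_env is
an illustrative name, not from the diff):

    import os
    import subprocess

    def call_with_env(args, cwd, environment={}):
        env = os.environ
        if len(environment):
            env = env.copy()         # copy only when overrides exist
            env.update(environment)  # equivalent to the diff's key/value loop
        return subprocess.Popen(args, cwd=cwd, env=env,
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)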

View File

@@ -42,7 +42,7 @@ class TestGraph:

         # Generate one graph for every device x category x result key combination.
         for category, category_entries in categories.items():
-            entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
+            entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test, entry.date))

             outputs = set()
             for entry in entries:
@@ -58,8 +58,6 @@
         self.json = json.dumps(data, indent=2)

     def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
-        entries = sorted(entries, key=lambda entry: entry.date)
-
         # Gather used tests.
         tests = {}
         for entry in entries:
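
Appending entry.date to the sort key matters because Python compares
tuples element by element: entries still group by revision and then
test, but runs within each group now come out chronologically, which is
what lets the per-chart date sort be removed in the second hunk. For
example:

    rows = [('a', 'test1', 20), ('a', 'test1', 10), ('b', 'test1', 10)]
    sorted(rows)  # [('a', 'test1', 10), ('a', 'test1', 20), ('b', 'test1', 10)]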

View File

@@ -83,15 +83,20 @@ def match_entry(entry: api.TestEntry, args: argparse.Namespace):
            entry.test.find(args.test) != -1 or \
            entry.category.find(args.test) != -1

-def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry: api.TestEntry):
+def run_entry(env: api.TestEnvironment,
+              config: api.TestConfig,
+              row: List,
+              entry: api.TestEntry,
+              update_only: bool):
     # Check if entry needs to be run.
-    if entry.status not in ('queued', 'outdated'):
+    if update_only and entry.status not in ('queued', 'outdated'):
         print_row(config, row, end='\r')
         return False

     # Run test entry.
     revision = entry.revision
     git_hash = entry.git_hash
+    environment = entry.environment
     testname = entry.test
     testcategory = entry.category
     device_type = entry.device_type
@@ -116,13 +121,15 @@ def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry
     print_row(config, row, end='\r')
     executable_ok = True
     if len(entry.executable):
-        env.set_blender_executable(pathlib.Path(entry.executable))
+        env.set_blender_executable(pathlib.Path(entry.executable), environment)
     else:
         env.checkout(git_hash)
         executable_ok = env.build()
         if not executable_ok:
             entry.status = 'failed'
             entry.error_msg = 'Failed to build'
+        else:
+            env.set_blender_executable(env.blender_executable, environment)

     # Run test and update output and status.
     if executable_ok:
@@ -219,7 +226,7 @@ def cmd_reset(env: api.TestEnvironment, argv: List):

     config.queue.write()

-def cmd_run(env: api.TestEnvironment, argv: List):
+def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     # Run tests.
     parser = argparse.ArgumentParser()
     parser.add_argument('config', nargs='?', default=None)
@@ -233,7 +240,7 @@ def cmd_run(env: api.TestEnvironment, argv: List):
     for row in config.queue.rows(use_revision_columns(config)):
         if match_entry(row[0], args):
             for entry in row:
-                if run_entry(env, config, row, entry):
+                if run_entry(env, config, row, entry, update_only):
                     updated = True
                     # Write queue every time in case running gets interrupted,
                     # so it can be resumed.
@@ -268,8 +275,9 @@
           '  \n'
           '  list                                  List available tests, devices and configurations\n'
           '  \n'
-          '  run [<config>] [<test>]               Execute tests for configuration\n'
-          '  reset [<config>] [<test>]             Clear tests results from config, for re-running\n'
+          '  run [<config>] [<test>]               Execute all tests in configuration\n'
+          '  update [<config>] [<test>]            Execute only queued and outdated tests\n'
+          '  reset [<config>] [<test>]             Clear tests results in configuration\n'
           '  status [<config>] [<test>]            List configurations and their tests\n'
           '  \n'
           '  graph a.json b.json... -o out.html    Create graph from results in JSON files\n')
@@ -304,7 +312,9 @@
     if args.command == 'list':
         cmd_list(env, argv)
     elif args.command == 'run':
-        cmd_run(env, argv)
+        cmd_run(env, argv, update_only=False)
+    elif args.command == 'update':
+        cmd_run(env, argv, update_only=True)
     elif args.command == 'reset':
         cmd_reset(env, argv)
     elif args.command == 'status':
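
Assuming the script is invoked as ./benchmark (its name is not visible
in this diff), the resulting command split behaves like:

    ./benchmark run default      # (re-)runs every test in the 'default' config
    ./benchmark update default   # runs only queued and outdated tests, as 'run' used to
    ./benchmark reset default    # clears results so tests run again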