
git.blender.org/blender.git
author    Brecht Van Lommel <brecht@blender.org>  2021-07-20 17:52:22 +0300
committer Brecht Van Lommel <brecht@blender.org>  2021-07-22 17:35:00 +0300
commit    320f34af86eb4ad4466fb4ef40baa1cf3fd0c617 (patch)
tree      ad4b581ce542056174df011f391793d39a9d885e /tests
parent    19b597c55d5d6cd6eb6cecdcff6db3e3fb5525ef (diff)
Tests: continue running benchmarks if some tests fail to build or run
Convenient when testing many revisions where some might be broken.
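
The core pattern of the change: wrap the per-revision build and test run in error handling, record a 'failed' status plus an error message on the entry, and let the queue continue instead of aborting. A minimal standalone sketch of that approach (the Entry class and the build/run callables here are hypothetical stand-ins, not the actual benchmark API):

    from dataclasses import dataclass, field
    from typing import Callable, Dict, List


    @dataclass
    class Entry:
        # Hypothetical stand-in for api.TestEntry.
        revision: str = ''
        status: str = 'queued'
        error_msg: str = ''
        output: Dict = field(default_factory=dict)


    def run_all(entries: List[Entry],
                build: Callable[[str], bool],
                run: Callable[[str], Dict]) -> None:
        # Keep going when one revision fails to build or run, so a long
        # multi-revision benchmark session is not aborted by a single failure.
        for entry in entries:
            if not build(entry.revision):
                entry.status = 'failed'
                entry.error_msg = 'Failed to build'
                continue
            try:
                entry.output = run(entry.revision)
                if not entry.output:
                    raise Exception("Test produced no output")
                entry.status = 'done'
            except Exception as e:
                entry.status = 'failed'
                entry.error_msg = str(e)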
Diffstat (limited to 'tests')
-rw-r--r--  tests/performance/api/config.py       |  4
-rw-r--r--  tests/performance/api/environment.py  | 17
-rw-r--r--  tests/performance/api/graph.py        |  3
-rwxr-xr-x  tests/performance/benchmark           | 25
4 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
index 283d1ff16ec..d3a79eede14 100644
--- a/tests/performance/api/config.py
+++ b/tests/performance/api/config.py
@@ -31,6 +31,7 @@ class TestEntry:
     device_id: str = 'CPU'
     device_name: str = 'Unknown CPU'
     status: str = 'queued'
+    error_msg: str = ''
     output: Dict = field(default_factory=dict)
     benchmark_type: str = 'comparison'
 
@@ -42,7 +43,8 @@ class TestEntry:
 
     def from_json(self, json_dict):
         for field in self.__dataclass_fields__:
-            setattr(self, field, json_dict[field])
+            if field in json_dict:
+                setattr(self, field, json_dict[field])
 
 
 class TestQueue:
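
Why guard the setattr with `if field in json_dict`: queue files written before this commit contain no error_msg key, so unconditional indexing would raise KeyError when re-reading older results. A minimal sketch of the tolerant-loading idea (hypothetical Item class and data, not the real queue format):

    import json
    from dataclasses import dataclass


    @dataclass
    class Item:
        name: str = ''
        error_msg: str = ''  # New field; absent from older JSON files.


    def load(json_text: str) -> Item:
        item = Item()
        data = json.loads(json_text)
        for field in item.__dataclass_fields__:
            if field in data:  # Tolerate JSON written before the field existed.
                setattr(item, field, data[field])
        return item


    # Old-format JSON without 'error_msg' still loads; the default is kept.
    print(load('{"name": "cycles_test"}'))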
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
index 1a8f5ceab51..3a9b3eaf936 100644
--- a/tests/performance/api/environment.py
+++ b/tests/performance/api/environment.py
@@ -88,16 +88,21 @@ class TestEnvironment:
         self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
         self.call([self.git_executable, 'checkout', '--detach', git_hash], self.blender_dir)
 
-    def build(self) -> None:
+    def build(self) -> bool:
         # Build Blender revision
         if not self.build_dir.exists():
             sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
             sys.exit(1)
 
         jobs = str(multiprocessing.cpu_count())
-        self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
-        self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+        try:
+            self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
+            self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+        except:
+            return False
+
         self._init_default_blender_executable()
+        return True
 
     def set_blender_executable(self, executable_path: pathlib.Path) -> None:
         # Run all Blender commands with this executable.
@@ -277,8 +282,10 @@ class TestEnvironment:
 
     def resolve_git_hash(self, revision):
         # Get git hash for a tag or branch.
-        return self.call([self.git_executable, 'rev-parse', revision], self.blender_git_dir)[0].strip()
+        lines = self.call([self.git_executable, 'rev-parse', revision], self.blender_git_dir)
+        return lines[0].strip() if len(lines) else revision
 
     def git_hash_date(self, git_hash):
         # Get commit date for a git hash.
-        return int(self.call([self.git_executable, 'log', '-n1', git_hash, '--format=%at'], self.blender_git_dir)[0].strip())
+        lines = self.call([self.git_executable, 'log', '-n1', git_hash, '--format=%at'], self.blender_git_dir)
+        return int(lines[0].strip()) if len(lines) else 0
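
Both helpers above now tolerate a git call that produces no output instead of crashing on an index error. A minimal sketch of the same guard pattern (call_lines is a hypothetical helper, loosely shaped like TestEnvironment.call, not the real API):

    import subprocess
    from typing import List


    def call_lines(args: List[str]) -> List[str]:
        # Return stdout lines, or an empty list when the command fails.
        try:
            out = subprocess.run(args, capture_output=True, text=True, check=True)
            return out.stdout.splitlines()
        except (OSError, subprocess.CalledProcessError):
            return []


    def resolve_git_hash(revision: str) -> str:
        # Fall back to the revision string itself when rev-parse yields
        # nothing, mirroring the guarded return above.
        lines = call_lines(['git', 'rev-parse', revision])
        return lines[0].strip() if lines else revision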
diff --git a/tests/performance/api/graph.py b/tests/performance/api/graph.py
index b3c8329ff27..4ee5ae7cf0e 100644
--- a/tests/performance/api/graph.py
+++ b/tests/performance/api/graph.py
@@ -30,6 +30,7 @@ class TestGraph:
 
         data = []
         for device_name, device_entries in devices.items():
+
             # Gather used categories.
             categories = {}
            for entry in device_entries:
@@ -57,6 +58,8 @@ class TestGraph:
         self.json = json.dumps(data, indent=2)
 
     def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
+        entries = sorted(entries, key=lambda entry: entry.date)
+
         # Gather used tests.
         tests = {}
         for entry in entries:
diff --git a/tests/performance/benchmark b/tests/performance/benchmark
index 4ca1d0eda4c..eb01b6053a7 100755
--- a/tests/performance/benchmark
+++ b/tests/performance/benchmark
@@ -66,6 +66,8 @@ def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
 
         if status == 'outdated':
             result += " (outdated)"
+        elif status == 'failed':
+            result = "failed: " + entry.error_msg
         else:
             result = status
 
@@ -105,20 +107,37 @@ def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry
         logname += '_' + device_id
     env.set_log_file(config.logs_dir / (logname + '.log'), clear=True)
 
+    # Clear output
+    entry.output = None
+    entry.error_msg = ''
+
     # Build revision, or just set path to existing executable.
     entry.status = 'building'
     print_row(config, row, end='\r')
+    executable_ok = True
     if len(entry.executable):
         env.set_blender_executable(pathlib.Path(entry.executable))
     else:
         env.checkout(git_hash)
-        env.build()
+        executable_ok = env.build()
+        if not executable_ok:
+            entry.status = 'failed'
+            entry.error_msg = 'Failed to build'
 
     # Run test and update output and status.
     entry.status = 'running'
     print_row(config, row, end='\r')
-    entry.output = test.run(env, device_id)
-    entry.status = 'done' if entry.output else 'failed'
+
+    if executable_ok:
+        try:
+            entry.output = test.run(env, device_id)
+            if not entry.output:
+                raise Exception("Test produced no output")
+            entry.status = 'done'
+        except Exception as e:
+            entry.status = 'failed'
+            entry.error_msg = str(e)
+
     print_row(config, row, end='\r')
 
     # Update device name in case the device changed since the entry was created.