
git.blender.org/blender.git
path: root/tests
author    Brecht Van Lommel <brecht@blender.org>    2020-10-28 18:19:03 +0300
committer Brecht Van Lommel <brecht@blender.org>    2020-11-30 15:40:33 +0300
commit    dd391d38f6ef2165bc76a1e69da52e1bd1208e53 (patch)
tree      299649b58b0359ca49779bcfd83577f11f8b65f8 /tests
parent    c986e46be708f6885a9504d87f58a99259ac63c7 (diff)
Tests: add CMake option to run Cycles regression tests on GPU devices
CYCLES_TEST_DEVICES is a list of devices (CPU, CUDA, OPTIX, OPENCL). It is set to CPU only by default.

Test output is now written to build/tests/cycles/<device>, and the HTML report has separate report pages for the different devices, with an option to compare between CPU and GPU renders.

Various GPU tests are still failing due to CPU/GPU differences; these still need to be fixed or blacklisted.

Ref T82193
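For illustration, a configure-and-run sequence might look like this (a sketch only; the option defaults to CPU, and the exact build layout will vary):

    cmake . -DWITH_CYCLES=ON -DCYCLES_TEST_DEVICES="CPU;OPTIX"
    ctest -R cycles_

With those settings every Cycles regression test is registered once per device, and the renders land in build/tests/cycles/cpu and build/tests/cycles/optix.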
Diffstat (limited to 'tests')
-rw-r--r--    tests/python/CMakeLists.txt               27
-rw-r--r--    tests/python/cycles_render_tests.py        9
-rw-r--r--    tests/python/eevee_render_tests.py         2
-rwxr-xr-x    tests/python/modules/global_report.py      1
-rwxr-xr-x    tests/python/modules/render_report.py     57
-rw-r--r--    tests/python/workbench_render_tests.py     2
6 files changed, 69 insertions, 29 deletions
diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt
index 22426a6d6fc..52dafa83c7a 100644
--- a/tests/python/CMakeLists.txt
+++ b/tests/python/CMakeLists.txt
@@ -598,20 +598,28 @@ if(WITH_CYCLES OR WITH_OPENGL_RENDER_TESTS)
     list(APPEND render_tests grease_pencil)
   endif()
 
+  # Cycles
   if(WITH_CYCLES)
-    foreach(render_test bake;${render_tests})
-      add_python_test(
-        cycles_${render_test}
-        ${CMAKE_CURRENT_LIST_DIR}/cycles_render_tests.py
-        -blender "${TEST_BLENDER_EXE}"
-        -testdir "${TEST_SRC_DIR}/render/${render_test}"
-        -idiff "${OPENIMAGEIO_IDIFF}"
-        -outdir "${TEST_OUT_DIR}/cycles"
-      )
+    foreach(_cycles_device ${CYCLES_TEST_DEVICES})
+      string(TOLOWER "${_cycles_device}" _cycles_device_lower)
+      set(_cycles_render_tests bake;${render_tests})
+
+      foreach(render_test ${_cycles_render_tests})
+        add_python_test(
+          cycles_${render_test}_${_cycles_device_lower}
+          ${CMAKE_CURRENT_LIST_DIR}/cycles_render_tests.py
+          -blender "${TEST_BLENDER_EXE}"
+          -testdir "${TEST_SRC_DIR}/render/${render_test}"
+          -idiff "${OPENIMAGEIO_IDIFF}"
+          -outdir "${TEST_OUT_DIR}/cycles"
+          -device ${_cycles_device}
+        )
+      endforeach()
     endforeach()
   endif()
 
   if(WITH_OPENGL_RENDER_TESTS)
+    # Eevee
     foreach(render_test ${render_tests})
       add_python_test(
         eevee_${render_test}_test
@@ -624,6 +632,7 @@ if(WITH_CYCLES OR WITH_OPENGL_RENDER_TESTS)
   endforeach()
 
+  # Workbench
   foreach(render_test ${render_tests})
     add_python_test(
       workbench_${render_test}_test
       ${CMAKE_CURRENT_LIST_DIR}/workbench_render_tests.py
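As a sketch of what the nested Cycles loops above register: with CYCLES_TEST_DEVICES set to CPU;OPTIX (a hypothetical configuration), the resulting CTest names would include

    cycles_bake_cpu
    cycles_grease_pencil_cpu
    cycles_bake_optix
    cycles_grease_pencil_optix

one test per (scene set, device) pair, each passing -device through to cycles_render_tests.py.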
diff --git a/tests/python/cycles_render_tests.py b/tests/python/cycles_render_tests.py
index 3c597b39cb8..9153a2732b5 100644
--- a/tests/python/cycles_render_tests.py
+++ b/tests/python/cycles_render_tests.py
@@ -50,6 +50,7 @@ def create_argparse():
     parser.add_argument("-testdir", nargs=1)
     parser.add_argument("-outdir", nargs=1)
     parser.add_argument("-idiff", nargs=1)
+    parser.add_argument("-device", nargs=1)
     return parser
@@ -61,12 +62,16 @@ def main():
     test_dir = args.testdir[0]
     idiff = args.idiff[0]
     output_dir = args.outdir[0]
+    device = args.device[0]
 
     from modules import render_report
-    report = render_report.Report("Cycles", output_dir, idiff)
+    report = render_report.Report('Cycles', output_dir, idiff, device)
     report.set_pixelated(True)
     report.set_reference_dir("cycles_renders")
-    report.set_compare_engines('cycles', 'eevee')
+    if device == 'CPU':
+        report.set_compare_engine('eevee')
+    else:
+        report.set_compare_engine('cycles', 'CPU')
 
     # Increase threshold for motion blur, see T78777.
     test_dir_name = Path(test_dir).name
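A minimal sketch of the report wiring this produces (the output path and the idiff handle are placeholders, not taken from the commit):

    from modules import render_report

    # CPU run: report titled "Cycles CPU", output under .../cycles/cpu,
    # still compared against Eevee as before.
    report = render_report.Report('Cycles', 'build/tests/cycles', idiff, 'CPU')
    report.set_compare_engine('eevee')

    # GPU run: report titled "Cycles OPTIX", output under .../cycles/optix,
    # compared against the Cycles CPU renders instead.
    report = render_report.Report('Cycles', 'build/tests/cycles', idiff, 'OPTIX')
    report.set_compare_engine('cycles', 'CPU')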
diff --git a/tests/python/eevee_render_tests.py b/tests/python/eevee_render_tests.py
index 4c3ca28402f..4f1eba3348f 100644
--- a/tests/python/eevee_render_tests.py
+++ b/tests/python/eevee_render_tests.py
@@ -137,7 +137,7 @@ def main():
     report = render_report.Report("Eevee", output_dir, idiff)
     report.set_pixelated(True)
     report.set_reference_dir("eevee_renders")
-    report.set_compare_engines('eevee', 'cycles')
+    report.set_compare_engine('cycles', 'CPU')
 
     ok = report.run(test_dir, blender, get_arguments, batch=True)
     sys.exit(not ok)
diff --git a/tests/python/modules/global_report.py b/tests/python/modules/global_report.py
index f7f181c4736..636ec61d471 100755
--- a/tests/python/modules/global_report.py
+++ b/tests/python/modules/global_report.py
@@ -39,6 +39,7 @@ def _write_html(output_dir):
 <div class="container">
     <br/>
     <h1>{title}</h1>
+    <nav aria-label="breadcrumb"><ol class="breadcrumb"><li class="breadcrumb-item active" aria-current="page">Test Reports</li></ol></nav>
     {combined_reports}
     <br/>
 </div>
diff --git a/tests/python/modules/render_report.py b/tests/python/modules/render_report.py
index 832d3849f01..24d0164adf1 100755
--- a/tests/python/modules/render_report.py
+++ b/tests/python/modules/render_report.py
@@ -102,6 +102,7 @@ class Report:
     __slots__ = (
         'title',
         'output_dir',
+        'global_dir',
         'reference_dir',
         'idiff',
         'pixelated',
@@ -112,17 +113,24 @@ class Report:
         'failed_tests',
         'passed_tests',
         'compare_tests',
-        'compare_engines'
+        'compare_engine',
+        'device'
     )
 
-    def __init__(self, title, output_dir, idiff):
+    def __init__(self, title, output_dir, idiff, device=None):
         self.title = title
         self.output_dir = output_dir
+        self.global_dir = os.path.dirname(output_dir)
         self.reference_dir = 'reference_renders'
         self.idiff = idiff
-        self.compare_engines = None
+        self.compare_engine = None
         self.fail_threshold = 0.016
         self.fail_percent = 1
+        self.device = device
+
+        if device:
+            self.title = self._engine_title(title, device)
+            self.output_dir = self._engine_path(self.output_dir, device.lower())
 
         self.pixelated = False
         self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
@@ -147,8 +155,8 @@ class Report:
     def set_reference_dir(self, reference_dir):
         self.reference_dir = reference_dir
 
-    def set_compare_engines(self, engine, other_engine):
-        self.compare_engines = (engine, other_engine)
+    def set_compare_engine(self, other_engine, other_device=None):
+        self.compare_engine = (other_engine, other_device)
 
     def run(self, dirpath, blender, arguments_cb, batch=False):
         # Run tests and output report.
@@ -156,7 +164,7 @@ class Report:
         ok = self._run_all_tests(dirname, dirpath, blender, arguments_cb, batch)
         self._write_data(dirname)
         self._write_html()
-        if self.compare_engines:
+        if self.compare_engine:
             self._write_html(comparison=True)
 
         return ok
@@ -171,7 +179,7 @@ class Report:
         filepath = os.path.join(outdir, "passed.data")
         pathlib.Path(filepath).write_text(self.passed_tests)
 
-        if self.compare_engines:
+        if self.compare_engine:
             filepath = os.path.join(outdir, "compare.data")
             pathlib.Path(filepath).write_text(self.compare_tests)
@@ -181,12 +189,26 @@ class Report:
         else:
             return """<li class="breadcrumb-item"><a href="%s">%s</a></li>""" % (href, title)
 
+    def _engine_title(self, engine, device):
+        if device:
+            return engine.title() + ' ' + device
+        else:
+            return engine.title()
+
+    def _engine_path(self, path, device):
+        if device:
+            return os.path.join(path, device.lower())
+        else:
+            return path
+
     def _navigation_html(self, comparison):
         html = """<nav aria-label="breadcrumb"><ol class="breadcrumb">"""
-        html += self._navigation_item("Test Reports", "../report.html", False)
+        base_path = os.path.relpath(self.global_dir, self.output_dir)
+        global_report_path = os.path.join(base_path, "report.html")
+        html += self._navigation_item("Test Reports", global_report_path, False)
         html += self._navigation_item(self.title, "report.html", not comparison)
-        if self.compare_engines:
-            compare_title = "Compare with %s" % self.compare_engines[1].capitalize()
+        if self.compare_engine:
+            compare_title = "Compare with %s" % self._engine_title(*self.compare_engine)
             html += self._navigation_item(compare_title, "compare.html", comparison)
         html += """</ol></nav>"""
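To see what the relpath-based breadcrumb evaluates to, a quick sketch (paths hypothetical):

    import os

    global_dir = 'build/tests'               # dirname of the original output_dir
    output_dir = 'build/tests/cycles/optix'  # output_dir after the device suffix is appended
    base_path = os.path.relpath(global_dir, output_dir)   # '../..'
    print(os.path.join(base_path, 'report.html'))         # '../../report.html'

The previously hardcoded "../report.html" would point one level too shallow for per-device reports, which now live an extra directory down.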
@@ -233,8 +255,8 @@ class Report:
         if comparison:
             title = self.title + " Test Compare"
-            engine_self = self.compare_engines[0].capitalize()
-            engine_other = self.compare_engines[1].capitalize()
+            engine_self = self.title
+            engine_other = self._engine_title(*self.compare_engine)
             columns_html = "<tr><th>Name</th><th>%s</th><th>%s</th>" % (engine_self, engine_other)
         else:
             title = self.title + " Test Report"
@@ -300,9 +322,8 @@ class Report:
         # Update global report
         if not comparison:
-            global_output_dir = os.path.dirname(self.output_dir)
             global_failed = failed if not comparison else None
-            global_report.add(global_output_dir, "Render", self.title, filepath, global_failed)
+            global_report.add(self.global_dir, "Render", self.title, filepath, global_failed)
 
     def _relative_url(self, filepath):
         relpath = os.path.relpath(filepath, self.output_dir)
@@ -340,8 +361,9 @@ class Report:
         else:
             self.passed_tests += test_html
 
-        if self.compare_engines:
-            ref_url = os.path.join("..", self.compare_engines[1], new_url)
+        if self.compare_engine:
+            base_path = os.path.relpath(self.global_dir, self.output_dir)
+            ref_url = os.path.join(base_path, self._engine_path(*self.compare_engine), new_url)
 
             test_html = """
                 <tr{tr_style}>
@@ -445,6 +467,9 @@ class Report:
                 if not batch:
                     break
 
+            if self.device:
+                command.extend(['--', '--cycles-device', self.device])
+
             # Run process
             crash = False
             output = None
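With device set to, say, OPTIX, the assembled command ends like this (the leading arguments come from the test's argument callback; this line is illustrative):

    blender --background ... test.blend -- --cycles-device OPTIX

Blender stops parsing at the '--' separator, so --cycles-device is seen only by the test scripts.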
diff --git a/tests/python/workbench_render_tests.py b/tests/python/workbench_render_tests.py
index 7c9842d5733..03a85c58dd9 100644
--- a/tests/python/workbench_render_tests.py
+++ b/tests/python/workbench_render_tests.py
@@ -72,7 +72,7 @@ def main():
     report = render_report.Report("Workbench", output_dir, idiff)
     report.set_pixelated(True)
     report.set_reference_dir("workbench_renders")
-    report.set_compare_engines('workbench', 'eevee')
+    report.set_compare_engine('eevee')
 
     ok = report.run(test_dir, blender, get_arguments, batch=True)
     sys.exit(not ok)