git.blender.org/blender.git
Diffstat (limited to 'tests')
 -rw-r--r--  tests/performance/api/__init__.py                 7
 -rw-r--r--  tests/performance/api/config.py                 259
 -rw-r--r--  tests/performance/api/device.py                  71
 -rw-r--r--  tests/performance/api/environment.py            244
 -rw-r--r--  tests/performance/api/graph.py                  114
 -rw-r--r--  tests/performance/api/graph.template.html        86
 -rw-r--r--  tests/performance/api/test.py                    73
 -rwxr-xr-x  tests/performance/benchmark                     299
 -rw-r--r--  tests/performance/tests/__init__.py               1
 -rw-r--r--  tests/performance/tests/animation.py             41
 -rw-r--r--  tests/performance/tests/blend_load.py            42
 -rw-r--r--  tests/performance/tests/cycles.py                92
 -rw-r--r--  tests/python/alembic_export_tests.py             18
 -rw-r--r--  tests/python/bl_alembic_io_test.py                1
 -rw-r--r--  tests/python/bl_animation_fcurves.py              1
 -rw-r--r--  tests/python/bl_blendfile_library_overrides.py    8
 -rw-r--r--  tests/python/bl_constraints.py                   64
 -rw-r--r--  tests/python/bl_pyapi_idprop.py                   1
 -rw-r--r--  tests/python/bl_run_operators_event_simulate.py  35
 -rw-r--r--  tests/python/compositor_render_tests.py           1
 -rw-r--r--  tests/python/cycles_render_tests.py                1
 -rw-r--r--  tests/python/modules/mesh_test.py                 2
 -rwxr-xr-x  tests/python/modules/render_report.py             2
 -rw-r--r--  tests/python/operators.py                         2
24 files changed, 1446 insertions(+), 19 deletions(-)
diff --git a/tests/performance/api/__init__.py b/tests/performance/api/__init__.py
new file mode 100644
index 00000000000..2dc9283c44a
--- /dev/null
+++ b/tests/performance/api/__init__.py
@@ -0,0 +1,7 @@
+# Apache License, Version 2.0
+
+from .environment import TestEnvironment
+from .device import TestDevice, TestMachine
+from .config import TestEntry, TestQueue, TestConfig
+from .test import Test, TestCollection
+from .graph import TestGraph
diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
new file mode 100644
index 00000000000..68f4df8d487
--- /dev/null
+++ b/tests/performance/api/config.py
@@ -0,0 +1,259 @@
+# Apache License, Version 2.0
+
+import fnmatch
+import json
+import pathlib
+import sys
+
+from dataclasses import dataclass, field
+from typing import Dict, List
+
+from .test import TestCollection
+
+
+def get_build_hash(args: None) -> str:
+ import bpy
+ import sys
+ build_hash = bpy.app.build_hash.decode('utf-8')
+ return '' if build_hash == 'Unknown' else build_hash
+
+
+@dataclass
+class TestEntry:
+ """Test to run, a combination of revision, test and device."""
+ test: str = ''
+ category: str = ''
+ revision: str = ''
+ git_hash: str = ''
+ executable: str = ''
+ date: int = 0
+ device_type: str = 'CPU'
+ device_id: str = 'CPU'
+ device_name: str = 'Unknown CPU'
+ status: str = 'queued'
+ output: Dict = field(default_factory=dict)
+ benchmark_type: str = 'comparison'
+
+ def to_json(self) -> Dict:
+ json_dict = {}
+ for field in self.__dataclass_fields__:
+ json_dict[field] = getattr(self, field)
+ return json_dict
+
+ def from_json(self, json_dict):
+ for field in self.__dataclass_fields__:
+ setattr(self, field, json_dict[field])
+
+
+class TestQueue:
+ """Queue of tests to be run or inspected. Matches JSON file on disk."""
+
+ def __init__(self, filepath: pathlib.Path):
+ self.filepath = filepath
+ self.has_multiple_revisions_to_build = False
+ self.has_multiple_categories = False
+ self.entries = []
+
+ if self.filepath.is_file():
+ with open(self.filepath, 'r') as f:
+ json_entries = json.load(f)
+
+ for json_entry in json_entries:
+ entry = TestEntry()
+ entry.from_json(json_entry)
+ self.entries.append(entry)
+
+ def rows(self, use_revision_columns: bool) -> List:
+ # Generate rows of entries for printing and running.
+ entries = sorted(self.entries, key=lambda entry:
+ (entry.revision,
+ entry.device_id,
+ entry.category,
+ entry.test))
+
+ if not use_revision_columns:
+ # One entry per row.
+ return [[entry] for entry in entries]
+ else:
+ # Multiple revisions per row.
+ rows = {}
+
+ for entry in entries:
+ key = (entry.device_id, entry.category, entry.test)
+ if key in rows:
+ rows[key].append(entry)
+ else:
+ rows[key] = [entry]
+
+ return [value for _, value in sorted(rows.items())]
+
+ def find(self, revision: str, test: str, category: str, device_id: str) -> Dict:
+ for entry in self.entries:
+ if entry.revision == revision and \
+ entry.test == test and \
+ entry.category == category and \
+ entry.device_id == device_id:
+ return entry
+
+ return None
+
+ def write(self) -> None:
+ json_entries = [entry.to_json() for entry in self.entries]
+ with open(self.filepath, 'w') as f:
+ json.dump(json_entries, f, indent=2)
+
+
+class TestConfig:
+ """Test configuration, containing a subset of revisions, tests and devices."""
+
+ def __init__(self, env, name: str):
+ # Init configuration from config.py file.
+ self.name = name
+ self.base_dir = env.base_dir / name
+ self.logs_dir = self.base_dir / 'logs'
+
+ config = self._read_config_module()
+ self.tests = TestCollection(env,
+ getattr(config, 'tests', ['*']),
+ getattr(config, 'categories', ['*']))
+ self.revisions = getattr(config, 'revisions', {})
+ self.builds = getattr(config, 'builds', {})
+ self.queue = TestQueue(self.base_dir / 'results.json')
+ self.benchmark_type = getattr(config, 'benchmark_type', 'comparison')
+
+ self.devices = []
+ self._update_devices(env, getattr(config, 'devices', ['CPU']))
+
+ self._update_queue(env)
+
+ def revision_names(self) -> List:
+ return sorted(list(self.revisions.keys()) + list(self.builds.keys()))
+
+ def device_name(self, device_id: str) -> str:
+ for device in self.devices:
+ if device.id == device_id:
+ return device.name
+
+ return "Unknown"
+
+ @staticmethod
+ def write_default_config(env, config_dir: pathlib.Path) -> None:
+ config_dir.mkdir(parents=True, exist_ok=True)
+
+ default_config = """devices = ['CPU']\n"""
+ default_config += """tests = ['*']\n"""
+ default_config += """categories = ['*']\n"""
+ default_config += """builds = {\n"""
+ default_config += """ 'master': '/home/user/blender-git/build/bin/blender',"""
+ default_config += """ '2.93': '/home/user/blender-2.93/blender',"""
+ default_config += """}\n"""
+ default_config += """revisions = {\n"""
+ default_config += """}\n"""
+
+ config_file = config_dir / 'config.py'
+ with open(config_file, 'w') as f:
+ f.write(default_config)
+
+ def _read_config_module(self) -> None:
+ # Import config.py as a module.
+ import importlib.util
+ spec = importlib.util.spec_from_file_location("testconfig", self.base_dir / 'config.py')
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
+
+ def _update_devices(self, env, device_filters: List) -> None:
+ # Find devices matching the filters.
+ need_gpus = device_filters != ['CPU']
+ machine = env.get_machine(need_gpus)
+
+ self.devices = []
+ for device in machine.devices:
+ for device_filter in device_filters:
+ if fnmatch.fnmatch(device.id, device_filter):
+ self.devices.append(device)
+ break
+
+ def _update_queue(self, env) -> None:
+ # Update queue to match configuration, adding and removing entries
+ # so that there is one entry for each revision, device and test
+ # combination.
+ entries = []
+
+ # Get entries for specified commits, tags and branches.
+ for revision_name, revision_commit in self.revisions.items():
+ git_hash = env.resolve_git_hash(revision_commit)
+ date = env.git_hash_date(git_hash)
+ entries += self._get_entries(revision_name, git_hash, '', date)
+
+ # Optimization to avoid rebuilds.
+ revisions_to_build = set()
+ for entry in entries:
+ if entry.status in ('queued', 'outdated'):
+ revisions_to_build.add(entry.git_hash)
+ self.queue.has_multiple_revisions_to_build = len(revisions_to_build) > 1
+
+ # Get entries for revisions based on existing builds.
+ for revision_name, executable in self.builds.items():
+ executable_path = pathlib.Path(executable)
+ if not executable_path.exists():
+ sys.stderr.write(f'Error: build {executable} not found\n')
+ sys.exit(1)
+
+ env.set_blender_executable(executable_path)
+ git_hash, _ = env.run_in_blender(get_build_hash, {})
+ env.unset_blender_executable()
+
+ mtime = executable_path.stat().st_mtime
+ entries += self._get_entries(revision_name, git_hash, executable, mtime)
+
+ # Detect number of categories for more compact printing.
+ categories = set()
+ for entry in entries:
+ categories.add(entry.category)
+ self.queue.has_multiple_categories = len(categories) > 1
+
+ # Replace actual entries.
+ self.queue.entries = entries
+
+ def _get_entries(self,
+ revision_name: str,
+ git_hash: str,
+ executable: pathlib.Path,
+ date: int) -> List:
+ entries = []
+ for test in self.tests.tests:
+ test_name = test.name()
+ test_category = test.category()
+
+ for device in self.devices:
+ entry = self.queue.find(revision_name, test_name, test_category, device.id)
+ if entry:
+ # Test if revision hash or executable changed.
+ if entry.git_hash != git_hash or \
+ entry.executable != executable or \
+ entry.benchmark_type != self.benchmark_type or \
+ entry.date != date:
+ # Update existing entry.
+ entry.git_hash = git_hash
+ entry.executable = executable
+ entry.benchmark_type = self.benchmark_type
+ entry.date = date
+ if entry.status in ('done', 'failed'):
+ entry.status = 'outdated'
+ else:
+ # Add new entry if it did not exist yet.
+ entry = TestEntry(
+ revision=revision_name,
+ git_hash=git_hash,
+ executable=executable,
+ date=date,
+ test=test_name,
+ category=test_category,
+ device_type=device.type,
+ device_id=device.id,
+ device_name=device.name,
+ benchmark_type=self.benchmark_type)
+ entries.append(entry)
+
+ return entries
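
TestEntry serializes itself by walking __dataclass_fields__, which keeps results.json and the in-memory queue in sync. A minimal round-trip sketch (assuming it is run from the tests/performance directory so the api package is importable; the field values are hypothetical):

from api.config import TestEntry

entry = TestEntry(revision='master', test='monkey', category='cycles',
                  device_id='CPU', device_name='Example CPU')  # hypothetical values
as_dict = entry.to_json()     # one key per dataclass field, JSON-serializable
restored = TestEntry()
restored.from_json(as_dict)   # TestQueue rebuilds entries this way when loading results.json
assert restored.revision == entry.revision
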
diff --git a/tests/performance/api/device.py b/tests/performance/api/device.py
new file mode 100644
index 00000000000..b61ae42be36
--- /dev/null
+++ b/tests/performance/api/device.py
@@ -0,0 +1,71 @@
+# Apache License, Version 2.0
+
+import platform
+import subprocess
+from typing import List
+
+
+def get_cpu_name() -> str:
+ # Get full CPU name.
+ if platform.system() == "Windows":
+ return platform.processor()
+ elif platform.system() == "Darwin":
+ cmd = ['/usr/sbin/sysctl', "-n", "machdep.cpu.brand_string"]
+ return subprocess.check_output(cmd).strip().decode('utf-8')
+ else:
+ with open('/proc/cpuinfo') as f:
+ for line in f:
+ if line.startswith('model name'):
+ return line.split(':')[1].strip()
+
+ return "Unknown CPU"
+
+
+def get_gpu_device(args: None) -> List:
+ # Get the list of available Cycles GPU devices.
+ import bpy
+ import sys
+
+ prefs = bpy.context.preferences
+ cprefs = prefs.addons['cycles'].preferences
+
+ result = []
+
+ for device_type, _, _, _ in cprefs.get_device_types(bpy.context):
+ cprefs.compute_device_type = device_type
+ devices = cprefs.get_devices_for_type(device_type)
+ index = 0
+ for device in devices:
+ if device.type == device_type:
+ result.append({'type': device.type, 'name': device.name, 'index': index})
+ index += 1
+ break
+
+ return result
+
+
+class TestDevice:
+ def __init__(self, device_type: str, device_id: str, name: str, operating_system: str):
+ self.type = device_type
+ self.id = device_id
+ self.name = name
+ self.operating_system = operating_system
+
+
+class TestMachine:
+ def __init__(self, env, need_gpus: bool):
+ operating_system = platform.system()
+
+ self.devices = [TestDevice('CPU', 'CPU', get_cpu_name(), operating_system)]
+ self.has_gpus = need_gpus
+
+ if need_gpus and env.blender_executable:
+ gpu_devices, _ = env.run_in_blender(get_gpu_device, {})
+ for gpu_device in gpu_devices:
+ device_type = gpu_device['type']
+ device_name = gpu_device['name']
+ device_id = gpu_device['type'] + "_" + str(gpu_device['index'])
+ self.devices.append(TestDevice(device_type, device_id, device_name, operating_system))
+
+ def cpu_device(self) -> TestDevice:
+ return self.devices[0]
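
TestMachine composes GPU device ids from the Cycles device type plus an index, e.g. 'CUDA_0' or 'OPTIX_1', while the CPU keeps the plain id 'CPU'; tests that need a specific device split the id again. A small sketch with a hypothetical id:

device_id = 'CUDA_0'                                     # hypothetical id as composed above
tokens = device_id.split('_')
device_type = tokens[0]                                  # 'CUDA'
device_index = int(tokens[1]) if len(tokens) > 1 else 0  # 0; a plain 'CPU' id has no index
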
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
new file mode 100644
index 00000000000..c9ddd493394
--- /dev/null
+++ b/tests/performance/api/environment.py
@@ -0,0 +1,244 @@
+# Apache License, Version 2.0
+
+import base64
+import glob
+import inspect
+import multiprocessing
+import os
+import pathlib
+import platform
+import pickle
+import subprocess
+import sys
+from typing import Callable, Dict, List
+
+from .config import TestConfig
+from .device import TestMachine
+
+
+class TestEnvironment:
+ def __init__(self, blender_git_dir: pathlib.Path, base_dir: pathlib.Path):
+ self.blender_git_dir = blender_git_dir
+ self.base_dir = base_dir
+ self.blender_dir = base_dir / 'blender'
+ self.build_dir = base_dir / 'build'
+ self.lib_dir = base_dir / 'lib'
+ self.benchmarks_dir = self.blender_git_dir.parent / 'lib' / 'benchmarks'
+ self.git_executable = 'git'
+ self.cmake_executable = 'cmake'
+ self.cmake_options = ['-DWITH_INTERNATIONAL=OFF', '-DWITH_BUILDINFO=OFF']
+ self.unset_blender_executable()
+ self.log_file = None
+ self.machine = None
+
+ def get_machine(self, need_gpus: bool=True) -> TestMachine:
+ if not self.machine or (need_gpus and not self.machine.has_gpus):
+ self.machine = TestMachine(self, need_gpus)
+
+ return self.machine
+
+ def init(self, build) -> None:
+ if not self.benchmarks_dir.exists():
+ sys.stderr.write(f'Error: benchmark files directory not found at {self.benchmarks_dir}\n')
+ sys.exit(1)
+
+ # Create benchmarks folder contents.
+ print(f'Init {self.base_dir}')
+ self.base_dir.mkdir(parents=True, exist_ok=True)
+
+ if len(self.get_configs(names_only=True)) == 0:
+ config_dir = self.base_dir / 'default'
+ print(f'Creating default configuration in {config_dir}')
+ TestConfig.write_default_config(self, config_dir)
+
+ if build:
+ if not self.lib_dir.exists():
+ print(f'Creating symlink at {self.lib_dir}')
+ self.lib_dir.symlink_to(self.blender_git_dir.parent / 'lib')
+ else:
+ print(f'Exists {self.lib_dir}')
+
+ if not self.blender_dir.exists():
+ print(f'Init git worktree in {self.blender_dir}')
+ self.call([self.git_executable, 'worktree', 'add', '--detach', self.blender_dir, 'HEAD'], self.blender_git_dir)
+ else:
+ print(f'Exists {self.blender_dir}')
+
+ if not self.build_dir.exists():
+ print(f'Init build in {self.build_dir}')
+ self.build_dir.mkdir()
+ # No translation to avoid dealing with submodules
+ self.call([self.cmake_executable, self.blender_dir, '.'] + self.cmake_options, self.build_dir)
+ else:
+ print(f'Exists {self.build_dir}')
+
+ print("Building")
+ self.build()
+
+ print('Done')
+
+ def checkout(self, git_hash) -> None:
+ # Checkout Blender revision
+ if not self.blender_dir.exists():
+ sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
+ sys.exit(1)
+
+ self.call([self.git_executable, 'clean', '-f', '-d'], self.blender_dir)
+ self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
+ self.call([self.git_executable, 'checkout', '--detach', git_hash], self.blender_dir)
+
+ self.build()
+
+ def build(self) -> None:
+ # Build Blender revision
+ if not self.build_dir.exists():
+ sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
+ sys.exit(1)
+
+ jobs = str(multiprocessing.cpu_count())
+ self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
+ self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+
+ def set_blender_executable(self, executable_path: pathlib.Path) -> None:
+ # Run all Blender commands with this executable.
+ self.blender_executable = executable_path
+
+ def unset_blender_executable(self) -> None:
+ if platform.system() == "Windows":
+ self.blender_executable = self.build_dir / 'bin' / 'blender.exe'
+ elif platform.system() == "Darwin":
+ self.blender_executable = self.build_dir / 'bin' / 'Blender.app' / 'Contents' / 'MacOS' / 'Blender'
+ else:
+ self.blender_executable = self.build_dir / 'bin' / 'blender'
+
+ if not self.blender_executable.exists():
+ self.blender_executable = 'blender'
+
+ def set_log_file(self, filepath: pathlib.Path, clear=True) -> None:
+ # Log all commands and output to this file.
+ self.log_file = filepath
+
+ if clear:
+ self.log_file.unlink(missing_ok=True)
+
+ def unset_log_file(self) -> None:
+ self.log_file = None
+
+ def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
+ # Execute command with arguments in specified directory,
+ # and return combined stdout and stderr output.
+
+ # Open log file for writing
+ f = None
+ if self.log_file:
+ if not self.log_file.exists():
+ self.log_file.parent.mkdir(parents=True, exist_ok=True)
+ f = open(self.log_file, 'a')
+ f.write('\n' + ' '.join([str(arg) for arg in args]) + '\n\n')
+
+ proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ # Read line by line
+ lines = []
+ try:
+ while proc.poll() is None:
+ line = proc.stdout.readline()
+ if line:
+ line_str = line.decode('utf-8', 'ignore')
+ lines.append(line_str)
+ if f:
+ f.write(line_str)
+ except KeyboardInterrupt:
+ # Avoid processes that keep running when interrupting.
+ proc.terminate()
+
+ if f:
+ f.close()
+
+ # Print command output on error
+ if proc.returncode != 0 and not silent:
+ for line in lines:
+ print(line.rstrip())
+ raise Exception("Error executing command")
+
+ return lines
+
+ def call_blender(self, args: List[str], foreground=False) -> List[str]:
+ # Execute Blender command with arguments.
+ common_args = ['--factory-startup', '--enable-autoexec', '--python-exit-code', '1']
+ if foreground:
+ common_args += ['--no-window-focus', '--window-geometry', '0', '0', '1024', '768']
+ else:
+ common_args += ['--background']
+
+ return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
+
+ def run_in_blender(self,
+ function: Callable[[Dict], Dict],
+ args: Dict,
+ blender_args: List=[],
+ foreground=False) -> Dict:
+ # Run function in a Blender instance. Arguments and return values are
+ # passed as a Python object that must be serializable with pickle.
+
+ # Get information to call this function from Blender.
+ package_path = pathlib.Path(__file__).parent.parent
+ functionname = function.__name__
+ modulename = inspect.getmodule(function).__name__
+
+ # Serialize arguments in base64, to avoid having to escape it.
+ args = base64.b64encode(pickle.dumps(args))
+ output_prefix = 'TEST_OUTPUT: '
+
+ expression = (f'import sys, pickle, base64\n'
+ f'sys.path.append("{package_path}")\n'
+ f'import {modulename}\n'
+ f'args = pickle.loads(base64.b64decode({args}))\n'
+ f'result = {modulename}.{functionname}(args)\n'
+ f'result = base64.b64encode(pickle.dumps(result))\n'
+ f'print("{output_prefix}" + result.decode())\n')
+
+ expr_args = blender_args + ['--python-expr', expression]
+ lines = self.call_blender(expr_args, foreground=foreground)
+
+ # Parse output.
+ for line in lines:
+ if line.startswith(output_prefix):
+ output = line[len(output_prefix):].strip()
+ result = pickle.loads(base64.b64decode(output))
+ return result, lines
+
+ return {}, lines
+
+ def find_blend_files(self, dirpath: pathlib.Path) -> List:
+ # Find .blend files in subdirectories of the given directory in the
+ # lib/benchmarks directory.
+ dirpath = self.benchmarks_dir / dirpath
+ filepaths = []
+ for filename in glob.iglob(str(dirpath / '*.blend'), recursive=True):
+ filepaths.append(pathlib.Path(filename))
+ return filepaths
+
+ def get_configs(self, name: str=None, names_only: bool=False) -> List:
+ # Get list of configurations in the benchmarks directory.
+ configs = []
+
+ if self.base_dir.exists():
+ for dirname in os.listdir(self.base_dir):
+ if not name or dirname == name:
+ dirpath = self.base_dir / dirname / 'config.py'
+ if dirpath.exists():
+ if names_only:
+ configs.append(dirname)
+ else:
+ configs.append(TestConfig(self, dirname))
+
+ return configs
+
+ def resolve_git_hash(self, revision):
+ # Get git hash for a tag or branch.
+ return self.call([self.git_executable, 'rev-parse', revision], self.blender_git_dir)[0].strip()
+
+ def git_hash_date(self, git_hash):
+ # Get commit date for a git hash.
+ return int(self.call([self.git_executable, 'log', '-n1', git_hash, '--format=%at'], self.blender_git_dir)[0].strip())
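
run_in_blender() smuggles arguments and return values through a --python-expr string by pickling them and wrapping the bytes in base64, then recovers the result from a marked stdout line. The round trip itself is plain Python and can be sketched without Blender (the payload values are made up):

import base64
import pickle

args = {'device_type': 'CPU'}                       # example payload
encoded = base64.b64encode(pickle.dumps(args))      # what gets embedded in the expression
decoded = pickle.loads(base64.b64decode(encoded))   # what the in-Blender side reconstructs
assert decoded == args

# On the way back, the result is printed behind a prefix and parsed out of the output.
result_line = 'TEST_OUTPUT: ' + base64.b64encode(pickle.dumps({'time': 1.23})).decode()
payload = pickle.loads(base64.b64decode(result_line[len('TEST_OUTPUT: '):].strip()))
assert payload == {'time': 1.23}
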
diff --git a/tests/performance/api/graph.py b/tests/performance/api/graph.py
new file mode 100644
index 00000000000..b3c8329ff27
--- /dev/null
+++ b/tests/performance/api/graph.py
@@ -0,0 +1,114 @@
+# Apache License, Version 2.0
+
+from . import TestQueue
+
+import json
+import pathlib
+from typing import Dict, List
+
+
+class TestGraph:
+ def __init__(self, json_filepaths: List[pathlib.Path]):
+ # Initialize graph from JSON file. Note that this is implemented without
+ # accessing any benchmark environment or configuration. This way, benchmarks
+ # run on various machines can be aggregated and the graph generated on another
+ # machine.
+
+ # Gather entries for each device.
+ devices = {}
+
+ for json_filepath in json_filepaths:
+ queue = TestQueue(json_filepath)
+
+ for entry in queue.entries:
+ if entry.status in ('done', 'outdated'):
+ device_name = entry.device_name
+ if device_name in devices.keys():
+ devices[device_name].append(entry)
+ else:
+ devices[device_name] = [entry]
+
+ data = []
+ for device_name, device_entries in devices.items():
+ # Gather used categories.
+ categories = {}
+ for entry in device_entries:
+ category = entry.category
+ if category in categories.keys():
+ categories[category].append(entry)
+ else:
+ categories[category] = [entry]
+
+ # Generate one graph for every device x category x result key combination.
+ for category, category_entries in categories.items():
+ entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
+
+ outputs = set()
+ for entry in entries:
+ for output in entry.output.keys():
+ outputs.add(output)
+
+ chart_type = 'line' if entries[0].benchmark_type == 'time_series' else 'comparison'
+
+ for output in outputs:
+ chart_name = f"{category} ({output})"
+ data.append(self.chart(device_name, chart_name, entries, chart_type, output))
+
+ self.json = json.dumps(data, indent=2)
+
+ def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
+ # Gather used tests.
+ tests = {}
+ for entry in entries:
+ test = entry.test
+ if test not in tests.keys():
+ tests[test] = len(tests)
+
+ # Gather used revisions.
+ revisions = {}
+ revision_dates = {}
+ for entry in entries:
+ revision = entry.revision
+ if revision not in revisions.keys():
+ revisions[revision] = len(revisions)
+ revision_dates[revision] = int(entry.date)
+
+ # Google Charts JSON data layout is like a spreadsheet table, with
+ # columns, rows and cells. We create one column for revision labels,
+ # and one column for each test.
+ cols = []
+ if chart_type == 'line':
+ cols.append({'id': '', 'label': 'Date', 'type': 'date'})
+ else:
+ cols.append({'id': '', 'label': ' ', 'type': 'string'})
+ for test, test_index in tests.items():
+ cols.append({'id': '', 'label': test, 'type': 'number'})
+
+ rows = []
+ for revision, revision_index in revisions.items():
+ if chart_type == 'line':
+ date = revision_dates[revision]
+ row = [{'f': None, 'v': 'Date({0})'.format(date * 1000)}]
+ else:
+ row = [{'f': None, 'v': revision}]
+ row += [{}] * len(tests)
+ rows.append({'c': row})
+
+ for entry in entries:
+ test_index = tests[entry.test]
+ revision_index = revisions[entry.revision]
+ time = entry.output[output] if output in entry.output else -1.0
+ rows[revision_index]['c'][test_index + 1] = {'f': None, 'v': time}
+
+ data = {'cols': cols, 'rows': rows}
+ return {'device': device_name, 'name': chart_name, 'data': data, 'chart_type': chart_type}
+
+ def write(self, filepath: pathlib.Path) -> None:
+ # Write HTML page with JSON graph data embedded.
+ template_dir = pathlib.Path(__file__).parent
+ with open(template_dir / 'graph.template.html', 'r') as f:
+ template = f.read()
+
+ contents = template.replace('%JSON_DATA%', self.json)
+ with open(filepath, "w") as f:
+ f.write(contents)
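
chart() emits the Google Charts DataTable layout directly: a 'cols' list with one label column (revision name, or a date for time series) plus one number column per test, and a 'rows' list whose cells are {'f': ..., 'v': ...} pairs. For a hypothetical comparison chart with one revision and two tests, the structure looks roughly like this:

data = {
    'cols': [
        {'id': '', 'label': ' ', 'type': 'string'},        # revision labels
        {'id': '', 'label': 'test_a', 'type': 'number'},   # one column per test (hypothetical names)
        {'id': '', 'label': 'test_b', 'type': 'number'},
    ],
    'rows': [
        {'c': [{'f': None, 'v': 'master'},                 # revision label cell
               {'f': None, 'v': 1.23},                     # test_a time in seconds
               {'f': None, 'v': 4.56}]},                   # test_b time in seconds
    ],
}
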
diff --git a/tests/performance/api/graph.template.html b/tests/performance/api/graph.template.html
new file mode 100644
index 00000000000..147f1628c23
--- /dev/null
+++ b/tests/performance/api/graph.template.html
@@ -0,0 +1,86 @@
+<html>
+<head>
+ <title>Benchmarks</title>
+ <meta charset="UTF-8">
+ <style type="text/css">
+ body { margin: 40px auto;
+ font-family: Arial;
+ font-size: 14px;
+ color: #333;
+ max-width: 900px; }
+ a { text-decoration: none; color: #06b; }
+ </style>
+ <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+ <script>
+ google.charts.load('current', {'packages':['line', 'bar']});
+ google.charts.setOnLoadCallback(draw_charts);
+
+ function transposeDataTable(dt)
+ {
+ /* Swap rows and columns. Bar and line charts expect different layouts;
+ * with this function we can use the same data source for both. */
+ var ndt = new google.visualization.DataTable;
+ ndt.addColumn('string',dt.getColumnLabel(0));
+ for(var x=1; x<dt.getNumberOfColumns(); x++) {
+ ndt.addRow([dt.getColumnLabel(x)]);
+ }
+ for(var x=0; x<dt.getNumberOfRows(); x++) {
+ ndt.addColumn('number', dt.getValue(x,0));
+ for(var y=1; y<dt.getNumberOfColumns(); y++) {
+ ndt.setValue(y-1, x+1, dt.getValue(x,y));
+ }
+ }
+ return ndt;
+ }
+
+ function draw_charts()
+ {
+ /* Load JSON data. */
+ var json_data = %JSON_DATA%;
+
+ /* Clear contents. */
+ charts_elem = document.getElementById("charts");
+ while(charts_elem.firstChild)
+ {
+ charts_elem.removeChild(charts_elem.firstChild);
+ }
+
+ /* Draw charts for each device. */
+ for (var i = 0; i < json_data.length; i++)
+ {
+ device = json_data[i];
+
+ /* Chart drawing options. */
+ var options = {
+ chart: {title: device["name"], subtitle: device['device']},
+ pointsVisible: true,
+ pointSize: 2.5,
+ height: 500,
+ };
+
+ /* Create chart div. */
+ elem = document.createElement('div');
+ elem.id = device["id"];
+ charts_elem.appendChild(elem)
+
+ /* Create chart. */
+ var data = new google.visualization.DataTable(device["data"]);
+ if (device['chart_type'] == 'line') {
+ var chart = new google.charts.Line(elem);
+ chart.draw(data, options);
+ }
+ else {
+ var chart = new google.charts.Bar(elem);
+ chart.draw(transposeDataTable(data), options);
+ }
+ }
+ }
+ </script>
+</head>
+<body>
+ <h1>Benchmarks</h1>
+ <div id="charts">
+ ...
+ </div>
+</body>
+</html>
diff --git a/tests/performance/api/test.py b/tests/performance/api/test.py
new file mode 100644
index 00000000000..7e8193d2c21
--- /dev/null
+++ b/tests/performance/api/test.py
@@ -0,0 +1,73 @@
+# Apache License, Version 2.0
+
+import abc
+import fnmatch
+from typing import Dict, List
+
+
+class Test:
+ @abc.abstractmethod
+ def name(self) -> str:
+ """
+ Name of the test.
+ """
+
+ @abc.abstractmethod
+ def category(self) -> str:
+ """
+ Category of the test.
+ """
+
+ def use_device(self) -> bool:
+ """
+ Test uses a specific CPU or GPU device.
+ """
+ return False
+
+ @abc.abstractmethod
+ def run(self, env, device_id: str) -> Dict:
+ """
+ Execute the test and report results.
+ """
+
+
+class TestCollection:
+ def __init__(self, env, names_filter: List=['*'], categories_filter: List=['*']):
+ import importlib
+ import pkgutil
+ import tests
+
+ self.tests = []
+
+ # Find and import all Python files in the tests folder, and generate
+ # the list of tests for each.
+ for _, modname, _ in pkgutil.iter_modules(tests.__path__, 'tests.'):
+ module = importlib.import_module(modname)
+ tests = module.generate(env)
+
+ for test in tests:
+ test_category = test.category()
+ found = False
+ for category_filter in categories_filter:
+ if fnmatch.fnmatch(test_category, category_filter):
+ found = True
+ if not found:
+ continue
+
+ test_name = test.name()
+ found = False
+ for name_filter in names_filter:
+ if fnmatch.fnmatch(test_name, name_filter):
+ found = True
+ if not found:
+ continue
+
+ self.tests.append(test)
+
+ def find(self, test_name: str, test_category: str):
+ # Find a test based on name and category.
+ for test in self.tests:
+ if test.name() == test_name and test.category() == test_category:
+ return test
+
+ return None
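
New benchmarks are added by dropping a module into tests/performance/tests/ that defines api.Test subclasses and a module-level generate(env) function; TestCollection discovers such modules through pkgutil and filters them by name and category. A minimal hypothetical test module as a sketch:

import api


class ExampleTest(api.Test):          # hypothetical test, not part of this patch
    def name(self):
        return 'example'

    def category(self):
        return 'example_category'

    def run(self, env, device_id):
        # Report at least a 'time' value; the runner treats an empty result as a failure.
        return {'time': 0.0}


def generate(env):
    return [ExampleTest()]
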
diff --git a/tests/performance/benchmark b/tests/performance/benchmark
new file mode 100755
index 00000000000..3b43bd0aa96
--- /dev/null
+++ b/tests/performance/benchmark
@@ -0,0 +1,299 @@
+#!/usr/bin/env python3
+# Apache License, Version 2.0
+
+import api
+import argparse
+import fnmatch
+import pathlib
+import shutil
+import sys
+import time
+from typing import List
+
+def find_blender_git_dir() -> pathlib.Path:
+ # Find .git directory of the repository we are in.
+ cwd = pathlib.Path.cwd()
+
+ for path in [cwd] + list(cwd.parents):
+ if (path / '.git').exists():
+ return path
+
+ return None
+
+def get_tests_base_dir(blender_git_dir: pathlib.Path) -> pathlib.Path:
+ # Benchmarks dir is next to the Blender source folder.
+ return blender_git_dir.parent / 'benchmark'
+
+def use_revision_columns(config: api.TestConfig) -> bool:
+ return config.benchmark_type == "comparison" and \
+ len(config.queue.entries) > 0 and \
+ not config.queue.has_multiple_revisions_to_build
+
+def print_header(config: api.TestConfig) -> None:
+ # Print header with revision columns headers.
+ if use_revision_columns(config):
+ header = ""
+ if config.queue.has_multiple_categories:
+ header += f"{'': <15} "
+ header += f"{'': <40} "
+
+ for revision_name in config.revision_names():
+ header += f"{revision_name: <20} "
+ print(header)
+
+def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
+ # Print one or more test entries on a row.
+ row = ""
+
+ # For time series, print revision first.
+ if not use_revision_columns(config):
+ revision = entries[0].revision
+ git_hash = entries[0].git_hash
+
+ row += f"{revision: <15} "
+
+ if config.queue.has_multiple_categories:
+ row += f"{entries[0].category: <15} "
+ row += f"{entries[0].test: <40} "
+
+ for entry in entries:
+ # Show time or status.
+ status = entry.status
+ output = entry.output
+ result = ''
+ if status in ('done', 'outdated') and output:
+ result = '%.4fs' % output['time']
+
+ if status == 'outdated':
+ result += " (outdated)"
+ else:
+ result = status
+
+ row += f"{result: <20} "
+
+ print(row, end=end, flush=True)
+
+
+def match_entry(entry: api.TestEntry, args: argparse.Namespace):
+ # Filter tests by name and category.
+ return fnmatch.fnmatch(entry.test, args.test) or \
+ fnmatch.fnmatch(entry.category, args.test) or \
+ entry.test.find(args.test) != -1 or \
+ entry.category.find(args.test) != -1
+
+def run_entry(env: api.TestEnvironment, config: api.TestConfig, row: List, entry: api.TestEntry):
+ # Check if entry needs to be run.
+ if entry.status not in ('queued', 'outdated'):
+ print_row(config, row, end='\r')
+ return False
+
+ # Run test entry.
+ revision = entry.revision
+ git_hash = entry.git_hash
+ testname = entry.test
+ testcategory = entry.category
+ device_type = entry.device_type
+ device_id = entry.device_id
+
+ test = config.tests.find(testname, testcategory)
+ if not test:
+ return False
+
+ # Log all output to dedicated log file.
+ logname = testcategory + '_' + testname + '_' + revision
+ if device_id != 'CPU':
+ logname += '_' + device_id
+ env.set_log_file(config.logs_dir / (logname + '.log'), clear=True)
+
+ # Build revision, or just set path to existing executable.
+ entry.status = 'building'
+ print_row(config, row, end='\r')
+ if len(entry.executable):
+ env.set_blender_executable(pathlib.Path(entry.executable))
+ else:
+ env.checkout(git_hash)
+ env.build()
+
+ # Run test and update output and status.
+ entry.status = 'running'
+ print_row(config, row, end='\r')
+ entry.output = test.run(env, device_id)
+ entry.status = 'done' if entry.output else 'failed'
+ print_row(config, row, end='\r')
+
+ # Update device name in case the device changed since the entry was created.
+ entry.device_name = config.device_name(device_id)
+
+ # Restore default logging and Blender executable.
+ env.unset_log_file()
+ env.unset_blender_executable()
+
+ return True
+
+def cmd_init(env: api.TestEnvironment, argv: List):
+ # Initialize benchmarks folder.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--build', default=False, action='store_true')
+ args = parser.parse_args(argv)
+ env.set_log_file(env.base_dir / 'setup.log', clear=False)
+ env.init(args.build)
+ env.unset_log_file()
+
+def cmd_list(env: api.TestEnvironment, argv: List) -> None:
+ # List devices, tests and configurations.
+ print('DEVICES')
+ machine = env.get_machine()
+ for device in machine.devices:
+ name = f"{device.name} ({device.operating_system})"
+ print(f"{device.id: <15} {name}")
+ print('')
+
+ print('TESTS')
+ collection = api.TestCollection(env)
+ for test in collection.tests:
+ print(f"{test.category(): <15} {test.name(): <50}")
+ print('')
+
+ print('CONFIGS')
+ configs = env.get_configs(names_only=True)
+ for config_name in configs:
+ print(config_name)
+
+def cmd_status(env: api.TestEnvironment, argv: List):
+ # Print status of tests in configurations.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('config', nargs='?', default=None)
+ parser.add_argument('test', nargs='?', default='*')
+ args = parser.parse_args(argv)
+
+ configs = env.get_configs(args.config)
+ first = True
+ for config in configs:
+ if not args.config:
+ if first:
+ first = False
+ else:
+ print("")
+ print(config.name.upper())
+
+ print_header(config)
+ for row in config.queue.rows(use_revision_columns(config)):
+ if match_entry(row[0], args):
+ print_row(config, row)
+
+def cmd_reset(env: api.TestEnvironment, argv: List):
+ # Reset tests to re-run them.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('config', nargs='?', default=None)
+ parser.add_argument('test', nargs='?', default='*')
+ args = parser.parse_args(argv)
+
+ configs = env.get_configs(args.config)
+ for config in configs:
+ print_header(config)
+ for row in config.queue.rows(use_revision_columns(config)):
+ if match_entry(row[0], args):
+ for entry in row:
+ entry.status = 'queued'
+ entry.output = {}
+ print_row(config, row)
+
+ config.queue.write()
+
+def cmd_run(env: api.TestEnvironment, argv: List):
+ # Run tests.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('config', nargs='?', default=None)
+ parser.add_argument('test', nargs='?', default='*')
+ args = parser.parse_args(argv)
+
+ configs = env.get_configs(args.config)
+ for config in configs:
+ updated = False
+ print_header(config)
+ for row in config.queue.rows(use_revision_columns(config)):
+ if match_entry(row[0], args):
+ for entry in row:
+ if run_entry(env, config, row, entry):
+ updated = True
+ # Write queue every time in case running gets interrupted,
+ # so it can be resumed.
+ config.queue.write()
+ print_row(config, row)
+
+ if updated:
+ # Generate graph if tests were run.
+ json_filepath = config.base_dir / "results.json"
+ html_filepath = config.base_dir / "results.html"
+ graph = api.TestGraph([json_filepath])
+ graph.write(html_filepath)
+
+ print("\nfile://" + str(html_filepath))
+
+def cmd_graph(argv: List):
+ # Create graph from a given JSON results file.
+ parser = argparse.ArgumentParser()
+ parser.add_argument('json_file', nargs='+')
+ parser.add_argument('-o', '--output', type=str, required=True)
+ args = parser.parse_args(argv)
+
+ graph = api.TestGraph([pathlib.Path(path) for path in args.json_file])
+ graph.write(pathlib.Path(args.output))
+
+def main():
+ usage = ('benchmark <command> [<args>]\n'
+ '\n'
+ 'Commands:\n'
+ ' init [--build] Init benchmarks directory and default config\n'
+ ' Optionally with automated revision building setup\n'
+ ' \n'
+ ' list List available tests, devices and configurations\n'
+ ' \n'
+ ' run [<config>] [<test>] Execute tests for configuration\n'
+ ' reset [<config>] [<test>] Clear tests results from config, for re-running\n'
+ ' status [<config>] [<test>] List configurations and their tests\n'
+ ' \n'
+ ' graph a.json b.json... -o out.html Create graph from results in JSON files\n')
+
+ parser = argparse.ArgumentParser(
+ description='Blender performance testing',
+ usage=usage)
+
+ parser.add_argument('command', nargs='?', default='help')
+ args = parser.parse_args(sys.argv[1:2])
+
+ argv = sys.argv[2:]
+ blender_git_dir = find_blender_git_dir()
+ if blender_git_dir == None:
+ sys.stderr.write('Error: no blender git repository found from current working directory\n')
+ sys.exit(1)
+
+ if args.command == 'graph':
+ cmd_graph(argv)
+ sys.exit(0)
+
+ base_dir = get_tests_base_dir(blender_git_dir)
+ env = api.TestEnvironment(blender_git_dir, base_dir)
+ if args.command == 'init':
+ cmd_init(env, argv)
+ sys.exit(0)
+
+ if not env.base_dir.exists():
+ sys.stderr.write('Error: benchmark directory not initialized\n')
+ sys.exit(1)
+
+ if args.command == 'list':
+ cmd_list(env, argv)
+ elif args.command == 'run':
+ cmd_run(env, argv)
+ elif args.command == 'reset':
+ cmd_reset(env, argv)
+ elif args.command == 'status':
+ cmd_status(env, argv)
+ elif args.command == 'help':
+ parser.print_usage()
+ else:
+ sys.stderr.write(f'Unknown command: {args.command}\n')
+
+if __name__ == '__main__':
+ main()
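
The benchmark script dispatches sub-commands by parsing only sys.argv[1:2] for the command name and handing the remaining arguments to a per-command ArgumentParser, which is why each cmd_* function builds its own parser. A stripped-down sketch of that pattern with a hypothetical command:

import argparse
import sys

def cmd_hello(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('name', nargs='?', default='world')
    args = parser.parse_args(argv)
    print(f'hello {args.name}')

def main():
    parser = argparse.ArgumentParser(usage='tool <command> [<args>]')
    parser.add_argument('command', nargs='?', default='help')
    args = parser.parse_args(sys.argv[1:2])   # parse just the command itself
    if args.command == 'hello':
        cmd_hello(sys.argv[2:])               # the rest goes to the sub-command parser
    else:
        parser.print_usage()

if __name__ == '__main__':
    main()
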
diff --git a/tests/performance/tests/__init__.py b/tests/performance/tests/__init__.py
new file mode 100644
index 00000000000..ac3e613174f
--- /dev/null
+++ b/tests/performance/tests/__init__.py
@@ -0,0 +1 @@
+# Apache License, Version 2.0
diff --git a/tests/performance/tests/animation.py b/tests/performance/tests/animation.py
new file mode 100644
index 00000000000..1a92f1a9718
--- /dev/null
+++ b/tests/performance/tests/animation.py
@@ -0,0 +1,41 @@
+# Apache License, Version 2.0
+
+import api
+import os
+
+
+def _run(args):
+ import bpy
+ import time
+
+ start_time = time.time()
+
+ scene = bpy.context.scene
+ for i in range(scene.frame_start, scene.frame_end):
+ scene.frame_set(i)
+
+ elapsed_time = time.time() - start_time
+
+ result = {'time': elapsed_time}
+ return result
+
+
+class AnimationTest(api.Test):
+ def __init__(self, filepath):
+ self.filepath = filepath
+
+ def name(self):
+ return self.filepath.stem
+
+ def category(self):
+ return "animation"
+
+ def run(self, env, device_id):
+ args = {}
+ result, _ = env.run_in_blender(_run, args)
+ return result
+
+
+def generate(env):
+ filepaths = env.find_blend_files('animation')
+ return [AnimationTest(filepath) for filepath in filepaths]
diff --git a/tests/performance/tests/blend_load.py b/tests/performance/tests/blend_load.py
new file mode 100644
index 00000000000..5fe498fd3d7
--- /dev/null
+++ b/tests/performance/tests/blend_load.py
@@ -0,0 +1,42 @@
+# Apache License, Version 2.0
+
+import api
+import os
+import pathlib
+
+
+def _run(filepath):
+ import bpy
+ import time
+
+ # Load once to ensure it's cached by OS
+ bpy.ops.wm.open_mainfile(filepath=filepath)
+ bpy.ops.wm.read_homefile()
+
+ # Measure loading the second time
+ start_time = time.time()
+ bpy.ops.wm.open_mainfile(filepath=filepath)
+ elapsed_time = time.time() - start_time
+
+ result = {'time': elapsed_time}
+ return result
+
+
+class BlendLoadTest(api.Test):
+ def __init__(self, filepath):
+ self.filepath = filepath
+
+ def name(self):
+ return self.filepath.stem
+
+ def category(self):
+ return "blend_load"
+
+ def run(self, env, device_id):
+ result, _ = env.run_in_blender(_run, str(self.filepath))
+ return result
+
+
+def generate(env):
+ filepaths = env.find_blend_files('*/*')
+ return [BlendLoadTest(filepath) for filepath in filepaths]
diff --git a/tests/performance/tests/cycles.py b/tests/performance/tests/cycles.py
new file mode 100644
index 00000000000..bac6b8a7ceb
--- /dev/null
+++ b/tests/performance/tests/cycles.py
@@ -0,0 +1,92 @@
+# Apache License, Version 2.0
+
+import api
+import os
+
+
+def _run(args):
+ import bpy
+ import time
+
+ device_type = args['device_type']
+ device_index = args['device_index']
+
+ scene = bpy.context.scene
+ scene.render.engine = 'CYCLES'
+ scene.render.filepath = args['render_filepath']
+ scene.render.image_settings.file_format = 'PNG'
+ scene.cycles.device = 'CPU' if device_type == 'CPU' else 'GPU'
+
+ if scene.cycles.device == 'GPU':
+ # Enable specified GPU in preferences.
+ prefs = bpy.context.preferences
+ cprefs = prefs.addons['cycles'].preferences
+ cprefs.compute_device_type = device_type
+ devices = cprefs.get_devices_for_type(device_type)
+ for device in devices:
+ device.use = False
+
+ index = 0
+ for device in devices:
+ if device.type == device_type:
+ if index == device_index:
+ device.use = True
+ break
+ else:
+ index += 1
+
+ # Render
+ bpy.ops.render.render(write_still=True)
+
+ return None
+
+
+class CyclesTest(api.Test):
+ def __init__(self, filepath):
+ self.filepath = filepath
+
+ def name(self):
+ return self.filepath.stem
+
+ def category(self):
+ return "cycles"
+
+ def use_device(self):
+ return True
+
+ def run(self, env, device_id):
+ tokens = device_id.split('_')
+ device_type = tokens[0]
+ device_index = int(tokens[1]) if len(tokens) > 1 else 0
+ args = {'device_type': device_type,
+ 'device_index': device_index,
+ 'render_filepath': str(env.log_file.parent / (env.log_file.stem + '.png'))}
+
+ _, lines = env.run_in_blender(_run, args, ['--debug-cycles', '--verbose', '1', self.filepath])
+
+ # Parse render time from output
+ prefix_time = "Render time (without synchronization): "
+ prefix_memory = "Peak: "
+ time = None
+ memory = None
+ for line in lines:
+ line = line.strip()
+ offset = line.find(prefix_time)
+ if offset != -1:
+ time = line[offset + len(prefix_time):]
+ time = float(time)
+ offset = line.find(prefix_memory)
+ if offset != -1:
+ memory = line[offset + len(prefix_memory):]
+ memory = memory.split()[0].replace(',', '')
+ memory = float(memory)
+
+ if not (time and memory):
+ raise Exception("Error parsing render time output")
+
+ return {'time': time, 'peak_memory': memory}
+
+
+def generate(env):
+ filepaths = env.find_blend_files('cycles-x/*')
+ return [CyclesTest(filepath) for filepath in filepaths]
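
CyclesTest.run() recovers its numbers by scanning the --debug-cycles output for the 'Render time (without synchronization): ' and 'Peak: ' prefixes rather than through the Python API. A small sketch of that parsing on hypothetical log lines (the exact wording of real Cycles output is assumed to match these prefixes):

lines = [
    'Render time (without synchronization): 12.34',   # hypothetical log line
    'Peak: 1,234.56',                                  # hypothetical log line
]
prefix_time = "Render time (without synchronization): "
prefix_memory = "Peak: "
time = memory = None
for line in lines:
    line = line.strip()
    if prefix_time in line:
        time = float(line.split(prefix_time, 1)[1])
    if prefix_memory in line:
        memory = float(line.split(prefix_memory, 1)[1].split()[0].replace(',', ''))
assert time == 12.34 and memory == 1234.56
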
diff --git a/tests/python/alembic_export_tests.py b/tests/python/alembic_export_tests.py
index 9d1738691f0..23a4a376533 100644
--- a/tests/python/alembic_export_tests.py
+++ b/tests/python/alembic_export_tests.py
@@ -197,7 +197,7 @@ class HierarchicalAndFlatExportTest(AbstractAlembicTest):
def test_hierarchical_export(self, tempdir: pathlib.Path):
abc = tempdir / 'cubes_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=False)" % abc.as_posix()
+ "visible_objects_only=True, flatten=False)" % abc.as_posix()
self.run_blender('cubes-hierarchy.blend', script)
# Now check the resulting Alembic file.
@@ -215,7 +215,7 @@ class HierarchicalAndFlatExportTest(AbstractAlembicTest):
def test_flat_export(self, tempdir: pathlib.Path):
abc = tempdir / 'cubes_flat.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=True)" % abc.as_posix()
+ "visible_objects_only=True, flatten=True)" % abc.as_posix()
self.run_blender('cubes-hierarchy.blend', script)
# Now check the resulting Alembic file.
@@ -236,7 +236,7 @@ class DupliGroupExportTest(AbstractAlembicTest):
def test_hierarchical_export(self, tempdir: pathlib.Path):
abc = tempdir / 'dupligroup_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=False)" % abc.as_posix()
+ "visible_objects_only=True, flatten=False)" % abc.as_posix()
self.run_blender('dupligroup-scene.blend', script)
# Now check the resulting Alembic file.
@@ -254,7 +254,7 @@ class DupliGroupExportTest(AbstractAlembicTest):
def test_flat_export(self, tempdir: pathlib.Path):
abc = tempdir / 'dupligroup_hierarchical.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=True)" % abc.as_posix()
+ "visible_objects_only=True, flatten=True)" % abc.as_posix()
self.run_blender('dupligroup-scene.blend', script)
# Now check the resulting Alembic file.
@@ -332,7 +332,7 @@ class CurveExportTest(AbstractAlembicTest):
def test_export_single_curve(self, tempdir: pathlib.Path):
abc = tempdir / 'single-curve.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=False)" % abc.as_posix()
+ "visible_objects_only=True, flatten=False)" % abc.as_posix()
self.run_blender('single-curve.blend', script)
# Now check the resulting Alembic file.
@@ -353,7 +353,7 @@ class HairParticlesExportTest(AbstractAlembicTest):
def _do_test(self, tempdir: pathlib.Path, export_hair: bool, export_particles: bool) -> pathlib.Path:
abc = tempdir / 'hair-particles.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=True, visible_objects_only=True, flatten=False, " \
+ "visible_objects_only=True, flatten=False, " \
"export_hair=%r, export_particles=%r, as_background_job=False)" \
% (abc.as_posix(), export_hair, export_particles)
self.run_blender('hair-particles.blend', script)
@@ -419,7 +419,7 @@ class UVMapExportTest(AbstractAlembicTest):
basename = 'T77021-multiple-uvmaps-animated-mesh'
abc = tempdir / f'{basename}.abc'
script = f"import bpy; bpy.ops.wm.alembic_export(filepath='{abc.as_posix()}', start=1, end=1, " \
- f"renderable_only=True, visible_objects_only=True, flatten=False)"
+ f"visible_objects_only=True, flatten=False)"
self.run_blender(f'{basename}.blend', script)
self.maxDiff = 1000
@@ -468,7 +468,7 @@ class LongNamesExportTest(AbstractAlembicTest):
def test_export_long_names(self, tempdir: pathlib.Path):
abc = tempdir / 'long-names.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, " \
- "renderable_only=False, visible_objects_only=False, flatten=False)" % abc.as_posix()
+ "visible_objects_only=False, flatten=False)" % abc.as_posix()
self.run_blender('long-names.blend', script)
name_parts = [
@@ -565,7 +565,7 @@ class InvisibleObjectExportTest(AbstractAlembicTest):
def test_hierarchical_export(self, tempdir: pathlib.Path):
abc = tempdir / 'visibility.abc'
script = "import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=2, " \
- "renderable_only=False, visible_objects_only=False)" % abc.as_posix()
+ "visible_objects_only=False)" % abc.as_posix()
self.run_blender('visibility.blend', script)
def test(cube_name: str, expect_visible: bool):
diff --git a/tests/python/bl_alembic_io_test.py b/tests/python/bl_alembic_io_test.py
index 53a0879f160..c0d0bcdea70 100644
--- a/tests/python/bl_alembic_io_test.py
+++ b/tests/python/bl_alembic_io_test.py
@@ -300,7 +300,6 @@ class CameraExportImportTest(unittest.TestCase):
abc_path = self.tempdir / "camera_transforms.abc"
self.assertIn('FINISHED', bpy.ops.wm.alembic_export(
filepath=str(abc_path),
- renderable_only=False,
flatten=flatten,
))
diff --git a/tests/python/bl_animation_fcurves.py b/tests/python/bl_animation_fcurves.py
index 2ec04749d70..9017c1ee037 100644
--- a/tests/python/bl_animation_fcurves.py
+++ b/tests/python/bl_animation_fcurves.py
@@ -40,6 +40,7 @@ class AbstractAnimationTest:
self.assertTrue(self.testdir.exists(),
'Test dir %s should exist' % self.testdir)
+
class FCurveEvaluationTest(AbstractAnimationTest, unittest.TestCase):
def test_fcurve_versioning_291(self):
# See D8752.
diff --git a/tests/python/bl_blendfile_library_overrides.py b/tests/python/bl_blendfile_library_overrides.py
index 48625a1ecdb..c9c89c01cee 100644
--- a/tests/python/bl_blendfile_library_overrides.py
+++ b/tests/python/bl_blendfile_library_overrides.py
@@ -69,7 +69,7 @@ class TestLibraryOverrides(TestHelper, unittest.TestCase):
assert(len(local_id.override_library.properties) == 1)
override_prop = local_id.override_library.properties[0]
- assert(override_prop.rna_path == "location");
+ assert(override_prop.rna_path == "location")
assert(len(override_prop.operations) == 1)
override_operation = override_prop.operations[0]
assert(override_operation.operation == 'REPLACE')
@@ -96,7 +96,7 @@ class TestLibraryOverrides(TestHelper, unittest.TestCase):
self.assertIsNone(local_id.data.override_library)
assert(len(local_id.override_library.properties) == 1)
override_prop = local_id.override_library.properties[0]
- assert(override_prop.rna_path == "scale");
+ assert(override_prop.rna_path == "scale")
assert(len(override_prop.operations) == 1)
override_operation = override_prop.operations[0]
assert(override_operation.operation == 'NOOP')
@@ -116,14 +116,14 @@ class TestLibraryOverrides(TestHelper, unittest.TestCase):
assert(len(local_id.override_library.properties) == 2)
override_prop = local_id.override_library.properties[0]
- assert(override_prop.rna_path == "scale");
+ assert(override_prop.rna_path == "scale")
assert(len(override_prop.operations) == 1)
override_operation = override_prop.operations[0]
assert(override_operation.operation == 'NOOP')
assert(override_operation.subitem_local_index == -1)
override_prop = local_id.override_library.properties[1]
- assert(override_prop.rna_path == "location");
+ assert(override_prop.rna_path == "location")
assert(len(override_prop.operations) == 1)
override_operation = override_prop.operations[0]
assert(override_operation.operation == 'REPLACE')
diff --git a/tests/python/bl_constraints.py b/tests/python/bl_constraints.py
index 279c896c6af..a2690fa4e11 100644
--- a/tests/python/bl_constraints.py
+++ b/tests/python/bl_constraints.py
@@ -375,6 +375,70 @@ class CustomSpaceTest(AbstractConstraintTests):
)))
+class CopyTransformsTest(AbstractConstraintTests):
+ layer_collection = 'Copy Transforms'
+
+ def test_mix_mode_object(self):
+ """Copy Transforms: all mix modes for objects"""
+ constraint = bpy.data.objects["Copy Transforms.object.owner"].constraints["Copy Transforms"]
+
+ constraint.mix_mode = 'REPLACE'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.7818737626075745, 0.14389121532440186, 0.4845699667930603, -0.017531070858240128),
+ (-0.2741589844226837, -0.591389000415802, -1.2397242784500122, -0.08039521425962448),
+ (0.04909384995698929, -1.0109175443649292, 0.7942137122154236, 0.1584688276052475),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'BEFORE_FULL'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-1.0791258811950684, -0.021011866629123688, 0.3120136260986328, 0.9082338809967041),
+ (0.2128538191318512, -0.3411901891231537, -1.7376484870910645, -0.39762523770332336),
+ (-0.03584420680999756, -1.0162957906723022, 0.8004404306411743, -0.9015425443649292),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'BEFORE'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.9952367544174194, -0.03077685832977295, 0.05301344022154808, 0.9082338809967041),
+ (-0.013416174799203873, -0.39984768629074097, -1.8665285110473633, -0.39762523770332336),
+ (0.03660336509346962, -0.9833710193634033, 0.75728839635849, -0.9015425443649292),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'BEFORE_SPLIT'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.9952367544174194, -0.03077685832977295, 0.05301344022154808, -1.0175310373306274),
+ (-0.013416174799203873, -0.39984768629074097, -1.8665285110473633, 0.9196047782897949),
+ (0.03660336509346962, -0.9833710193634033, 0.75728839635849, 0.1584688276052475),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'AFTER_FULL'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.8939255475997925, -0.2866469621658325, 0.7563635110855103, -0.964445173740387),
+ (-0.09460853785276413, -0.73727947473526, -1.0267245769500732, 0.9622588753700256),
+ (0.37042146921157837, -1.1893107891082764, 1.0113294124603271, 0.21314144134521484),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'AFTER'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.9033845067024231, -0.2048732340335846, 0.7542480826377869, -0.964445173740387),
+ (-0.1757974475622177, -0.6721230745315552, -1.5190268754959106, 0.9622588753700256),
+ (0.38079890608787537, -0.7963172793388367, 1.0880682468414307, 0.21314144134521484),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+ constraint.mix_mode = 'AFTER_SPLIT'
+ self.matrix_test('Copy Transforms.object.owner', Matrix((
+ (-0.9033845067024231, -0.2048732340335846, 0.7542480826377869, -1.0175310373306274),
+ (-0.1757974475622177, -0.6721230745315552, -1.5190268754959106, 0.9196047782897949),
+ (0.38079890608787537, -0.7963172793388367, 1.0880682468414307, 0.1584688276052475),
+ (0.0, 0.0, 0.0, 1.0)
+ )))
+
+
def main():
global args
import argparse
diff --git a/tests/python/bl_pyapi_idprop.py b/tests/python/bl_pyapi_idprop.py
index 1e570bf9a7f..38cd9d04a6b 100644
--- a/tests/python/bl_pyapi_idprop.py
+++ b/tests/python/bl_pyapi_idprop.py
@@ -140,6 +140,7 @@ class TestIdPropertyCreation(TestHelper, unittest.TestCase):
with self.assertRaises(TypeError):
self.id["a"] = self
+
class TestIdPropertyGroupView(TestHelper, unittest.TestCase):
def test_type(self):
diff --git a/tests/python/bl_run_operators_event_simulate.py b/tests/python/bl_run_operators_event_simulate.py
index 92315d3e853..1cc621b9684 100644
--- a/tests/python/bl_run_operators_event_simulate.py
+++ b/tests/python/bl_run_operators_event_simulate.py
@@ -165,6 +165,16 @@ def gen_events_type_text(text):
yield dict(type=type, value='RELEASE', **kw_extra)
+def repr_action(name, args, kwargs):
+ return "%s(%s)" % (
+ name,
+ ", ".join(
+ [repr(value) for value in args] +
+ [("%s=%r" % (key, value)) for key, value in kwargs.items()]
+ )
+ )
+
+
# -----------------------------------------------------------------------------
# Simulate Events
@@ -505,6 +515,18 @@ def argparse_create():
required=False,
)
+ parser.add_argument(
+ "--time-actions",
+ dest="time_actions",
+ default=False,
+ action="store_true",
+ help=(
+ "Display the time each action takes\n"
+ "(useful for measuring delay between key-presses)."
+ ),
+ required=False,
+ )
+
# Collect doc-strings from static methods in `actions`.
actions_docstring = []
for action_key in ACTION_DIR:
@@ -554,7 +576,7 @@ def setup_default_preferences(prefs):
# Main Function
-def main_event_iter(*, action_list):
+def main_event_iter(*, action_list, time_actions):
"""
Yield all events from action handlers.
"""
@@ -565,9 +587,18 @@ def main_event_iter(*, action_list):
yield dict(type='MOUSEMOVE', value='NOTHING', x=x_init, y=y_init)
+ if time_actions:
+ import time
+ t_prev = time.time()
+
for (op, args, kwargs) in action_list:
yield from handle_action(op, args, kwargs)
+ if time_actions:
+ t = time.time()
+ print("%.4f: %s" % ((t - t_prev), repr_action(op, args, kwargs)))
+ t_prev = t
+
def main():
from sys import argv
@@ -588,7 +619,7 @@ def main():
bpy.app.use_event_simulate = False
run_event_simulate(
- event_iter=main_event_iter(action_list=args.actions),
+ event_iter=main_event_iter(action_list=args.actions, time_actions=args.time_actions),
exit_fn=exit_fn,
)
diff --git a/tests/python/compositor_render_tests.py b/tests/python/compositor_render_tests.py
index 057d4a2e6dd..199e1c13b8e 100644
--- a/tests/python/compositor_render_tests.py
+++ b/tests/python/compositor_render_tests.py
@@ -16,6 +16,7 @@ try:
except ImportError:
inside_blender = False
+
def get_arguments(filepath, output_filepath):
return [
"--background",
diff --git a/tests/python/cycles_render_tests.py b/tests/python/cycles_render_tests.py
index 36c3f7d9fe5..ca0bc9f18b9 100644
--- a/tests/python/cycles_render_tests.py
+++ b/tests/python/cycles_render_tests.py
@@ -57,6 +57,7 @@ BLACKLIST_GPU = [
'transparent_shadow_hair.*.blend',
]
+
def get_arguments(filepath, output_filepath):
dirname = os.path.dirname(filepath)
basedir = os.path.dirname(dirname)
diff --git a/tests/python/modules/mesh_test.py b/tests/python/modules/mesh_test.py
index 1749e798a32..6d921959e6f 100644
--- a/tests/python/modules/mesh_test.py
+++ b/tests/python/modules/mesh_test.py
@@ -680,7 +680,7 @@ class RunTest:
test_name = each_test.test_name
if self.verbose:
print()
- print("Running test {}/{}: {}...".format(test_number+1, len(self.tests), test_name))
+ print("Running test {}/{}: {}...".format(test_number + 1, len(self.tests), test_name))
success = self.run_test(test_name)
if not success:
diff --git a/tests/python/modules/render_report.py b/tests/python/modules/render_report.py
index c1ae0b05fcd..560f8e33585 100755
--- a/tests/python/modules/render_report.py
+++ b/tests/python/modules/render_report.py
@@ -287,7 +287,7 @@ class Report:
-moz-background-size:50px 50px;
background-size:50px 50px;
- -webkit-background-size:50px 51px; /* override value for shitty webkit */
+ -webkit-background-size:50px 51px; /* Override value for silly webkit. */
background-position:0 0, 25px 0, 25px -25px, 0px 25px;
}}
diff --git a/tests/python/operators.py b/tests/python/operators.py
index c209b01c20c..4501df82175 100644
--- a/tests/python/operators.py
+++ b/tests/python/operators.py
@@ -321,7 +321,7 @@ def main():
MeshTest("CubeEdgeUnsubdivide", "testCubeEdgeUnsubdivide", "expectedCubeEdgeUnsubdivide",
[OperatorSpecEditMode("unsubdivide", {}, "EDGE", {i for i in range(6)})]),
MeshTest("UVSphereUnsubdivide", "testUVSphereUnsubdivide", "expectedUVSphereUnsubdivide",
- [OperatorSpecEditMode("unsubdivide", {'iterations': 9}, "FACE", {i for i in range(512)})]),
+ [OperatorSpecEditMode("unsubdivide", {'iterations': 9}, "FACE", {i for i in range(512)})]),
# vert connect path
# Tip: It works only if there is an already existing face or more than 2 vertices.