git.blender.org/blender.git
author    Brecht Van Lommel <brecht@blender.org>  2020-03-15 02:30:26 +0300
committer Brecht Van Lommel <brecht@blender.org>  2020-03-24 22:58:31 +0300
commit    adab11adf58b1ed816ba8f9979739b0b75e1dbb1 (patch)
tree      b94165ac004ee841090584756ad10be37b33f16c
parent    394a1373a0cd20b7d0660df4bf80e1231e33cba9 (diff)
Tests: prototype for performance testing framework (performance-test branch)
-rw-r--r--  tests/performance/api/__init__.py      6
-rw-r--r--  tests/performance/api/device.py       15
-rw-r--r--  tests/performance/api/environment.py  202
-rw-r--r--  tests/performance/api/queue.py        50
-rw-r--r--  tests/performance/api/test.py         42
-rwxr-xr-x  tests/performance/benchmark           251
-rw-r--r--  tests/performance/tests/__init__.py    0
-rw-r--r--  tests/performance/tests/animation.py  32
-rw-r--r--  tests/performance/tests/cycles.py     33
-rw-r--r--  tests/performance/tests/undo.py       39
10 files changed, 670 insertions(+), 0 deletions(-)
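For orientation, here is a minimal sketch of the intended workflow, driven from Python via subprocess. The command names (init, list, add, run, status) come from the usage text of the tests/performance/benchmark script added below; the test pattern, paths, and call sequence are illustrative assumptions, not part of this commit.

    # Hypothetical driver: invoke the benchmark CLI added by this commit.
    # Assumes it is run from a Blender source checkout, with the 'lib' folder
    # and benchmark .blend files available one directory up, as 'init' expects.
    import subprocess

    def benchmark(*args):
        # The script lives in tests/performance and is executable.
        subprocess.run(['./benchmark', *args], cwd='tests/performance', check=True)

    benchmark('init')                        # git worktree + CMake build in ../benchmark
    benchmark('list')                        # discovered tests and devices
    benchmark('add', '--test', 'cycles_*')   # queue current revision for matching tests
    benchmark('run')                         # build the revision and execute queued tests
    benchmark('status')                      # queued/done entries with timings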
diff --git a/tests/performance/api/__init__.py b/tests/performance/api/__init__.py
new file mode 100644
index 00000000000..7ca9fcda169
--- /dev/null
+++ b/tests/performance/api/__init__.py
@@ -0,0 +1,6 @@
+
+from .environment import TestEnvironment
+from .device import TestDevice, TestMachine
+from .queue import TestQueue
+from .test import Test, TestCollection
+
diff --git a/tests/performance/api/device.py b/tests/performance/api/device.py
new file mode 100644
index 00000000000..4a5db54a911
--- /dev/null
+++ b/tests/performance/api/device.py
@@ -0,0 +1,15 @@
+
+from . import TestEnvironment
+
+class TestDevice:
+    def __init__(self, name: str):
+        self.name = name
+
+class TestMachine:
+    def __init__(self, env: TestEnvironment):
+        # TODO: implement device detection, matching Blender Benchmark.
+        self.devices = [TestDevice('CPU')]
+
+    def cpu_device(self) -> TestDevice:
+        return self.devices[0]
+
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
new file mode 100644
index 00000000000..bf8b893c664
--- /dev/null
+++ b/tests/performance/api/environment.py
@@ -0,0 +1,202 @@
+
+import base64
+import inspect
+import logging
+import logging.handlers
+import os
+import multiprocessing
+import pathlib
+import pickle
+import subprocess
+import sys
+from typing import Callable, Dict, List
+
+class TestEnvironment:
+    def __init__(self):
+        # Directory paths.
+        self.repo_dir = pathlib.Path(__file__).parent.parent.parent.parent
+        self.base_dir = self.repo_dir.parent / 'benchmark'
+        self.blender_dir = self.base_dir / 'blender.git'
+        self.build_dir = self.base_dir / 'build'
+        self.lib_dir = self.base_dir / 'lib'
+        self.benchmarks_dir = self.lib_dir / 'benchmarks'
+
+        # Executable paths.
+        if sys.platform == 'darwin':
+            blender_executable = 'Blender.app/Contents/MacOS/Blender'
+        elif sys.platform == 'win32':
+            blender_executable = 'blender.exe'
+        else:
+            blender_executable = 'blender'
+
+        self.blender_executable = self.build_dir / 'bin' / blender_executable
+        self.git_executable = 'git'
+        self.cmake_executable = 'cmake'
+
+        self.logger = None
+        self._init_logger()
+
+    def _init_logger(self):
+        # Logging.
+        if os.path.isdir(self.base_dir) and not self.logger:
+            log = self.base_dir / 'command.log'
+            maxbytes = 5 * 1024 * 1024
+            self.logger = logging.getLogger('Blender Benchmark')
+            self.logger.setLevel(logging.INFO)
+            handler = logging.handlers.RotatingFileHandler(log, maxBytes=maxbytes, backupCount=0)
+            self.logger.addHandler(handler)
+
+    def validate(self) -> str:
+        benchmarks_dir = self.repo_dir.parent / 'lib' / 'benchmarks'
+        if not os.path.isdir(benchmarks_dir):
+            return 'Warning: benchmarks not found at ' + str(benchmarks_dir)
+        return None
+
+    def initialized(self) -> bool:
+        return os.path.isdir(self.base_dir) and \
+            os.path.isdir(self.blender_dir) and \
+            os.path.isdir(self.build_dir) and \
+            os.path.isdir(self.benchmarks_dir)
+
+    def init(self) -> None:
+        blender_dir = self.repo_dir
+        lib_dir = self.repo_dir.parent / 'lib'
+
+        if not os.path.isdir(self.base_dir):
+            print("Creating", self.base_dir)
+            os.makedirs(self.base_dir, exist_ok=True)
+
+        self._init_logger()
+
+        if not os.path.isdir(self.lib_dir):
+            print("Creating symlink at", self.lib_dir)
+            os.symlink(lib_dir, self.lib_dir, target_is_directory=True)
+        if not os.path.isdir(self.blender_dir):
+            print("Creating git worktree in", self.blender_dir)
+            self.call([self.git_executable, 'worktree', 'add', self.blender_dir, 'HEAD'], blender_dir)
+
+        # Setup build directory.
+        print("Configuring cmake in", self.build_dir)
+        os.makedirs(self.build_dir, exist_ok=True)
+        cmakecache = self.build_dir / 'CMakeCache.txt'
+        if os.path.isfile(cmakecache):
+            os.remove(cmakecache)
+        cmake_options = ['-DWITH_CYCLES_NATIVE_ONLY=ON',
+                         '-DWITH_BUILDINFO=OFF',
+                         '-DWITH_INTERNATIONAL=OFF']
+        self.call([self.cmake_executable, self.blender_dir] + cmake_options, self.build_dir)
+        print("Done")
+
+    def current_revision(self) -> str:
+        lines = self.call([self.git_executable, 'rev-parse', '--short=7', 'HEAD'], self.repo_dir)
+        return lines[0].strip()
+
+    def build_revision(self, revision: str) -> None:
+        # Checkout Blender revision
+        self.call([self.git_executable, 'clean', '-f', '-d'], self.blender_dir)
+        self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
+        self.call([self.git_executable, 'fetch', 'origin'], self.blender_dir)
+        self.call([self.git_executable, 'checkout', '--detach', revision], self.blender_dir)
+
+        # Update submodules not needed for now
+        # make_update = self.blender_dir / 'build_files' / 'utils' / 'make_update.py'
+        # self.call([sys.executable, make_update, '--no-libraries', '--no-blender'], self.blender_dir)
+
+        # Build
+        self.call([self.cmake_executable,
+                   '--build', '.',
+                   '--parallel', str(multiprocessing.cpu_count()),
+                   '--target', 'install',
+                   '--config', 'Release'],
+                  self.build_dir)
+
+    def info(self, msg):
+        if self.logger:
+            self.logger.info(msg)
+        else:
+            print(msg)
+
+    def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
+        """Execute command with arguments in specified directory,
+        and return combined stdout and stderr output."""
+        self.info("$ " + " ".join([str(arg) for arg in args]))
+        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+        lines = []
+        while proc.poll() is None:
+            line = proc.stdout.readline()
+            if line:
+                line = line.decode('utf-8', 'ignore')
+                self.info(line.strip())
+                lines += [line]
+
+        if proc.returncode != 0 and not silent:
+            raise Exception("Error executing command")
+
+        return lines
+
+    def call_blender(self, args: List[str], foreground=False) -> List[str]:
+        """Execute Blender command with arguments"""
+        common_args = ['--factory-startup', '--enable-autoexec']
+        if foreground:
+            common_args += ['--no-window-focus', '--window-geometry', '0', '0', '1024', '768']
+        else:
+            common_args += ['--background']
+
+        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
+
+    def run_in_blender(self, function: Callable[[Dict], Dict], args: Dict, blendfile=None, foreground=False) -> Dict:
+        """Run function in a Blender instance. Arguments and return values are
+        passed as a dictionary that must be serializable with pickle."""
+        function_path = os.path.abspath(inspect.getfile(function))
+
+        # Get information to call this function from Blender.
+        package_path = pathlib.Path(__file__).parent.parent
+        functionname = function.__name__
+        modulename = inspect.getmodule(function).__name__
+
+        # Serialize arguments in base64, to avoid having to escape them.
+        args = base64.b64encode(pickle.dumps(args))
+        output_prefix = 'TEST_OUTPUT: '
+
+        expression = (f'import sys, pickle, base64\n'
+                      f'sys.path.append("{package_path}")\n'
+                      f'import {modulename}\n'
+                      f'args = pickle.loads(base64.b64decode({args}))\n'
+                      f'result = {modulename}.{functionname}(args)\n'
+                      f'result = base64.b64encode(pickle.dumps(result))\n'
+                      f'print("{output_prefix}" + result.decode())\n')
+
+        blender_args = []
+        if blendfile:
+            blender_args += [blendfile]
+        blender_args += ['--python-expr', expression]
+        lines = self.call_blender(blender_args, foreground=foreground)
+
+        # Parse output.
+        for line in lines:
+            if line.startswith(output_prefix):
+                output = line[len(output_prefix):].strip()
+                result = pickle.loads(base64.b64decode(output))
+                return result
+
+        return {}
+
+    def find_blend_files(self, dirname):
+        """
+        Search for <name>.blend or <name>/<name>.blend files in the given directory
+        under lib/benchmarks.
+        """
+        dirpath = self.benchmarks_dir / dirname
+        filepaths = []
+        if os.path.isdir(dirpath):
+            for filename in os.listdir(dirpath):
+                filepath = dirpath / filename
+                if os.path.isfile(filepath) and filename.endswith('.blend'):
+                    filepaths += [filepath]
+                elif os.path.isdir(filepath):
+                    filepath = filepath / (filename + ".blend")
+                    if os.path.isfile(filepath):
+                        filepaths += [filepath]
+
+        return filepaths
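To make the run_in_blender round-trip above concrete (pickle the args dict, base64-encode it into a --python-expr, launch a headless Blender, and unpickle the TEST_OUTPUT line), here is a hedged sketch of a caller. The module name and the counting function are hypothetical; for the import inside Blender to resolve, the module would need to live under tests/performance (for example in its tests/ package) so that run_in_blender can re-import it by name.

    import api

    def _count_objects(args):
        # Runs inside the Blender subprocess; args and the return value
        # must both survive pickle.
        import bpy
        return {'objects': len(bpy.data.objects)}

    def example(env: api.TestEnvironment):
        # Launches Blender in the background on the default scene and
        # returns the dictionary produced by _count_objects.
        result = env.run_in_blender(_count_objects, {})
        print(result.get('objects', 0))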
diff --git a/tests/performance/api/queue.py b/tests/performance/api/queue.py
new file mode 100644
index 00000000000..2d1a92109a6
--- /dev/null
+++ b/tests/performance/api/queue.py
@@ -0,0 +1,50 @@
+
+import json
+import os
+from . import TestEnvironment
+from typing import Dict
+
+class TestQueue:
+    def __init__(self, env: TestEnvironment):
+        self.filepath = env.base_dir / 'queue.json'
+
+        if os.path.isfile(self.filepath):
+            with open(self.filepath, 'r') as f:
+                self.entries = json.load(f)
+        else:
+            self.entries = []
+
+    def find(self, revision: str, test: str, device: str) -> Dict:
+        for entry in self.entries:
+            if entry['revision'] == revision and entry['test'] == test and entry['device'] == device:
+                return entry
+
+        return None
+
+    def add(self, revision: str, test: str, device: str) -> Dict:
+        if self.find(revision, test, device):
+            return None
+
+        entry = {'revision': revision,
+                 'test': test,
+                 'device': device,
+                 'status': 'queued',
+                 'output': {}}
+        self.entries += [entry]
+        return entry
+
+    def update(self, entry: Dict) -> None:
+        existing = self.find(entry['revision'], entry['test'], entry['device'])
+        if existing:
+            existing['status'] = entry['status']
+            existing['output'] = entry['output']
+
+    def remove(self, entry: Dict) -> Dict:
+        self.entries.remove(entry)
+        entry['status'] = 'removed'
+        return entry
+
+    def write(self) -> None:
+        # TODO: lock file to avoid multiple processes overwriting each other.
+        with open(self.filepath, 'w') as f:
+            json.dump(self.entries, f)
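The queue is a plain JSON file (queue.json in the benchmark base directory) shared by the add, run and server commands. A hedged sketch of the add/update/write cycle, assuming 'benchmark init' has already created the base directory and the snippet is executed from tests/performance so that 'import api' resolves; the revision, test name and timing are made up:

    import api

    env = api.TestEnvironment()
    queue = api.TestQueue(env)

    # add() returns None if an identical (revision, test, device) entry exists.
    entry = queue.add('0123abc', 'cycles_example', 'CPU')
    if entry:
        entry['status'] = 'done'
        entry['output'] = {'time': 1.234}
        queue.update(entry)   # copy status/output into the stored entry
        queue.write()         # persist to queue.json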
diff --git a/tests/performance/api/test.py b/tests/performance/api/test.py
new file mode 100644
index 00000000000..53c5fc0c3cc
--- /dev/null
+++ b/tests/performance/api/test.py
@@ -0,0 +1,42 @@
+
+import abc
+from . import TestEnvironment
+from typing import Dict
+
+class Test:
+    @abc.abstractmethod
+    def name(self) -> str:
+        """
+        Name of the test.
+        """
+
+    def use_device(self) -> bool:
+        """
+        Test uses a specific CPU or GPU device.
+        """
+        return False
+
+    @abc.abstractmethod
+    def run(self, env: TestEnvironment) -> Dict:
+        """
+        Execute the test and report results.
+        """
+
+class TestCollection:
+    def __init__(self, env: TestEnvironment):
+        import importlib
+        import pkgutil
+        import tests
+
+        self.tests = []
+
+        for _, modname, _ in pkgutil.iter_modules(tests.__path__, 'tests.'):
+            module = importlib.import_module(modname)
+            self.tests += module.generate(env)
+
+    def find(self, test_name: str):
+        for test in self.tests:
+            if test.name() == test_name:
+                return test
+
+        return None
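TestCollection treats every module in the tests package as a plugin: it imports the module and calls its generate(env) function, which returns Test instances. The real test modules added by this commit follow below; as a hedged sketch, a hypothetical extra module (tests/performance/tests/example.py, names illustrative) could look like:

    import api
    import time

    class ExampleTest(api.Test):
        def name(self):
            return 'example_noop'

        def run(self, env):
            # Trivial timing outside Blender; real tests call
            # env.run_in_blender() as in the modules below.
            start_time = time.time()
            return {'time': time.time() - start_time}

    def generate(env):
        # Called once per module by TestCollection.
        return [ExampleTest()]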
diff --git a/tests/performance/benchmark b/tests/performance/benchmark
new file mode 100755
index 00000000000..5ed4f7f5886
--- /dev/null
+++ b/tests/performance/benchmark
@@ -0,0 +1,251 @@
+#!/usr/bin/env python3
+
+import api
+import argparse
+import fnmatch
+import pathlib
+import sys
+import time
+
+def print_entry(collection, entry, end='\n'):
+    revision = entry['revision']
+    testname = entry['test']
+    device = entry['device']
+
+    test = collection.find(testname)
+    status = entry['status']
+    if status == 'queued' and not test:
+        status = 'missing'
+
+    output = entry['output']
+    result = ''
+    if status == 'done' and output:
+        result = '%.4fs' % output['time']
+
+    print(f"{revision: <20} {testname: <20} {device: <10} {'[' + status + ']': <10} {result: <10}", end=end)
+
+def match_entry(entry, args):
+    revision = entry['revision']
+    testname = entry['test']
+    device = entry['device']
+
+    return (fnmatch.fnmatch(revision, args.revision) and
+            fnmatch.fnmatch(testname, args.test) and
+            fnmatch.fnmatch(device, args.device))
+
+def run_entry(env, collection, entry):
+    revision = entry['revision']
+    testname = entry['test']
+    device = entry['device']
+
+    if entry['status'] != 'queued':
+        return None
+
+    test = collection.find(entry['test'])
+    if not test:
+        return None
+
+    entry['status'] = 'building'
+    print_entry(collection, entry, end='\r')
+    env.build_revision(revision)
+    entry['status'] = 'running'
+    print_entry(collection, entry, end='\r')
+    entry['output'] = test.run(env)
+    entry['status'] = 'done' if entry['output'] else 'failed'
+    print_entry(collection, entry)
+    return entry
+
+def cmd_init(env, argv):
+    env.init()
+
+def cmd_add(env, argv, silent=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--test', default='*')
+    parser.add_argument('--device', default='*')
+    parser.add_argument('--revision', default=env.current_revision())
+    args = parser.parse_args(argv)
+
+    collection = api.TestCollection(env)
+    queue = api.TestQueue(env)
+    machine = api.TestMachine(env)
+
+    for test in collection.tests:
+        if not fnmatch.fnmatch(test.name(), args.test):
+            continue
+
+        if not test.use_device():
+            devices = [machine.cpu_device()]
+        else:
+            devices = machine.devices
+
+        for device in devices:
+            if not fnmatch.fnmatch(device.name, args.device):
+                continue
+
+            # TODO: validate revision
+            entry = queue.add(args.revision, test.name(), device.name)
+            if entry and not silent:
+                print_entry(collection, entry)
+
+    queue.write()
+
+def cmd_remove(env, argv, default_revision, silent=False):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--test', default='*')
+    parser.add_argument('--device', default='*')
+    parser.add_argument('--revision', default=default_revision)
+    args = parser.parse_args(argv)
+
+    collection = api.TestCollection(env)
+    queue = api.TestQueue(env)
+    for entry in queue.entries[:]:
+        if match_entry(entry, args):
+            queue.remove(entry)
+            if not silent:
+                print_entry(collection, entry)
+
+    queue.write()
+
+def cmd_run(env, argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--test', default='*')
+    parser.add_argument('--device', default='*')
+    parser.add_argument('--revision', default=env.current_revision())
+    args = parser.parse_args(argv)
+
+    cmd_remove(env, argv, env.current_revision())
+    cmd_add(env, argv, silent=True)
+
+    collection = api.TestCollection(env)
+    queue = api.TestQueue(env)
+    for entry in queue.entries[:]:
+        revision = entry['revision']
+        testname = entry['test']
+        device = entry['device']
+
+        if match_entry(entry, args):
+            updated_entry = run_entry(env, collection, entry)
+            if updated_entry:
+                queue = api.TestQueue(env)
+                queue.update(updated_entry)
+                queue.write()
+        else:
+            print_entry(collection, entry)
+
+def cmd_server(env, argv):
+    while True:
+        collection = api.TestCollection(env)
+        queue = api.TestQueue(env)
+        for entry in queue.entries:
+            updated_entry = run_entry(env, collection, entry)
+            if updated_entry:
+                queue = api.TestQueue(env)
+                queue.update(updated_entry)
+                queue.write()
+                break
+
+        time.sleep(1.0)
+
+def cmd_list(env, argv):
+    collection = api.TestCollection(env)
+    machine = api.TestMachine(env)
+    for test in collection.tests:
+        if not test.use_device():
+            devices = [machine.cpu_device()]
+        else:
+            devices = machine.devices
+
+        devices = [device.name for device in devices]
+        devices = ' '.join(devices)
+        print(f"{test.name(): <20} {devices}")
+
+def cmd_devices(env, argv):
+    machine = api.TestMachine(env)
+    for device in machine.devices:
+        print(device.name)
+
+def cmd_status(env, argv):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--test', default='*')
+    parser.add_argument('--device', default='*')
+    parser.add_argument('--revision', default='*')
+    args = parser.parse_args(argv)
+
+    collection = api.TestCollection(env)
+    queue = api.TestQueue(env)
+    for entry in queue.entries:
+        if match_entry(entry, args):
+            print_entry(collection, entry)
+
+def main():
+    usage = ('benchmark <command> [<args>]\n'
+             '\n'
+             'Commands:\n'
+             '  init      Set up git worktree and build in ../benchmark\n'
+             ' \n'
+             '  list      List available tests\n'
+             '  devices   List available devices\n'
+             ' \n'
+             '  run       Execute benchmarks for current revision\n'
+             '  add       Queue current revision to be benchmarked\n'
+             '  remove    Remove current revision from the queue\n'
+             '  clear     Remove all queued and completed benchmarks\n'
+             ' \n'
+             '  status    List queued and completed tests\n'
+             ' \n'
+             '  server    Run as server, executing queued revisions\n'
+             ' \n'
+             'Arguments for run, add, remove and status:\n'
+             '  --test <pattern>        Pattern to match test name, may include wildcards\n'
+             '  --device <device>       Use only the specified device\n'
+             '  --revision <revision>   Use the specified revision instead of the current one\n'
+             )
+
+    env = api.TestEnvironment()
+    warning = env.validate()
+    if warning:
+        usage += '\n' + warning + '\n'
+
+    parser = argparse.ArgumentParser(
+        description='Blender performance testing',
+        usage=usage)
+
+    parser.add_argument('command', nargs='?', default='help')
+    args = parser.parse_args(sys.argv[1:2])
+
+    argv = sys.argv[2:]
+
+    if args.command == 'init':
+        cmd_init(env, argv)
+        return
+    elif args.command == 'list':
+        cmd_list(env, argv)
+        return
+    elif args.command == 'devices':
+        cmd_devices(env, argv)
+        return
+    elif args.command == 'help':
+        parser.print_usage()
+        return
+
+    if not env.initialized():
+        print("Benchmark directory is not (fully) initialized")
+        return
+
+    if args.command == 'add':
+        cmd_add(env, argv)
+    elif args.command == 'remove':
+        cmd_remove(env, argv, env.current_revision())
+    elif args.command == 'clear':
+        cmd_remove(env, argv, '*')
+    elif args.command == 'status':
+        cmd_status(env, argv)
+    elif args.command == 'run':
+        cmd_run(env, argv)
+    elif args.command == 'server':
+        cmd_server(env, argv)
+    else:
+        sys.stderr.write(f'Unknown command: {args.command}\n')
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/performance/tests/__init__.py b/tests/performance/tests/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/tests/performance/tests/__init__.py
diff --git a/tests/performance/tests/animation.py b/tests/performance/tests/animation.py
new file mode 100644
index 00000000000..0ddec327f9f
--- /dev/null
+++ b/tests/performance/tests/animation.py
@@ -0,0 +1,32 @@
+import api
+import os
+
+def _run(args):
+    import bpy
+    import time
+
+    scene = bpy.context.scene
+    scene.frame_set(scene.frame_start)
+
+    start_time = time.time()
+    for frame in range(scene.frame_start + 1, scene.frame_end + 1):
+        scene.frame_set(frame)
+        bpy.context.evaluated_depsgraph_get()
+    elapsed_time = time.time() - start_time
+
+    result = {'time': elapsed_time}
+    return result
+
+class AnimationTest(api.Test):
+    def __init__(self, filepath):
+        self.filepath = filepath
+
+    def name(self):
+        return 'animation_' + self.filepath.stem
+
+    def run(self, env):
+        return env.run_in_blender(_run, {}, blendfile=self.filepath)
+
+def generate(env):
+    filepaths = env.find_blend_files('animation')
+    return [AnimationTest(filepath) for filepath in filepaths]
diff --git a/tests/performance/tests/cycles.py b/tests/performance/tests/cycles.py
new file mode 100644
index 00000000000..c70efe46c6d
--- /dev/null
+++ b/tests/performance/tests/cycles.py
@@ -0,0 +1,33 @@
+import api
+import os
+
+def _run(args):
+    import bpy
+    import time
+
+    scene = bpy.context.scene
+    scene.render.engine = 'CYCLES'
+    scene.cycles.samples = 4
+
+    start_time = time.time()
+    bpy.ops.render.render()
+    elapsed_time = time.time() - start_time
+    result = {'time': elapsed_time}
+    return result
+
+class CyclesTest(api.Test):
+    def __init__(self, filepath):
+        self.filepath = filepath
+
+    def name(self):
+        return 'cycles_' + self.filepath.stem
+
+    def use_device(self):
+        return True
+
+    def run(self, env):
+        return env.run_in_blender(_run, {}, blendfile=self.filepath)
+
+def generate(env):
+    filepaths = env.find_blend_files('cycles')
+    return [CyclesTest(filepath) for filepath in filepaths]
diff --git a/tests/performance/tests/undo.py b/tests/performance/tests/undo.py
new file mode 100644
index 00000000000..5e5e52407d6
--- /dev/null
+++ b/tests/performance/tests/undo.py
@@ -0,0 +1,39 @@
+import api
+import os
+
+def _run(args):
+    import bpy
+    import time
+
+    bpy.context.preferences.view.show_developer_ui = True
+    bpy.context.preferences.experimental.use_undo_speedup = True
+
+    bpy.ops.ed.undo_push()
+    bpy.ops.mesh.primitive_cube_add()
+    bpy.ops.object.modifier_add(type='SUBSURF')
+    bpy.context.object.modifiers["Subdivision"].levels = 10
+    bpy.ops.ed.undo_push()
+    bpy.ops.transform.translate(value=(1.0, 1.0, 1.0))
+    bpy.ops.ed.undo_push()
+    bpy.context.evaluated_depsgraph_get()
+
+    start_time = time.time()
+    bpy.ops.ed.undo()
+    bpy.context.evaluated_depsgraph_get()
+    elapsed_time = time.time() - start_time
+
+    result = {'time': elapsed_time}
+    return result
+
+class UndoTest(api.Test):
+    def __init__(self):
+        pass
+
+    def name(self):
+        return 'undo_translation'
+
+    def run(self, env):
+        return env.run_in_blender(_run, {})
+
+def generate(env):
+    return [UndoTest()]