git.blender.org/blender.git
Diffstat (limited to 'tests/performance/api')
-rw-r--r--  tests/performance/api/__init__.py            7
-rw-r--r--  tests/performance/api/config.py            259
-rw-r--r--  tests/performance/api/device.py             71
-rw-r--r--  tests/performance/api/environment.py       244
-rw-r--r--  tests/performance/api/graph.py             114
-rw-r--r--  tests/performance/api/graph.template.html   86
-rw-r--r--  tests/performance/api/test.py               73
7 files changed, 854 insertions, 0 deletions
diff --git a/tests/performance/api/__init__.py b/tests/performance/api/__init__.py
new file mode 100644
index 00000000000..2dc9283c44a
--- /dev/null
+++ b/tests/performance/api/__init__.py
@@ -0,0 +1,7 @@
+# Apache License, Version 2.0
+
+from .environment import TestEnvironment
+from .device import TestDevice, TestMachine
+from .config import TestEntry, TestQueue, TestConfig
+from .test import Test, TestCollection
+from .graph import TestGraph
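
For orientation, here is a minimal sketch of how a driver script could consume this package from the tests/performance directory. The paths and the surrounding benchmark command line tool are assumptions, not part of this commit:

    # Hypothetical driver, run from tests/performance/ (assumed layout).
    import pathlib
    from api import TestEnvironment

    blender_git_dir = pathlib.Path('/home/user/blender-git/blender')  # placeholder path
    base_dir = pathlib.Path('/home/user/benchmark')                   # placeholder path

    env = TestEnvironment(blender_git_dir, base_dir)
    for config in env.get_configs():
        print(config.name, config.revision_names())
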
diff --git a/tests/performance/api/config.py b/tests/performance/api/config.py
new file mode 100644
index 00000000000..68f4df8d487
--- /dev/null
+++ b/tests/performance/api/config.py
@@ -0,0 +1,259 @@
+# Apache License, Version 2.0
+
+import fnmatch
+import json
+import pathlib
+import sys
+
+from dataclasses import dataclass, field
+from typing import Dict, List
+
+from .test import TestCollection
+
+
+def get_build_hash(args: None) -> str:
+    import bpy
+    import sys
+    build_hash = bpy.app.build_hash.decode('utf-8')
+    return '' if build_hash == 'Unknown' else build_hash
+
+
+@dataclass
+class TestEntry:
+    """Test to run, a combination of revision, test and device."""
+    test: str = ''
+    category: str = ''
+    revision: str = ''
+    git_hash: str = ''
+    executable: str = ''
+    date: int = 0
+    device_type: str = 'CPU'
+    device_id: str = 'CPU'
+    device_name: str = 'Unknown CPU'
+    status: str = 'queued'
+    output: Dict = field(default_factory=dict)
+    benchmark_type: str = 'comparison'
+
+    def to_json(self) -> Dict:
+        json_dict = {}
+        for field in self.__dataclass_fields__:
+            json_dict[field] = getattr(self, field)
+        return json_dict
+
+    def from_json(self, json_dict):
+        for field in self.__dataclass_fields__:
+            setattr(self, field, json_dict[field])
+
+
+class TestQueue:
+    """Queue of tests to be run or inspected. Matches JSON file on disk."""
+
+    def __init__(self, filepath: pathlib.Path):
+        self.filepath = filepath
+        self.has_multiple_revisions_to_build = False
+        self.has_multiple_categories = False
+        self.entries = []
+
+        if self.filepath.is_file():
+            with open(self.filepath, 'r') as f:
+                json_entries = json.load(f)
+
+            for json_entry in json_entries:
+                entry = TestEntry()
+                entry.from_json(json_entry)
+                self.entries.append(entry)
+
+    def rows(self, use_revision_columns: bool) -> List:
+        # Generate rows of entries for printing and running.
+        entries = sorted(self.entries, key=lambda entry:
+                         (entry.revision,
+                          entry.device_id,
+                          entry.category,
+                          entry.test))
+
+        if not use_revision_columns:
+            # One entry per row.
+            return [[entry] for entry in entries]
+        else:
+            # Multiple revisions per row.
+            rows = {}
+
+            for entry in entries:
+                key = (entry.device_id, entry.category, entry.test)
+                if key in rows:
+                    rows[key].append(entry)
+                else:
+                    rows[key] = [entry]
+
+            return [value for _, value in sorted(rows.items())]
+
+    def find(self, revision: str, test: str, category: str, device_id: str) -> Dict:
+        for entry in self.entries:
+            if entry.revision == revision and \
+               entry.test == test and \
+               entry.category == category and \
+               entry.device_id == device_id:
+                return entry
+
+        return None
+
+    def write(self) -> None:
+        json_entries = [entry.to_json() for entry in self.entries]
+        with open(self.filepath, 'w') as f:
+            json.dump(json_entries, f, indent=2)
+
+
+class TestConfig:
+    """Test configuration, containing a subset of revisions, tests and devices."""
+
+    def __init__(self, env, name: str):
+        # Init configuration from config.py file.
+        self.name = name
+        self.base_dir = env.base_dir / name
+        self.logs_dir = self.base_dir / 'logs'
+
+        config = self._read_config_module()
+        self.tests = TestCollection(env,
+                                    getattr(config, 'tests', ['*']),
+                                    getattr(config, 'categories', ['*']))
+        self.revisions = getattr(config, 'revisions', {})
+        self.builds = getattr(config, 'builds', {})
+        self.queue = TestQueue(self.base_dir / 'results.json')
+        self.benchmark_type = getattr(config, 'benchmark_type', 'comparison')
+
+        self.devices = []
+        self._update_devices(env, getattr(config, 'devices', ['CPU']))
+
+        self._update_queue(env)
+
+    def revision_names(self) -> List:
+        return sorted(list(self.revisions.keys()) + list(self.builds.keys()))
+
+    def device_name(self, device_id: str) -> str:
+        for device in self.devices:
+            if device.id == device_id:
+                return device.name
+
+        return "Unknown"
+
+    @staticmethod
+    def write_default_config(env, config_dir: pathlib.Path) -> None:
+        config_dir.mkdir(parents=True, exist_ok=True)
+
+        default_config = """devices = ['CPU']\n"""
+        default_config += """tests = ['*']\n"""
+        default_config += """categories = ['*']\n"""
+        default_config += """builds = {\n"""
+ default_config += """ 'master': '/home/user/blender-git/build/bin/blender',"""
+ default_config += """ '2.93': '/home/user/blender-2.93/blender',"""
+ default_config += """}\n"""
+ default_config += """revisions = {\n"""
+ default_config += """}\n"""
+
+ config_file = config_dir / 'config.py'
+ with open(config_file, 'w') as f:
+ f.write(default_config)
+
+ def _read_config_module(self) -> None:
+ # Import config.py as a module.
+ import importlib.util
+ spec = importlib.util.spec_from_file_location("testconfig", self.base_dir / 'config.py')
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
+
+ def _update_devices(self, env, device_filters: List) -> None:
+ # Find devices matching the filters.
+ need_gpus = device_filters != ['CPU']
+ machine = env.get_machine(need_gpus)
+
+ self.devices = []
+ for device in machine.devices:
+ for device_filter in device_filters:
+ if fnmatch.fnmatch(device.id, device_filter):
+ self.devices.append(device)
+ break
+
+ def _update_queue(self, env) -> None:
+ # Update queue to match configuration, adding and removing entries
+ # so that there is one entry for each revision, device and test
+ # combination.
+ entries = []
+
+ # Get entries for specified commits, tags and branches.
+ for revision_name, revision_commit in self.revisions.items():
+ git_hash = env.resolve_git_hash(revision_commit)
+ date = env.git_hash_date(git_hash)
+ entries += self._get_entries(revision_name, git_hash, '', date)
+
+ # Optimization to avoid rebuilds.
+ revisions_to_build = set()
+ for entry in entries:
+ if entry.status in ('queued', 'outdated'):
+ revisions_to_build.add(entry.git_hash)
+ self.queue.has_multiple_revisions_to_build = len(revisions_to_build) > 1
+
+ # Get entries for revisions based on existing builds.
+ for revision_name, executable in self.builds.items():
+ executable_path = pathlib.Path(executable)
+ if not executable_path.exists():
+ sys.stderr.write(f'Error: build {executable} not found\n')
+ sys.exit(1)
+
+ env.set_blender_executable(executable_path)
+ git_hash, _ = env.run_in_blender(get_build_hash, {})
+ env.unset_blender_executable()
+
+ mtime = executable_path.stat().st_mtime
+ entries += self._get_entries(revision_name, git_hash, executable, mtime)
+
+ # Detect number of categories for more compact printing.
+ categories = set()
+ for entry in entries:
+ categories.add(entry.category)
+ self.queue.has_multiple_categories = len(categories) > 1
+
+ # Replace actual entries.
+ self.queue.entries = entries
+
+ def _get_entries(self,
+ revision_name: str,
+ git_hash: str,
+ executable: pathlib.Path,
+ date: int) -> None:
+ entries = []
+ for test in self.tests.tests:
+ test_name = test.name()
+ test_category = test.category()
+
+ for device in self.devices:
+ entry = self.queue.find(revision_name, test_name, test_category, device.id)
+ if entry:
+ # Test if revision hash or executable changed.
+ if entry.git_hash != git_hash or \
+ entry.executable != executable or \
+ entry.benchmark_type != self.benchmark_type or \
+ entry.date != date:
+ # Update existing entry.
+ entry.git_hash = git_hash
+ entry.executable = executable
+ entry.benchmark_type = self.benchmark_type
+ entry.date = date
+ if entry.status in ('done', 'failed'):
+ entry.status = 'outdated'
+ else:
+ # Add new entry if it did not exist yet.
+ entry = TestEntry(
+ revision=revision_name,
+ git_hash=git_hash,
+ executable=executable,
+ date=date,
+ test=test_name,
+ category=test_category,
+ device_type=device.type,
+ device_id=device.id,
+ device_name=device.name,
+ benchmark_type=self.benchmark_type)
+ entries.append(entry)
+
+ return entries
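
To make the configuration format concrete, here is a hand-written example of the config.py module that TestConfig._read_config_module() loads. All paths, revision names and the 'cycles' category are placeholders, and the 'CUDA_*' pattern assumes the device ids that device.py builds from device type plus index:

    # <base_dir>/<config_name>/config.py -- example values only.
    devices = ['CPU', 'CUDA_*']       # fnmatch patterns matched against device ids
    tests = ['*']                     # fnmatch patterns matched against test names
    categories = ['cycles']           # fnmatch patterns matched against test categories

    # Revisions are resolved with git and built locally.
    revisions = {
        '2.93': 'v2.93.0',            # any commit, tag or branch accepted by git rev-parse
    }

    # Builds point at existing executables and are never rebuilt.
    builds = {
        'master': '/home/user/blender-git/build/bin/blender',
    }

    benchmark_type = 'comparison'     # or 'time_series', per TestEntry.benchmark_type
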
diff --git a/tests/performance/api/device.py b/tests/performance/api/device.py
new file mode 100644
index 00000000000..b61ae42be36
--- /dev/null
+++ b/tests/performance/api/device.py
@@ -0,0 +1,71 @@
+# Apache License, Version 2.0
+
+import platform
+import subprocess
+from typing import List
+
+
+def get_cpu_name() -> str:
+    # Get full CPU name.
+    if platform.system() == "Windows":
+        return platform.processor()
+    elif platform.system() == "Darwin":
+        cmd = ['/usr/sbin/sysctl', "-n", "machdep.cpu.brand_string"]
+        return subprocess.check_output(cmd).strip().decode('utf-8')
+    else:
+        with open('/proc/cpuinfo') as f:
+            for line in f:
+                if line.startswith('model name'):
+                    return line.split(':')[1].strip()
+
+    return "Unknown CPU"
+
+
+def get_gpu_device(args: None) -> List:
+    # Get the list of available Cycles GPU devices.
+    import bpy
+    import sys
+
+    prefs = bpy.context.preferences
+    cprefs = prefs.addons['cycles'].preferences
+
+    result = []
+
+    for device_type, _, _, _ in cprefs.get_device_types(bpy.context):
+        cprefs.compute_device_type = device_type
+        devices = cprefs.get_devices_for_type(device_type)
+        index = 0
+        for device in devices:
+            if device.type == device_type:
+                result.append({'type': device.type, 'name': device.name, 'index': index})
+                index += 1
+        break
+
+    return result
+
+
+class TestDevice:
+    def __init__(self, device_type: str, device_id: str, name: str, operating_system: str):
+        self.type = device_type
+        self.id = device_id
+        self.name = name
+        self.operating_system = operating_system
+
+
+class TestMachine:
+    def __init__(self, env, need_gpus: bool):
+        operating_system = platform.system()
+
+        self.devices = [TestDevice('CPU', 'CPU', get_cpu_name(), operating_system)]
+        self.has_gpus = need_gpus
+
+        if need_gpus and env.blender_executable:
+            gpu_devices, _ = env.run_in_blender(get_gpu_device, {})
+            for gpu_device in gpu_devices:
+                device_type = gpu_device['type']
+                device_name = gpu_device['name']
+                device_id = gpu_device['type'] + "_" + str(gpu_device['index'])
+                self.devices.append(TestDevice(device_type, device_id, device_name, operating_system))
+
+    def cpu_device(self) -> TestDevice:
+        return self.devices[0]
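
As an illustration of the resulting device list, a machine with one CUDA GPU might report something along these lines (names are invented, not output from a real run):

    # Continuing the driver sketch above (env is a TestEnvironment).
    machine = env.get_machine(need_gpus=True)
    for device in machine.devices:
        print(device.id, device.name, device.operating_system)
    # CPU     Intel Core i9-9900K       Linux
    # CUDA_0  NVIDIA GeForce RTX 2080   Linux
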
diff --git a/tests/performance/api/environment.py b/tests/performance/api/environment.py
new file mode 100644
index 00000000000..c9ddd493394
--- /dev/null
+++ b/tests/performance/api/environment.py
@@ -0,0 +1,244 @@
+# Apache License, Version 2.0
+
+import base64
+import glob
+import inspect
+import multiprocessing
+import os
+import pathlib
+import platform
+import pickle
+import subprocess
+import sys
+from typing import Callable, Dict, List
+
+from .config import TestConfig
+from .device import TestMachine
+
+
+class TestEnvironment:
+    def __init__(self, blender_git_dir: pathlib.Path, base_dir: pathlib.Path):
+        self.blender_git_dir = blender_git_dir
+        self.base_dir = base_dir
+        self.blender_dir = base_dir / 'blender'
+        self.build_dir = base_dir / 'build'
+        self.lib_dir = base_dir / 'lib'
+        self.benchmarks_dir = self.blender_git_dir.parent / 'lib' / 'benchmarks'
+        self.git_executable = 'git'
+        self.cmake_executable = 'cmake'
+        self.cmake_options = ['-DWITH_INTERNATIONAL=OFF', '-DWITH_BUILDINFO=OFF']
+        self.unset_blender_executable()
+        self.log_file = None
+        self.machine = None
+
+    def get_machine(self, need_gpus: bool=True) -> None:
+        if not self.machine or (need_gpus and not self.machine.has_gpus):
+            self.machine = TestMachine(self, need_gpus)
+
+        return self.machine
+
+    def init(self, build) -> None:
+        if not self.benchmarks_dir.exists():
+            sys.stderr.write(f'Error: benchmark files directory not found at {self.benchmarks_dir}')
+            sys.exit(1)
+
+        # Create benchmarks folder contents.
+        print(f'Init {self.base_dir}')
+        self.base_dir.mkdir(parents=True, exist_ok=True)
+
+        if len(self.get_configs(names_only=True)) == 0:
+            config_dir = self.base_dir / 'default'
+            print(f'Creating default configuration in {config_dir}')
+            TestConfig.write_default_config(self, config_dir)
+
+        if build:
+            if not self.lib_dir.exists():
+                print(f'Creating symlink at {self.lib_dir}')
+                self.lib_dir.symlink_to(self.blender_git_dir.parent / 'lib')
+            else:
+                print(f'Exists {self.lib_dir}')
+
+            if not self.blender_dir.exists():
+                print(f'Init git worktree in {self.blender_dir}')
+                self.call([self.git_executable, 'worktree', 'add', '--detach', self.blender_dir, 'HEAD'], self.blender_git_dir)
+            else:
+                print(f'Exists {self.blender_dir}')
+
+            if not self.build_dir.exists():
+                print(f'Init build in {self.build_dir}')
+                self.build_dir.mkdir()
+                # No translation to avoid dealing with submodules
+                self.call([self.cmake_executable, self.blender_dir, '.'] + self.cmake_options, self.build_dir)
+            else:
+                print(f'Exists {self.build_dir}')
+
+            print("Building")
+            self.build()
+
+        print('Done')
+
+    def checkout(self, git_hash) -> None:
+        # Checkout Blender revision
+        if not self.blender_dir.exists():
+            sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
+            sys.exit(1)
+
+        self.call([self.git_executable, 'clean', '-f', '-d'], self.blender_dir)
+        self.call([self.git_executable, 'reset', '--hard', 'HEAD'], self.blender_dir)
+        self.call([self.git_executable, 'checkout', '--detach', git_hash], self.blender_dir)
+
+        self.build()
+
+    def build(self) -> None:
+        # Build Blender revision
+        if not self.build_dir.exists():
+            sys.stderr.write('\n\nError: no build set up, run `./benchmark init --build` first\n')
+            sys.exit(1)
+
+        jobs = str(multiprocessing.cpu_count())
+        self.call([self.cmake_executable, '.'] + self.cmake_options, self.build_dir)
+        self.call([self.cmake_executable, '--build', '.', '-j', jobs, '--target', 'install'], self.build_dir)
+
+    def set_blender_executable(self, executable_path: pathlib.Path) -> None:
+        # Run all Blender commands with this executable.
+        self.blender_executable = executable_path
+
+    def unset_blender_executable(self) -> None:
+        if platform.system() == "Windows":
+            self.blender_executable = self.build_dir / 'bin' / 'blender.exe'
+        elif platform.system() == "Darwin":
+            self.blender_executable = self.build_dir / 'bin' / 'Blender.app' / 'Contents' / 'MacOS' / 'Blender'
+        else:
+            self.blender_executable = self.build_dir / 'bin' / 'blender'
+
+        if not self.blender_executable.exists():
+            self.blender_executable = 'blender'
+
+    def set_log_file(self, filepath: pathlib.Path, clear=True) -> None:
+        # Log all commands and output to this file.
+        self.log_file = filepath
+
+        if clear:
+            self.log_file.unlink(missing_ok=True)
+
+    def unset_log_file(self) -> None:
+        self.log_file = None
+
+    def call(self, args: List[str], cwd: pathlib.Path, silent=False) -> List[str]:
+        # Execute command with arguments in specified directory,
+        # and return combined stdout and stderr output.
+
+        # Open log file for writing
+        f = None
+        if self.log_file:
+            if not self.log_file.exists():
+                self.log_file.parent.mkdir(parents=True, exist_ok=True)
+            f = open(self.log_file, 'a')
+            f.write('\n' + ' '.join([str(arg) for arg in args]) + '\n\n')
+
+        proc = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+        # Read line by line
+        lines = []
+        try:
+            while proc.poll() is None:
+                line = proc.stdout.readline()
+                if line:
+                    line_str = line.decode('utf-8', 'ignore')
+                    lines.append(line_str)
+                    if f:
+                        f.write(line_str)
+        except KeyboardInterrupt:
+            # Avoid processes that keep running when interrupting.
+            proc.terminate()
+
+        if f:
+            f.close()
+
+        # Print command output on error
+        if proc.returncode != 0 and not silent:
+            for line in lines:
+                print(line.rstrip())
+            raise Exception("Error executing command")
+
+        return lines
+
+    def call_blender(self, args: List[str], foreground=False) -> List[str]:
+        # Execute Blender command with arguments.
+        common_args = ['--factory-startup', '--enable-autoexec', '--python-exit-code', '1']
+        if foreground:
+            common_args += ['--no-window-focus', '--window-geometry', '0', '0', '1024', '768']
+        else:
+            common_args += ['--background']
+
+        return self.call([self.blender_executable] + common_args + args, cwd=self.base_dir)
+
+    def run_in_blender(self,
+                       function: Callable[[Dict], Dict],
+                       args: Dict,
+                       blender_args: List=[],
+                       foreground=False) -> Dict:
+        # Run function in a Blender instance. Arguments and return values are
+        # passed as a Python object that must be serializable with pickle.
+
+        # Get information to call this function from Blender.
+        package_path = pathlib.Path(__file__).parent.parent
+        functionname = function.__name__
+        modulename = inspect.getmodule(function).__name__
+
+        # Serialize arguments in base64, to avoid having to escape it.
+        args = base64.b64encode(pickle.dumps(args))
+        output_prefix = 'TEST_OUTPUT: '
+
+        expression = (f'import sys, pickle, base64\n'
+                      f'sys.path.append("{package_path}")\n'
+                      f'import {modulename}\n'
+                      f'args = pickle.loads(base64.b64decode({args}))\n'
+                      f'result = {modulename}.{functionname}(args)\n'
+                      f'result = base64.b64encode(pickle.dumps(result))\n'
+                      f'print("{output_prefix}" + result.decode())\n')
+
+        expr_args = blender_args + ['--python-expr', expression]
+        lines = self.call_blender(expr_args, foreground=foreground)
+
+        # Parse output.
+        for line in lines:
+            if line.startswith(output_prefix):
+                output = line[len(output_prefix):].strip()
+                result = pickle.loads(base64.b64decode(output))
+                return result, lines
+
+        return {}, lines
+
+    def find_blend_files(self, dirpath: pathlib.Path) -> List:
+        # Find .blend files in subdirectories of the given directory in the
+        # lib/benchmarks directory.
+        dirpath = self.benchmarks_dir / dirpath
+        filepaths = []
+        for filename in glob.iglob(str(dirpath / '*.blend'), recursive=True):
+            filepaths.append(pathlib.Path(filename))
+        return filepaths
+
+    def get_configs(self, name: str=None, names_only: bool=False) -> List:
+        # Get list of configurations in the benchmarks directory.
+        configs = []
+
+        if self.base_dir.exists():
+            for dirname in os.listdir(self.base_dir):
+                if not name or dirname == name:
+                    dirpath = self.base_dir / dirname / 'config.py'
+                    if dirpath.exists():
+                        if names_only:
+                            configs.append(dirname)
+                        else:
+                            configs.append(TestConfig(self, dirname))
+
+        return configs
+
+    def resolve_git_hash(self, revision):
+        # Get git hash for a tag or branch.
+        return self.call([self.git_executable, 'rev-parse', revision], self.blender_git_dir)[0].strip()
+
+    def git_hash_date(self, git_hash):
+        # Get commit date for a git hash.
+        return int(self.call([self.git_executable, 'log', '-n1', git_hash, '--format=%at'], self.blender_git_dir)[0].strip())
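
To make the run_in_blender() round trip concrete, here is a small sketch of a callable it could execute. The function has to live in a module importable from the package path, as get_build_hash() and get_gpu_device() above do, and both its argument and return value must survive pickling:

    # Hypothetical helper, e.g. added to api/device.py or a tests module.
    def get_blender_version(args: None) -> str:
        import bpy
        return bpy.app.version_string

    # Usage from a driver script (sketch, env is a TestEnvironment):
    # version, log_lines = env.run_in_blender(get_blender_version, {})
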
diff --git a/tests/performance/api/graph.py b/tests/performance/api/graph.py
new file mode 100644
index 00000000000..b3c8329ff27
--- /dev/null
+++ b/tests/performance/api/graph.py
@@ -0,0 +1,114 @@
+# Apache License, Version 2.0
+
+from . import TestQueue
+
+import json
+import pathlib
+from typing import Dict, List
+
+
+class TestGraph:
+    def __init__(self, json_filepaths: List[pathlib.Path]):
+        # Initialize graph from JSON file. Note that this is implemented without
+        # accessing any benchmark environment or configuration. This way, benchmarks
+        # run on various machines can be aggregated and the graph generated on another
+        # machine.
+
+        # Gather entries for each device.
+        devices = {}
+
+        for json_filepath in json_filepaths:
+            queue = TestQueue(json_filepath)
+
+            for entry in queue.entries:
+                if entry.status in ('done', 'outdated'):
+                    device_name = entry.device_name
+                    if device_name in devices.keys():
+                        devices[device_name].append(entry)
+                    else:
+                        devices[device_name] = [entry]
+
+        data = []
+        for device_name, device_entries in devices.items():
+            # Gather used categories.
+            categories = {}
+            for entry in device_entries:
+                category = entry.category
+                if category in categories.keys():
+                    categories[category].append(entry)
+                else:
+                    categories[category] = [entry]
+
+            # Generate one graph for every device x category x result key combination.
+            for category, category_entries in categories.items():
+                entries = sorted(category_entries, key=lambda entry: (entry.revision, entry.test))
+
+                outputs = set()
+                for entry in entries:
+                    for output in entry.output.keys():
+                        outputs.add(output)
+
+                chart_type = 'line' if entries[0].benchmark_type == 'time_series' else 'comparison'
+
+                for output in outputs:
+                    chart_name = f"{category} ({output})"
+                    data.append(self.chart(device_name, chart_name, entries, chart_type, output))
+
+        self.json = json.dumps(data, indent=2)
+
+    def chart(self, device_name: str, chart_name: str, entries: List, chart_type: str, output: str) -> Dict:
+        # Gather used tests.
+        tests = {}
+        for entry in entries:
+            test = entry.test
+            if test not in tests.keys():
+                tests[test] = len(tests)
+
+        # Gather used revisions.
+        revisions = {}
+        revision_dates = {}
+        for entry in entries:
+            revision = entry.revision
+            if revision not in revisions.keys():
+                revisions[revision] = len(revisions)
+                revision_dates[revision] = int(entry.date)
+
+        # Google Charts JSON data layout is like a spreadsheet table, with
+        # columns, rows and cells. We create one column for revision labels,
+        # and one column for each test.
+        cols = []
+        if chart_type == 'line':
+            cols.append({'id': '', 'label': 'Date', 'type': 'date'})
+        else:
+            cols.append({'id': '', 'label': ' ', 'type': 'string'})
+        for test, test_index in tests.items():
+            cols.append({'id': '', 'label': test, 'type': 'number'})
+
+        rows = []
+        for revision, revision_index in revisions.items():
+            if chart_type == 'line':
+                date = revision_dates[revision]
+                row = [{'f': None, 'v': 'Date({0})'.format(date * 1000)}]
+            else:
+                row = [{'f': None, 'v': revision}]
+            row += [{}] * len(tests)
+            rows.append({'c': row})
+
+        for entry in entries:
+            test_index = tests[entry.test]
+            revision_index = revisions[entry.revision]
+            time = entry.output[output] if output in entry.output else -1.0
+            rows[revision_index]['c'][test_index + 1] = {'f': None, 'v': time}
+
+        data = {'cols': cols, 'rows': rows}
+        return {'device': device_name, 'name': chart_name, 'data': data, 'chart_type': chart_type}
+
+    def write(self, filepath: pathlib.Path) -> None:
+        # Write HTML page with JSON graph data embedded.
+        template_dir = pathlib.Path(__file__).parent
+        with open(template_dir / 'graph.template.html', 'r') as f:
+            template = f.read()
+
+        contents = template.replace('%JSON_DATA%', self.json)
+        with open(filepath, "w") as f:
+            f.write(contents)
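
A short sketch of how the graph might be generated from one or more results.json files written by TestQueue; the paths are placeholders:

    import pathlib
    from api import TestGraph

    base_dir = pathlib.Path('/home/user/benchmark')   # placeholder path
    json_files = sorted(base_dir.glob('*/results.json'))
    graph = TestGraph(json_files)
    graph.write(base_dir / 'results.html')
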
diff --git a/tests/performance/api/graph.template.html b/tests/performance/api/graph.template.html
new file mode 100644
index 00000000000..147f1628c23
--- /dev/null
+++ b/tests/performance/api/graph.template.html
@@ -0,0 +1,86 @@
+<html>
+<head>
+ <title>Benchmarks</title>
+ <meta charset="UTF-8">
+ <style type="text/css">
+ body { margin: 40px auto;
+ font-family: Arial;
+ font-size: 14px;
+ color: #333;
+ max-width: 900px; }
+ a { text-decoration: none; color: #06b; }
+ </style>
+ <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+ <script>
+ google.charts.load('current', {'packages':['line', 'bar']});
+ google.charts.setOnLoadCallback(draw_charts);
+
+ function transposeDataTable(dt)
+ {
+ /* Swap rows and columns. Bar and line charts expect different layouts,
+ * with this function we can use the same data source for both. */
+ var ndt = new google.visualization.DataTable;
+ ndt.addColumn('string',dt.getColumnLabel(0));
+ for(var x=1; x<dt.getNumberOfColumns(); x++) {
+ ndt.addRow([dt.getColumnLabel(x)]);
+ }
+ for(var x=0; x<dt.getNumberOfRows(); x++) {
+ ndt.addColumn('number', dt.getValue(x,0));
+ for(var y=1; y<dt.getNumberOfColumns(); y++) {
+ ndt.setValue(y-1, x+1, dt.getValue(x,y));
+ }
+ }
+ return ndt;
+ }
+
+ function draw_charts()
+ {
+ /* Load JSON data. */
+ var json_data = %JSON_DATA%;
+
+ /* Clear contents. */
+ charts_elem = document.getElementById("charts");
+ while(charts_elem.firstChild)
+ {
+ charts_elem.removeChild(charts_elem.firstChild);
+ }
+
+ /* Draw charts for each device. */
+ for (var i = 0; i < json_data.length; i++)
+ {
+ device = json_data[i];
+
+ /* Chart drawing options. */
+ var options = {
+ chart: {title: device["name"], subtitle: device['device']},
+ pointsVisible: true,
+ pointSize: 2.5,
+ height: 500,
+ };
+
+ /* Create chart div. */
+ elem = document.createElement('div');
+ elem.id = device["id"];
+ charts_elem.appendChild(elem)
+
+ /* Create chart. */
+ var data = new google.visualization.DataTable(device["data"]);
+ if (device['chart_type'] == 'line') {
+ var chart = new google.charts.Line(elem);
+ chart.draw(data, options);
+ }
+ else {
+ var chart = new google.charts.Bar(elem);
+ chart.draw(transposeDataTable(data), options);
+ }
+ }
+ }
+ </script>
+</head>
+<body>
+ <h1>Benchmarks</h1>
+ <div id="charts">
+ ...
+ </div>
+</body>
+</html>
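
For reference, the %JSON_DATA% placeholder is filled in by TestGraph.write() with the list built by TestGraph.chart(). One element of that list has roughly the following shape, shown here as a Python literal with invented values:

    # Shape of one chart entry, as produced by TestGraph.chart() (illustrative values).
    chart_entry = {
        'device': 'Intel Core i9-9900K',
        'name': 'cycles (time)',
        'chart_type': 'comparison',
        'data': {
            'cols': [{'id': '', 'label': ' ', 'type': 'string'},
                     {'id': '', 'label': 'example', 'type': 'number'}],
            'rows': [{'c': [{'f': None, 'v': 'master'}, {'f': None, 'v': 1.0}]}],
        },
    }
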
diff --git a/tests/performance/api/test.py b/tests/performance/api/test.py
new file mode 100644
index 00000000000..7e8193d2c21
--- /dev/null
+++ b/tests/performance/api/test.py
@@ -0,0 +1,73 @@
+# Apache License, Version 2.0
+
+import abc
+import fnmatch
+from typing import Dict, List
+
+
+class Test:
+    @abc.abstractmethod
+    def name(self) -> str:
+        """
+        Name of the test.
+        """
+
+    @abc.abstractmethod
+    def category(self) -> str:
+        """
+        Category of the test.
+        """
+
+    def use_device(self) -> bool:
+        """
+        Test uses a specific CPU or GPU device.
+        """
+        return False
+
+    @abc.abstractmethod
+    def run(self, env, device_id: str) -> Dict:
+        """
+        Execute the test and report results.
+        """
+
+
+class TestCollection:
+    def __init__(self, env, names_filter: List=['*'], categories_filter: List=['*']):
+        import importlib
+        import pkgutil
+        import tests
+
+        self.tests = []
+
+        # Find and import all Python files in the tests folder, and generate
+        # the list of tests for each.
+        for _, modname, _ in pkgutil.iter_modules(tests.__path__, 'tests.'):
+            module = importlib.import_module(modname)
+            tests = module.generate(env)
+
+            for test in tests:
+                test_category = test.category()
+                found = False
+                for category_filter in categories_filter:
+                    if fnmatch.fnmatch(test_category, category_filter):
+                        found = True
+                if not found:
+                    continue
+
+                test_name = test.name()
+                found = False
+                for name_filter in names_filter:
+                    if fnmatch.fnmatch(test_name, name_filter):
+                        found = True
+                if not found:
+                    continue
+
+                self.tests.append(test)
+
+    def find(self, test_name: str, test_category: str):
+        # Find a test based on name and category.
+        for test in self.tests:
+            if test.name() == test_name and test.category() == test_category:
+                return test
+
+        return None
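
For reference, here is a minimal sketch of what a test module is expected to provide, inferred from how TestCollection imports the sibling tests package and calls its module-level generate(env). The module and class names are hypothetical, and the assumption that the dictionary returned by run() ends up as the entry's output is based on how graph.py reads entry.output:

    # Hypothetical tests/example.py (not part of this commit).
    import api


    class ExampleTest(api.Test):
        def name(self) -> str:
            return 'example'

        def category(self) -> str:
            return 'misc'

        def run(self, env, device_id: str) -> dict:
            # Named results; each key is assumed to become a separate chart output.
            return {'time': 1.0}


    def generate(env):
        return [ExampleTest()]
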