git.blender.org/blender.git
author     Campbell Barton <campbell@blender.org>  2022-04-20 10:04:48 +0300
committer  Campbell Barton <campbell@blender.org>  2022-04-20 10:07:46 +0300
commit     65a1fcdaf76f47bc51b4d2143470bbbec03b7f12
tree       a55705ab811ac36fe2df50ac67ee05751ee53f6c /tests
parent     67a4908bfc922439fa889521f1f1b24c09324b4a

Cleanup: run autopep8 on tests/performance/benchmark
This file was skipped by source/tools/utils/autopep8_clean.py since it doesn't have a .py extension; running the autopep8 tool recursively, by contrast, detects Python scripts without extensions.
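
Note: autopep8_clean.py itself is not part of this change, so its detection logic is only described above, not shown. As a rough sketch of that idea (treat a file as Python if it has a .py extension or a Python shebang), the helper below is hypothetical, not Blender's actual tooling:

import os


def is_python_script(path: str) -> bool:
    # Files with a .py extension are Python by definition; anything else
    # qualifies only if its first line is a Python shebang (an assumed
    # heuristic, mirroring what the commit message describes).
    if path.endswith('.py'):
        return True
    try:
        with open(path, 'rb') as f:
            first_line = f.readline(128)
    except OSError:
        return False
    return first_line.startswith(b'#!') and b'python' in first_line


def find_python_scripts(root: str):
    # Walk the tree and yield every detected Python script, with or
    # without an extension (e.g. tests/performance/benchmark).
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            if is_python_script(path):
                yield path
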
Diffstat (limited to 'tests')
 -rwxr-xr-x  tests/performance/benchmark  38
 1 file changed, 28 insertions(+), 10 deletions(-)
diff --git a/tests/performance/benchmark b/tests/performance/benchmark
index 80556674dcc..4ca46a111e1 100755
--- a/tests/performance/benchmark
+++ b/tests/performance/benchmark
@@ -11,24 +11,30 @@ import sys
 import time
 from typing import List
 
+
 def find_blender_git_dir() -> pathlib.Path:
     # Find .git directory of the repository we are in.
     cwd = pathlib.Path.cwd()
     for path in [cwd] + list(cwd.parents):
         if (path / '.git').exists():
-          return path
+            return path
 
     return None
 
+
 def get_tests_base_dir(blender_git_dir: pathlib.Path) -> pathlib.Path:
     # Benchmarks dir is next to the Blender source folder.
     return blender_git_dir.parent / 'benchmark'
 
+
 def use_revision_columns(config: api.TestConfig) -> bool:
-    return config.benchmark_type == "comparison" and \
-           len(config.queue.entries) > 0 and \
-           not config.queue.has_multiple_revisions_to_build
+    return (
+        config.benchmark_type == "comparison" and
+        len(config.queue.entries) > 0 and
+        not config.queue.has_multiple_revisions_to_build
+    )
 
+
 def print_header(config: api.TestConfig) -> None:
     # Print header with revision columns headers.
@@ -42,6 +48,7 @@ def print_header(config: api.TestConfig) -> None:
         header += f"{revision_name: <20} "
     print(header)
 
+
 def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
     # Print one or more test entries on a row.
     row = ""
@@ -79,10 +86,13 @@ def print_row(config: api.TestConfig, entries: List, end='\n') -> None:
 
 def match_entry(entry: api.TestEntry, args: argparse.Namespace):
     # Filter tests by name and category.
-    return fnmatch.fnmatch(entry.test, args.test) or \
-           fnmatch.fnmatch(entry.category, args.test) or \
-           entry.test.find(args.test) != -1 or \
-           entry.category.find(args.test) != -1
+    return (
+        fnmatch.fnmatch(entry.test, args.test) or
+        fnmatch.fnmatch(entry.category, args.test) or
+        entry.test.find(args.test) != -1 or
+        entry.category.find(args.test) != -1
+    )
 
+
 def run_entry(env: api.TestEnvironment,
               config: api.TestConfig,
@@ -159,6 +169,7 @@ def run_entry(env: api.TestEnvironment,
     return True
 
+
 def cmd_init(env: api.TestEnvironment, argv: List):
     # Initialize benchmarks folder.
     parser = argparse.ArgumentParser()
@@ -168,6 +179,7 @@ def cmd_init(env: api.TestEnvironment, argv: List):
     env.init(args.build)
     env.unset_log_file()
 
+
 def cmd_list(env: api.TestEnvironment, argv: List) -> None:
     # List devices, tests and configurations.
     print('DEVICES')
@@ -188,6 +200,7 @@ def cmd_list(env: api.TestEnvironment, argv: List) -> None:
     for config_name in configs:
         print(config_name)
 
+
 def cmd_status(env: api.TestEnvironment, argv: List):
     # Print status of tests in configurations.
     parser = argparse.ArgumentParser()
@@ -210,6 +223,7 @@ def cmd_status(env: api.TestEnvironment, argv: List):
             if match_entry(row[0], args):
                 print_row(config, row)
 
+
 def cmd_reset(env: api.TestEnvironment, argv: List):
     # Reset tests to re-run them.
     parser = argparse.ArgumentParser()
@@ -232,6 +246,7 @@ def cmd_reset(env: api.TestEnvironment, argv: List):
         if args.test == '*':
             shutil.rmtree(config.logs_dir)
 
+
 def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     # Run tests.
     parser = argparse.ArgumentParser()
@@ -271,6 +286,7 @@ def cmd_run(env: api.TestEnvironment, argv: List, update_only: bool):
     print("\nfile://" + str(html_filepath))
 
+
 def cmd_graph(argv: List):
     # Create graph from a given JSON results file.
     parser = argparse.ArgumentParser()
@@ -291,6 +307,7 @@ def cmd_graph(argv: List):
     graph = api.TestGraph(json_files)
     graph.write(pathlib.Path(args.output))
 
+
 def main():
     usage = ('benchmark <command> [<args>]\n'
              '\n'
@@ -317,8 +334,8 @@ def main():
     argv = sys.argv[2:]
     blender_git_dir = find_blender_git_dir()
     if blender_git_dir == None:
-      sys.stderr.write('Error: no blender git repository found from current working directory\n')
-      sys.exit(1)
+        sys.stderr.write('Error: no blender git repository found from current working directory\n')
+        sys.exit(1)
 
     if args.command == 'graph':
         cmd_graph(argv)
@@ -349,5 +366,6 @@ def main():
     else:
         sys.stderr.write(f'Unknown command: {args.command}\n')
 
+
 if __name__ == '__main__':
     main()
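
The changes above are typical autopep8 output: a second blank line inserted between top-level definitions (pycodestyle E302/E305) and backslash continuations replaced with parenthesized expressions. A minimal sketch of applying the same cleanup to this file through autopep8's Python API follows; the options Blender's tooling actually passes are not recorded in this commit, so the settings here are assumptions:

import pathlib

import autopep8  # third-party: pip install autopep8

# Reformat the extensionless script, writing back only when it changed.
path = pathlib.Path('tests/performance/benchmark')
source = path.read_text()
# The aggressiveness level and line length are assumptions, not Blender's
# recorded settings.
fixed = autopep8.fix_code(source, options={'aggressive': 1, 'max_line_length': 120})
if fixed != source:
    path.write_text(fixed)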