
github.com/owncloud/client.git
author    Saw-jan Gurung <saw.jan.grg3e@gmail.com>  2022-01-18 09:30:00 +0300
committer GitHub <noreply@github.com>               2022-01-18 09:30:00 +0300
commit    5d97e1cb9cc92e664b7c8adf76039f261b5d7ac9 (patch)
tree      2d6c5e7f052ff20dc0a5b5d4022468a700fce28b /test
parent    f88c6adab2662c78e1575043934580402b360cbe (diff)
fix gui test logParser (#9372)
Diffstat (limited to 'test')
-rw-r--r--  test/gui/TestLogParser.py      28
-rw-r--r--  test/gui/tst_vfs/test.feature   1
2 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/test/gui/TestLogParser.py b/test/gui/TestLogParser.py
index 81372f542..2f98abc25 100644
--- a/test/gui/TestLogParser.py
+++ b/test/gui/TestLogParser.py
@@ -25,20 +25,23 @@ def traverse_loop(data):
# So loop through all the errors in the test step
if "tests" in test_step:
- for error in test_step['tests']:
+ for test_log in test_step['tests']:
# Sometimes, the step with assertions operations also contains this "tests" object.
# And we do not consider it to be an error, if there is a result key with value "PASS"
- if 'result' in error and error['result'] == 'PASS':
+ if (
+ 'result' in test_log
+ and test_log['result'] == 'PASS'
+ ):
continue
# Again, we will have to loop through the 'tests' to get failing tests from Scenario Outlines.
- if "tests" in error and len(error['tests']) > 0:
- for outlineStep in error['tests']:
+ if "tests" in test_log and len(test_log['tests']) > 0:
+ for outlineStep in test_log['tests']:
# Append the information of failing tests into the list of failing tests
- if (
- 'result' in outlineStep
- and outlineStep['result'] == 'ERROR'
+ if 'result' in outlineStep and (
+ outlineStep['result'] == 'ERROR'
+ or outlineStep['result'] == 'FAIL'
):
failing_test = {
"Feature File": str(
@@ -47,7 +50,7 @@ def traverse_loop(data):
"Feature": str(feature['name']),
"Scenario": str(scenario['name']),
"Example": str(test_step['name']),
- "Test Step": str(error['name']),
+ "Test Step": str(test_log['name']),
"Error Details": str(
outlineStep['detail']
)
@@ -59,14 +62,17 @@ def traverse_loop(data):
# Append the information of failing tests into the list of failing tests
# If the error detail is missing(occurs mainly in runtime error) then we display "Error details not found" message.
- if 'result' in error and error['result'] == 'ERROR':
+ if 'result' in test_log and (
+ test_log['result'] == 'ERROR'
+ or test_log['result'] == 'FAIL'
+ ):
failing_test = {
"Feature File": str(feature_file['name']),
"Feature": str(feature['name']),
"Scenario": str(scenario['name']),
"Test Step": str(test_step['name']),
- "Error Details": str(error['detail'])
- if ('detail' in error)
+ "Error Details": str(test_log['detail'])
+ if ('detail' in test_log)
else "Error details not found",
}
failing_tests.append(failing_test)
diff --git a/test/gui/tst_vfs/test.feature b/test/gui/tst_vfs/test.feature
index e06660ef1..46ae20bb0 100644
--- a/test/gui/tst_vfs/test.feature
+++ b/test/gui/tst_vfs/test.feature
@@ -18,6 +18,7 @@ Feature: Enable/disable virtual file support
And user "Alice" has set up a client with default settings
Then VFS enabled baseline image should not match the default screenshot
+
Scenario: Disable VFS
Given user "Alice" has been created on the server with default attributes and without skeleton files
And user "Alice" has set up a client with default settings