Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/owncloud/client.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/test/gui
diff options
context:
space:
mode:
authorTalank <talank@gces.edu.np>2021-09-21 10:37:22 +0300
committerGitHub <noreply@github.com>2021-09-21 10:37:22 +0300
commit552ea187b4a7992e85203d06b395acac1cd45c4c (patch)
treea2600277f1fce605ec80581f4bd00285628cd3e4 /test/gui
parent4951b52ac6d62cf6b2601e908d397063d47c489d (diff)
[Tests-Only]Fixed gui test log parser (#9020)
Diffstat (limited to 'test/gui')
-rw-r--r--test/gui/TestLogParser.py74
1 file changed, 47 insertions, 27 deletions
diff --git a/test/gui/TestLogParser.py b/test/gui/TestLogParser.py
index 79d0c9f0e..b10dc19b3 100644
--- a/test/gui/TestLogParser.py
+++ b/test/gui/TestLogParser.py
@@ -17,35 +17,55 @@ def traverse_loop(data):
for scenario in feature['tests']:
# If the scenario is not skipped, then loop through all the steps in the scenario
- if "tests" not in scenario:
- break
- for test_step in scenario['tests']:
-
- # If the test step fails then it contains further "tests" object
- # So loop through all the errors in the test step
- if "tests" not in test_step:
- break
-
- for error in test_step['tests']:
- # Sometimes, the step with assertions operations also contains this "tests" object.
- # And we do not consider it to be an error, if there is a result key with value "PASS"
- if "result" in error and error["result"] == "PASS":
- break
-
- # Append the information of failing tests into the list of failing tests
- test = {
- "Feature File": str(feature_file['name']),
- "Feature": str(feature['name']),
- "Scenario": str(scenario['name']),
- "Test Step": str(test_step['name']),
- "Error Details": str(error['detail']),
- }
-
- failing_tests.append(test)
+ if "tests" in scenario:
+
+ for test_step in scenario['tests']:
+
+ # If the test step fails then it contains further "tests" object
+ # So loop through all the errors in the test step
+ if "tests" in test_step:
+
+ for error in test_step['tests']:
+
+ # Sometimes, the step with assertions operations also contains this "tests" object.
+ # And we do not consider it to be an error, if there is a result key with value "PASS"
+ if 'result' in error and error['result'] == 'PASS':
+ continue
+
+ # Append the information of failing tests into the list of failing tests
+                        # If the error detail is missing (occurs mainly in runtime errors) then we display the entire error object.
+ test = {
+ "Feature File": str(feature_file['name']),
+ "Feature": str(feature['name']),
+ "Scenario": str(scenario['name']),
+ "Test Step": str(test_step['name']),
+ "Error Details": str(error['detail'])
+ if ('detail' in error)
+ else "Error details not found",
+ }
+
+ failing_tests.append(test)
return failing_tests
+def filter_redundancy(raw_data):
+ unique_scenarios = []
+ filtered_data = []
+
+ for scenario in raw_data:
+ if scenario['Scenario'] not in unique_scenarios:
+ unique_scenarios.append(scenario['Scenario'])
+ filtered_data.append(scenario)
+ else:
+ if scenario['Error Details'] != "Error details not found":
+ for repeated_scenario in filtered_data:
+ if repeated_scenario['Scenario'] == scenario['Scenario']:
+ repeated_scenario['Error Details'] = scenario['Error Details']
+
+ return filtered_data
+
+
f = open(str(sys.argv[1]))
# returns JSON object as a dictionary
@@ -55,8 +75,8 @@ data = json.load(f)
failing_tests_raw = traverse_loop(data)
# Remove duplicate nodes, if exists
-# This step is neccessary because sometimes the data in failing_tests_raw is redundent.
-failing_tests = [dict(y) for y in set(tuple(x.items()) for x in failing_tests_raw)]
+# This step is necessary because sometimes the data in failing_tests_raw is redundant.
+failing_tests = filter_redundancy(failing_tests_raw)
print(json.dumps(failing_tests, indent=4, sort_keys=True))