author    Cláudio Maia <clrrm@isep.ipp.pt>  2020-12-08 15:40:55 +0000
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2020-12-16 07:43:29 +0100
commit    2251c5a878c24e9120d9dd53751f69790e3d7387 (patch)
tree      7dc01a9f0dd9618a3f8f1d7021603a87bf4937d5
parent    4bc285e9880bea93af786945cdf175229480d455 (diff)
tester: Add yaml format to the supported report formats
 tester/rt/test.py | 116 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 114 insertions(+), 2 deletions(-)
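
Usage note (illustrative; not part of the commit): with this patch applied, a
YAML report can be requested alongside the default text output. Assuming the
tester front end is invoked as rtems-test (the command name does not appear in
this diff), a run might look like:

    rtems-test --rtems-bsp=<bsp> --report-path=results \
        --report-format=yaml <path-to-built-tests>

Per the --report-path help text below, the format's file extension is appended
to the base path, so this run would presumably write results.yaml.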
diff --git a/tester/rt/test.py b/tester/rt/test.py
index 9b157e9..66f1756 100644
--- a/tester/rt/test.py
+++ b/tester/rt/test.py
@@ -339,9 +339,115 @@ def generate_junit_report(args, reports, start_time, end_time,
     with open(junit_file, 'w') as f:
         TestSuite.to_file(f, [ts], prettyprint = True)
+
+def generate_yaml_report(args, reports, start_time, end_time,
+                         total, yaml_file):
+    """ Generates a YAML file containing information about the test run,
+    including all test outputs """
+
+    import yaml
+
+    def format_output(output_list):
+        # join the captured lines and drop the '] ' and '=> ' markers
+        return '\n'.join(output_list).replace('] ', '').replace('=> ', '')
+
+    yaml_log = {}
+    yaml_log['command-line'] = args
+    yaml_log['host'] = host.label(mode='all')
+    yaml_log['python'] = sys.version.replace('\n', '')
+    yaml_log['summary'] = {}
+    yaml_log['summary']['passed-count'] = reports.passed
+    yaml_log['summary']['failed-count'] = reports.failed
+    yaml_log['summary']['user-input-count'] = reports.user_input
+    yaml_log['summary']['expected-fail-count'] = reports.expected_fail
+    yaml_log['summary']['indeterminate-count'] = reports.indeterminate
+    yaml_log['summary']['benchmark-count'] = reports.benchmark
+    yaml_log['summary']['timeout-count'] = reports.timeouts
+    yaml_log['summary']['test-too-long-count'] = reports.test_too_long
+    yaml_log['summary']['invalid-count'] = reports.invalids
+    yaml_log['summary']['wrong-version-count'] = reports.wrong_version
+    yaml_log['summary']['wrong-build-count'] = reports.wrong_build
+    yaml_log['summary']['wrong-tools-count'] = reports.wrong_tools
+    yaml_log['summary']['total-count'] = reports.total
+    time_delta = end_time - start_time
+    yaml_log['summary']['average-test-time'] = str(time_delta / total)
+    yaml_log['summary']['testing-time'] = str(time_delta)
+
+    # one list of failing test names per non-passed result type
+    result_types = [
+        'failed', 'user-input', 'expected-fail', 'indeterminate',
+        'benchmark', 'timeout', 'test-too-long', 'invalid', 'wrong-version',
+        'wrong-build', 'wrong-tools'
+    ]
+    for result_type in result_types:
+        yaml_log['summary'][result_type] = []
+
+    yaml_log['outputs'] = []
+
+    # process the output of each test
+    for exe_name, result in reports.results.items():
+        result_element = {}
+        test_name = exe_name.split("/")[-1]
+        result_element['executable-name'] = test_name
+        result_element['executable-sha512'] = get_hash512(exe_name)
+        result_element['execution-start'] = result['start'].isoformat()
+        result_element['execution-end'] = result['end'].isoformat()
+        date_diff = result['end'] - result['start']
+        result_element['execution-duration'] = str(date_diff)
+        result_element['execution-result'] = result['result']
+        result_element['bsp'] = result['bsp']
+        result_element['bsp-arch'] = result['bsp_arch']
+        result_output = result['output']
+
+        dbg_output = []
+        test_output = []
+        idxs_output = []  # indices of the marker lines in the output
+        # use enumerate() rather than list.index(), which would return
+        # the first occurrence of a repeated line
+        for idx, elem in enumerate(result_output):
+            if '=> ' in elem:
+                idxs_output.append(idx)
+            if '*** END' in elem:
+                idxs_output.append(idx)
+
+        if len(idxs_output) == 3:  # test executed and has a result
+            # first to second marker, plus anything after '*** END', is
+            # debugger output; second marker through '*** END' is the
+            # test's console output
+            dbg_output = result_output[idxs_output[0]:idxs_output[1]]
+            dbg_output.append("=== Executed Test ===")
+            dbg_output += result_output[idxs_output[2] + 1:]
+            test_output = result_output[idxs_output[1]:idxs_output[2] + 1]
+        else:
+            dbg_output = result_output
+
+        result_element['debugger-output'] = format_output(dbg_output)
+        result_element['console-output'] = format_output(test_output)
+        yaml_log['outputs'].append(result_element)
+
+        result_type = result['result']
+        # map "fatal-error" onto "failed"
+        if result_type == "fatal-error":
+            result_type = "failed"
+
+        if result_type != 'passed':
+            yaml_log['summary'][result_type].append(test_name)
+
+    with open(yaml_file, 'w') as outfile:
+        yaml.dump(yaml_log, outfile, default_flow_style=False,
+                  allow_unicode=True)
+
+
+def get_hash512(exe):
+    """ Returns the SHA512 hash of the binary file passed as argument """
+    import hashlib
+
+    # hash the file in 4KiB blocks to avoid loading it all into memory;
+    # 'sha512' avoids shadowing the built-in hash()
+    sha512 = hashlib.sha512()
+    with open(exe, "rb") as f:
+        for byte_block in iter(lambda: f.read(4096), b""):
+            sha512.update(byte_block)
+    return sha512.hexdigest()
+
+
 report_formatters = {
     'json': generate_json_report,
-    'junit': generate_junit_report
+    'junit': generate_junit_report,
+    'yaml': generate_yaml_report
 }
@@ -352,6 +458,12 @@ def check_report_formats(report_formats, report_location):
         if report_format not in report_formatters:
             raise error.general('invalid RTEMS report formatter: %s'
                                 % report_format)
+        if report_format == 'yaml':
+            # fail early if the optional PyYAML dependency is missing
+            try:
+                import yaml
+            except ImportError:
+                raise error.general('the yaml report format requires the '
+                                    'PyYAML module; install it, for '
+                                    'example with: pip install PyYAML')


 def run(args):
@@ -365,7 +477,7 @@ def run(args):
         '--rtems-bsp': 'The RTEMS BSP to run the test on',
         '--user-config': 'Path to your local user configuration INI file',
         '--report-path': 'Report output base path (file extension will be added)',
-        '--report-format': 'Formats in which to report test results in addition to txt: json',
+        '--report-format': 'Formats in which to report test results in addition to txt: json, yaml',
         '--log-mode': 'Reporting modes, failures (default),all,none',
         '--list-bsps': 'List the supported BSPs',
         '--debug-trace': 'Debug trace based on specific flags (console,gdb,output,cov)',
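
For illustration, the rough shape of a generated report, reconstructed from
generate_yaml_report above. All values, test names, and BSP names below are
made up, and yaml.dump sorts keys alphabetically by default, so the real key
order will differ:

    command-line: [...]
    host: ...
    python: 3.6.9 (default, ...)
    summary:
      passed-count: 570
      failed-count: 1
      ...
      total-count: 571
      average-test-time: '0:00:03.514000'
      testing-time: '0:33:27.123000'
      failed:
      - minimum.exe
    outputs:
    - executable-name: minimum.exe
      executable-sha512: 02d5753f...
      execution-start: '2020-12-08T15:40:55.123456'
      execution-end: '2020-12-08T15:41:02.654321'
      execution-duration: '0:00:07.531065'
      execution-result: failed
      bsp: xilinx_zynq_a9_qemu
      bsp-arch: arm
      debugger-output: |
        ...
      console-output: |
        ...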