tfiala created this revision.
tfiala added a reviewer: clayborg.
tfiala added a subscriber: lldb-commits.
The new script runs against the output of dotest.py/dosep.py; its primary
benefit is a detailed breakdown of the number of test methods skipped
for each reported reason.
Output looks something like this:
%%%
Test Counts
---- ------
success:            637
unexpected success: 14
failure:            1
expected failure:   41
skipped:            681
%%%
%%%
Skip Reasons
---- -------
requires on of darwin, macosx, ios: 520
debugserver tests: 68
skip on linux: 42
dsym tests: 20
benchmarks tests: 13
Skip this long running test: 8
requires Darwin: 2
The 'expect' program cannot be located, skip the test: 1
requires on of windows: 1
skipping because os:None compiler: None None arch: None: 1
-data-evaluate-expression doesn't work on globals: 1
MacOSX doesn't have a default thread name: 1
due to taking too long to complete.: 1
This test is only for LLDB.framework built 64-bit and !lldb.test_remote: 1
requires one of x86-64, i386, i686: 1
%%%
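Each reason above is just the parenthesized suffix of an UNSUPPORTED line in
the dosep.py output. As a rough sketch of the extraction (the sample line
below is hypothetical, shaped only to match the pattern):
```
import re

# Same pattern the script below uses to capture a parenthesized skip reason.
skip_re = re.compile(r'^UNSUPPORTED:.+\(([^\)]+)[\)\s]*$')

# Hypothetical trace line, shaped the way the pattern expects.
line = 'UNSUPPORTED: LLDB :: TestFoo.py (requires one of x86-64, i386, i686)'
match = skip_re.match(line)
if match:
    print(match.group(1))  # -> requires one of x86-64, i386, i686
```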
Use a flow like this:
```
cd {your_lldb_source}/test
python dosep.py -s --options "-q --executable /path/to/lldb -A {your_arch} -C
{your_compiler_path}" 2>&1 | tee /tmp/test_output.log
python reports/dotest_stats.py -t /tmp/test_output.log
```
http://reviews.llvm.org/D12416
Files:
reports/dotest_stats.py
Index: reports/dotest_stats.py
===================================================================
--- reports/dotest_stats.py
+++ reports/dotest_stats.py
@@ -0,0 +1,151 @@
+"""
+Report stats on the test output from dosep.py/dotest.py,
+breaking down reported reasons for skipped tests.
+
+Here is a flow to run this report:
+cd {your_lldb_source_dir}/test
+python dosep.py -s --options "-q --executable /path/to/lldb -A {your_arch} \
+    -C {your_compiler_path}" 2>&1 | tee /tmp/test_output.log
+python {path_to_this_script} -t /tmp/test_output.log
+"""
+
+import argparse
+import os.path
+import re
+
+
+def parse_options():
+ parser = argparse.ArgumentParser(
+ description='Collect stats on lldb test run trace output dir')
+ parser.add_argument(
+ '--trace-file', '-t', action='store', required=True,
+ help='trace file to parse')
+ parser.add_argument(
+ '--verbose', '-v', action='store_true',
+ help='produce verbose output during operation')
+ return parser.parse_args()
+
+
+def validate_options(options):
+ if not os.path.isfile(options.trace_file):
+ print 'trace file "{}" does not exist'.format(options.trace_file)
+ return False
+ return True
+
+
+def process_skipped_test(options, line, match, skip_reasons):
+    if len(match.groups()) > 0:
+        key = match.group(1)
+    else:
+        # No parenthesized reason on the line; tally it rather than bail.
+        if options.verbose:
+            print "*** unspecified skip reason on line:", line
+        key = 'unspecified'
+
+ if key in skip_reasons:
+ skip_reasons[key] += 1
+ else:
+ skip_reasons[key] = 1
+
+
+def parse_trace_output(options):
+ skip_reasons = {}
+
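+    # Ordered line classifiers: the scan loop below breaks on the first
+    # regex that matches, so the UNSUPPORTED pattern that captures a
+    # reason must come before the catch-all UNSUPPORTED pattern.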
+ test_result_types = [
+ {'value_key': 'test suites', 'regex': re.compile(r'^RESULT:.+$')},
+ {'value_key': 'success', 'regex': re.compile(r'^PASS: LLDB.+$')},
+ {'value_key': 'failure', 'regex': re.compile(r'^FAIL: LLDB.+$')},
+ {'value_key': 'expected failure',
+ 'regex': re.compile(r'^XFAIL:.+$')},
+ {'value_key': 'skipped',
+ 'regex': re.compile(r'^UNSUPPORTED:.+\(([^\)]+)[\)\s]*$'),
+ 'substats_func': process_skipped_test,
+ 'substats_dict_arg': skip_reasons},
+ # Catch anything that didn't match the regex above but clearly
+ # is unsupported.
+ {'value_key': 'skipped',
+ 'regex': re.compile(r'^UNSUPPORTED:.+$'),
+ 'substats_func': process_skipped_test,
+ 'substats_dict_arg': skip_reasons},
+ {'value_key': 'unexpected success',
+ 'regex': re.compile(r'^XPASS:.+$')}
+ ]
+
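+    # Matches dosep.py's final 'Ran N test suites' summary line; the scan
+    # loop stops there so re-reported results are not double counted.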
+ early_termination_re = re.compile(r'^Ran \d+ test suites.*$')
+
+ # Initialize count values for each type.
+ counts = {}
+ for tr_type in test_result_types:
+ counts[tr_type['value_key']] = 0
+
+ with open(options.trace_file, 'r') as trace_file:
+ for line in trace_file:
+ # Early termination condition - stop after test suite
+ # counts are printed out so we don't double count fails
+ # and other reported test entries.
+ if early_termination_re.match(line):
+ break
+
+ for tr_type in test_result_types:
+ match = tr_type['regex'].match(line)
+ if match:
+ counts[tr_type['value_key']] += 1
+ if 'substats_func' in tr_type:
+ tr_type['substats_func'](
+ options, line, match, tr_type['substats_dict_arg'])
+ break
+ return (counts, skip_reasons)
+
+
+def print_counts(options, counts, skip_reasons):
+ print 'Test Counts'
+ print '---- ------'
+    # Print entries parsed directly from the trace output.
+    report_entries = [
+        {'name': 'test suites', 'unit': 'suite'},
+        {'name': 'success', 'unit': 'method'},
+        {'name': 'unexpected success', 'unit': 'method'},
+        {'name': 'failure', 'unit': 'method'},
+        {'name': 'expected failure', 'unit': 'method'},
+        {'name': 'skipped', 'unit': 'method'}
+    ]
+ max_name_len = max(
+ len(report_entry['name']) for report_entry in report_entries)
+ format_str = '{:<' + str(max_name_len + 2) + '}{}'
+
+ for report_entry in report_entries:
+ if report_entry['name'] in counts:
+ print format_str.format(
+ report_entry['name'] + ':',
+ counts[report_entry['name']])
+
+    # Print computed entries (skip the section when nothing was skipped;
+    # max() raises ValueError on an empty sequence).
+    if not skip_reasons:
+        return
+    max_skip_reason_len = max(
+        len(reason) for reason in skip_reasons.keys())
+    reason_format_str = '{:<' + str(max_skip_reason_len + 2) + '}{}'
+
+ print
+ print 'Skip Reasons'
+ print '---- -------'
+ for reason_key in sorted(skip_reasons, key=skip_reasons.get, reverse=True):
+ print reason_format_str.format(reason_key + ':', skip_reasons[reason_key])
+
+
+def main():
+ options = parse_options()
+ if not validate_options(options):
+ exit(1)
+
+ (counts, skip_reasons) = parse_trace_output(options)
+ print_counts(options, counts, skip_reasons)
+
+if __name__ == '__main__':
+    main()
_______________________________________________
lldb-commits mailing list
[email protected]
http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits