path: root/compare-results.py
author	Neels Hofmeyr <neels@hofmeyr.de>	2019-10-22 01:54:43 +0200
committer	laforge <laforge@osmocom.org>	2019-11-23 07:59:07 +0000
commit	b26196be09519fc32a8b348e02678e75f1164e6a (patch)
tree	1399d361a3efa2ecf40405aa6f6c4e5671db87d9 /compare-results.py
parent	8df6962dec80e90aedd36984ae179d706235569c (diff)
re-implement compare-results.sh as compare-results.py
The compare-results.sh script is annoyingly slow. Since our ttcn3 test containers support Python 2, re-implement it in Python for much quicker evaluation.

Change-Id: I0747c9d66ffc7e4121497a2416fca78d7b56c8e6
Diffstat (limited to 'compare-results.py')
-rwxr-xr-x	compare-results.py	137
1 file changed, 137 insertions, 0 deletions
diff --git a/compare-results.py b/compare-results.py
new file mode 100755
index 00000000..d1adb20c
--- /dev/null
+++ b/compare-results.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright 2018 sysmocom - s.f.m.c. GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import re
+
+doc = "Compare TTCN3 test run results against expected results, based on junit logs."
+
+# The nicest approach would be to use an XML library, but I don't want to introduce dependencies on the build slaves.
+re_testcase = re.compile(r'''<testcase classname=['"]([^'"]+)['"].* name=['"]([^'"]+)['"].*>''')
+re_testcase_end = re.compile(r'''(</testcase>|<testcase [^>]*/>)''')
+re_failure = re.compile(r'''(<failure\b|<error\b)''')
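+# Illustrative (hypothetical, not taken from an actual run) junit lines these
+# regexes are meant to match:
+#   <testcase classname='MODULE_Tests' name='TC_example' time='1.0'>
+#     <failure type='fail-verdict'>...</failure>
+#   </testcase>
+# or, for a test that passed, a self-closing element like:
+#   <testcase classname='MODULE_Tests' name='TC_other' time='0.5' />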
+
+RESULT_PASS = 'pass'
+RESULT_FAIL = 'pass->FAIL'
+RESULT_SKIP = 'skip'
+RESULT_XFAIL = 'xfail'
+RESULT_FIXED = 'xfail->PASS'
+RESULT_NEW_PASS = 'NEW: PASS'
+RESULT_NEW_FAIL = 'NEW: FAIL'
+
+RESULTS = (
+ RESULT_FAIL,
+ RESULT_NEW_FAIL,
+ RESULT_XFAIL,
+ RESULT_FIXED,
+ RESULT_PASS,
+ RESULT_NEW_PASS,
+ RESULT_SKIP,
+ )
+
+def count(counter, name, result):
+ v = counter.get(result) or 0
+ v += 1
+ counter[result] = v
+ if result != RESULT_SKIP:
+ print('%s %s' % (result, name))
+
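+# Categorize a single test: 'expect' is its result from the expected-results
+# file (or None if it is a new test), 'result' its result from the current run
+# (or None if it did not run this time and is therefore counted as skipped).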
+def compare_one(name, expect, result, counter):
+ if result is None:
+ count(counter, name, RESULT_SKIP)
+ elif result == RESULT_PASS:
+ if expect == RESULT_PASS:
+ count(counter, name, RESULT_PASS)
+ elif expect == RESULT_FAIL:
+ count(counter, name, RESULT_FIXED)
+ elif expect is None:
+ count(counter, name, RESULT_NEW_PASS)
+ elif result == RESULT_FAIL:
+ if expect == RESULT_PASS:
+ count(counter, name, RESULT_FAIL)
+ elif expect == RESULT_FAIL:
+ count(counter, name, RESULT_XFAIL)
+ elif expect is None:
+ count(counter, name, RESULT_NEW_FAIL)
+
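+# Walk the expected results in order and categorize each against the current
+# run, then count tests that only appear in the current run, and print a
+# per-category summary.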
+def compare(cmdline, f_expected, f_current):
+ expected_list = parse_results(f_expected)
+ current_list = parse_results(f_current)
+
+ expected_dict = dict(expected_list)
+ current_dict = dict(current_list)
+
+ counter = {}
+
+ for expected_name, expected_result in expected_list:
+ compare_one(expected_name, expected_result, current_dict.get(expected_name), counter)
+
+ # Also count new tests
+ for current_name, current_result in current_list:
+ if current_name in expected_dict:
+ continue
+ compare_one(current_name, None, current_result, counter)
+
+
+ print('\nSummary:')
+ for r in RESULTS:
+ v = counter.get(r)
+ if not v:
+ continue
+ print(' %s: %d' % (r, v))
+ print('\n')
+
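+# Scan a junit XML file and return a list of (name, result) tuples, where
+# name is 'classname.testname' and result is RESULT_PASS or RESULT_FAIL.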
+def parse_results(f):
+ tests = []
+ name = None
+ result = None
+ for line in f:
+ m = re_testcase.search(line)
+ if m:
+ class_name, test_name = m.groups()
+ name = '%s.%s' % (class_name, test_name)
+
+ m = re_failure.search(line)
+ if m:
+ result = RESULT_FAIL
+
+ m = re_testcase_end.search(line)
+ if m:
+ if not name:
+ continue
+ if result is None:
+ result = RESULT_PASS
+ tests.append((name, result))
+
+ name = None
+ result = None
+
+ return tests
+
+def main(cmdline):
+ with open(cmdline.expected_results, 'r') as f_expected:
+ with open(cmdline.current_results, 'r') as f_current:
+ print('\nComparing expected results %r against results in %r\n--------------------'
+ % (cmdline.expected_results, cmdline.current_results))
+ compare(cmdline, f_expected, f_current)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description=doc)
+ parser.add_argument('expected_results', metavar='expected.junit-xml',
+ help='junit XML file listing the expected test results.')
+ parser.add_argument('current_results', metavar='current.junit-xml',
+ help='junit XML file listing the current test results.')
+
+ cmdline = parser.parse_args()
+ main(cmdline)