author     Gerald Combs <gerald@wireshark.org>  2021-04-27 11:06:55 -0700
committer  Wireshark GitLab Utility <gerald+gitlab-utility@wireshark.org>  2021-10-01 23:40:17 +0000
commit     ce22d958a86de5a75239e3cb480ff50e37f92553 (patch)
tree       248345789c4ce4298e62526c130736b947234ebd /test
parent     436dd46720052b0077349d597c9399b7f38c224f (diff)
Test: Add external tests.
Add test/suite_external.py, which can dynamically generate tests from a configuration file. This is intended primarily to support happy-shark, but it should also make it easy to add simple TShark tests elsewhere. The configuration file must currently be JSON, as described in the Developer's Guide.
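For example, using the new `-x`/`--add-external-tests` flag added below, a run that picks up one external configuration might look like `python3 test/test.py -p /path/to/build/run -x /path/to/happy_shark.json` (both paths are hypothetical).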
Diffstat (limited to 'test')
-rw-r--r--  test/suite_external.py | 168
-rwxr-xr-x  test/test.py           |   5
2 files changed, 173 insertions, 0 deletions
diff --git a/test/suite_external.py b/test/suite_external.py
new file mode 100644
index 0000000000..849b9e7e7d
--- /dev/null
+++ b/test/suite_external.py
@@ -0,0 +1,168 @@
+#
+# Externally configured Wireshark tests
+# By Gerald Combs <gerald@wireshark.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''
+External tests
+
+This module reads one or more test case configuration files specified using `add_external_configs()`.
+It creates a test case for each file, and one or more tests within that case, as specified.
+
+Configuration files are JSON-formatted and must have the following structure:
+
+ {
+ "case_name": "happy_shark",
+ "tests": [
+ {
+ "test_name": "dns",
+ "tshark_args": [ "-r", "${case_dir}/tests/dns-1/dns.pcapng",
+ "-Y", "dns", "-T", "fields", "-e", "dns.qry.name"
+ ],
+ "requirements": [
+ [ "count", "in.m.yahoo.com", 1 ],
+ [ "grep", "in.m.yahoo.com" ],
+ [ "!grep", "in.m.notyahoo.com" ],
+ [ "in", "in.m.yahoo.com", 0 ],
+ [ "!in", "in.m.notyahoo.com", 0 ]
+ ]
+ }
+ ]
+ }
+
+`${case_dir}` will be replaced with the path of the directory that contains the configuration file.
+
+"requirements" is a list of search or count requirements.
+
+Requirements can have one of the following formats:
+
+    Requirement                           Python test API equivalent
+    [ "count", "<pattern>", <count> ]     assertEqual(countOutput('<pattern>'), <count>)
+    [ "grep", "<pattern>" ]               assertTrue(grepOutput('<pattern>'))
+    [ "!grep", "<pattern>" ]              assertFalse(grepOutput('<pattern>'))
+    [ "in", "<pattern>", <line> ]         assertIn('<pattern>', lines[<line>])
+    [ "!in", "<pattern>", <line> ]        assertNotIn('<pattern>', lines[<line>])
+'''
+
+# To do:
+# - Add JSON matching so that we can migrate group_asterisk to happy_shark.
+
+import fixtures
+import json
+import os.path
+import subprocesstest
+import traceback
+import unittest
+
+external_configs = []
+debug = True
+
+
+def add_external_configs(configs):
+ if configs:
+ external_configs.extend(configs)
+
+
+def make_tshark_test(tshark_args, requirements):
+ '''TShark test function generator'''
+ def tshark_test(self, cmd_tshark, features):
+
+ proc = self.assertRun((cmd_tshark, *tshark_args))
+
+ for requirement in requirements:
+ negated = False
+ try:
+ if requirement[0].startswith('!'):
+ negated = True
+ except IndexError:
+ self.fail('Test type missing.')
+
+ try:
+ pattern = requirement[1]
+ except IndexError:
+ self.fail('Search pattern missing.')
+
+ if requirement[0] == 'count':
+ try:
+ required_count = requirement[2]
+ except IndexError:
+ self.fail('"count" requires a count argument.')
+ self.assertEqual(self.countOutput(pattern), required_count)
+ elif requirement[0].endswith('grep'):
+ if negated:
+ self.assertFalse(self.grepOutput(pattern))
+ else:
+ self.assertTrue(self.grepOutput(pattern))
+ elif requirement[0].endswith('in'):
+ try:
+ stdout_line = proc.stdout_str.splitlines()[requirement[2]]
+ except IndexError:
+                    self.fail('"in" requires a valid line number within the output (starting from zero).')
+ if negated:
+ self.assertNotIn(pattern, stdout_line)
+ else:
+ self.assertIn(pattern, stdout_line)
+ else:
+ self.fail('Unrecognized operation "{}"'.format(requirement[0]))
+
+ return tshark_test
+
+
+def load_tests(loader, standard_tests, pattern):
+    '''Create our cases and suites. Run by unittest.defaultTestLoader.discover'''
+    external_suite = unittest.TestSuite()
+ for config_file in external_configs:
+ try:
+ with open(config_file, 'r') as cf:
+ config_str = cf.read()
+ config_str = config_str.replace('${case_dir}', os.path.dirname(config_file))
+ config = json.loads(config_str)
+        except (OSError, json.JSONDecodeError) as e:
+ print('Error reading {}: {}'.format(config_file, e))
+ continue
+
+ try:
+ case_name = 'case_{}'.format(config['case_name'])
+ except KeyError:
+ print('Error reading {}: case_name not present'.format(config_file))
+ continue
+
+ case_tests = dict()
+ try:
+ # Create 'test_...' functions to match our configuration.
+ test_num = 1
+ for test_attrs in config['tests']:
+ try:
+ test_name = 'test_{}'.format(test_attrs['test_name'])
+ except KeyError:
+                    print('{}: No test name for test {}'.format(config_file, test_num))
+ continue
+
+ try:
+ requirements = test_attrs['requirements']
+ if not isinstance(requirements, list):
+ raise TypeError
+                except (KeyError, TypeError):
+                    print('{}: Missing or malformed requirements for test {}'.format(config_file, test_num))
+ continue
+
+ tshark_test = make_tshark_test(test_attrs['tshark_args'], requirements)
+ setattr(tshark_test, '__name__', test_name)
+ case_tests[test_name] = tshark_test
+ test_num += 1
+            # Create a SubprocessTestCase named 'case_...' and add our 'test_...' functions.
+ case_class = type(case_name, (subprocesstest.SubprocessTestCase,), case_tests)
+ # Apply @fixtures.mark_usefixtures('test_env') and @fixtures.uses_fixtures
+ case_class = fixtures.mark_usefixtures('test_env')(case_class)
+ case_class = fixtures.uses_fixtures(case_class)
+ globals()[case_name] = case_class
+            # Hand each generated class over to unittest.defaultTestLoader and collect its tests.
+            external_suite.addTests(loader.loadTestsFromTestCase(case_class))
+ except KeyError:
+ print('{}: Missing or malformed tests'.format(config_file))
+ except:
+ if debug:
+ print(traceback.format_exc())
+ raise
+    return external_suite
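As a rough, self-contained illustration (not part of the commit) of how the requirement tuples documented above map onto plain string checks, the sketch below assumes that `countOutput()` and `grepOutput()` perform per-line regex matching, which may differ from the real subprocesstest helpers:

    import re

    def check_requirement(requirement, output):
        '''Return True if a single requirement tuple holds for the captured output.'''
        op, pattern = requirement[0], requirement[1]
        lines = output.splitlines()
        if op == 'count':
            # [ "count", "<pattern>", <count> ]: number of matching lines
            return sum(bool(re.search(pattern, line)) for line in lines) == requirement[2]
        if op.endswith('grep'):
            # [ "grep", "<pattern>" ] / [ "!grep", "<pattern>" ]
            found = any(re.search(pattern, line) for line in lines)
            return found != op.startswith('!')
        if op.endswith('in'):
            # [ "in", "<pattern>", <line> ] / [ "!in", "<pattern>", <line> ]
            present = pattern in lines[requirement[2]]
            return present != op.startswith('!')
        raise ValueError('Unrecognized operation "{}"'.format(op))

For example, `check_requirement(["!grep", "in.m.notyahoo.com"], output)` should return True for the dns capture described in the docstring.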
diff --git a/test/test.py b/test/test.py
index a21ab615a9..b04eda71d1 100755
--- a/test/test.py
+++ b/test/test.py
@@ -17,6 +17,7 @@
 import argparse
 import codecs
 import os.path
+import suite_external
 import sys
 import unittest
 import fixtures
@@ -43,6 +44,7 @@ def main():
     release_group = parser.add_mutually_exclusive_group()
     release_group.add_argument('--enable-release', action='store_true', help='Enable release tests')
     parser.add_argument('-p', '--program-path', default=os.path.curdir, help='Path to Wireshark executables.')
+    parser.add_argument('-x', '--add-external-tests', action='append', help='Path to an external test definition (.json) file.')
     parser.add_argument('--skip-missing-programs',
                         help='Skip tests that lack programs from this list instead of failing'
                         ' them. Use "all" to ignore all missing programs.')
@@ -55,6 +57,9 @@ def main():
     parser.add_argument('tests_to_run', nargs='*', metavar='test', default=['all'], help='Tests to run. One of "all" or a full or partial test name. Default is "all".')
     args = parser.parse_args()
 
+    # XXX This should be a fixture.
+    suite_external.add_external_configs(args.add_external_tests)
+
     all_tests = unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='suite_*')
     all_ids = []
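Once discovered, the generated tests behave like any other suite_* tests: given the example configuration in the module docstring, a run such as `python3 test/test.py -x happy_shark.json case_happy_shark` (invocation hypothetical; the exact test id depends on the loader) should select only the externally defined case.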