author    Gerald Combs <gerald@wireshark.org>  2021-04-27 11:06:55 -0700
committer Wireshark GitLab Utility <gerald+gitlab-utility@wireshark.org>  2021-10-01 23:40:17 +0000
commit    ce22d958a86de5a75239e3cb480ff50e37f92553
tree      248345789c4ce4298e62526c130736b947234ebd
parent    436dd46720052b0077349d597c9399b7f38c224f
Test: Add external tests.
Add test/suite_external.py, which can dynamically generate tests from a configuration file. This is intended to make happy-shark useful, but it should also make it easy to add simple TShark tests elsewhere. The configuration file format must currently be JSON, as described in the Developer's Guide.
docbook/wsdg_src/WSDG_chapter_tests.adoc | 81
test/suite_external.py                   | 168
test/test.py                             | 5
3 files changed, 253 insertions(+), 1 deletion(-)
diff --git a/docbook/wsdg_src/WSDG_chapter_tests.adoc b/docbook/wsdg_src/WSDG_chapter_tests.adoc
index 4d0542f827..b6bee042be 100644
--- a/docbook/wsdg_src/WSDG_chapter_tests.adoc
+++ b/docbook/wsdg_src/WSDG_chapter_tests.adoc
@@ -234,7 +234,7 @@ test failures since the `SubprocessTestCase.tearDown` method is not
executed. This limitation might be addressed in the future.
[[ChTestsDevelop]]
-=== Adding Or Modifying Tests
+=== Adding Or Modifying Built-In Tests
Tests must be in a Python module whose name matches “suite_*.py”. The
module must contain one or more subclasses of “SubprocessTestCase” or
@@ -301,3 +301,82 @@ Tests can be run in parallel. This means that any files you create must
be unique for each test. “subprocesstest.filename_from_id” can be used
to generate a filename based on the current test name. It also ensures
that the file will be automatically removed after the test has run.
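+
+For example, a minimal suite module might look like the following sketch,
+modeled on the generated tests in `test/suite_external.py` (the details are
+illustrative rather than prescriptive):
+
+[source,python]
+----
+import fixtures
+import subprocesstest
+
+
+@fixtures.mark_usefixtures('test_env')
+@fixtures.uses_fixtures
+class case_minimal_example(subprocesstest.SubprocessTestCase):
+    def test_tshark_version(self, cmd_tshark):
+        # Run TShark and make sure its version banner is in the output.
+        self.assertRun((cmd_tshark, '--version'))
+        self.assertTrue(self.grepOutput('TShark'))
+----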
+
+[[ChTestsExternal]]
+=== Adding Or Modifying External Tests
+
+You can test the dissection of capture files outside the Wireshark source code repository by using the external test generator, which creates tests from a JSON configuration file.
+The file must have the following format:
+
+[source]
+----
+{
+ "case_name": "<test case name>",
+ "tests": [
+ {
+ "test_name": "<test name>",
+ "tshark_args": [ <tshark argument array> ],
+ "requirements": [ <one or more requirements> ]
+ }
+ ]
+}
+----
+
+`tshark_args` elements can use `${case_dir}`, which expands to the directory that contains the JSON configuration file.
+For example, if the configuration file is `/home/user/wireshark-tests/wireshark-tests.json`, then `${case_dir}/tests/dns-1/dns.pcapng` expands to `/home/user/wireshark-tests/tests/dns-1/dns.pcapng`.
+
+`requirements` is a list of one or more of the following:
+
+`[ "count", "<pattern>", <count> ]`::
+Require `count` occurrences of `pattern` in the dissection output.
+Equivalent to `assertEqual(countOutput('<pattern>'), <count>)`.
+
+`[ "grep", "<pattern>" ]`::
+Dissection output must contain `pattern`.
+Equivalent to `assertTrue(grepOutput('<pattern>'))`.
+
+`[ "!grep", "<pattern>" ]`::
+Dissection output must _not_ contain `pattern`.
+Equivalent to `assertFalse(grepOutput('<pattern>'))`.
+
+`[ "in", "<string>", <line> ]`::
+Zero-indexed line `line` of the dissection output must contain `string`.
+Equivalent to `assertIn('<string>', lines[<line>])`.
+
+`[ "!in", "<string>", <line> ]`::
+Zero-indexed line `line` of the dissection output must _not_ contain `string`.
+Equivalent to `assertNotIn('<string>', lines[<line>])`.
+
+Patterns can be any valid Python regular expression.
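+
+The pattern checks roughly correspond to the sketch below. The helper names
+here are hypothetical and shown only to illustrate the matching semantics;
+the real implementations live in `test/subprocesstest.py`.
+
+[source,python]
+----
+import re
+
+def grep_output(pattern, output):
+    # "grep" / "!grep": does the regular expression match anywhere?
+    return re.search(pattern, output) is not None
+
+def count_output(pattern, output):
+    # "count": the number of non-overlapping matches.
+    return len(re.findall(pattern, output))
+----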
+
+The example below defines a single test case, named “external_example”.
+The case has a single test named “dns”, which runs TShark on `tests/dns-1/dns.pcapng`, relative to the JSON configuration file.
+
+[source,json]
+----
+{
+ "case_name": "external_example",
+ "tests": [
+ {
+ "test_name": "dns",
+ "tshark_args": [ "-r", "${case_dir}/tests/dns-1/dns.pcapng",
+ "-Y", "dns", "-T", "fields", "-e", "dns.qry.name"
+ ],
+ "requirements": [
+ [ "count", "in.m.yahoo.com", 1 ],
+ [ "grep", "in.m.yahoo.com" ],
+ [ "!grep", "in.m.notyahoo.com" ],
+ [ "in", "in.m.yahoo.com", 0 ],
+ [ "!in", "in.m.notyahoo.com", 0 ]
+ ]
+ }
+ ]
+}
+----
+
+You can specify external tests using the `test.py --add-external-tests` option.
+For example, if the JSON file above is named `wireshark-tests.json`, you can list its tests by running the following:
+
+[source,sh]
+----
+$ ./test/test.py -p ./build/run --add-external-tests /path/to/wireshark-tests.json --list external
+suite_external.case_external_example.test_dns
+----
diff --git a/test/suite_external.py b/test/suite_external.py
new file mode 100644
index 0000000000..849b9e7e7d
--- /dev/null
+++ b/test/suite_external.py
@@ -0,0 +1,168 @@
+#
+# Externally configured Wireshark tests
+# By Gerald Combs <gerald@wireshark.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+'''
+External tests
+
+This module reads one or more test case configuration files specified using `add_external_configs()`.
+It creates a test case for each file and one or more tests as specified.
+
+Configuration files are JSON-formatted and must have the following structure:
+
+ {
+ "case_name": "happy_shark",
+ "tests": [
+ {
+ "test_name": "dns",
+ "tshark_args": [ "-r", "${case_dir}/tests/dns-1/dns.pcapng",
+ "-Y", "dns", "-T", "fields", "-e", "dns.qry.name"
+ ],
+ "requirements": [
+ [ "count", "in.m.yahoo.com", 1 ],
+ [ "grep", "in.m.yahoo.com" ],
+ [ "!grep", "in.m.notyahoo.com" ],
+ [ "in", "in.m.yahoo.com", 0 ],
+ [ "!in", "in.m.notyahoo.com", 0 ]
+ ]
+ }
+ ]
+ }
+
+`${case_dir}` will be replaced by the path to the configuration file.
+
+"requirements" is a list of search or count requirements.
+
+Requirements can have one of the following formats:
+
+ Requirement Python test API equivalent
+ [ "count", "<pattern>", <count> ] assertEqual(countOutput('<pattern'), <count>)
+ [ "grep", "<pattern>" ] assertTrue(grepOutput('<pattern>'))
+ [ "!grep", "<pattern>" ] assertFalse(grepOutput('<pattern>'))
+ [ "in", "<pattern>", <line> ] assertIn('<pattern>', lines[<line>])
+ [ "!in", "<pattern>", <line> ] assertNotIn('<pattern>', lines[<line>])
+'''
+
+# To do:
+# - Add JSON matching so that we can migrate group_asterisk to happy_shark.
+
+import fixtures
+import json
+import os.path
+import subprocesstest
+import traceback
+import unittest
+
+external_configs = []
+debug = True
+
+
+def add_external_configs(configs):
+ if configs:
+ external_configs.extend(configs)
+
+
+def make_tshark_test(tshark_args, requirements):
+ '''TShark test function generator'''
+ def tshark_test(self, cmd_tshark, features):
+
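+        # Run TShark once with the configured arguments; each requirement
+        # below is checked against the output of this single run.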
+ proc = self.assertRun((cmd_tshark, *tshark_args))
+
+ for requirement in requirements:
+ negated = False
+ try:
+ if requirement[0].startswith('!'):
+ negated = True
+ except IndexError:
+ self.fail('Test type missing.')
+
+ try:
+ pattern = requirement[1]
+ except IndexError:
+ self.fail('Search pattern missing.')
+
+ if requirement[0] == 'count':
+ try:
+ required_count = requirement[2]
+ except IndexError:
+ self.fail('"count" requires a count argument.')
+ self.assertEqual(self.countOutput(pattern), required_count)
+ elif requirement[0].endswith('grep'):
+ if negated:
+ self.assertFalse(self.grepOutput(pattern))
+ else:
+ self.assertTrue(self.grepOutput(pattern))
+ elif requirement[0].endswith('in'):
+ try:
+ stdout_line = proc.stdout_str.splitlines()[requirement[2]]
+ except IndexError:
+                    self.fail('"in" requires an in-range line number (starting from zero).')
+ if negated:
+ self.assertNotIn(pattern, stdout_line)
+ else:
+ self.assertIn(pattern, stdout_line)
+ else:
+ self.fail('Unrecognized operation "{}"'.format(requirement[0]))
+
+ return tshark_test
+
+
+def load_tests(loader, standard_tests, pattern):
+ '''Create our cases and suites. Run by unittest.defaultTestLoader.discover'''
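+    # Each configuration file becomes one 'case_...' test case class, and
+    # each entry in its "tests" list becomes one 'test_...' method on it.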
+    external_suite = unittest.TestSuite()
+
+    for config_file in external_configs:
+ try:
+ with open(config_file, 'r') as cf:
+ config_str = cf.read()
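+            # Expand ${case_dir} to the directory that contains this
+            # configuration file before parsing the JSON.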
+ config_str = config_str.replace('${case_dir}', os.path.dirname(config_file))
+ config = json.loads(config_str)
+        except (OSError, ValueError) as e:  # File access errors or malformed JSON.
+ print('Error reading {}: {}'.format(config_file, e))
+ continue
+
+ try:
+ case_name = 'case_{}'.format(config['case_name'])
+ except KeyError:
+ print('Error reading {}: case_name not present'.format(config_file))
+ continue
+
+ case_tests = dict()
+ try:
+ # Create 'test_...' functions to match our configuration.
+ test_num = 1
+ for test_attrs in config['tests']:
+ try:
+ test_name = 'test_{}'.format(test_attrs['test_name'])
+ except KeyError:
+ print('{}: No test name for test {} '.format(config_file, test_num))
+ continue
+
+ try:
+ requirements = test_attrs['requirements']
+ if not isinstance(requirements, list):
+ raise TypeError
+                except (KeyError, TypeError):
+ print('{}: Missing or malformed requirements for test {} '.format(config_file, test_num))
+ continue
+
+ tshark_test = make_tshark_test(test_attrs['tshark_args'], requirements)
+ setattr(tshark_test, '__name__', test_name)
+ case_tests[test_name] = tshark_test
+ test_num += 1
+            # Create a SubprocessTestCase named 'case_...' and add our 'test_...' functions.
+ case_class = type(case_name, (subprocesstest.SubprocessTestCase,), case_tests)
+ # Apply @fixtures.mark_usefixtures('test_env') and @fixtures.uses_fixtures
+ case_class = fixtures.mark_usefixtures('test_env')(case_class)
+ case_class = fixtures.uses_fixtures(case_class)
+ globals()[case_name] = case_class
+            # Hand our generated class over to unittest's loader, accumulating
+            # the tests so that every configuration file contributes its case.
+            external_suite.addTests(loader.loadTestsFromTestCase(case_class))
+ except KeyError:
+ print('{}: Missing or malformed tests'.format(config_file))
+        except Exception:
+ if debug:
+ print(traceback.format_exc())
+ raise
+    return external_suite
diff --git a/test/test.py b/test/test.py
index a21ab615a9..b04eda71d1 100755
--- a/test/test.py
+++ b/test/test.py
@@ -17,6 +17,7 @@
import argparse
import codecs
import os.path
+import suite_external
import sys
import unittest
import fixtures
@@ -43,6 +44,7 @@ def main():
release_group = parser.add_mutually_exclusive_group()
release_group.add_argument('--enable-release', action='store_true', help='Enable release tests')
parser.add_argument('-p', '--program-path', default=os.path.curdir, help='Path to Wireshark executables.')
+ parser.add_argument('-x', '--add-external-tests', action='append', help='Path to an external test definition (.json) file.')
parser.add_argument('--skip-missing-programs',
help='Skip tests that lack programs from this list instead of failing'
' them. Use "all" to ignore all missing programs.')
@@ -55,6 +57,9 @@ def main():
parser.add_argument('tests_to_run', nargs='*', metavar='test', default=['all'], help='Tests to run. One of "all" or a full or partial test name. Default is "all".')
args = parser.parse_args()
+ # XXX This should be a fixture.
+ suite_external.add_external_configs(args.add_external_tests)
+
all_tests = unittest.defaultTestLoader.discover(os.path.dirname(__file__), pattern='suite_*')
all_ids = []