The (K)TAP spec encourages test output to begin with a 'test plan': a count of the number of tests being run, of the form '1..n'.
However, some test suites might not know the number of subtests in advance (for example, KUnit's parameterised tests use a generator function). In this case, it's not possible to print the test plan in advance.
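As an illustration (a minimal sketch, not part of this patch; the struct, values, and names are hypothetical), a parameterised KUnit test hands its cases to the framework one at a time via a generator, so the total number of subtests is only discovered once the generator returns NULL:

#include <kunit/test.h>

struct example_param { int value; };	/* hypothetical parameter type */

static const struct example_param example_params[] = { {1}, {2}, {3} };

/* Returns the next parameter, or NULL once all have been produced. */
static const void *example_gen_params(const void *prev, char *desc)
{
	const struct example_param *p = prev;

	p = p ? p + 1 : example_params;	/* first call passes prev == NULL */
	if (p >= example_params + ARRAY_SIZE(example_params))
		return NULL;		/* generator exhausted */
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value=%d", p->value);
	return p;
}

static void example_test(struct kunit *test)
{
	const struct example_param *p = test->param_value;

	KUNIT_EXPECT_TRUE(test, p->value > 0);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE_PARAM(example_test, example_gen_params),
	{}
};

(The KUNIT_ARRAY_PARAM() helper generates a body much like example_gen_params() for array-backed parameters.) Nothing in this interface exposes a count up front, so the runner can't emit '1..n' before the subtests.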
kunit_tool already parses test output which doesn't contain a plan, but reports an error. Since we want to use nested subtests with KUnit parameterised tests, remove this error.
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
---
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-1-davidgow@go...
- No code changes.
- Added Daniel's Reviewed-by.
 tools/testing/kunit/kunit_parser.py    | 5 ++---
 tools/testing/kunit/kunit_tool_test.py | 5 ++++-
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 3355196d0515..50ded55c168c 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -340,8 +340,8 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	"""
 	Parses test plan line and stores the expected number of subtests in
 	test object. Reports an error if expected count is 0.
-	Returns False and reports missing test plan error if fails to parse
-	test plan.
+	Returns False and sets expected_count to None if there is no valid test
+	plan.
 
 	Accepted format:
 	- '1..[number of subtests]'
@@ -356,7 +356,6 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	match = TEST_PLAN.match(lines.peek())
 	if not match:
 		test.expected_count = None
-		test.add_error('missing plan line!')
 		return False
 	test.log.append(lines.pop())
 	expected_count = int(match.group(1))
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 9c4126731457..bc8793145713 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -191,7 +191,10 @@ class KUnitParserTest(unittest.TestCase):
 		result = kunit_parser.parse_run_tests(
 			kunit_parser.extract_tap_lines(
 				file.readlines()))
-		self.assertEqual(2, result.test.counts.errors)
+		# A missing test plan is not an error.
+		self.assertEqual(0, result.test.counts.errors)
+		# All tests should be accounted for.
+		self.assertEqual(10, result.test.counts.total())
 		self.assertEqual(
 			kunit_parser.TestStatus.SUCCESS,
 			result.status)
It's possible for a test to have a subtest header, but zero valid subtests. We used to error on this if the test plan had no subtests listed, but it's possible to have subtests without a test plan (indeed, this is how parameterised tests work).
Tests with 0 subtests now have the result NO_TESTS, and will report an error (which does not halt test execution, but is printed in a scary red colour and is noted in the results summary).
Signed-off-by: David Gow <davidgow@google.com>
---
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-2-davidgow@go...
- Report NO_TESTS as '[NO TESTS RUN]' in yellow, instead of '[FAILED]' in red, particularly since it doesn't get counted as a failure.
 tools/testing/kunit/kunit_parser.py           | 16 +++++++++++-----
 tools/testing/kunit/kunit_tool_test.py        |  9 +++++++++
 .../test_is_test_passed-no_tests_no_plan.log  |  7 +++++++
 3 files changed, 27 insertions(+), 5 deletions(-)
 create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 50ded55c168c..68c847e8ca58 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -360,9 +360,6 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	test.log.append(lines.pop())
 	expected_count = int(match.group(1))
 	test.expected_count = expected_count
-	if expected_count == 0:
-		test.status = TestStatus.NO_TESTS
-		test.add_error('0 tests run!')
 	return True
 
 TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
@@ -589,6 +586,8 @@ def format_test_result(test: Test) -> str:
 		return (green('[PASSED] ') + test.name)
 	elif test.status == TestStatus.SKIPPED:
 		return (yellow('[SKIPPED] ') + test.name)
+	elif test.status == TestStatus.NO_TESTS:
+		return (yellow('[NO TESTS RUN] ') + test.name)
 	elif test.status == TestStatus.TEST_CRASHED:
 		print_log(test.log)
 		return (red('[CRASHED] ') + test.name)
@@ -731,6 +730,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 		# test plan
 		test.name = "main"
 		parse_test_plan(lines, test)
+		parent_test = True
 	else:
 		# If KTAP/TAP header is not found, test must be subtest
 		# header or test result line so parse attempt to parser
@@ -744,7 +744,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 	expected_count = test.expected_count
 	subtests = []
 	test_num = 1
-	while expected_count is None or test_num <= expected_count:
+	while parent_test and (expected_count is None or test_num <= expected_count):
 		# Loop to parse any subtests.
 		# Break after parsing expected number of tests or
 		# if expected number of tests is unknown break when test
@@ -779,9 +779,15 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 			parse_test_result(lines, test, expected_num)
 		else:
 			test.add_error('missing subtest result line!')
+
+	# Check for there being no tests
+	if parent_test and len(subtests) == 0:
+		test.status = TestStatus.NO_TESTS
+		test.add_error('0 tests run!')
+
 	# Add statuses to TestCounts attribute in Test object
 	bubble_up_test_results(test)
-	if parent_test:
+	if parent_test and not main:
 		# If test has subtests and is not the main test object, print
 		# footer.
 		print_test_footer(test)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index bc8793145713..c59fe0777387 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -208,6 +208,15 @@ class KUnitParserTest(unittest.TestCase):
 		self.assertEqual(
 			kunit_parser.TestStatus.NO_TESTS,
 			result.status)
+		no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+		with open(no_plan_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		self.assertEqual(0, len(result.test.subtests[0].subtests[0].subtests))
+		self.assertEqual(
+			kunit_parser.TestStatus.NO_TESTS,
+			result.test.subtests[0].subtests[0].status)
+
 
 	def test_no_kunit_output(self):
 		crash_log = test_data_path('test_insufficient_memory.log')
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
new file mode 100644
index 000000000000..dd873c981108
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -0,0 +1,7 @@
+TAP version 14
+1..1
+    # Subtest: suite
+    1..1
+        # Subtest: case
+        ok 1 - case # SKIP
+ok 1 - suite
On Wed, Oct 27, 2021 at 11:42 PM David Gow <davidgow@google.com> wrote:
It's possible for a test to have a subtest header, but zero valid subtests. We used to error on this if the test plan had no subtests listed, but it's possible to have subtests without a test plan (indeed, this is how parameterised tests work).
Tests with 0 subtests now have the result NO_TESTS, and will report an error (which does not halt test execution, but is printed in a scary red colour and is noted in the results summary).
Tested by tweaking ext4 tests (and running with patch 3)
[15:04:33] =============== ext4_inode_test (1 subtest) ================
[15:04:33] ============== inode_test_xtimestamp_decoding ==============
[15:04:33] [ERROR] Test inode_test_xtimestamp_decoding: 0 tests run!
[15:04:33] ====== [NO TESTS RUN] inode_test_xtimestamp_decoding =======
[15:04:33] ================ [SKIPPED] ext4_inode_test =================
[15:04:33] ============================================================
[15:04:33] Testing complete. Passed: 0, Failed: 0, Crashed: 0, Skipped: 1, Errors: 1
[15:04:33] Elapsed time: 48.581s total, 0.000s configuring, 45.486s building, 2.992s running
It's maybe a bit confusing to have ERROR, NO TESTS RUN, and SKIPPED all printed for the same thing.
An alternative would be to drop the error, giving:

[15:04:33] =============== ext4_inode_test (1 subtest) ================
[15:04:33] ============== inode_test_xtimestamp_decoding ==============
[15:04:33] ====== [NO TESTS RUN] inode_test_xtimestamp_decoding =======
[15:04:33] ================ [SKIPPED] ext4_inode_test =================
[15:04:33] ============================================================
But looking at it, I think I prefer the more explicit ERROR being there.
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
A few optional nits below.
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-2-davidgow@go...
- Report NO_TESTS as '[NO TESTS RUN]' in yellow, instead of '[FAILED]' in red, particularly since it doesn't get counted as a failure.
 tools/testing/kunit/kunit_parser.py           | 16 +++++++++++-----
 tools/testing/kunit/kunit_tool_test.py        |  9 +++++++++
 .../test_is_test_passed-no_tests_no_plan.log  |  7 +++++++
 3 files changed, 27 insertions(+), 5 deletions(-)
 create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 50ded55c168c..68c847e8ca58 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -360,9 +360,6 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	test.log.append(lines.pop())
 	expected_count = int(match.group(1))
 	test.expected_count = expected_count
-	if expected_count == 0:
-		test.status = TestStatus.NO_TESTS
-		test.add_error('0 tests run!')
 	return True
 TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
@@ -589,6 +586,8 @@ def format_test_result(test: Test) -> str:
 		return (green('[PASSED] ') + test.name)
 	elif test.status == TestStatus.SKIPPED:
 		return (yellow('[SKIPPED] ') + test.name)
+	elif test.status == TestStatus.NO_TESTS:
+		return (yellow('[NO TESTS RUN] ') + test.name)
 	elif test.status == TestStatus.TEST_CRASHED:
 		print_log(test.log)
 		return (red('[CRASHED] ') + test.name)
@@ -731,6 +730,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 		# test plan
 		test.name = "main"
 		parse_test_plan(lines, test)
+		parent_test = True
 	else:
 		# If KTAP/TAP header is not found, test must be subtest
 		# header or test result line so parse attempt to parser
@@ -744,7 +744,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 	expected_count = test.expected_count
 	subtests = []
 	test_num = 1
-	while expected_count is None or test_num <= expected_count:
+	while parent_test and (expected_count is None or test_num <= expected_count):
 		# Loop to parse any subtests.
 		# Break after parsing expected number of tests or
 		# if expected number of tests is unknown break when test
@@ -779,9 +779,15 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 			parse_test_result(lines, test, expected_num)
 		else:
 			test.add_error('missing subtest result line!')
+
+	# Check for there being no tests
+	if parent_test and len(subtests) == 0:
+		test.status = TestStatus.NO_TESTS
+		test.add_error('0 tests run!')
+
 	# Add statuses to TestCounts attribute in Test object
 	bubble_up_test_results(test)
-	if parent_test:
+	if parent_test and not main:
 		# If test has subtests and is not the main test object, print
 		# footer.
 		print_test_footer(test)
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index bc8793145713..c59fe0777387 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -208,6 +208,15 @@ class KUnitParserTest(unittest.TestCase):
 		self.assertEqual(
 			kunit_parser.TestStatus.NO_TESTS,
 			result.status)
I'd prefer we split these test cases out. Perhaps:
def test_no_tests_empty_plan(self): ...
def test_no_tests_no_plan(self): ... # this new test
+		no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+		with open(no_plan_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		self.assertEqual(0, len(result.test.subtests[0].subtests[0].subtests))
+		self.assertEqual(
+			kunit_parser.TestStatus.NO_TESTS,
+			result.test.subtests[0].subtests[0].status)
optional: self.assertEqual(1, result.test.counts.errors)
 	def test_no_kunit_output(self):
 		crash_log = test_data_path('test_insufficient_memory.log')
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
new file mode 100644
index 000000000000..dd873c981108
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -0,0 +1,7 @@
+TAP version 14
+1..1
+    # Subtest: suite
+    1..1
+        # Subtest: case
+        ok 1 - case # SKIP
+ok 1 - suite
--
2.33.0.1079.g6e70778dc9-goog
It's possible that a parameterised test could end up with zero parameters. At the moment, the test function will nevertheless be called with NULL as the parameter. Instead, don't try to run the test code, and just mark the test as SKIPped.
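For example (a hypothetical sketch, not taken from this patch), a generator whose parameter source happens to be empty returns NULL on the very first call:

static const void *empty_gen_params(const void *prev, char *desc)
{
	/*
	 * Hypothetical generator: there is nothing to test in this
	 * configuration, so even the first call (prev == NULL) returns
	 * NULL. Previously the test body would still run once with
	 * test->param_value == NULL; with this patch the body is never
	 * entered and the case is reported as SKIPped instead.
	 */
	return NULL;
}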
Reported-by: Daniel Latypov <dlatypov@google.com>
Signed-off-by: David Gow <davidgow@google.com>
---
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-3-davidgow@go...
- Rework to not share the loop between the parameterised and non-parameterised test cases.
  - Suggested by Daniel Latypov.
  - Avoids using a magic non-zero pointer value.
 lib/kunit/test.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 3bd741e50a2d..dfe1127aacfd 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -508,12 +508,12 @@ int kunit_run_tests(struct kunit_suite *suite)
 			/* Get initial param. */
 			param_desc[0] = '\0';
 			test.param_value = test_case->generate_params(NULL, param_desc);
-		}
+			kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+				  "# Subtest: %s", test_case->name);
 
-		do {
-			kunit_run_case_catch_errors(suite, test_case, &test);
+			while (test.param_value) {
+				kunit_run_case_catch_errors(suite, test_case, &test);
 
-			if (test_case->generate_params) {
 				if (param_desc[0] == '\0') {
 					snprintf(param_desc, sizeof(param_desc),
 						 "param-%d", test.param_index);
@@ -530,11 +530,15 @@ int kunit_run_tests(struct kunit_suite *suite)
 				param_desc[0] = '\0';
 				test.param_value = test_case->generate_params(test.param_value, param_desc);
 				test.param_index++;
-			}
 
+				kunit_update_stats(&param_stats, test.status);
+			}
+		} else {
+			/* Non-parameterised test. */
+			kunit_run_case_catch_errors(suite, test_case, &test);
 			kunit_update_stats(&param_stats, test.status);
+		}
 
-		} while (test.param_value);
 
 		kunit_print_test_stats(&test, param_stats);
On Wed, Oct 27, 2021 at 11:42 PM David Gow <davidgow@google.com> wrote:
It's possible that a parameterised test could end up with zero parameters. At the moment, the test function will nevertheless be called with NULL as the parameter. Instead, don't try to run the test code, and just mark the test as SKIPped.
Reported-by: Daniel Latypov <dlatypov@google.com>
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
There's a small bug in this patch noted below, we just need to move the "# Subtest" change into the child patch. If/when we make that change, I have an optional suggestion about flipping the if/else branch.
But other than that, this looks good to me.
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-3-davidgow@go...
- Rework to not share the loop between the parameterised and non-parameterised test cases.
- Suggested by Daniel Latypov.
- Avoids using a magic non-zero pointer value.
 lib/kunit/test.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 3bd741e50a2d..dfe1127aacfd 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -508,12 +508,12 @@ int kunit_run_tests(struct kunit_suite *suite)
 			/* Get initial param. */
 			param_desc[0] = '\0';
 			test.param_value = test_case->generate_params(NULL, param_desc);
-		}
+			kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+				  "# Subtest: %s", test_case->name);
It looks like this change accidentally made its way into this patch as opposed to the child.
This commit itself gives me a traffic light (red/yellow/green statuses):
[ERROR] Test inode_test_xtimestamp_decoding: 0 tests run!
====== [NO TESTS RUN] inode_test_xtimestamp_decoding =======
================= [PASSED] ext4_inode_test =================
============================================================
The problem is the output becomes this:

# Subtest: ext4_inode_test
1..1
    # Subtest: inode_test_xtimestamp_decoding
    # inode_test_xtimestamp_decoding: ok 1 - 1901-12-13 Lower bound of 32bit < 0 timestamp, no extra bits
...
-		do {
-			kunit_run_case_catch_errors(suite, test_case, &test);
+			while (test.param_value) {
+				kunit_run_case_catch_errors(suite, test_case, &test);
 
-			if (test_case->generate_params) {
 				if (param_desc[0] == '\0') {
 					snprintf(param_desc, sizeof(param_desc),
 						 "param-%d", test.param_index);
@@ -530,11 +530,15 @@ int kunit_run_tests(struct kunit_suite *suite)
 				param_desc[0] = '\0';
 				test.param_value = test_case->generate_params(test.param_value, param_desc);
 				test.param_index++;
-			}
 
+				kunit_update_stats(&param_stats, test.status);
+			}
+		} else {
I have a very slight preference for having the order of these branches swapped. i.e.
if (!test_case->generate_params) {
	/* Non-parameterised test */
} else {
	...
}
I prefer this because I think it's more readable for a few reasons:
* I like having the "normal" branch come first. This is likely the code path a reader would care more about.
* I prefer having the shorter branch come first. It makes it easier to read it through and see "oh, so this branch is just that one but with XYZ".
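Spelled out, the swapped version would be something like this (an untested sketch of the same logic as in the patch, just with the branches reordered; the elided middle is the existing param-description/logging code):

		if (!test_case->generate_params) {
			/* Non-parameterised test. */
			kunit_run_case_catch_errors(suite, test_case, &test);
			kunit_update_stats(&param_stats, test.status);
		} else {
			/* Get initial param. */
			param_desc[0] = '\0';
			test.param_value = test_case->generate_params(NULL, param_desc);

			while (test.param_value) {
				kunit_run_case_catch_errors(suite, test_case, &test);
				/* ... describe and log the param, fetch the next one ... */
				kunit_update_stats(&param_stats, test.status);
			}
		}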
+			/* Non-parameterised test. */
+			kunit_run_case_catch_errors(suite, test_case, &test);
 			kunit_update_stats(&param_stats, test.status);
+		}
 
-		} while (test.param_value);
 
 		kunit_print_test_stats(&test, param_stats);
--
2.33.0.1079.g6e70778dc9-goog
Currently, the results for individual parameters in a parameterised test are simply output as (K)TAP diagnostic lines.
As kunit_tool now supports nested subtests, report each parameter as its own subtest.
For example, here's what the output now looks like:

    # Subtest: inode_test_xtimestamp_decoding
    ok 1 - 1901-12-13 Lower bound of 32bit < 0 timestamp, no extra bits
    ok 2 - 1969-12-31 Upper bound of 32bit < 0 timestamp, no extra bits
    ok 3 - 1970-01-01 Lower bound of 32bit >=0 timestamp, no extra bits
    ok 4 - 2038-01-19 Upper bound of 32bit >=0 timestamp, no extra bits
    ok 5 - 2038-01-19 Lower bound of 32bit <0 timestamp, lo extra sec bit on
    ok 6 - 2106-02-07 Upper bound of 32bit <0 timestamp, lo extra sec bit on
    ok 7 - 2106-02-07 Lower bound of 32bit >=0 timestamp, lo extra sec bit on
    ok 8 - 2174-02-25 Upper bound of 32bit >=0 timestamp, lo extra sec bit on
    ok 9 - 2174-02-25 Lower bound of 32bit <0 timestamp, hi extra sec bit on
    ok 10 - 2242-03-16 Upper bound of 32bit <0 timestamp, hi extra sec bit on
    ok 11 - 2242-03-16 Lower bound of 32bit >=0 timestamp, hi extra sec bit on
    ok 12 - 2310-04-04 Upper bound of 32bit >=0 timestamp, hi extra sec bit on
    ok 13 - 2310-04-04 Upper bound of 32bit>=0 timestamp, hi extra sec bit 1. 1 ns
    ok 14 - 2378-04-22 Lower bound of 32bit>= timestamp. Extra sec bits 1. Max ns
    ok 15 - 2378-04-22 Lower bound of 32bit >=0 timestamp. All extra sec bits on
    ok 16 - 2446-05-10 Upper bound of 32bit >=0 timestamp. All extra sec bits on
# inode_test_xtimestamp_decoding: pass:16 fail:0 skip:0 total:16
ok 1 - inode_test_xtimestamp_decoding
Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
---
Changes since v2: https://lore.kernel.org/linux-kselftest/20211027013702.2039566-4-davidgow@go...
- No changes to this patch.
 lib/kunit/test.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index dfe1127aacfd..c66fe1735054 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -520,9 +520,8 @@ int kunit_run_tests(struct kunit_suite *suite)
 			}
 
 			kunit_log(KERN_INFO, &test,
-				  KUNIT_SUBTEST_INDENT
-				  "# %s: %s %d - %s",
-				  test_case->name,
+				  KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+				  "%s %d - %s",
 				  kunit_status_to_ok_not_ok(test.status),
 				  test.param_index + 1, param_desc);