Lines Matching +full:self +full:- +full:test
1 # SPDX-License-Identifier: GPL-2.0
3 # Parses KTAP test results from a kernel dmesg log and incrementally prints
4 # results with reader-friendly format. Stores and returns test results in a
5 # Test object.
22 class Test: class
24 A class to represent a test parsed from KTAP results. All KTAP
25 results within a test log are stored in a main Test object as
29 status : TestStatus - status of the test
30 name : str - name of the test
31 expected_count : int - expected number of subtests (0 if single
32 test case and None if unknown expected number of subtests)
33 subtests : List[Test] - list of subtests
34 log : List[str] - log of KTAP lines that correspond to the test
35 counts : TestCounts - counts of the test statuses and errors of
36 subtests or of the test itself if the test is a single
37 test case.
def __init__(self) -> None:
    """Creates Test object with default attributes.

    The status starts as TEST_CRASHED so that a test which never
    produces a parsable result line is reported as a crash rather
    than silently passing (presumably relied on by the parser's
    "CRASHED is from Test.__init__()" assumption — confirm).
    """
    self.status = TestStatus.TEST_CRASHED
    self.name = ''
    # 0 means a single test case; None means the expected number of
    # subtests is unknown (see the class documentation).
    self.expected_count: Optional[int] = 0
    self.subtests: List[Test] = []
    self.log: List[str] = []
    self.counts = TestCounts()
48 def __str__(self) -> str: argument
49 """Returns string representation of a Test class object."""
50 return (f'Test({self.status}, {self.name}, {self.expected_count}, '
51 f'{self.subtests}, {self.log}, {self.counts})')
def __repr__(self) -> str:
    """Mirror __str__ so containers of Test objects print readably."""
    return self.__str__()
def add_error(self, error_message: str) -> None:
    """Record a parsing error against this test and print it immediately."""
    self.counts.errors += 1
    message = stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}'
    stdout.print_with_timestamp(message)
def ok_status(self) -> bool:
    """Return True when the test is in a passing state (success or skip)."""
    return self.status in (TestStatus.SKIPPED, TestStatus.SUCCESS)
67 """An enumeration class to represent the status of a test."""
78 Tracks the counts of statuses of all test cases and any errors within
79 a Test.
87 def __str__(self) -> str: argument
89 statuses = [('passed', self.passed), ('failed', self.failed),
90 ('crashed', self.crashed), ('skipped', self.skipped),
91 ('errors', self.errors)]
92 return f'Ran {self.total()} tests: ' + \
def total(self) -> int:
    """Return the total number of test cases within a test object,
    where a test case is a test with no subtests.

    Note: the errors tally is not part of this total.
    """
    return sum((self.passed, self.failed, self.crashed, self.skipped))
def add_subtest_counts(self, counts: TestCounts) -> None:
    """Accumulate another TestCounts object's tallies into this one.

    Used to fold a subtest's counts into those of its parent test.

    Parameters:
    counts - a different TestCounts object whose counts are added to self
    """
    for field in ('passed', 'failed', 'crashed', 'skipped', 'errors'):
        setattr(self, field, getattr(self, field) + getattr(counts, field))
118 def get_status(self) -> TestStatus: argument
119 """Returns the aggregated status of a Test using test
122 if self.total() == 0:
124 if self.crashed:
127 if self.failed:
129 if self.passed:
135 def add_status(self, status: TestStatus) -> None: argument
138 self.passed += 1
140 self.failed += 1
142 self.skipped += 1
144 self.crashed += 1
def __init__(self, lines: Iterator[Tuple[int, str]]):
    """Wrap an iterator of (line number, line text) pairs for lookahead parsing."""
    # Lookahead state: _next buffers the current pair once fetched,
    # _need_next marks the buffer stale, _done marks end of input.
    self._done = False
    self._need_next = True
    self._next = (0, '')  # sentinel until the first real line is fetched
    self._lines = lines
164 def _get_next(self) -> None: argument
166 if not self._need_next:
169 self._next = next(self._lines)
171 self._done = True
173 self._need_next = False
def peek(self) -> str:
    """Return the text of the next line without consuming it."""
    self._get_next()
    _, text = self._next
    return text
181 def pop(self) -> str: argument
185 s = self.peek()
186 if self._done:
188 self._need_next = True
def __bool__(self) -> bool:
    """True while the stream still has lines to read."""
    # _done only ever holds a bool literal, so identity comparison is safe.
    self._get_next()
    return self._done is False
def __iter__(self) -> Iterator[str]:
    """Consume and yield the text of each remaining line, in order."""
    while self:
        yield self.pop()
def line_number(self) -> int:
    """Return the line number of the line peek() would currently return."""
    self._get_next()
    number, _ = self._next
    return number
211 KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
212 TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
214 'Kernel panic - not syncing: VFS:|reboot: System halted)')
217 def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
220 -> Iterator[Tuple[int, str]]:
def check_version(version_num: int, accepted_versions: List[int],
                  version_type: str, test: Test) -> None:
    """
    Adds error to test object if version number is too high or too
    low.

    Parameters:
    version_num - the version number parsed from the KTAP or TAP
        header line
    accepted_versions - list of accepted KTAP or TAP versions
    version_type - 'KTAP' or 'TAP' depending on the type of
        version line
    test - Test object for current test being parsed
    """
    if version_num < min(accepted_versions):
        test.add_error(f'{version_type} version lower than expected!')
    elif version_num > max(accepted_versions):
        # Fix user-visible typo in the error message: 'higer' -> 'higher'.
        test.add_error(f'{version_type} version higher than expected!')
272 def parse_ktap_header(lines: LineStream, test: Test) -> bool: argument
278 - 'KTAP version [version number]'
279 - 'TAP version [version number]'
282 lines - LineStream of KTAP output to parse
283 test - Test object for current test being parsed
292 check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
295 check_version(version_num, TAP_VERSIONS, 'TAP', test)
303 def parse_test_header(lines: LineStream, test: Test) -> bool: argument
305 Parses test header and stores test name in test object.
306 Returns False if fails to parse test header line.
309 - '# Subtest: [test name]'
312 lines - LineStream of KTAP output to parse
313 test - Test object for current test being parsed
316 True if successfully parsed test header line
321 test.name = match.group(1)
325 TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')
327 def parse_test_plan(lines: LineStream, test: Test) -> bool: argument
329 Parses test plan line and stores the expected number of subtests in
330 test object. Reports an error if expected count is 0.
331 Returns False and sets expected_count to None if there is no valid test
335 - '1..[number of subtests]'
338 lines - LineStream of KTAP output to parse
339 test - Test object for current test being parsed
342 True if successfully parsed test plan line
346 test.expected_count = None
349 test.expected_count = expected_count
353 TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
355 TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
357 def peek_test_name_match(lines: LineStream, test: Test) -> bool: argument
359 Matches current line with the format of a test result line and checks
360 if the name matches the name of the current test.
364 - '[ok|not ok] [test number] [-] [test name] [optional skip
368 lines - LineStream of KTAP output to parse
369 test - Test object for current test being parsed
372 True if matched a test result line and the name matching the
373 expected test name
380 return name == test.name
382 def parse_test_result(lines: LineStream, test: Test, argument
383 expected_num: int) -> bool:
385 Parses test result line and stores the status and name in the test
386 object. Reports an error if the test number does not match expected
387 test number.
388 Returns False if fails to parse test result line.
394 - '[ok|not ok] [test number] [-] [test name] [optional skip
398 lines - LineStream of KTAP output to parse
399 test - Test object for current test being parsed
400 expected_num - expected test number for current test
403 True if successfully parsed a test result line.
409 # Check if line matches test result line format
414 # Set name of test object
416 test.name = skip_match.group(4)
418 test.name = match.group(4)
420 # Check test num
423 test.add_error(f'Expected test number {expected_num} but found {num}')
425 # Set status of test object
428 test.status = TestStatus.SKIPPED
430 test.status = TestStatus.SUCCESS
432 test.status = TestStatus.FAILURE
435 def parse_diagnostic(lines: LineStream) -> List[str]:
437 Parse lines that do not match the format of a test result line or
438 test header line and returns them in list.
441 - '# Subtest: [test name]'
442 - '[ok|not ok] [test number] [-] [test name] [optional skip
444 - 'KTAP version [version number]'
447 lines - LineStream of KTAP output to parse
464 def format_test_divider(message: str, len_message: int) -> str:
472 message - message to be centered in divider line
473 len_message - length of the message to be printed such that
482 difference = len(DIVIDER) - len_message - 2 # 2 spaces added
486 len_2 = difference - len_1
489 def print_test_header(test: Test) -> None: argument
491 Prints test header with test name and optionally the expected number
498 test - Test object representing current test being printed
500 message = test.name
502 # Add a leading space before the subtest counts only if a test name
505 if test.expected_count:
506 if test.expected_count == 1:
509 message += f'({test.expected_count} subtests)'
512 def print_log(log: Iterable[str]) -> None:
513 """Prints all strings in saved log for test in yellow."""
def format_test_result(test: Test) -> str:
    """
    Returns string with formatted test result with colored status and test
    name.

    Parameters:
    test - Test object representing current test being printed

    Return:
    String containing formatted test result
    """
    status = test.status
    if status == TestStatus.SUCCESS:
        return stdout.green('[PASSED] ') + test.name
    if status == TestStatus.SKIPPED:
        return stdout.yellow('[SKIPPED] ') + test.name
    if status == TestStatus.NO_TESTS:
        return stdout.yellow('[NO TESTS RUN] ') + test.name
    # Crashes and failures dump the saved log before the result line.
    print_log(test.log)
    if status == TestStatus.TEST_CRASHED:
        return stdout.red('[CRASHED] ') + test.name
    return stdout.red('[FAILED] ') + test.name
def print_test_result(test: Test) -> None:
    """
    Prints result line with status of test, e.g. '[PASSED] example'.

    Parameters:
    test - Test object representing current test being printed
    """
    line = format_test_result(test)
    stdout.print_with_timestamp(line)
556 def print_test_footer(test: Test) -> None: argument
558 Prints test footer with status of test.
564 test - Test object representing current test being printed
566 message = format_test_result(test)
568 len(message) - stdout.color_len()))
572 def _summarize_failed_tests(test: Test) -> str: argument
573 """Tries to summarize all the failing subtests in `test`."""
575 def failed_names(test: Test, parent_name: str) -> List[str]: argument
576 # Note: we use 'main' internally for the top-level test.
578 full_name = test.name
580 full_name = parent_name + '.' + test.name
582 if not test.subtests: # this is a leaf node
586 # Don't summarize it down "the top-level test failed", though.
587 failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
588 if parent_name and len(failed_subtests) == len(test.subtests):
596 failures = failed_names(test, '')
604 def print_summary_line(test: Test) -> None: argument
606 Prints summary line of test object. Color of line is dependent on
607 status of test. Color is green if test passes, yellow if test is
608 skipped, and red if the test fails or crashes. Summary line contains
609 counts of the statuses of the tests subtests or the test itself if it
616 test - Test object representing current test being printed
618 if test.status == TestStatus.SUCCESS:
620 elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
624 stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
626 # Summarize failures that might have gone off-screen since we had a lot
628 if test.ok_status() or test.counts.total() < 100:
630 summarized = _summarize_failed_tests(test)
637 def bubble_up_test_results(test: Test) -> None: argument
639 If the test has subtests, add the test counts of the subtests to the
640 test and check if any of the tests crashed and if so set the test
641 status to crashed. Otherwise if the test has no subtests add the
642 status of the test to the test counts.
645 test - Test object for current test being parsed
647 subtests = test.subtests
648 counts = test.counts
649 status = test.status
654 elif test.counts.get_status() == TestStatus.TEST_CRASHED:
655 test.status = TestStatus.TEST_CRASHED
657 def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
659 Finds next test to parse in LineStream, creates new Test object,
660 parses any subtests of the test, populates Test object with all
661 information (status, name) about the test and the Test objects for
662 any subtests, and then returns the Test object. The method accepts
665 Accepted test formats:
667 - Main KTAP/TAP header
675 - Subtest header (must include either the KTAP version line or
701 - Test result line
705 ok 1 - test
708 lines - LineStream of KTAP output to parse
709 expected_num - expected test number for test to be parsed
710 log - list of strings containing any preceding diagnostic lines
711 corresponding to the current test
712 is_subtest - boolean indicating whether test is a subtest
715 Test object populated with characteristics and any subtests
717 test = Test()
718 test.log.extend(log)
722 test.log.extend(err_log)
725 # If parsing the main/top-level test, parse KTAP version line and
726 # test plan
727 test.name = "main"
728 ktap_line = parse_ktap_header(lines, test)
729 test.log.extend(parse_diagnostic(lines))
730 parse_test_plan(lines, test)
733 # If not the main test, attempt to parse a test header containing
735 ktap_line = parse_ktap_header(lines, test)
736 subtest_line = parse_test_header(lines, test)
740 # to parse test plan and print test header
741 test.log.extend(parse_diagnostic(lines))
742 parse_test_plan(lines, test)
743 print_test_header(test)
744 expected_count = test.expected_count
750 # if expected number of tests is unknown break when test
754 sub_test = Test()
755 if not lines or (peek_test_name_match(lines, test) and
758 # If parser reaches end of test before
761 test.add_error('missing expected subtest!')
763 test.counts.add_status(
767 test.log.extend(sub_log)
773 test.subtests = subtests
775 # If not main test, look for test result line
776 test.log.extend(parse_diagnostic(lines))
777 if test.name != "" and not peek_test_name_match(lines, test):
778 test.add_error('missing subtest result line!')
780 parse_test_result(lines, test, expected_num)
782 # Check for there being no subtests within parent test
784 # Don't override a bad status if this test had one reported.
785 # Assumption: no subtests means CRASHED is from Test.__init__()
786 if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
787 print_log(test.log)
788 test.status = TestStatus.NO_TESTS
789 test.add_error('0 tests run!')
791 # Add statuses to TestCounts attribute in Test object
792 bubble_up_test_results(test)
794 # If test has subtests and is not the main test object, print
796 print_test_footer(test)
798 print_test_result(test)
799 return test
801 def parse_run_tests(kernel_output: Iterable[str]) -> Test:
803 Using kernel output, extract KTAP lines, parse the lines for test
804 results and print condensed test results and summary line.
807 kernel_output - Iterable object contains lines of kernel output
810 Test - the main test object with all subtests.
814 test = Test()
816 test.name = '<missing>'
817 test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
818 test.status = TestStatus.FAILURE_TO_PARSE_TESTS
820 test = parse_test(lines, 0, [], False)
821 if test.status != TestStatus.NO_TESTS:
822 test.status = test.counts.get_status()
824 print_summary_line(test)
825 return test