xref: /honee/tests/junit.py (revision ee61e64c050ff8f91de3b6de6869e59730df785a)
1#!/usr/bin/env python3
2from junit_common import *
3
4
def create_argparser() -> argparse.ArgumentParser:
    """Create the argument parser for the test runner's command line.

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    # Pass the text as `description`; the first positional parameter of
    # ArgumentParser is `prog`, which should default to sys.argv[0].
    parser = argparse.ArgumentParser(description='Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    # `type` must be a callable converter; `Optional[Path]` is a typing construct
    # and raises TypeError when argparse calls it, so use `Path` directly
    # (the default stays None, preserving the Optional[Path] result type).
    parser.add_argument('-o', '--output', type=Path, default=None, help='Output file to write test')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('--smartredis-dir', type=str, default='', help='path to SmartSim library, if present')
    # `type=bool` treats any non-empty string (including "false") as True;
    # parse common truthy spellings explicitly instead.
    parser.add_argument('--has-torch',
                        type=lambda s: s.strip().lower() in ('1', 'true', 'yes', 'on'),
                        default=False,
                        help='Whether to build with torch')
    parser.add_argument('-s', '--search', type=str, default='.*',
                        help='Search string to filter tests, using `re` package format')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print details for all runs, not just failures')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser
39
40
def diff_csv_comment_function(test_line: str, true_line: str) -> Optional[str]:
    """Diff the comment portion (text before the first ':') of two CSV lines.

    Args:
        test_line (str): Comment line from the test-created CSV file
        true_line (str): Comment line from the expected-output CSV file

    Returns:
        Optional[str]: Unified diff of the comment portions, or `None` if they match
    """
    test_comment = test_line.split(':')[0]
    true_comment = true_line.split(':')[0]
    # unified_diff(a, b, fromfile=..., tofile=...) labels `a` with `fromfile`;
    # the original had the two labels swapped relative to the argument order,
    # attributing the test output to the expected file and vice versa.
    diff_output = ''.join(difflib.unified_diff([test_comment + '\n'],
                                               [true_comment + '\n'],
                                               fromfile='test created file',
                                               tofile='expected output'))
    return diff_output if diff_output else None
49
50
class HoneeSuiteSpec(SuiteSpec):
    """Honee-specific suite configuration for the JUnit/TAP test runner."""

    def __init__(self, has_torch: bool):
        # whether the build includes PyTorch support (USE_TORCH=1)
        self.has_torch: bool = has_torch
        # relative tolerance used when comparing numeric CSV output
        self.csv_rtol: float = 1e-9
        # callable used to diff CSV comment lines (text before the first ':')
        self.csv_comment_diff_fn = diff_csv_comment_function
        # lines starting with this string are treated as CSV comments
        self.csv_comment_str: str = '#'

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # all navierstokes* tests share the single example source file
        if test.startswith('navierstokes'):
            return (Path('examples') / 'navierstokes').with_suffix('.c')
        else:
            return (Path('tests') / test).with_suffix('.c')

    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        for condition in spec.only:
            if (condition == 'cpu') and ('gpu' in resource):
                return 'CPU only test with GPU backend'
            if condition == 'torch' and not self.has_torch:
                return 'PyTorch only test without USE_TORCH=1'
        # no restricting condition matched; do not skip
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        if 'No SYCL devices of the requested type are available' in stderr:
            return 'SYCL device type not available'
        elif 'Loading meshes requires CGNS support. Reconfigure using --with-cgns-dir' in stderr:
            return f'CGNS not installed in PETSc for {test}, {spec.name}'
        elif 'You may need to add --download-ctetgen or --download-tetgen' in stderr:
            return f'Tet mesh generator not installed for {test}, {spec.name}'
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        # no Honee tests are expected to fail
        return '', True

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return False
159
if __name__ == '__main__':
    args = create_argparser().parse_args()

    # run tests
    # `test` is an optional positional (nargs='?'), so guard against None
    # before the substring check
    if args.test and 'smartsim' in args.test:
        # SmartSim tests require the SmartRedis library directory to exist
        has_smartsim: bool = bool(args.smartredis_dir) and Path(args.smartredis_dir).is_dir()
        test_cases = []

        if args.mode is RunMode.TAP:
            # TAP plan: the SmartSim suite is reported as a single top-level test
            print('1..1')
        if has_smartsim:
            # deferred import: only available when SmartSim is installed
            from smartsim_regression_framework import SmartSimTest

            test_framework = SmartSimTest(Path(__file__).parent / 'smartsim_test_dir')
            test_framework.setup()

            is_new_subtest = True
            # NOTE(review): subtest_ok is never set to False, so the TAP summary
            # line always reports 'ok' -- confirm whether it should reflect
            # failures collected in test_cases
            subtest_ok = True
            for i, backend in enumerate(args.ceed_backends):
                test_cases.append(test_framework.test_junit(backend))
                if is_new_subtest and args.mode == RunMode.TAP:
                    # emit the TAP subtest header once, before the first result line
                    is_new_subtest = False
                    print(f'# Subtest: {test_cases[0].category}')
                    print(f'    1..{len(args.ceed_backends)}')
                print(
                    test_case_output_string(
                        test_cases[i],
                        TestSpec("SmartSim Tests"),
                        args.mode,
                        backend,
                        '',
                        i,
                        verbose=args.verbose))
            if args.mode == RunMode.TAP:
                print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}')
            test_framework.teardown()
        elif args.mode is RunMode.TAP:
            # SmartSim unavailable: report the single planned test as skipped
            print('ok 1 - # SKIP SmartSim not installed')
        result: TestSuite = TestSuite('SmartSim Tests', test_cases)
    else:
        result: TestSuite = run_tests(
            args.test,
            args.ceed_backends,
            args.mode,
            args.nproc,
            HoneeSuiteSpec(args.has_torch),
            args.pool_size,
            search=args.search,
            verbose=args.verbose)

    # write output and check for failures (non-zero exit only in JUnit mode)
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.output, args.junit_batch)
        if has_failures(result):
            sys.exit(1)
215