xref: /honee/tests/junit.py (revision 98116c12d344a4e022a9a31a5755be7d96f7d5d9)
1#!/usr/bin/env python3
2from junit_common import *
3
4
def create_argparser() -> argparse.ArgumentParser:
    """Creates argument parser to read command line arguments

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    # `type` must be a callable that converts the raw string; `Optional[Path]` is a
    # typing construct and raises TypeError when argparse calls it, so use `Path`.
    # The default stays `None`, preserving the Optional semantics for callers.
    parser.add_argument('-o', '--output', type=Path, default=None, help='Output file to write test')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('-s', '--smartredis_dir', type=str, default='', help='path to SmartSim library, if present')
    # `type=bool` is an argparse pitfall: bool(s) is True for ANY non-empty string,
    # so `--has_torch False` would enable torch. Parse common truthy spellings instead.
    parser.add_argument('--has_torch', type=lambda s: s.lower() in ('1', 'true', 'yes', 'on'),
                        default=False, help='Whether to build with torch')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser
35
36
def diff_csv_comment_function(test_line: str, true_line: str) -> Optional[str]:
    """Compare the comment portion (text before the first ':') of two CSV lines.

    Args:
        test_line (str): Line from the test-created CSV file
        true_line (str): Line from the expected (reference) CSV file

    Returns:
        Optional[str]: Unified diff of the comment portions, or `None` if they match
    """
    test_comment = test_line.split(':')[0]
    true_comment = true_line.split(':')[0]
    # unified_diff labels its FIRST sequence with `fromfile` and its SECOND with
    # `tofile`; the original call had the labels swapped, attributing the test
    # output to 'expected output' and vice versa.
    diff_output = ''.join(difflib.unified_diff([test_comment + '\n'],
                                               [true_comment + '\n'],
                                               fromfile='test created file',
                                               tofile='expected output'))
    return diff_output if diff_output else None
45
46
class HoneeSuiteSpec(SuiteSpec):
    """Honee-specific suite configuration consumed by the JUnit/TAP test runner"""

    def __init__(self, has_torch: bool):
        """Initialize suite specification

        Args:
            has_torch (bool): Whether the build includes PyTorch support (USE_TORCH=1)
        """
        # NOTE(review): SuiteSpec.__init__ is not invoked here; confirm the base
        # class requires no initialization of its own.
        self.has_torch: bool = has_torch
        # Keyword arguments forwarded to CSV diffing: comment-line comparison
        # callback plus relative tolerance for floating-point fields
        self.diff_csv_kwargs: dict = {'comment_func': diff_csv_comment_function, 'rel_tol': 1e-9}

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # all navierstokes* tests share a single example source file
        if test.startswith('navierstokes'):
            return (Path('examples') / 'navierstokes').with_suffix('.c')
        else:
            return (Path('tests') / test).with_suffix('.c')

    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        for condition in spec.only:
            if (condition == 'cpu') and ('gpu' in resource):
                return 'CPU only test with GPU backend'
            if condition == 'torch' and not self.has_torch:
                return 'PyTorch only test without USE_TORCH=1'
        # explicit None keeps the Optional[str] contract obvious to readers
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        if 'No SYCL devices of the requested type are available' in stderr:
            return 'SYCL device type not available'
        elif 'You may need to add --download-ctetgen or --download-tetgen' in stderr:
            return f'Tet mesh generator not installed for {test}, {spec.name}'
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        No Honee test currently requires failure, so every case reports success.

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        return '', True

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return False
150
151
if __name__ == '__main__':
    args = create_argparser().parse_args()

    # run tests
    # `test` is a positional with nargs='?', so it may be None; guard before the
    # substring test to avoid `TypeError: argument of type 'NoneType' is not iterable`
    if args.test and 'smartsim' in args.test:
        # SmartSim tests require the SmartRedis library directory to exist on disk
        has_smartsim: bool = bool(args.smartredis_dir) and Path(args.smartredis_dir).is_dir()
        test_cases = []

        if args.mode is RunMode.TAP:
            print('1..1')
        if has_smartsim:
            # imported lazily so the runner works when SmartSim is not installed
            from smartsim_regression_framework import SmartSimTest

            test_framework = SmartSimTest(Path(__file__).parent / 'smartsim_test_dir')
            test_framework.setup()

            is_new_subtest = True
            # NOTE(review): subtest_ok is never set to False, so the TAP summary
            # line always reports 'ok' — confirm failures should update it
            subtest_ok = True
            for i, backend in enumerate(args.ceed_backends):
                test_cases.append(test_framework.test_junit(backend))
                # print the TAP subtest header once, before the first case
                if is_new_subtest and args.mode == RunMode.TAP:
                    is_new_subtest = False
                    print(f'# Subtest: {test_cases[0].category}')
                    print(f'    1..{len(args.ceed_backends)}')
                print(test_case_output_string(test_cases[i], TestSpec("SmartSim Tests"), args.mode, backend, '', i))
            if args.mode == RunMode.TAP:
                print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}')
            test_framework.teardown()
        elif args.mode is RunMode.TAP:
            print('ok 1 - # SKIP SmartSim not installed')
        result: TestSuite = TestSuite('SmartSim Tests', test_cases)
    else:
        result: TestSuite = run_tests(
            args.test,
            args.ceed_backends,
            args.mode,
            args.nproc,
            HoneeSuiteSpec(args.has_torch),
            args.pool_size)

    # write output and check for failures
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.output, args.junit_batch)
        if has_failures(result):
            sys.exit(1)
197