xref: /libCEED/tests/junit.py (revision 9702fad555bca64ebadd65b6683823bdeee0498a)
1#!/usr/bin/env python3
2from junit_common import *
3
4
def create_argparser() -> argparse.ArgumentParser:
    """Creates argument parser to read command line arguments

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    def str_to_bool(value: str) -> bool:
        # `type=bool` would treat ANY non-empty string (including "False") as True,
        # so parse the common true/false spellings explicitly
        lowered = value.lower()
        if lowered in ('1', 'true', 'yes', 'on'):
            return True
        if lowered in ('', '0', 'false', 'no', 'off'):
            return False
        raise argparse.ArgumentTypeError(f'invalid boolean value: {value!r}')

    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    # `type` must be a plain callable converting the argument string; the previous
    # `type=Optional[Path]` raised a TypeError whenever -o was actually supplied
    parser.add_argument('-o', '--output', type=Path, default=None, help='Output file to write test')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('-s', '--smartredis_dir', type=str, default='', help='path to SmartSim library, if present')
    parser.add_argument('--has_torch', type=str_to_bool, default=False, help='Whether to build with torch')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser
35
36
# Necessary functions for running tests
class CeedSuiteSpec(SuiteSpec):
    """libCEED-specific suite configuration: source/executable/output paths,
    skip rules, and expected-failure detection for the generic test runner."""

    # Expected-failure stderr fragments keyed by four-character test id.
    # Resource-dependent cases (t409) are handled separately in check_required_failure.
    _EXPECTED_FAILURES = {
        't006': 'No suitable backend:',
        't007': 'No suitable backend:',
        't008': 'Available backend resources:',
        't110': 'Cannot grant CeedVector array access',
        't111': 'Cannot grant CeedVector array access',
        't112': 'Cannot grant CeedVector array access',
        't113': 'Cannot grant CeedVector array access',
        't114': 'Cannot grant CeedVector array access',
        't115': 'Cannot grant CeedVector read-only array access, the access lock is already in use',
        't116': 'Cannot destroy CeedVector, the writable access lock is in use',
        't117': 'Cannot restore CeedVector array access, access was not granted',
        't118': 'Cannot sync CeedVector, the access lock is already in use',
        't215': 'Cannot destroy CeedElemRestriction, a process has read access to the offset data',
        't303': 'Length of input/output vectors incompatible with basis dimensions',
        't408': 'CeedQFunctionContextGetData(): Cannot grant CeedQFunctionContext data access, a process has read access',
    }

    def __init__(self, has_torch: bool):
        # NOTE(review): SuiteSpec.__init__ is not invoked here; assumed to be a no-op — confirm
        self.has_torch: bool = has_torch

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # partition() (unlike split()) tolerates test names with no '-' separator,
        # which previously raised ValueError before the suffix checks below ran
        prefix, _, rest = test.partition('-')
        if prefix == 'petsc':
            return (Path('examples') / 'petsc' / rest).with_suffix('.c')
        elif prefix == 'mfem':
            return (Path('examples') / 'mfem' / rest).with_suffix('.cpp')
        elif prefix == 'nek':
            return (Path('examples') / 'nek' / 'bps' / rest).with_suffix('.usr')
        elif prefix == 'dealii':
            return (Path('examples') / 'deal.II' / rest).with_suffix('.cc')
        elif prefix == 'fluids':
            return (Path('examples') / 'fluids' / rest).with_suffix('.c')
        elif prefix == 'solids':
            return (Path('examples') / 'solids' / rest).with_suffix('.c')
        elif test.startswith('ex'):
            return (Path('examples') / 'ceed' / test).with_suffix('.c')
        elif test.endswith('-f'):
            return (Path('tests') / test).with_suffix('.f90')
        else:
            return (Path('tests') / test).with_suffix('.c')

    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        if contains_any(resource, ['occa']) and startswith_any(
                test, ['t4', 't5', 'ex', 'mfem', 'nek', 'petsc', 'fluids', 'solids']):
            return 'OCCA mode not supported'
        if test.startswith('t318') and contains_any(resource, ['/gpu/cuda/ref']):
            return 'CUDA ref backend not supported'
        if test.startswith('t506') and contains_any(resource, ['/gpu/cuda/shared']):
            return 'CUDA shared backend not supported'
        for condition in spec.only:
            if (condition == 'cpu') and ('gpu' in resource):
                return 'CPU only test with GPU backend'
            if condition == 'torch' and not self.has_torch:
                return 'PyTorch only test without USE_TORCH=1'
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        if 'OCCA backend failed to use' in stderr:
            return 'OCCA mode not supported'
        elif 'Backend does not implement' in stderr:
            return 'Backend does not implement'
        elif ('Can only provide HOST memory for this backend' in stderr
                or 'Can only set HOST memory for this backend' in stderr):
            return 'Device memory not supported'
        elif 'Test not implemented in single precision' in stderr:
            return 'Test not implemented in single precision'
        elif 'No SYCL devices of the requested type are available' in stderr:
            return 'SYCL device type not available'
        elif 'You may need to add --download-ctetgen or --download-tetgen' in stderr:
            return f'Tet mesh generator not installed for {test}, {spec.name}'
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        test_id: str = test[:4]
        fail_str: str = self._EXPECTED_FAILURES.get(test_id, '')
        # t409 only fails under memcheck backends
        if test_id == 't409' and contains_any(resource, ['memcheck']):
            fail_str = 'Context data changed while accessed in read-only mode'

        # when fail_str is '' the membership test is trivially True, matching the
        # convention that tests without a required failure report "expectedly failed"
        return fail_str, fail_str in stderr

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return test[:4] in ['t003']
197
198
if __name__ == '__main__':
    args = create_argparser().parse_args()

    # run tests
    # `test` is nargs='?', so it may be None; guard before the substring check
    if args.test and 'smartsim' in args.test:
        # SmartSim regression tests additionally need the SmartRedis library on disk
        has_smartsim: bool = bool(args.smartredis_dir) and Path(args.smartredis_dir).is_dir()
        test_cases = []

        if args.mode is RunMode.TAP:
            print('1..1')
        if has_smartsim:
            # make the fluids example directory importable for the SmartSim test harness
            sys.path.insert(0, str(Path(__file__).parents[1] / "examples" / "fluids"))
            from smartsim_regression_framework import SmartSimTest

            test_framework = SmartSimTest(Path(__file__).parent / 'test_dir')
            test_framework.setup()

            is_new_subtest = True
            # NOTE(review): subtest_ok is never updated from individual test case
            # results, so the TAP summary line below always reports "ok" — confirm intended
            subtest_ok = True
            for i, backend in enumerate(args.ceed_backends):
                test_cases.append(test_framework.test_junit(backend))
                # emit the TAP subtest header once, before the first result line
                if is_new_subtest and args.mode == RunMode.TAP:
                    is_new_subtest = False
                    print(f'# Subtest: {test_cases[0].category}')
                    print(f'    1..{len(args.ceed_backends)}')
                print(test_case_output_string(test_cases[i], TestSpec("SmartSim Tests"), args.mode, backend, '', i))
            if args.mode == RunMode.TAP:
                print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}')
            test_framework.teardown()
        elif args.mode is RunMode.TAP:
            print('ok 1 - # SKIP SmartSim not installed')
        result: TestSuite = TestSuite('SmartSim Tests', test_cases)
    else:
        result: TestSuite = run_tests(
            args.test,
            args.ceed_backends,
            args.mode,
            args.nproc,
            CeedSuiteSpec(args.has_torch),
            args.pool_size)

    # write output and check for failures (JUnit mode only; TAP reports inline above)
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.output, args.junit_batch)
        if has_failures(result):
            sys.exit(1)
245