xref: /libCEED/tests/junit.py (revision 58f118fde9e934489c18d94d567a96d887adab93)
1#!/usr/bin/env python3
2from junit_common import *
3
4
def create_argparser() -> argparse.ArgumentParser:
    """Creates argument parser to read command line arguments

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    # `type` must be a callable converter; the previous `type=Optional[Path]`
    # used a typing construct that cannot be instantiated, so argparse raised
    # TypeError whenever `-o` was actually supplied.  `type=Path` with
    # `default=None` keeps the Optional[Path] semantics.
    parser.add_argument('-o', '--output', type=Path, default=None, help='Output file to write test')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser
33
34
35# Necessary functions for running tests
class CeedSuiteSpec(SuiteSpec):
    """libCEED-specific test suite configuration: locates sources, executables,
    and expected outputs, and encodes backend-specific skip/failure rules."""

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # partition() never raises, unlike split('-', 1), so test names
        # without a hyphen (e.g. bare 'ex*' names) are handled safely
        prefix, _, rest = test.partition('-')
        # example-suite prefixes map to (directory parts, source suffix)
        example_suites = {
            'petsc': (('examples', 'petsc'), '.c'),
            'mfem': (('examples', 'mfem'), '.cpp'),
            'nek': (('examples', 'nek', 'bps'), '.usr'),
            'dealii': (('examples', 'deal.II'), '.cc'),
            'fluids': (('examples', 'fluids'), '.c'),
            'solids': (('examples', 'solids'), '.c'),
        }
        if prefix in example_suites:
            parts, suffix = example_suites[prefix]
            return Path(*parts).joinpath(rest).with_suffix(suffix)
        elif test.startswith('ex'):
            return (Path('examples') / 'ceed' / test).with_suffix('.c')
        elif test.endswith('-f'):
            # Fortran interface tests
            return (Path('tests') / test).with_suffix('.f90')
        else:
            return (Path('tests') / test).with_suffix('.c')

    # get path to executable
    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        if contains_any(resource, ['occa']) and startswith_any(
                test, ['t4', 't5', 'ex', 'mfem', 'nek', 'petsc', 'fluids', 'solids']):
            return 'OCCA mode not supported'
        if test.startswith('t318') and contains_any(resource, ['/gpu/cuda/ref']):
            return 'CUDA ref backend not supported'
        if test.startswith('t506') and contains_any(resource, ['/gpu/cuda/shared']):
            return 'CUDA shared backend not supported'
        for condition in spec.only:
            if (condition == 'cpu') and ('gpu' in resource):
                return 'CPU only test with GPU backend'
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        # Known benign stderr markers and their skip reasons; first match wins
        known_errors = (
            ('OCCA backend failed to use', 'OCCA mode not supported'),
            ('Backend does not implement', 'Backend does not implement'),
            ('Can only provide HOST memory for this backend', 'Device memory not supported'),
            ('Can only set HOST memory for this backend', 'Device memory not supported'),
            ('Test not implemented in single precision', 'Test not implemented in single precision'),
            ('No SYCL devices of the requested type are available', 'SYCL device type not available'),
        )
        for marker, reason in known_errors:
            if marker in stderr:
                return reason
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        # Expected-failure messages keyed by four-character test id
        required_failures = {
            't006': 'No suitable backend:',
            't007': 'No suitable backend:',
            't008': 'Available backend resources:',
            't110': 'Cannot grant CeedVector array access',
            't111': 'Cannot grant CeedVector array access',
            't112': 'Cannot grant CeedVector array access',
            't113': 'Cannot grant CeedVector array access',
            't114': 'Cannot grant CeedVector array access',
            't115': 'Cannot grant CeedVector read-only array access, the access lock is already in use',
            't116': 'Cannot destroy CeedVector, the writable access lock is in use',
            't117': 'Cannot restore CeedVector array access, access was not granted',
            't118': 'Cannot sync CeedVector, the access lock is already in use',
            't215': 'Cannot destroy CeedElemRestriction, a process has read access to the offset data',
            't303': 'Length of input/output vectors incompatible with basis dimensions',
            't408': 'CeedQFunctionContextGetData(): Cannot grant CeedQFunctionContext data access, a process has read access',
        }
        test_id: str = test[:4]
        fail_str: str = required_failures.get(test_id, '')
        # t409 is only expected to fail when running on memcheck backends
        if test_id == 't409' and contains_any(resource, ['memcheck']):
            fail_str = 'Context data changed while accessed in read-only mode'

        return fail_str, fail_str in stderr

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return test[:4] in ['t003']
188
189
if __name__ == '__main__':
    args = create_argparser().parse_args()

    # run tests
    # `test` is an optional positional (nargs='?'), so args.test may be None;
    # guard it to avoid `TypeError: argument of type 'NoneType' is not iterable`
    if args.test and 'smartsim' in args.test:
        # SmartSim tests live in the fluids example tree and run through their
        # own regression framework instead of run_tests()
        sys.path.insert(0, str(Path(__file__).parents[1] / "examples" / "fluids"))
        from smartsim_regression_framework import SmartSimTest

        test_framework = SmartSimTest(Path(__file__).parent / 'test_dir')
        test_framework.setup()
        test_cases = []
        # TAP plan for the single top-level subtest
        # NOTE(review): printed regardless of args.mode — confirm whether this
        # should be limited to RunMode.TAP before moving it under that check
        print('1..1')
        is_new_subtest = True
        subtest_ok = True
        for i, backend in enumerate(args.ceed_backends):
            test_cases.append(test_framework.test_junit(backend))
            if is_new_subtest and args.mode == RunMode.TAP:
                # emit the subtest header and plan once, before the first result
                is_new_subtest = False
                print(f'# Subtest: {test_cases[0].category}')
                print(f'    1..{len(args.ceed_backends)}')
            print(test_case_output_string(test_cases[i], TestSpec("SmartSim Tests"), args.mode, backend, '', i))
        if args.mode == RunMode.TAP:
            # NOTE(review): subtest_ok is never set to False, so this always
            # reports 'ok' — looks like failing backends should flip it; verify
            print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}')
        test_framework.teardown()
        result: TestSuite = TestSuite('SmartSim Tests', test_cases)
    else:
        result: TestSuite = run_tests(
            args.test,
            args.ceed_backends,
            args.mode,
            args.nproc,
            CeedSuiteSpec(),
            args.pool_size)

    # write output and check for failures
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.output, args.junit_batch)
        if has_failures(result):
            sys.exit(1)
229