#!/usr/bin/env python3
from junit_common import *


def create_argparser() -> argparse.ArgumentParser:
    """Creates argument parser to read command line arguments

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    # Bug fix: argparse's `type` must be a callable converter. The original
    # `type=Optional[Path]` raised `TypeError: Cannot instantiate typing.Union`
    # whenever `-o` was actually passed on the command line.
    parser.add_argument('-o', '--output', type=Path, default=None, help='Output file to write test')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('-s', '--search', type=str, default='.*',
                        help='Search string to filter tests, using `re` package format')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print details for all runs, not just failures')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser


# Necessary functions for running tests
class CeedSuiteSpec(SuiteSpec):
    """Suite specification describing how libCEED tests are located, run, and filtered."""

    def __init__(self):
        # No per-suite state; all behavior is derived from the test name at call time
        pass

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # `partition` (unlike `split`, which the original used) cannot raise when
        # `test` contains no '-'; dash-less names fall through to the generic cases
        prefix, _, rest = test.partition('-')
        examples = Path('examples')
        if prefix == 'petsc':
            return (examples / 'petsc' / rest).with_suffix('.c')
        if prefix == 'mfem':
            return (examples / 'mfem' / rest).with_suffix('.cpp')
        if prefix == 'nek':
            return (examples / 'nek' / 'bps' / rest).with_suffix('.usr')
        if prefix == 'dealii':
            return (examples / 'deal.II' / rest).with_suffix('.cc')
        if prefix == 'fluids':
            return (examples / 'fluids' / rest).with_suffix('.c')
        if prefix == 'solids':
            return (examples / 'solids' / rest).with_suffix('.c')
        if test.startswith('ex'):
            return (examples / 'ceed' / test).with_suffix('.c')
        if test.endswith('-f'):
            # Fortran test sources live alongside the C ones
            return (Path('tests') / test).with_suffix('.f90')
        return (Path('tests') / test).with_suffix('.c')

    # get path to executable
    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        if contains_any(resource, ['occa']) and startswith_any(
                test, ['t4', 't5', 'ex', 'mfem', 'nek', 'petsc', 'fluids', 'solids']):
            return 'OCCA mode not supported'
        if test.startswith('t318') and contains_any(resource, ['/gpu/cuda/ref']):
            return 'CUDA ref backend not supported'
        if test.startswith('t506') and contains_any(resource, ['/gpu/cuda/shared']):
            return 'CUDA shared backend not supported'
        for condition in spec.only:
            if (condition == 'cpu') and ('gpu' in resource):
                return 'CPU only test with GPU backend'
        # Explicit return makes the "do not skip" path obvious (was an implicit fall-through)
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        # Known benign failure signatures: (substring of stderr, skip reason).
        # The original used redundant `f''` prefixes on these constant strings.
        known_skips = (
            ('OCCA backend failed to use', 'OCCA mode not supported'),
            ('Backend does not implement', 'Backend does not implement'),
            ('Can only provide HOST memory for this backend', 'Device memory not supported'),
            ('Can only set HOST memory for this backend', 'Device memory not supported'),
            ('Test not implemented in single precision', 'Test not implemented in single precision'),
            ('No SYCL devices of the requested type are available', 'SYCL device type not available'),
        )
        for needle, reason in known_skips:
            if needle in stderr:
                return reason
        if 'You may need to add --download-ctetgen or --download-tetgen' in stderr:
            return f'Tet mesh generator not installed for {test}, {spec.name}'
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        # Map test id prefix -> error message that the test is REQUIRED to emit
        expected_failures = {
            't006': 'No suitable backend:',
            't007': 'No suitable backend:',
            't008': 'Available backend resources:',
            't110': 'Cannot grant CeedVector array access',
            't111': 'Cannot grant CeedVector array access',
            't112': 'Cannot grant CeedVector array access',
            't113': 'Cannot grant CeedVector array access',
            't114': 'Cannot grant CeedVector array access',
            't115': 'Cannot grant CeedVector read-only array access, the access lock is already in use',
            't116': 'Cannot destroy CeedVector, the writable access lock is in use',
            't117': 'Cannot restore CeedVector array access, access was not granted',
            't118': 'Cannot sync CeedVector, the access lock is already in use',
            't215': 'Cannot destroy CeedElemRestriction, a process has read access to the offset data',
            't303': 'Input/output vectors too short for basis and evaluation mode',
            't408': 'CeedQFunctionContextGetData(): Cannot grant CeedQFunctionContext data access, a process has read access',
        }
        test_id: str = test[:4]
        fail_str: str = expected_failures.get(test_id, '')
        # t409 is only a required failure under the memcheck backend
        if test_id == 't409' and contains_any(resource, ['memcheck']):
            fail_str = 'Context data changed while accessed in read-only mode'

        # NOTE: when no expected failure applies, fail_str == '' and '' in stderr is
        # True — preserved from the original; callers decide whether that matters
        return fail_str, fail_str in stderr

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return test[:4] in ['t003']


if __name__ == '__main__':
    args = create_argparser().parse_args()

    result: TestSuite = run_tests(
        args.test,
        args.ceed_backends,
        args.mode,
        args.nproc,
        CeedSuiteSpec(),
        args.pool_size,
        search=args.search,
        verbose=args.verbose)

    # write output and check for failures
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.output, args.junit_batch)
        if has_failures(result):
            sys.exit(1)