#!/usr/bin/env python3
"""Run a test executable against a list of CEED backends, reporting JUnit or TAP.

Test specifications are parsed from ``TESTARGS`` comment lines embedded in each
test's source file; results are emitted either incrementally in TAP format or
collected into a JUnit XML file under ``build/``.
"""

from dataclasses import dataclass, field
import difflib
from itertools import combinations
import os
from pathlib import Path
import re
import subprocess
import sys
import time
# vendored junit_xml package lives next to this script
sys.path.insert(0, str(Path(__file__).parent / "junit-xml"))
from junit_xml import TestCase, TestSuite


@dataclass
class TestSpec:
    """One TESTARGS specification parsed from a test source file."""
    name: str                                  # human-readable test name ('' if unnamed)
    only: list = field(default_factory=list)   # backend constraint substrings (currently informational)
    args: list = field(default_factory=list)   # argv for the test; '{ceed_resource}' is substituted


def parse_test_line(line: str) -> TestSpec:
    """Parse one TESTARGS line (comment prefix already stripped) into a TestSpec.

    Supports the bare form ``TESTARGS arg1 arg2 ...`` and the annotated form
    ``TESTARGS(name="...",only="a,b") arg1 arg2 ...``.
    """
    args = line.strip().split()
    if args[0] == 'TESTARGS':
        # bare form: no name or constraints, everything after the keyword is args
        return TestSpec(name='', args=args[1:])
    test_args = args[0][args[0].index('TESTARGS(')+9:args[0].rindex(')')]
    # transform 'name="myname",only="serial,int32"' into {'name': 'myname', 'only': 'serial,int32'}
    test_args = dict([''.join(t).split('=') for t in re.findall(r"""([^,=]+)(=)"([^"]*)\"""", test_args)])
    constraints = test_args['only'].split(',') if 'only' in test_args else []
    if len(args) > 1:
        return TestSpec(name=test_args['name'], only=constraints, args=args[1:])
    return TestSpec(name=test_args['name'], only=constraints)


def get_testargs(file: Path) -> list[TestSpec]:
    """Extract every TESTARGS spec from a test source file.

    Falls back to a single default spec whose only argument is the
    '{ceed_resource}' placeholder when the file declares no TESTARGS lines.

    Raises:
        RuntimeError: if the file extension has no known comment syntax.
    """
    if file.suffix in ['.c', '.cpp']:
        comment_str = '//'
    elif file.suffix in ['.py']:
        comment_str = '#'
    elif file.suffix in ['.usr']:
        comment_str = 'C_'
    elif file.suffix in ['.f90']:
        comment_str = '! '
    else:
        raise RuntimeError(f'Unrecognized extension for file: {file}')

    return [parse_test_line(line.strip(comment_str))
            for line in file.read_text().splitlines()
            if line.startswith(f'{comment_str}TESTARGS')] or [TestSpec('', args=['{ceed_resource}'])]


def get_source(test: str) -> Path:
    """Resolve a test name (e.g. 'petsc-bps', 't003-vector') to its source file path."""
    # example-suite tests carry a directory-selecting prefix before the first dash
    example_suites = {
        'petsc': (Path('examples') / 'petsc', '.c'),
        'mfem': (Path('examples') / 'mfem', '.cpp'),
        'nek': (Path('examples') / 'nek' / 'bps', '.usr'),
        'fluids': (Path('examples') / 'fluids', '.c'),
        'solids': (Path('examples') / 'solids', '.c'),
    }
    prefix, _, rest = test.partition('-')
    if prefix in example_suites:
        directory, suffix = example_suites[prefix]
        return (directory / rest).with_suffix(suffix)
    if test.startswith('ex'):
        return (Path('examples') / 'ceed' / test).with_suffix('.c')
    if test.endswith('-f'):
        # Fortran tests are named with a trailing '-f'
        return (Path('tests') / test).with_suffix('.f90')
    return (Path('tests') / test).with_suffix('.c')


def check_required_failure(test_case: TestCase, stderr: str, required: str) -> None:
    """Mark a test that MUST fail: pass when `required` appears in stderr, fail otherwise."""
    if required in stderr:
        test_case.status = 'fails with required: {}'.format(required)
    else:
        test_case.add_failure_info('required: {}'.format(required))


def contains_any(resource: str, substrings: list[str]) -> bool:
    """Return True when any of `substrings` occurs in `resource`."""
    return any(sub in resource for sub in substrings)


def skip_rule(test: str, resource: str) -> bool:
    """Return True for test/backend combinations that are known-bad and must be skipped."""
    # these whole test families are skipped on the OCCA backend
    occa_skipped_prefixes = ('t4', 't5', 'ex', 'mfem', 'nek', 'petsc-', 'fluids-', 'solids-')
    return any((
        test.startswith(occa_skipped_prefixes) and contains_any(resource, ['occa']),
        test.startswith('t318') and contains_any(resource, ['/gpu/cuda/ref']),
        test.startswith('t506') and contains_any(resource, ['/gpu/cuda/shared']),
    ))


# tests whose success criterion is dying with a specific error message,
# keyed by 4-character test-name prefix
REQUIRED_FAILURE_MESSAGES = {
    ('t006', 't007'): 'No suitable backend:',
    ('t008',): 'Available backend resources:',
    ('t110', 't111', 't112', 't113', 't114'): 'Cannot grant CeedVector array access',
    ('t115',): 'Cannot grant CeedVector read-only array access, the access lock is already in use',
    ('t116',): 'Cannot destroy CeedVector, the writable access lock is in use',
    ('t117',): 'Cannot restore CeedVector array access, access was not granted',
    ('t118',): 'Cannot sync CeedVector, the access lock is already in use',
    ('t215',): 'Cannot destroy CeedElemRestriction, a process has read access to the offset data',
    ('t303',): 'Length of input/output vectors incompatible with basis dimensions',
    ('t408',): 'CeedQFunctionContextGetData(): Cannot grant CeedQFunctionContext data access, a process has read access',
}


def run(test: str, backends: list[str], mode: str) -> TestSuite:
    """Run `test` once per (spec, backend) pair and return the collected TestSuite.

    Args:
        test: test name; resolved to an executable under build/ and, via
            get_source(), to the source file carrying its TESTARGS lines.
        backends: CEED resource specifiers substituted for '{ceed_resource}'.
        mode: 'tap' (case-insensitive) for incremental TAP output; any other
            value prints error/failure details in JUnit console style.
    """
    source = get_source(test)
    test_specs = get_testargs(source)

    if mode.lower() == "tap":
        # TAP plan line: total number of test points
        print('1..' + str(len(test_specs) * len(backends)))

    test_cases = []
    my_env = os.environ.copy()
    my_env["CEED_ERROR_HANDLER"] = 'exit'
    index = 1
    for spec in test_specs:
        for ceed_resource in backends:
            rargs = [str(Path('build') / test), *spec.args]
            rargs[rargs.index('{ceed_resource}')] = ceed_resource

            # run test
            if skip_rule(test, ceed_resource):
                test_case = TestCase(f'{test} {ceed_resource}',
                                     elapsed_sec=0,
                                     timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime()),
                                     stdout='',
                                     stderr='')
                test_case.add_skipped_info('Pre-run skip rule')
            else:
                start = time.time()
                proc = subprocess.run(rargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      env=my_env)
                proc.stdout = proc.stdout.decode('utf-8')
                proc.stderr = proc.stderr.decode('utf-8')

                # classname must be a string for junit_xml (was passed as a Path)
                test_case = TestCase(f'{test} {spec.name} {ceed_resource}',
                                     classname=str(source.parent),
                                     elapsed_sec=time.time() - start,
                                     timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(start)),
                                     stdout=proc.stdout,
                                     stderr=proc.stderr)
                ref_stdout = (Path('tests') / 'output' / test).with_suffix('.out')

            # check for allowed errors: certain stderr messages downgrade to skip
            if not test_case.is_skipped() and proc.stderr:
                if 'OCCA backend failed to use' in proc.stderr:
                    test_case.add_skipped_info('occa mode not supported {} {}'.format(test, ceed_resource))
                elif 'Backend does not implement' in proc.stderr:
                    test_case.add_skipped_info('not implemented {} {}'.format(test, ceed_resource))
                elif 'Can only provide HOST memory for this backend' in proc.stderr:
                    test_case.add_skipped_info('device memory not supported {} {}'.format(test, ceed_resource))
                elif 'Test not implemented in single precision' in proc.stderr:
                    test_case.add_skipped_info('not implemented {} {}'.format(test, ceed_resource))
                elif 'No SYCL devices of the requested type are available' in proc.stderr:
                    test_case.add_skipped_info('sycl device type not available {} {}'.format(test, ceed_resource))

            # check required failures (prefixes in the table are disjoint,
            # so at most one entry applies per test)
            if not test_case.is_skipped():
                for prefixes, message in REQUIRED_FAILURE_MESSAGES.items():
                    if test[:4] in prefixes:
                        check_required_failure(test_case, proc.stderr, message)
                # t409 only fails as required under the memcheck backend
                if test[:4] == 't409' and contains_any(ceed_resource, ['memcheck']):
                    check_required_failure(test_case, proc.stderr, 'Context data changed while accessed in read-only mode')

            # classify other results
            if not test_case.is_skipped() and not test_case.status:
                if proc.stderr:
                    test_case.add_failure_info('stderr', proc.stderr)
                elif proc.returncode != 0:
                    test_case.add_error_info(f'returncode = {proc.returncode}')
                elif ref_stdout.is_file():
                    diff = list(difflib.unified_diff(ref_stdout.read_text().splitlines(keepends=True),
                                                     proc.stdout.splitlines(keepends=True),
                                                     fromfile=str(ref_stdout),
                                                     tofile='New'))
                    if diff:
                        test_case.add_failure_info('stdout', output=''.join(diff))
                elif proc.stdout and test[:4] != 't003':
                    # unexpected stdout with no reference file is a failure
                    # (t003 legitimately prints output; was a substring check)
                    test_case.add_failure_info('stdout', output=proc.stdout)

            # store result
            test_case.args = ' '.join(rargs)
            test_cases.append(test_case)

            if mode.lower() == "tap":
                # print incremental output if TAP mode
                print('# Test: {}'.format(test_case.name.split(' ')[1]))
                print('# $ {}'.format(test_case.args))
                if test_case.is_error():
                    print('not ok {} - ERROR: {}'.format(index, (test_case.errors[0]['message'] or "NO MESSAGE").strip()))
                    print('Output: \n{}'.format((test_case.errors[0]['output'] or "NO OUTPUT").strip()))
                    if test_case.is_failure():
                        # BUG FIX: format string previously received (index, message)
                        # for a single placeholder, printing the index and
                        # silently dropping the failure message
                        print(' FAIL: {}'.format((test_case.failures[0]['message'] or "NO MESSAGE").strip()))
                        print('Output: \n{}'.format((test_case.failures[0]['output'] or "NO OUTPUT").strip()))
                elif test_case.is_failure():
                    print('not ok {} - FAIL: {}'.format(index, (test_case.failures[0]['message'] or "NO MESSAGE").strip()))
                    print('Output: \n{}'.format((test_case.failures[0]['output'] or "NO OUTPUT").strip()))
                elif test_case.is_skipped():
                    print('ok {} - SKIP: {}'.format(index, (test_case.skipped[0]['message'] or "NO MESSAGE").strip()))
                else:
                    print('ok {} - PASS'.format(index))
                sys.stdout.flush()
            else:
                # print error or failure information if JUNIT mode
                if test_case.is_error() or test_case.is_failure():
                    print('Test: {} {}'.format(test_case.name.split(' ')[0], test_case.name.split(' ')[1]))
                    print(' $ {}'.format(test_case.args))
                    if test_case.is_error():
                        print('ERROR: {}'.format((test_case.errors[0]['message'] or "NO MESSAGE").strip()))
                        print('Output: \n{}'.format((test_case.errors[0]['output'] or "NO OUTPUT").strip()))
                    if test_case.is_failure():
                        print('FAIL: {}'.format((test_case.failures[0]['message'] or "NO MESSAGE").strip()))
                        print('Output: \n{}'.format((test_case.failures[0]['output'] or "NO OUTPUT").strip()))
                    sys.stdout.flush()
            index += 1

    return TestSuite(test, test_cases)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument('--mode', help='Output mode, JUnit or TAP', default="JUnit")
    parser.add_argument('--output', help='Output file to write test', default=None)
    parser.add_argument('--gather', help='Gather all *.junit files into XML', action='store_true')
    parser.add_argument('test', help='Test executable', nargs='?')
    args = parser.parse_args()

    if args.gather:
        # NOTE(review): gather() is not defined anywhere in this file, so
        # --gather currently raises NameError — presumably it was meant to be
        # imported or implemented here; confirm against project history.
        gather()
    else:
        backends = os.environ['BACKENDS'].split()

        # run tests
        result = run(args.test, backends, args.mode)

        # build output
        if args.mode.lower() == "junit":
            # optional suffix keeps parallel batch outputs distinct
            # (explicit membership test instead of the previous bare `except:`)
            junit_batch = '-' + os.environ['JUNIT_BATCH'] if 'JUNIT_BATCH' in os.environ else ''
            output = Path('build') / (args.test + junit_batch + '.junit') if args.output is None else Path(args.output)

            with output.open('w') as fd:
                TestSuite.to_file(fd, [result])
        elif args.mode.lower() != "tap":
            raise Exception("output mode not recognized")

        # check return code: count once (the old loop recomputed both totals
        # per test case and raised NameError on an empty suite)
        failures = sum(1 for c in result.test_cases if c.is_failure())
        errors = sum(1 for c in result.test_cases if c.is_error())
        if failures + errors > 0 and args.mode.lower() != "tap":
            sys.exit(1)