1#!/usr/bin/env python3 2from junit_common import * 3 4 5def create_argparser() -> argparse.ArgumentParser: 6 """Creates argument parser to read command line arguments 7 8 Returns: 9 argparse.ArgumentParser: Created `ArgumentParser` 10 """ 11 parser = argparse.ArgumentParser('Test runner with JUnit and TAP output') 12 parser.add_argument( 13 '-c', 14 '--ceed-backends', 15 type=str, 16 nargs='*', 17 default=['/cpu/self'], 18 help='libCEED backend to use with convergence tests') 19 parser.add_argument( 20 '-m', 21 '--mode', 22 type=RunMode, 23 action=CaseInsensitiveEnumAction, 24 help='Output mode, junit or tap', 25 default=RunMode.JUNIT) 26 parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes') 27 parser.add_argument('-o', '--output', type=Optional[Path], default=None, help='Output file to write test') 28 parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file') 29 parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel') 30 parser.add_argument('-s', '--smartredis_dir', type=str, default='', help='path to SmartSim library, if present') 31 parser.add_argument('--has_torch', type=bool, default=False, help='Whether to build with torch') 32 parser.add_argument('test', help='Test executable', nargs='?') 33 34 return parser 35 36 37def diff_csv_comment_function(test_line: str, true_line: str) -> Optional[str]: 38 test_line_split = test_line.split(':')[0] 39 true_line_split = true_line.split(':')[0] 40 diff_output = ''.join(difflib.unified_diff([test_line_split + '\n'], 41 [true_line_split + '\n'], 42 tofile='test created file', 43 fromfile='expected output')) 44 return diff_output if diff_output else None 45 46 47class HoneeSuiteSpec(SuiteSpec): 48 def __init__(self, has_torch: bool): 49 self.has_torch: bool = has_torch 50 self.diff_csv_kwargs: dict = {'comment_func': diff_csv_comment_function, 'rel_tol': 1e-9} 51 52 def get_source_path(self, test: str) -> Path: 53 """Compute path to test source file 54 55 Args: 56 test (str): Name of test 57 58 Returns: 59 Path: Path to source file 60 """ 61 if test.startswith('navierstokes'): 62 return (Path('examples') / 'navierstokes').with_suffix('.c') 63 else: 64 return (Path('tests') / test).with_suffix('.c') 65 66 # get path to executable 67 def get_run_path(self, test: str) -> Path: 68 """Compute path to built test executable file 69 70 Args: 71 test (str): Name of test 72 73 Returns: 74 Path: Path to test executable 75 """ 76 return Path('build') / test 77 78 def get_output_path(self, test: str, output_file: str) -> Path: 79 """Compute path to expected output file 80 81 Args: 82 test (str): Name of test 83 output_file (str): File name of output file 84 85 Returns: 86 Path: Path to expected output file 87 """ 88 return Path('tests') / 'output' / output_file 89 90 def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]: 91 """Check if a test case should be skipped prior to running, returning the reason for skipping 92 93 Args: 94 test (str): Name of test 95 spec (TestSpec): Test case specification 96 resource (str): libCEED backend 97 nproc (int): Number of MPI processes to use when running test case 98 99 Returns: 100 Optional[str]: Skip reason, or `None` if test case should not be skipped 101 """ 102 for condition in spec.only: 103 if (condition == 'cpu') and ('gpu' in resource): 104 return 'CPU only test with GPU backend' 105 if condition == 'torch' and not self.has_torch: 106 return 'PyTorch only test without USE_TORCH=1' 107 108 def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]: 109 """Check if a test case should be allowed to fail, based on its stderr output 110 111 Args: 112 test (str): Name of test 113 spec (TestSpec): Test case specification 114 resource (str): libCEED backend 115 stderr (str): Standard error output from test case execution 116 117 Returns: 118 Optional[str]: Skip reason, or `None` if unexpeced error 119 """ 120 if 'No SYCL devices of the requested type are available' in stderr: 121 return f'SYCL device type not available' 122 elif 'Loading meshes requires CGNS support. Reconfigure using --with-cgns-dir' in stderr: 123 return f'CGNS not installed in PETSc for {test}, {spec.name}' 124 elif 'You may need to add --download-ctetgen or --download-tetgen' in stderr: 125 return f'Tet mesh generator not installed for {test}, {spec.name}' 126 return None 127 128 def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]: 129 """Check whether a test case is expected to fail and if it failed expectedly 130 131 Args: 132 test (str): Name of test 133 spec (TestSpec): Test case specification 134 resource (str): libCEED backend 135 stderr (str): Standard error output from test case execution 136 137 Returns: 138 tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr` 139 """ 140 return '', True 141 142 def check_allowed_stdout(self, test: str) -> bool: 143 """Check whether a test is allowed to print console output 144 145 Args: 146 test (str): Name of test 147 148 Returns: 149 bool: True if the test is allowed to print console output 150 """ 151 return False 152 153 154if __name__ == '__main__': 155 args = create_argparser().parse_args() 156 157 # run tests 158 if 'smartsim' in args.test: 159 has_smartsim: bool = args.smartredis_dir and Path(args.smartredis_dir).is_dir() 160 test_cases = [] 161 162 if args.mode is RunMode.TAP: 163 print(f'1..1') 164 if has_smartsim: 165 from smartsim_regression_framework import SmartSimTest 166 167 test_framework = SmartSimTest(Path(__file__).parent / 'smartsim_test_dir') 168 test_framework.setup() 169 170 is_new_subtest = True 171 subtest_ok = True 172 for i, backend in enumerate(args.ceed_backends): 173 test_cases.append(test_framework.test_junit(backend)) 174 if is_new_subtest and args.mode == RunMode.TAP: 175 is_new_subtest = False 176 print(f'# Subtest: {test_cases[0].category}') 177 print(f' 1..{len(args.ceed_backends)}') 178 print(test_case_output_string(test_cases[i], TestSpec("SmartSim Tests"), args.mode, backend, '', i)) 179 if args.mode == RunMode.TAP: 180 print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}') 181 test_framework.teardown() 182 elif args.mode is RunMode.TAP: 183 print(f'ok 1 - # SKIP SmartSim not installed') 184 result: TestSuite = TestSuite('SmartSim Tests', test_cases) 185 else: 186 result: TestSuite = run_tests( 187 args.test, 188 args.ceed_backends, 189 args.mode, 190 args.nproc, 191 HoneeSuiteSpec(args.has_torch), 192 args.pool_size) 193 194 # write output and check for failures 195 if args.mode is RunMode.JUNIT: 196 write_junit_xml(result, args.output, args.junit_batch) 197 if has_failures(result): 198 sys.exit(1) 199