#!/usr/bin/env python3
"""Test runner producing JUnit XML or TAP output, driven by ``junit_common``."""
from junit_common import *


def _str_to_bool(value: str) -> bool:
    """Interpret a command-line token as a boolean.

    ``argparse`` with ``type=bool`` treats ANY non-empty string — including
    the literal ``'False'`` — as True, so an explicit converter is required
    for ``--has-torch`` to behave as users expect.

    Args:
        value (str): Command-line token to interpret

    Returns:
        bool: True for '1', 'true', 'yes', or 'on' (case-insensitive), False otherwise
    """
    return value.strip().lower() in ('1', 'true', 'yes', 'on')


def create_argparser() -> argparse.ArgumentParser:
    """Creates argument parser to read command line arguments

    Returns:
        argparse.ArgumentParser: Created `ArgumentParser`
    """
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument(
        '-c',
        '--ceed-backends',
        type=str,
        nargs='*',
        default=['/cpu/self'],
        help='libCEED backend to use with convergence tests')
    parser.add_argument(
        '-m',
        '--mode',
        type=RunMode,
        action=CaseInsensitiveEnumAction,
        help='Output mode, junit or tap',
        default=RunMode.JUNIT)
    parser.add_argument('-n', '--nproc', type=int, default=1, help='number of MPI processes')
    parser.add_argument('-b', '--junit-batch', type=str, default='', help='Name of JUnit batch for output file')
    parser.add_argument('-np', '--pool-size', type=int, default=1, help='Number of test cases to run in parallel')
    parser.add_argument('--smartredis-dir', type=str, default='', help='path to SmartSim library, if present')
    # NOTE: `type=bool` would parse '--has-torch False' as True; use a real converter
    parser.add_argument('--has-torch', type=_str_to_bool, default=False, help='Whether to build with torch')
    parser.add_argument('-s', '--search', type=str, default='.*',
                        help='Search string to filter tests, using `re` package format')
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print details for all runs, not just failures')
    parser.add_argument('test', help='Test executable', nargs='?')

    return parser


def diff_csv_comment_function(test_line: str, true_line: str) -> Optional[str]:
    """Diff the comment portion (text before the first ':') of two CSV comment lines.

    Args:
        test_line (str): Comment line produced by the test run
        true_line (str): Comment line from the expected output file

    Returns:
        Optional[str]: Unified diff text if the comment portions differ, otherwise `None`
    """
    # Only the text before the first ':' is compared; anything after it
    # (e.g. values that may legitimately vary) is ignored
    test_comment = test_line.split(':')[0]
    true_comment = true_line.split(':')[0]
    diff_output = ''.join(difflib.unified_diff([test_comment + '\n'],
                                               [true_comment + '\n'],
                                               tofile='test created file',
                                               fromfile='expected output'))
    return diff_output if diff_output else None


class HoneeSuiteSpec(SuiteSpec):
    """Suite specification describing paths, skip rules, and tolerances for this test suite."""

    def __init__(self, has_torch: bool):
        # Whether the build includes PyTorch support (gates 'torch'-only tests)
        self.has_torch: bool = has_torch
        # Relative tolerance for numeric CSV comparison
        self.csv_rtol = 1e-9
        # Comparator for CSV comment lines (lines starting with csv_comment_str)
        self.csv_comment_diff_fn = diff_csv_comment_function
        self.csv_comment_str = '#'

    def get_source_path(self, test: str) -> Path:
        """Compute path to test source file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to source file
        """
        # All navierstokes variants share a single example source file
        if test.startswith('navierstokes'):
            return (Path('examples') / 'navierstokes').with_suffix('.c')
        return (Path('tests') / test).with_suffix('.c')

    def get_run_path(self, test: str) -> Path:
        """Compute path to built test executable file

        Args:
            test (str): Name of test

        Returns:
            Path: Path to test executable
        """
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        """Compute path to expected output file

        Args:
            test (str): Name of test
            output_file (str): File name of output file

        Returns:
            Path: Path to expected output file
        """
        return Path('tests') / 'output' / output_file

    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
        """Check if a test case should be skipped prior to running, returning the reason for skipping

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            nproc (int): Number of MPI processes to use when running test case

        Returns:
            Optional[str]: Skip reason, or `None` if test case should not be skipped
        """
        for condition in spec.only:
            if condition == 'cpu' and 'gpu' in resource:
                return 'CPU only test with GPU backend'
            if condition == 'torch' and not self.has_torch:
                return 'PyTorch only test without USE_TORCH=1'
        return None

    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
        """Check if a test case should be allowed to fail, based on its stderr output

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            Optional[str]: Skip reason, or `None` if unexpected error
        """
        if 'No SYCL devices of the requested type are available' in stderr:
            return 'SYCL device type not available'
        if 'Loading meshes requires CGNS support. Reconfigure using --with-cgns-dir' in stderr:
            return f'CGNS not installed in PETSc for {test}, {spec.name}'
        if 'You may need to add --download-ctetgen or --download-tetgen' in stderr:
            return f'Tet mesh generator not installed for {test}, {spec.name}'
        return None

    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
        """Check whether a test case is expected to fail and if it failed expectedly

        No tests in this suite are required to fail, so the expected-failure
        string is empty and the check always passes.

        Args:
            test (str): Name of test
            spec (TestSpec): Test case specification
            resource (str): libCEED backend
            stderr (str): Standard error output from test case execution

        Returns:
            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
        """
        return '', True

    def check_allowed_stdout(self, test: str) -> bool:
        """Check whether a test is allowed to print console output

        Args:
            test (str): Name of test

        Returns:
            bool: True if the test is allowed to print console output
        """
        return False


if __name__ == '__main__':
    parser = create_argparser()
    args = parser.parse_args()

    # `test` is an optional positional (nargs='?'); without this guard the
    # membership test below would raise TypeError on `None`
    if args.test is None:
        parser.error('a test executable must be specified')

    # run tests
    if 'smartsim' in args.test:
        # `and` alone would yield '' when --smartredis-dir is empty; coerce to bool
        has_smartsim: bool = bool(args.smartredis_dir and Path(args.smartredis_dir).is_dir())
        test_cases = []
        test_directory = Path(__file__).parent / 'smartsim_test_dir'

        if args.mode is RunMode.TAP:
            # TAP plan: exactly one top-level test point (the SmartSim subtest)
            print('1..1')
        if has_smartsim:
            # Imported lazily so the runner works without SmartSim installed
            from smartsim_regression_framework import SmartSimTest

            test_framework = SmartSimTest(test_directory)
            test_framework.setup()

            is_new_subtest = True
            # NOTE(review): subtest_ok is never updated after initialization, so the
            # TAP summary line always reports "ok" — confirm whether it should be
            # derived from the collected test_cases
            subtest_ok = True
            for i, backend in enumerate(args.ceed_backends):
                test_cases.append(test_framework.test_junit(backend))
                if is_new_subtest and args.mode == RunMode.TAP:
                    # Emit the TAP subtest header and plan once, before the first result
                    is_new_subtest = False
                    print(f'# Subtest: {test_cases[0].category}')
                    print(f'    1..{len(args.ceed_backends)}')
                print(
                    test_case_output_string(
                        test_cases[i],
                        TestSpec("SmartSim Tests"),
                        args.mode,
                        backend,
                        '',
                        i,
                        verbose=args.verbose))
            if args.mode == RunMode.TAP:
                print(f'{"" if subtest_ok else "not "}ok 1 - {test_cases[0].category}')
            test_framework.teardown()
        elif args.mode is RunMode.TAP:
            print('ok 1 - # SKIP SmartSim not installed')
        result: TestSuite = TestSuite('SmartSim Tests', test_cases)
        if has_failures(result):
            # Preserve the working directory of failed runs for post-mortem inspection
            shutil.copytree(
                test_directory,
                HoneeSuiteSpec(
                    args.has_torch).test_failure_artifacts_path,
                dirs_exist_ok=True)
    else:
        result: TestSuite = run_tests(
            args.test,
            args.ceed_backends,
            args.mode,
            args.nproc,
            HoneeSuiteSpec(args.has_torch),
            args.pool_size,
            search=args.search,
            verbose=args.verbose)

    # write output and check for failures
    if args.mode is RunMode.JUNIT:
        write_junit_xml(result, args.junit_batch)
    if has_failures(result):
        sys.exit(1)