xref: /libCEED/tests/junit_common.py (revision 15c97c4ae8c49270e37c73e0b1452810ec6ca882)
1from abc import ABC, abstractmethod
2from collections.abc import Iterable
3import argparse
4import csv
5from dataclasses import dataclass, field, fields
6import difflib
7from enum import Enum
8from math import isclose
9import os
10from pathlib import Path
11import re
12import subprocess
13import multiprocessing as mp
14import sys
15import time
16from typing import Optional, Tuple, List, Dict, Callable, get_origin
17import shutil
18
19sys.path.insert(0, str(Path(__file__).parent / "junit-xml"))
20from junit_xml import TestCase, TestSuite, to_xml_report_string  # nopep8
21
22
23class ParseError(RuntimeError):
24    """A custom exception for failed parsing."""
25
26    def __init__(self, message):
27        super().__init__(message)
28
29
30class CaseInsensitiveEnumAction(argparse.Action):
31    """Action to convert input values to lower case prior to converting to an Enum type"""
32
33    def __init__(self, option_strings, dest, type, default, **kwargs):
34        if not issubclass(type, Enum):
35            raise ValueError(f"{type} must be an Enum")
36        # store provided enum type
37        self.enum_type = type
38        if isinstance(default, self.enum_type):
39            pass
40        elif isinstance(default, str):
41            default = self.enum_type(default.lower())
42        elif isinstance(default, Iterable):
43            default = [self.enum_type(v.lower()) for v in default]
44        else:
45            raise argparse.ArgumentTypeError("Invalid value type, must be str or iterable")
46        # prevent automatic type conversion
47        super().__init__(option_strings, dest, default=default, **kwargs)
48
49    def __call__(self, parser, namespace, values, option_string=None):
50        if isinstance(values, self.enum_type):
51            pass
52        elif isinstance(values, str):
53            values = self.enum_type(values.lower())
54        elif isinstance(values, Iterable):
55            values = [self.enum_type(v.lower()) for v in values]
56        else:
57            raise argparse.ArgumentTypeError("Invalid value type, must be str or iterable")
58        setattr(namespace, self.dest, values)
59
60
61@dataclass
62class TestSpec:
63    """Dataclass storing information about a single test case"""
64    name: str = field(default_factory=str)
65    csv_rtol: float = -1
66    csv_ztol: float = -1
67    cgns_tol: float = -1
68    only: List = field(default_factory=list)
69    args: List = field(default_factory=list)
70    key_values: Dict = field(default_factory=dict)
71
72
73class RunMode(Enum):
74    """Enumeration of run modes, either `RunMode.TAP` or `RunMode.JUNIT`"""
75    TAP = 'tap'
76    JUNIT = 'junit'
77
78    def __str__(self):
79        return self.value
80
81    def __repr__(self):
82        return self.value
83
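# Hedged usage sketch (added for illustration, not part of the original module): how
# CaseInsensitiveEnumAction and RunMode are typically combined so that '--mode TAP' and
# '--mode tap' both parse to RunMode.TAP. The flag name and parser here are assumptions
# made for the example, not the project's actual command-line definition.
def _example_mode_parser() -> argparse.ArgumentParser:
    """Illustrative only: build a parser with a case-insensitive --mode flag"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=RunMode, action=CaseInsensitiveEnumAction,
                        default='tap', help='output mode, accepted in any letter case')
    # parser.parse_args(['--mode', 'TAP']).mode is RunMode.TAP
    return parser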
84
85class SuiteSpec(ABC):
86    """Abstract Base Class defining the required interface for running a test suite"""
87    @abstractmethod
88    def get_source_path(self, test: str) -> Path:
89        """Compute path to test source file
90
91        Args:
92            test (str): Name of test
93
94        Returns:
95            Path: Path to source file
96        """
97        raise NotImplementedError
98
99    @abstractmethod
100    def get_run_path(self, test: str) -> Path:
101        """Compute path to built test executable file
102
103        Args:
104            test (str): Name of test
105
106        Returns:
107            Path: Path to test executable
108        """
109        raise NotImplementedError
110
111    @abstractmethod
112    def get_output_path(self, test: str, output_file: str) -> Path:
113        """Compute path to expected output file
114
115        Args:
116            test (str): Name of test
117            output_file (str): File name of output file
118
119        Returns:
120            Path: Path to expected output file
121        """
122        raise NotImplementedError
123
124    @property
125    def test_failure_artifacts_path(self) -> Path:
126        """Path to test failure artifacts"""
127        return Path('build') / 'test_failure_artifacts'
128
129    @property
130    def cgns_tol(self):
131        """Absolute tolerance for CGNS diff"""
132        return getattr(self, '_cgns_tol', 1.0e-12)
133
134    @cgns_tol.setter
135    def cgns_tol(self, val):
136        self._cgns_tol = val
137
138    @property
139    def csv_ztol(self):
140        """Tolerance below which CSV values are considered to be zero"""
141        return getattr(self, '_csv_ztol', 3e-10)
142
143    @csv_ztol.setter
144    def csv_ztol(self, val):
145        self._csv_ztol = val
146
147    @property
148    def csv_rtol(self):
149        """Relative tolerance for comparing non-zero CSV values"""
150        return getattr(self, '_csv_rtol', 1e-6)
151
152    @csv_rtol.setter
153    def csv_rtol(self, val):
154        self._csv_rtol = val
155
156    def post_test_hook(self, test: str, spec: TestSpec, backend: str) -> None:
157        """Callback run after each test case
158
159        Args:
160            test (str): Name of test
161            spec (TestSpec): Test case specification
            backend (str): libCEED backend used for the test case
162        """
163        pass
164
165    def check_pre_skip(self, test: str, spec: TestSpec, resource: str, nproc: int) -> Optional[str]:
166        """Check if a test case should be skipped prior to running, returning the reason for skipping
167
168        Args:
169            test (str): Name of test
170            spec (TestSpec): Test case specification
171            resource (str): libCEED backend
172            nproc (int): Number of MPI processes to use when running test case
173
174        Returns:
175            Optional[str]: Skip reason, or `None` if test case should not be skipped
176        """
177        return None
178
179    def check_post_skip(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Optional[str]:
180        """Check if a test case should be allowed to fail, based on its stderr output
181
182        Args:
183            test (str): Name of test
184            spec (TestSpec): Test case specification
185            resource (str): libCEED backend
186            stderr (str): Standard error output from test case execution
187
188        Returns:
189            Optional[str]: Skip reason, or `None` if unexpected error
190        """
191        return None
192
193    def check_required_failure(self, test: str, spec: TestSpec, resource: str, stderr: str) -> Tuple[str, bool]:
194        """Check whether a test case is expected to fail and whether the expected failure occurred
195
196        Args:
197            test (str): Name of test
198            spec (TestSpec): Test case specification
199            resource (str): libCEED backend
200            stderr (str): Standard error output from test case execution
201
202        Returns:
203            tuple[str, bool]: Tuple of the expected failure string and whether it was present in `stderr`
204        """
205        return '', True
206
207    def check_allowed_stdout(self, test: str) -> bool:
208        """Check whether a test is allowed to print console output
209
210        Args:
211            test (str): Name of test
212
213        Returns:
214            bool: True if the test is allowed to print console output
215        """
216        return False
217
218
219def has_cgnsdiff() -> bool:
220    """Check whether `cgnsdiff` is an executable program in the current environment
221
222    Returns:
223        bool: True if `cgnsdiff` is found
224    """
225    my_env: dict = os.environ.copy()
226    proc = subprocess.run('cgnsdiff',
227                          shell=True,
228                          stdout=subprocess.PIPE,
229                          stderr=subprocess.PIPE,
230                          env=my_env)
231    return 'not found' not in proc.stderr.decode('utf-8')
232
233
234def contains_any(base: str, substrings: List[str]) -> bool:
235    """Helper function, checks if any of the substrings are included in the base string
236
237    Args:
238        base (str): Base string to search in
239        substrings (List[str]): List of potential substrings
240
241    Returns:
242        bool: True if any substrings are included in base string
243    """
244    return any((sub in base for sub in substrings))
245
246
247def startswith_any(base: str, prefixes: List[str]) -> bool:
248    """Helper function, checks if the base string is prefixed by any of `prefixes`
249
250    Args:
251        base (str): Base string to search
252        prefixes (List[str]): List of potential prefixes
253
254    Returns:
255        bool: True if base string is prefixed by any of the prefixes
256    """
257    return any((base.startswith(prefix) for prefix in prefixes))
258
259
260def find_matching(line: str, open: str = '(', close: str = ')') -> Tuple[int, int]:
261    """Find the start and end positions of the first outer pair of matching delimiters
262
263    Args:
264        line (str): Line to search
265        open (str, optional): Opening delimiter, must be different from `close`. Defaults to '('.
266        close (str, optional): Closing delimiter, must be different from `open`. Defaults to ')'.
267
268    Raises:
269        RuntimeError: If `open` or `close` is not a single character
270        RuntimeError: If `open` and `close` are the same character
271
272    Returns:
273        Tuple[int, int]: If matching delimiters are found, the start and end indices into `line`. Otherwise, end < start.
274    """
275    if len(open) != 1 or len(close) != 1:
276        raise RuntimeError("`open` and `close` must be single characters")
277    if open == close:
278        raise RuntimeError("`open` and `close` must be different characters")
279    start: int = line.find(open)
280    if start < 0:
281        return -1, -1
282    count: int = 1
283    for i in range(start + 1, len(line)):
284        if line[i] == open:
285            count += 1
286        if line[i] == close:
287            count -= 1
288            if count == 0:
289                return start, i
290    return start, -1
291
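# Hedged usage sketch (added for illustration, not part of the original module): shows the
# index pair find_matching() returns for a TESTARGS-style line with nested parentheses.
# The sample line is an assumption made for the example.
def _example_find_matching() -> None:
    """Illustrative only: the outermost '(' ... ')' pair is located, respecting nesting"""
    line = 'TESTARGS(name="qfunction (gpu)") -ceed {ceed_resource}'
    start, end = find_matching(line)
    # the slice spans the whole key/value block, including the nested '(gpu)'
    assert line[start:end + 1] == '(name="qfunction (gpu)")'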
292
293def parse_test_line(line: str, fallback_name: str = '') -> TestSpec:
294    """Parse a single line of TESTARGS and CLI arguments into a `TestSpec` object
295
296    Args:
297        line (str): String containing TESTARGS specification and CLI arguments
        fallback_name (str, optional): Test name used when the TESTARGS specification does not provide one. Defaults to ''.
298
299    Returns:
300        TestSpec: Parsed specification of test case
301    """
302    test_fields = fields(TestSpec)
303    field_names = [f.name for f in test_fields]
304    known: Dict = dict()
305    other: Dict = dict()
306    if line[0] == "(":
307        # have key/value pairs to parse
308        start, end = find_matching(line)
309        if end < start:
310            raise ParseError(f"Mismatched parentheses in TESTCASE: {line}")
311
312        keyvalues_str = line[start:end + 1]
313        keyvalues_pattern = re.compile(r'''
314            (?:\(\s*|\s*,\s*)   # start with open parentheses or comma, no capture
315            ([A-Za-z]+[\w\-]+)  # match key starting with alpha, containing alphanumeric, _, or -; captured as Group 1
316            \s*=\s*             # key is followed by = (whitespace ignored)
317            (?:                 # uncaptured group for OR
318              "((?:[^"]|\\")+)" #   match quoted value (any internal " must be escaped as \"); captured as Group 2
319            | ([^=]+)           #   OR match unquoted value (no equals signs allowed); captured as Group 3
320            )                   # end uncaptured group for OR
321            \s*(?=,|\))         # lookahead for either next comma or closing parentheses
322        ''', re.VERBOSE)
323
324        for match in re.finditer(keyvalues_pattern, keyvalues_str):
325            if not match:  # empty
326                continue
327            key = match.group(1)
328            value = match.group(2) if match.group(2) else match.group(3)
329            try:
330                index = field_names.index(key)
331                if key == "only":  # the 'only' field holds a comma-separated list of constraints
332                    value = [constraint.strip() for constraint in value.split(',')]
333                try:
334                    # TODO: stop supporting python <=3.8
335                    known[key] = test_fields[index].type(value)  # type: ignore
336                except TypeError:
337                    # TODO: this is still liable to fail for complex types
338                    known[key] = get_origin(test_fields[index].type)(value)  # type: ignore
339            except ValueError:
340                other[key] = value
341
342        line = line[end + 1:]
343
344    if 'name' not in known:
345        known['name'] = fallback_name
346
347    args_pattern = re.compile(r'''
348        \s+(            # remove leading space
349            (?:"[^"]+") # match quoted CLI option
350          | (?:[\S]+)   # match anything else that is space separated
351        )
352    ''', re.VERBOSE)
353    args: List[str] = re.findall(args_pattern, line)
354    for k, v in other.items():
355        print(f"warning, unknown TESTCASE option for test '{known['name']}': {k}={v}")
356    return TestSpec(**known, key_values=other, args=args)
357
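# Hedged usage sketch (added for illustration, not part of the original module): a typical
# TESTARGS payload, as passed in by get_test_args() after the comment marker and the
# literal 'TESTARGS' prefix have been stripped. The field values shown are assumptions.
def _example_parse_test_line() -> TestSpec:
    """Illustrative only: key/value pairs fill TestSpec fields, the remainder becomes args"""
    spec = parse_test_line('(name="CSV output",csv_rtol=1e-5) -ceed {ceed_resource} -output ascii:out.csv',
                           fallback_name='t001')
    # spec.name == 'CSV output', spec.csv_rtol == 1e-05, and
    # spec.args == ['-ceed', '{ceed_resource}', '-output', 'ascii:out.csv']
    return spec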
358
359def get_test_args(source_file: Path) -> List[TestSpec]:
360    """Parse all test cases from a given source file
361
362    Args:
363        source_file (Path): Path to source file
364
365    Raises:
366        RuntimeError: If the source file extension is unsupported
367
368    Returns:
369        List[TestSpec]: List of parsed `TestSpec` objects, or a list containing a single, default `TestSpec` if none were found
370    """
371    comment_str: str = ''
372    if source_file.suffix in ['.c', '.cc', '.cpp']:
373        comment_str = '//'
374    elif source_file.suffix in ['.py']:
375        comment_str = '#'
376    elif source_file.suffix in ['.usr']:
377        comment_str = 'C_'
378    elif source_file.suffix in ['.f90']:
379        comment_str = '! '
380    else:
381        raise RuntimeError(f'Unrecognized extension for file: {source_file}')
382
383    return [parse_test_line(line.strip(comment_str).removeprefix("TESTARGS"), source_file.stem)
384            for line in source_file.read_text().splitlines()
385            if line.startswith(f'{comment_str}TESTARGS')] or [TestSpec(source_file.stem, args=['{ceed_resource}'])]
386
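# Hedged illustration (added, not part of the original module): in a C test source a line
# such as
#     //TESTARGS(name="basic") -ceed {ceed_resource} -test 1
# is picked up by get_test_args(); a file with no TESTARGS comments falls back to a single
# default TestSpec named after the file stem with args ['{ceed_resource}'].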
387
388def diff_csv(test_csv: Path, true_csv: Path, zero_tol: float, rel_tol: float,
389             comment_str: str = '#', comment_func: Optional[Callable[[str, str], Optional[str]]] = None) -> str:
390    """Compare CSV results against an expected CSV file with tolerances
391
392    Args:
393        test_csv (Path): Path to output CSV results
394        true_csv (Path): Path to expected CSV results
395        zero_tol (float): Tolerance below which values are considered to be zero.
396        rel_tol (float): Relative tolerance for comparing non-zero values.
397        comment_str (str, optional): String denoting a commented line. Defaults to '#'.
398        comment_func (Callable, optional): Function comparing a pair of commented lines, returning a diff message or `None` if they match
399
400    Returns:
401        str: Diff output between result and expected CSVs
402    """
403    test_lines: List[str] = test_csv.read_text().splitlines()
404    true_lines: List[str] = true_csv.read_text().splitlines()
405    # Files should not be empty
406    if len(test_lines) == 0:
407        return f'No lines found in test output {test_csv}'
408    if len(true_lines) == 0:
409        return f'No lines found in test source {true_csv}'
410    if len(test_lines) != len(true_lines):
411        return f'Number of lines in {test_csv} and {true_csv} do not match'
412
413    # Process commented lines
414    uncommented_lines: List[int] = []
415    for n, (test_line, true_line) in enumerate(zip(test_lines, true_lines)):
416        if test_line[0] == comment_str and true_line[0] == comment_str:
417            if comment_func:
418                output = comment_func(test_line, true_line)
419                if output:
420                    return output
421        elif test_line[0] == comment_str and true_line[0] != comment_str:
422            return f'Commented line found in {test_csv} at line {n} but not in {true_csv}'
423        elif test_line[0] != comment_str and true_line[0] == comment_str:
424            return f'Commented line found in {true_csv} at line {n} but not in {test_csv}'
425        else:
426            uncommented_lines.append(n)
427
428    # Remove commented lines
429    test_lines = [test_lines[line] for line in uncommented_lines]
430    true_lines = [true_lines[line] for line in uncommented_lines]
431
432    test_reader: csv.DictReader = csv.DictReader(test_lines)
433    true_reader: csv.DictReader = csv.DictReader(true_lines)
434    if not test_reader.fieldnames:
435        return f'No CSV columns found in test output {test_csv}'
436    if not true_reader.fieldnames:
437        return f'No CSV columns found in test source {true_csv}'
438    if test_reader.fieldnames != true_reader.fieldnames:
439        return ''.join(difflib.unified_diff([f'{test_lines[0]}\n'], [f'{true_lines[0]}\n'],
440                       tofile='found CSV columns', fromfile='expected CSV columns'))
441
442    diff_lines: List[str] = list()
443    for test_line, true_line in zip(test_reader, true_reader):
444        for key in test_reader.fieldnames:
445            # Check if the value is numeric
446            try:
447                true_val: float = float(true_line[key])
448                test_val: float = float(test_line[key])
449                true_zero: bool = abs(true_val) < zero_tol
450                test_zero: bool = abs(test_val) < zero_tol
451                fail: bool = False
452                if true_zero:
453                    fail = not test_zero
454                else:
455                    fail = not isclose(test_val, true_val, rel_tol=rel_tol)
456                if fail:
457                    diff_lines.append(f'column: {key}, expected: {true_val}, got: {test_val}')
458            except ValueError:
459                if test_line[key] != true_line[key]:
460                    diff_lines.append(f'column: {key}, expected: {true_line[key]}, got: {test_line[key]}')
461
462    return '\n'.join(diff_lines)
463
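# Hedged usage sketch (added for illustration, not part of the original module): compare a
# produced CSV against its reference with the suite's default tolerances; an empty return
# string means the files agree. The file names are assumptions made for the example.
def _example_diff_csv(out_dir: Path, ref_dir: Path) -> bool:
    """Illustrative only: values under zero_tol count as zero, others compared with rel_tol"""
    message = diff_csv(out_dir / 'result.csv', ref_dir / 'result.csv',
                       zero_tol=3e-10, rel_tol=1e-6)
    return message == ''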
464
465def diff_cgns(test_cgns: Path, true_cgns: Path, cgns_tol: float) -> str:
466    """Compare CGNS results against an expected CGSN file with tolerance
467
468    Args:
469        test_cgns (Path): Path to output CGNS file
470        true_cgns (Path): Path to expected CGNS file
471        cgns_tol (float): Tolerance for comparing floating-point values
472
473    Returns:
474        str: Diff output between result and expected CGNS files
475    """
476    my_env: dict = os.environ.copy()
477
478    run_args: List[str] = ['cgnsdiff', '-d', '-t', f'{cgns_tol}', str(test_cgns), str(true_cgns)]
479    proc = subprocess.run(' '.join(run_args),
480                          shell=True,
481                          stdout=subprocess.PIPE,
482                          stderr=subprocess.PIPE,
483                          env=my_env)
484
485    return proc.stderr.decode('utf-8') + proc.stdout.decode('utf-8')
486
487
488def diff_ascii(test_file: Path, true_file: Path, backend: str) -> str:
489    """Compare ASCII results against an expected ASCII file
490
491    Args:
492        test_file (Path): Path to output ASCII file
493        true_file (Path): Path to expected ASCII file
        backend (str): libCEED backend whose sanitized name replaces '{ceed_resource}' in the expected file
494
495    Returns:
496        str: Diff output between result and expected ASCII files
497    """
498    tmp_backend: str = backend.replace('/', '-')
499    true_str: str = true_file.read_text().replace('{ceed_resource}', tmp_backend)
500    diff = list(difflib.unified_diff(test_file.read_text().splitlines(keepends=True),
501                                     true_str.splitlines(keepends=True),
502                                     fromfile=str(test_file),
503                                     tofile=str(true_file)))
504    return ''.join(diff)
505
506
507def test_case_output_string(test_case: TestCase, spec: TestSpec, mode: RunMode,
508                            backend: str, test: str, index: int, verbose: bool) -> str:
    """Format a single test case result for console output in TAP or JUnit mode"""
509    output_str = ''
510    if mode is RunMode.TAP:
511        # print incremental output if TAP mode
512        if test_case.is_skipped():
513            output_str += f'    ok {index} - {spec.name}, {backend} # SKIP {test_case.skipped[0]["message"]}\n'
514        elif test_case.is_failure() or test_case.is_error():
515            output_str += f'    not ok {index} - {spec.name}, {backend} ({test_case.elapsed_sec} s)\n'
516        else:
517            output_str += f'    ok {index} - {spec.name}, {backend} ({test_case.elapsed_sec} s)\n'
518        if test_case.is_failure() or test_case.is_error() or verbose:
519            output_str += f'      ---\n'
520            if spec.only:
521                output_str += f'      only: {",".join(spec.only)}\n'
522            output_str += f'      args: {test_case.args}\n'
523            if spec.csv_ztol > 0:
524                output_str += f'      csv_ztol: {spec.csv_ztol}\n'
525            if spec.csv_rtol > 0:
526                output_str += f'      csv_rtol: {spec.csv_rtol}\n'
527            if spec.cgns_tol > 0:
528                output_str += f'      cgns_tol: {spec.cgns_tol}\n'
529            for k, v in spec.key_values.items():
530                output_str += f'      {k}: {v}\n'
531            if test_case.is_error():
532                output_str += f'      error: {test_case.errors[0]["message"]}\n'
533            if test_case.is_failure():
534                output_str += f'      failures:\n'
535                for i, failure in enumerate(test_case.failures):
536                    output_str += f'        -\n'
537                    output_str += f'          message: {failure["message"]}\n'
538                    if failure["output"]:
539                        out = failure["output"].strip().replace('\n', '\n            ')
540                        output_str += f'          output: |\n            {out}\n'
541            output_str += f'      ...\n'
542    else:
543        # print error or failure information if JUNIT mode
544        if test_case.is_error() or test_case.is_failure():
545            output_str += f'Test: {test} {spec.name}\n'
546            output_str += f'  $ {test_case.args}\n'
547            if test_case.is_error():
548                output_str += 'ERROR: {}\n'.format((test_case.errors[0]['message'] or 'NO MESSAGE').strip())
549                output_str += 'Output: \n{}\n'.format((test_case.errors[0]['output'] or 'NO MESSAGE').strip())
550            if test_case.is_failure():
551                for failure in test_case.failures:
552                    output_str += 'FAIL: {}\n'.format((failure['message'] or 'NO MESSAGE').strip())
553                    output_str += 'Output: \n{}\n'.format((failure['output'] or 'NO MESSAGE').strip())
554    return output_str
555
556
557def save_failure_artifact(suite_spec: SuiteSpec, file: Path) -> Path:
558    """Save a file to the test failure artifacts directory
559
560    Args:
561        suite_spec (SuiteSpec): Test suite specification providing the artifacts path
562        file (Path): Path to the file to save

    Returns:
        Path: Path to the copied file in the artifacts directory
563    """
564    save_path: Path = suite_spec.test_failure_artifacts_path / file.name
565    shutil.copyfile(file, save_path)
566    return save_path
567
568
569def run_test(index: int, test: str, spec: TestSpec, backend: str,
570             mode: RunMode, nproc: int, suite_spec: SuiteSpec, verbose: bool = False) -> Tuple[TestCase, str]:
571    """Run a single test case and backend combination
572
573    Args:
574        index (int): Index of backend for current spec
575        test (str): Path to test
576        spec (TestSpec): Specification of test case
577        backend (str): CEED backend
578        mode (RunMode): Output mode
579        nproc (int): Number of MPI processes to use when running test case
580        suite_spec (SuiteSpec): Specification of test suite
581        verbose (bool, optional): Print detailed output for all runs, not just failures. Defaults to False.
582
583    Returns:
584        Tuple[TestCase, str]: Test case result and its formatted console output
585    """
586    source_path: Path = suite_spec.get_source_path(test)
587    run_args: List = [f'{suite_spec.get_run_path(test)}', *map(str, spec.args)]
588
589    if '{ceed_resource}' in run_args:
590        run_args[run_args.index('{ceed_resource}')] = backend
591    for i, arg in enumerate(run_args):
592        if '{ceed_resource}' in arg:
593            run_args[i] = arg.replace('{ceed_resource}', backend.replace('/', '-'))
594    if '{nproc}' in run_args:
595        run_args[run_args.index('{nproc}')] = f'{nproc}'
596    elif nproc > 1 and source_path.suffix != '.py':
597        run_args = ['mpiexec', '-n', f'{nproc}', *run_args]
598
599    # run test
600    skip_reason: Optional[str] = suite_spec.check_pre_skip(test, spec, backend, nproc)
601    if skip_reason:
602        test_case: TestCase = TestCase(f'{test}, "{spec.name}", n{nproc}, {backend}',
603                                       elapsed_sec=0,
604                                       timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime()),
605                                       stdout='',
606                                       stderr='',
607                                       category=spec.name,)
608        test_case.add_skipped_info(skip_reason)
609    else:
610        start: float = time.time()
611        proc = subprocess.run(' '.join(str(arg) for arg in run_args),
612                              shell=True,
613                              stdout=subprocess.PIPE,
614                              stderr=subprocess.PIPE,
615                              env=my_env)
616
617        test_case = TestCase(f'{test}, "{spec.name}", n{nproc}, {backend}',
618                             classname=source_path.parent,
619                             elapsed_sec=time.time() - start,
620                             timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(start)),
621                             stdout=proc.stdout.decode('utf-8'),
622                             stderr=proc.stderr.decode('utf-8'),
623                             allow_multiple_subelements=True,
624                             category=spec.name,)
625        ref_csvs: List[Path] = []
626        ref_ascii: List[Path] = []
627        output_files: List[str] = [arg for arg in run_args if 'ascii:' in arg]
628        if output_files:
629            ref_csvs = [suite_spec.get_output_path(test, file.split(':')[1])
630                        for file in output_files if file.endswith('.csv')]
631            ref_ascii = [suite_spec.get_output_path(test, file.split(':')[1])
632                         for file in output_files if not file.endswith('.csv')]
633        ref_cgns: List[Path] = []
634        output_files = [arg for arg in run_args if 'cgns:' in arg]
635        if output_files:
636            ref_cgns = [suite_spec.get_output_path(test, file.split('cgns:')[-1]) for file in output_files]
637        ref_stdout: Path = suite_spec.get_output_path(test, test + '.out')
638        suite_spec.post_test_hook(test, spec, backend)
639
640    # check allowed failures
641    if not test_case.is_skipped() and test_case.stderr:
642        skip_reason: Optional[str] = suite_spec.check_post_skip(test, spec, backend, test_case.stderr)
643        if skip_reason:
644            test_case.add_skipped_info(skip_reason)
645
646    # check required failures
647    if not test_case.is_skipped():
648        required_message, did_fail = suite_spec.check_required_failure(
649            test, spec, backend, test_case.stderr)
650        if required_message and did_fail:
651            test_case.status = f'fails with required: {required_message}'
652        elif required_message:
653            test_case.add_failure_info(f'required failure missing: {required_message}')
654
655    # classify other results
656    if not test_case.is_skipped() and not test_case.status:
657        if test_case.stderr:
658            test_case.add_failure_info('stderr', test_case.stderr)
659        if proc.returncode != 0:
660            test_case.add_error_info(f'returncode = {proc.returncode}')
661        if ref_stdout.is_file():
662            diff = list(difflib.unified_diff(ref_stdout.read_text().splitlines(keepends=True),
663                                             test_case.stdout.splitlines(keepends=True),
664                                             fromfile=str(ref_stdout),
665                                             tofile='New'))
666            if diff:
667                test_case.add_failure_info('stdout', output=''.join(diff))
668        elif test_case.stdout and not suite_spec.check_allowed_stdout(test):
669            test_case.add_failure_info('stdout', output=test_case.stdout)
670        # expected CSV output
671        for ref_csv in ref_csvs:
672            csv_name = ref_csv.name
673            out_file = Path.cwd() / csv_name
674            if not ref_csv.is_file():
675                # remove _{ceed_backend} from path name
676                ref_csv = (ref_csv.parent / ref_csv.name.rsplit('_', 1)[0]).with_suffix('.csv')
677            if not ref_csv.is_file():
678                test_case.add_failure_info('csv', output=f'{ref_csv} not found')
679            elif not out_file.is_file():
680                test_case.add_failure_info('csv', output=f'{out_file} not found')
681            else:
682                csv_ztol: float = spec.csv_ztol if spec.csv_ztol > 0 else suite_spec.csv_ztol
683                csv_rtol: float = spec.csv_rtol if spec.csv_rtol > 0 else suite_spec.csv_rtol
684                diff = diff_csv(out_file, ref_csv, zero_tol=csv_ztol, rel_tol=csv_rtol)
685                if diff:
686                    save_path: Path = suite_spec.test_failure_artifacts_path / csv_name
687                    shutil.move(out_file, save_path)
688                    test_case.add_failure_info(f'csv: {save_path}', output=diff)
689                else:
690                    out_file.unlink()
691        # expected CGNS output
692        for ref_cgn in ref_cgns:
693            cgn_name = ref_cgn.name
694            out_file = Path.cwd() / cgn_name
695            if not ref_cgn.is_file():
696                # remove _{ceed_backend} from path name
697                ref_cgn = (ref_cgn.parent / ref_cgn.name.rsplit('_', 1)[0]).with_suffix('.cgns')
698            if not ref_cgn.is_file():
699                test_case.add_failure_info('cgns', output=f'{ref_cgn} not found')
700            elif not out_file.is_file():
701                test_case.add_failure_info('cgns', output=f'{out_file} not found')
702            else:
703                cgns_tol = spec.cgns_tol if spec.cgns_tol > 0 else suite_spec.cgns_tol
704                diff = diff_cgns(out_file, ref_cgn, cgns_tol=cgns_tol)
705                if diff:
706                    save_path: Path = suite_spec.test_failure_artifacts_path / cgn_name
707                    shutil.move(out_file, save_path)
708                    test_case.add_failure_info(f'cgns: {save_path}', output=diff)
709                else:
710                    out_file.unlink()
711        # expected ASCII output
712        for ref_file in ref_ascii:
713            ref_name = ref_file.name
714            out_file = Path.cwd() / ref_name
715            if not ref_file.is_file():
716                # remove _{ceed_backend} from path name
717                ref_file = (ref_file.parent / ref_file.name.rsplit('_', 1)[0]).with_suffix(ref_file.suffix)
718            if not ref_file.is_file():
719                test_case.add_failure_info('ascii', output=f'{ref_file} not found')
720            elif not out_file.is_file():
721                test_case.add_failure_info('ascii', output=f'{out_file} not found')
722            else:
723                diff = diff_ascii(out_file, ref_file, backend)
724                if diff:
725                    save_path: Path = suite_spec.test_failure_artifacts_path / ref_name
726                    shutil.move(out_file, save_path)
727                    test_case.add_failure_info(f'ascii: {save_path}', output=diff)
728                else:
729                    out_file.unlink()
730
731    # store result
732    test_case.args = ' '.join(str(arg) for arg in run_args)
733    output_str = test_case_output_string(test_case, spec, mode, backend, test, index, verbose)
734
735    return test_case, output_str
736
737
738def init_process():
739    """Initialize multiprocessing process"""
740    # set up error handler
741    global my_env
742    my_env = os.environ.copy()
743    my_env['CEED_ERROR_HANDLER'] = 'exit'
744
745
746def run_tests(test: str, ceed_backends: List[str], mode: RunMode, nproc: int,
747              suite_spec: SuiteSpec, pool_size: int = 1, search: str = ".*", verbose: bool = False) -> TestSuite:
748    """Run all test cases for `test` with each of the provided `ceed_backends`
749
750    Args:
751        test (str): Name of test
752        ceed_backends (List[str]): List of libCEED backends
753        mode (RunMode): Output mode, either `RunMode.TAP` or `RunMode.JUNIT`
754        nproc (int): Number of MPI processes to use when running each test case
755        suite_spec (SuiteSpec): Object defining required methods for running tests
756        pool_size (int, optional): Number of processes to use when running tests in parallel. Defaults to 1.
757        search (str, optional): Regular expression used to match tests. Defaults to ".*".
758        verbose (bool, optional): Print detailed output for all runs, not just failures. Defaults to False.
759
760    Returns:
761        TestSuite: JUnit `TestSuite` containing results of all test cases
762    """
763    test_specs: List[TestSpec] = [
764        t for t in get_test_args(suite_spec.get_source_path(test)) if re.search(search, t.name, re.IGNORECASE)
765    ]
766    suite_spec.test_failure_artifacts_path.mkdir(parents=True, exist_ok=True)
767    if mode is RunMode.TAP:
768        print('TAP version 13')
769        print(f'1..{len(test_specs)}')
770
771    with mp.Pool(processes=pool_size, initializer=init_process) as pool:
772        async_outputs: List[List[mp.pool.AsyncResult]] = [
773            [pool.apply_async(run_test, (i, test, spec, backend, mode, nproc, suite_spec, verbose))
774             for (i, backend) in enumerate(ceed_backends, start=1)]
775            for spec in test_specs
776        ]
777
778        test_cases = []
779        for (i, subtest) in enumerate(async_outputs, start=1):
780            is_new_subtest = True
781            subtest_ok = True
782            for async_output in subtest:
783                test_case, print_output = async_output.get()
784                test_cases.append(test_case)
785                if is_new_subtest and mode == RunMode.TAP:
786                    is_new_subtest = False
787                    print(f'# Subtest: {test_case.category}')
788                    print(f'    1..{len(ceed_backends)}')
789                print(print_output, end='')
790                if test_case.is_failure() or test_case.is_error():
791                    subtest_ok = False
792            if mode == RunMode.TAP:
793                print(f'{"" if subtest_ok else "not "}ok {i} - {test_case.category}')
794
795    return TestSuite(test, test_cases)
796
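# Hedged end-to-end sketch (added for illustration, not part of the original module): a
# minimal SuiteSpec implementation and driver showing how run_tests(), write_junit_xml(),
# and has_failures() (both defined below, resolved at call time) fit together. The paths,
# test name, and backends are assumptions made for the example.
class _ExampleSuiteSpec(SuiteSpec):
    """Illustrative only: resolve tests against a tests/ source tree and build/ executables"""

    def get_source_path(self, test: str) -> Path:
        return Path('tests') / f'{test}.c'

    def get_run_path(self, test: str) -> Path:
        return Path('build') / test

    def get_output_path(self, test: str, output_file: str) -> Path:
        return Path('tests') / 'output' / output_file


def _example_run_suite() -> int:
    """Illustrative only: run one test on two CPU backends and emit a JUnit report"""
    suite = run_tests('t001-ceed', ['/cpu/self/ref/serial', '/cpu/self/ref/blocked'],
                      RunMode.JUNIT, nproc=1, suite_spec=_ExampleSuiteSpec())
    write_junit_xml(suite, None)
    return 1 if has_failures(suite) else 0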
797
798def write_junit_xml(test_suite: TestSuite, output_file: Optional[Path], batch: str = '') -> None:
799    """Write a JUnit XML file containing the results of a `TestSuite`
800
801    Args:
802        test_suite (TestSuite): JUnit `TestSuite` to write
803        output_file (Optional[Path]): Path to output file, or `None` to generate automatically as `build/{test_suite.name}{batch}.junit`
804        batch (str): Name of JUnit batch, defaults to empty string
805    """
806    output_file = output_file or Path('build') / (f'{test_suite.name}{batch}.junit')
807    output_file.write_text(to_xml_report_string([test_suite]))
808
809
810def has_failures(test_suite: TestSuite) -> bool:
811    """Check whether any test cases in a `TestSuite` failed
812
813    Args:
814        test_suite (TestSuite): JUnit `TestSuite` to check
815
816    Returns:
817        bool: True if any test cases failed
818    """
819    return any(c.is_failure() or c.is_error() for c in test_suite.test_cases)
820