xref: /libCEED/tests/junit.py (revision daadeac6547c0bce0e170b8a41c931051f52e9a3)
1#!/usr/bin/env python3
2
3import os
4import sys
5sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'junit-xml')))
6from junit_xml import TestCase, TestSuite
7
8
def parse_testargs(file):
    """Parse TESTARGS comment directives from a test source file.

    Each directive line yields one test case, returned as ``[args, [name]]``
    where ``args`` is the list of command-line tokens (with any
    ``{ceed_resource}`` placeholder intact) and ``name`` is the optional
    ``name=`` label (empty string when absent).

    Raises RuntimeError for unrecognized file extensions.
    """
    ext = os.path.splitext(file)[1]
    if ext in ['.c', '.cpp']:
        marker, strip_chars, name_index = '//TESTARGS', '//TESTARGS(name=', 0
    elif ext == '.usr':
        marker, strip_chars, name_index = 'C_TESTARGS', 'C_TESTARGS(name=', 0
    elif ext == '.f90':
        # Fortran directives are '! TESTARGS...': the marker spans two
        # whitespace-separated tokens ('!' and 'TESTARGS...').  The previous
        # code took the name from token 0 (always '!') and left the
        # 'TESTARGS(...)' token in the argument list, producing bogus args.
        marker, strip_chars, name_index = '! TESTARGS', 'C_TESTARGS(name=', 1
    else:
        raise RuntimeError('Unrecognized extension for file: {}'.format(file))

    cases = []
    with open(file) as fd:  # context manager: previous code leaked the handle
        for line in fd:
            if not line.startswith(marker):
                continue
            tokens = line.split()
            # NB: str.strip() treats its argument as a *character set*; names
            # containing those characters are expected to be quoted, which
            # shields them from stripping.
            name = tokens[name_index].strip(strip_chars).strip(')')
            cases.append([tokens[name_index + 1:], [name]])
    return cases
23
24
def get_source(test):
    """Map a test name to the path of its source file.

    Prefixes select example directories; a '-f' suffix selects the Fortran
    variant of a plain test; everything else lives under tests/ as C.
    """
    # Ordered prefix table: (prefix, directory, extension).  Order matters --
    # the example prefixes are checked before the generic 'ex' and '-f' rules.
    prefix_table = (
        ('petsc-', os.path.join('examples', 'petsc'), '.c'),
        ('mfem-', os.path.join('examples', 'mfem'), '.cpp'),
        ('nek-', os.path.join('examples', 'nek', 'bps'), '.usr'),
        ('fluids-', os.path.join('examples', 'fluids'), '.c'),
        ('solids-', os.path.join('examples', 'solids'), '.c'),
    )
    for prefix, directory, ext in prefix_table:
        if test.startswith(prefix):
            return os.path.join(directory, test[len(prefix):] + ext)
    if test.startswith('ex'):
        return os.path.join('examples', 'ceed', test + '.c')
    suffix = '.f90' if test.endswith('-f') else '.c'
    return os.path.join('tests', test + suffix)
42
43
def get_testargs(source):
    """Return the TESTARGS cases for *source*.

    Falls back to a single default case -- just the backend resource
    placeholder with an empty name -- when the file declares none.
    """
    parsed = parse_testargs(source)
    return parsed or [(['{ceed_resource}'], [''])]
49
50
def check_required_failure(case, stderr, required):
    """Record whether a test that must fail did so for the right reason.

    If *required* appears in *stderr*, the case is marked as an expected
    failure via its status; otherwise a failure is attached to the case.
    """
    message = 'required: {}'.format(required)
    if required in stderr:
        case.status = 'fails with ' + message
    else:
        case.add_failure_info(message)
56
57
def contains_any(resource, substrings):
    """Return True if any string in *substrings* occurs within *resource*."""
    for needle in substrings:
        if needle in resource:
            return True
    return False
60
61
def skip_rule(test, resource):
    """Return True for test/backend pairs that should be skipped before running.

    Entire test families are excluded on OCCA backends, plus two individual
    CUDA-specific exclusions.
    """
    # Families excluded on any backend whose resource mentions 'occa'.
    occa_excluded = ('t4', 't5', 'ex', 'mfem', 'nek',
                     'petsc-', 'fluids-', 'solids-')
    if test.startswith(occa_excluded) and 'occa' in resource:
        return True
    if test.startswith('t318') and '/gpu/cuda/ref' in resource:
        return True
    return test.startswith('t506') and '/gpu/cuda/shared' in resource
75
76
def run(test, backends):
    """Run *test* once per backend in *backends* and collect results.

    Looks up the test's source file to gather its TESTARGS cases, executes
    ``build/<test>`` for every (case, backend) combination, and returns a
    junit_xml ``TestSuite`` with one ``TestCase`` per run.
    """
    import subprocess
    import time
    import difflib
    source = get_source(test)
    all_args = get_testargs(source)

    test_cases = []
    my_env = os.environ.copy()
    # Set the CEED error handler to 'exit' so backend errors terminate the
    # binary and surface through returncode/stderr.
    my_env["CEED_ERROR_HANDLER"] = 'exit'
    for args, name in all_args:
        for ceed_resource in backends:
            # Build the command line, substituting the concrete backend
            # resource for the '{ceed_resource}' placeholder from TESTARGS.
            rargs = [os.path.join('build', test)] + args.copy()
            rargs[rargs.index('{ceed_resource}')] = ceed_resource

            if skip_rule(test, ceed_resource):
                # Known-bad combination: record a skipped case without running.
                case = TestCase('{} {}'.format(test, ceed_resource),
                                elapsed_sec=0,
                                timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime()),
                                stdout='',
                                stderr='')
                case.add_skipped_info('Pre-run skip rule')
            else:
                start = time.time()
                proc = subprocess.run(rargs,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      env=my_env)
                proc.stdout = proc.stdout.decode('utf-8')
                proc.stderr = proc.stderr.decode('utf-8')

                # 'name' is a one-element list, hence the *name unpacking.
                case = TestCase('{} {} {}'.format(test, *name, ceed_resource),
                                classname=os.path.dirname(source),
                                elapsed_sec=time.time() - start,
                                timestamp=time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(start)),
                                stdout=proc.stdout,
                                stderr=proc.stderr)
                # Reference output to diff against, when the file exists.
                ref_stdout = os.path.join('tests/output', test + '.out')

            # Post-run skips: stderr messages that mean "unsupported on this
            # backend", not "broken".  proc (and ref_stdout) are only bound
            # on the non-skipped path, which the is_skipped() guard ensures.
            if not case.is_skipped() and proc.stderr:
                if 'OCCA backend failed to use' in proc.stderr:
                    case.add_skipped_info('occa mode not supported {} {}'.format(test, ceed_resource))
                elif 'Backend does not implement' in proc.stderr:
                    case.add_skipped_info('not implemented {} {}'.format(test, ceed_resource))
                elif 'Can only provide HOST memory for this backend' in proc.stderr:
                    case.add_skipped_info('device memory not supported {} {}'.format(test, ceed_resource))
                elif 'Test not implemented in single precision' in proc.stderr:
                    case.add_skipped_info('not implemented {} {}'.format(test, ceed_resource))

            # Tests that are *expected* to fail with a specific message;
            # check_required_failure marks the case pass/fail accordingly.
            if not case.is_skipped():
                if test[:4] in 't006 t007'.split():
                    check_required_failure(case, proc.stderr, 'No suitable backend:')
                if test[:4] in 't008'.split():
                    check_required_failure(case, proc.stderr, 'Available backend resources:')
                if test[:4] in 't110 t111 t112 t113 t114'.split():
                    check_required_failure(case, proc.stderr, 'Cannot grant CeedVector array access')
                if test[:4] in 't115'.split():
                    check_required_failure(case, proc.stderr, 'Cannot grant CeedVector read-only array access, the access lock is already in use')
                if test[:4] in 't116'.split():
                    check_required_failure(case, proc.stderr, 'Cannot destroy CeedVector, the writable access lock is in use')
                if test[:4] in 't117'.split():
                    check_required_failure(case, proc.stderr, 'Cannot restore CeedVector array access, access was not granted')
                if test[:4] in 't118'.split():
                    check_required_failure(case, proc.stderr, 'Cannot sync CeedVector, the access lock is already in use')
                if test[:4] in 't215'.split():
                    check_required_failure(case, proc.stderr, 'Cannot destroy CeedElemRestriction, a process has read access to the offset data')
                if test[:4] in 't303'.split():
                    check_required_failure(case, proc.stderr, 'Length of input/output vectors incompatible with basis dimensions')
                if test[:4] in 't408'.split():
                    check_required_failure(case, proc.stderr, 'CeedQFunctionContextGetData(): Cannot grant CeedQFunctionContext data access, a process has read access')
                if test[:4] in 't409'.split() and contains_any(ceed_resource, ['memcheck']):
                    check_required_failure(case, proc.stderr, 'Context data changed while accessed in read-only mode')

            # Anything still unclassified: stderr or a nonzero return code is
            # a failure/error; otherwise diff stdout against the reference
            # output file when one exists.
            if not case.is_skipped() and not case.status:
                if proc.stderr:
                    case.add_failure_info('stderr', proc.stderr)
                elif proc.returncode != 0:
                    case.add_error_info('returncode = {}'.format(proc.returncode))
                elif os.path.isfile(ref_stdout):
                    with open(ref_stdout) as ref:
                        diff = list(difflib.unified_diff(ref.readlines(),
                                                         proc.stdout.splitlines(keepends=True),
                                                         fromfile=ref_stdout,
                                                         tofile='New'))
                    if diff:
                        case.add_failure_info('stdout', output=''.join(diff))
                # NOTE: "test[:4] not in 't003'" is a substring test against
                # the string 't003'; since test[:4] has length 4 it behaves
                # as test[:4] != 't003'.
                elif proc.stdout and test[:4] not in 't003':
                    case.add_failure_info('stdout', output=proc.stdout)
            case.args = ' '.join(rargs)
            test_cases.append(case)
    return TestSuite(test, test_cases)
168
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser('Test runner with JUnit and TAP output')
    parser.add_argument('--mode', help='Output mode, JUnit or TAP', default="JUnit")
    parser.add_argument('--output', help='Output file to write test', default=None)
    parser.add_argument('--gather', help='Gather all *.junit files into XML', action='store_true')
    parser.add_argument('test', help='Test executable', nargs='?')
    args = parser.parse_args()

    if args.gather:
        # NOTE(review): gather() is not defined in this file -- presumably it
        # lives elsewhere or was removed; confirm before relying on --gather.
        gather()
    else:
        # Backends to exercise, whitespace-separated in the environment.
        backends = os.environ['BACKENDS'].split()

        result = run(args.test, backends)

        mode = args.mode.lower()
        if mode == "junit":
            # Optional '-<batch>' suffix for the output file name.  The
            # original used a bare 'except: pass' around the env lookup;
            # only a missing key is expected, so test for it explicitly.
            junit_batch = ('-' + os.environ['JUNIT_BATCH']
                           if 'JUNIT_BATCH' in os.environ
                           else '')
            output = (os.path.join('build', args.test + junit_batch + '.junit')
                      if args.output is None
                      else args.output)

            with open(output, 'w') as fd:
                TestSuite.to_file(fd, [result])
        elif mode == "tap":
            # TAP output: plan line first, then one result line per case.
            print('1..' + str(len(result.test_cases)))
            for test_index, test_case in enumerate(result.test_cases, start=1):
                print('# Test: ' + test_case.name.split(' ')[1])
                print('# $ ' + test_case.args)
                # is_error()/is_failure()/is_skipped() may return the message
                # itself; otherwise pull it from the recorded info dict.
                if test_case.is_error():
                    message = test_case.is_error() if isinstance(test_case.is_error(), str) else test_case.errors[0]['message']
                    print('not ok {} - ERROR: {}'.format(test_index, message.strip()))
                    print(test_case.errors[0]['output'].strip())
                elif test_case.is_failure():
                    message = test_case.is_failure() if isinstance(test_case.is_failure(), str) else test_case.failures[0]['message']
                    print('not ok {} - FAIL: {}'.format(test_index, message.strip()))
                    print(test_case.failures[0]['output'].strip())
                elif test_case.is_skipped():
                    message = test_case.is_skipped() if isinstance(test_case.is_skipped(), str) else test_case.skipped[0]['message']
                    print('ok {} - SKIP: {}'.format(test_index, message.strip()))
                else:
                    print('ok {} - PASS'.format(test_index))
        else:
            raise Exception("output mode not recognized")

        # Signal failures through the exit code (JUnit mode only; TAP
        # consumers parse the per-test lines instead).  The original
        # recomputed these counts once per test case in a pointless loop.
        if mode != "tap":
            failures = sum(1 for c in result.test_cases if c.is_failure())
            errors = sum(1 for c in result.test_cases if c.is_error())
            if failures + errors > 0:
                sys.exit(1)
225