xref: /petsc/config/report_tests.py (revision 09b68a49ed2854d1e4985cc2aa6af33c7c4e69b3)
1#!/usr/bin/env python3
2from __future__ import print_function
3import glob, os, re, stat
4import optparse
5import inspect
6
7"""
8Quick script for parsing the output of the test system and summarizing the results.
9"""
10
def inInstallDir():
  """
  Return True when this script is running from a PETSc install tree.

  When PETSc is installed this file lives at:
       <PREFIX>/share/petsc/examples/config/gmakegentest.py
  otherwise it lives at:
       <PETSC_DIR>/config/gmakegentest.py
  The last four path components of this script's directory distinguish
  the two cases.
  """
  script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  parts = script_dir.split(os.path.sep)
  if len(parts) <= 4:
    # Too shallow a path to be an install prefix layout
    return False
  tail = os.path.sep.join(parts[-4:])
  return tail == os.path.join('share', 'petsc', 'examples', 'config')
29
def summarize_results(directory,make,ntime,etime,show_results):
  '''Loop over all of the results files and summarize the results.

  Parameters
  ----------
  directory    : directory holding the *.counts files written by the harness
  make         : name of the make executable to show in the "rerun" hint
  ntime        : if > 0, report on the ntime most expensive tests
  etime        : elapsed wall-clock time string to report (may be None)
  show_results : when True print the full summary; otherwise only failures
  '''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No test results in ', directory)
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if l[0] == 'failures':
           # 'failures' lines carry a list of failed target names
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           # 'time <wall> <cpu>'; a bare 'time' line carries no data
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  if show_results:
    print("\n# -------------")
    print("#   Summary    ")
    print("# -------------")
    if failstr.strip(): print("# FAILED " + failstr)

    for t in "success failed todo skip".split():
      # Guard against ZeroDivisionError when no tests were counted at all
      percent=summary[t]/float(summary['total'])*100 if summary['total'] else 0.0
      print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
    print("#")
    if etime:
      print("# Wall clock time for tests: %s sec"% etime)
    print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])
  else:
    if failstr.strip(): print("\n\n# FAILED " + failstr)

  if failstr.strip():
      # Remove the diff-/cmd- prefixes so the names are valid make targets
      fail_targets=(
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' '))
          )
      # Strip off characters from subtests
      fail_list=[]
      for failure in fail_targets.split():
         fail_list.append(failure.split('+')[0])
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # create simple little script that echoes the failed targets
      sfile=os.path.join(os.path.dirname(os.path.abspath(os.curdir)),'echofailures.sh')
      with open(sfile,'w') as f:
          f.write('echo '+fail_targets.strip())
      st = os.stat(sfile)
      os.chmod(sfile, st.st_mode | stat.S_IEXEC)

      # Make the message nice -- installed trees use gmakefile.test
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test test-fail=1")

  if ntime>0 and show_results:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Do a double loop to print the most expensive tests in descending order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            # Anchor and escape the pattern so only the '.counts' extension goes
            print("#   %s: %.2f sec / %.2f sec" % (re.sub(r'\.counts$','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return
116
def get_test_data(directory):
    """
    Create dictionary structure with test data.

    Parses every *.counts file in directory and returns a nested dict:
      {pkgname: {'total'/'success'/'failed'/'errors'/'todo'/'skip'/'time'
                 counters, 'problems': {probname: per-test details}}}
    Returns None if directory cannot be entered.
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''
        # concatenate all '-'-separated pieces after the package part
        for i in range(1,len(testname)):
            probname += testname[i]
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorial)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correct assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            # fall back to the bare problem name (drop subtest suffixes)
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        probfullpath=os.path.normpath(os.path.join(directory,probdir))
        # assemble the final full folder path for problem outputs and read the files
        try:
            with open('%s/diff-%s.out'%(probdir, probfolder),'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            # a missing file simply means there is nothing to report
            difflines = []
        try:
            with open('%s/%s.err'%(probdir, probfolder),'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open('%s/%s.tmp'%(probdir, probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and problem type names into a "class"
        classname = pkgname
        for item in testname_list[1:]:
            classname += '.%s'%item
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata.keys():
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if l[0] == 'time':
                    # 'time <wall> <cpu>'; a bare 'time' line carries no data
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] in testdata[pkgname].keys():
                    # This block includes total, success, failed, skip, todo
                    num_int=int(l[1])
                    testdata[pkgname][l[0]] += num_int
                    if l[0] in ['failed']:
                        # If non-zero error code and non-zero stderr, something wrong
                        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
                            if not num_int: num_int=1
                        if num_int:
                            testdata[pkgname]['errors'] += 1
                            testdata[pkgname]['problems'][probname][l[0]] = True
                    if l[0] in ['skip'] and num_int:
                        testdata[pkgname]['problems'][probname][l[0]] = True
                        # Fix: also set the 'skipped' flag that show_fail and
                        # generate_xml consult; previously only the 'skip' key
                        # was written so 'skipped' stayed False forever
                        testdata[pkgname]['problems'][probname]['skipped'] = True
                else:
                    continue
    os.chdir(startdir)  # Keep function in good state
    return testdata
220
def show_fail(testdata):
    """Print, for every crashed or output-mismatched test, the shell command
    that reruns it (as 'fullname: cd <dir> && <command>').
    """
    for pkgdata in testdata.values():
        for prob in pkgdata['problems'].values():
            if prob['skipped']:
                # the TAP output shows a skipped test -- nothing to rerun
                continue
            if len(prob['stderr']) > 0:
                # the test crashed with an error: rerun its driver script
                base = os.path.join(prob['probdir'], prob['fullname'])
            elif len(prob['diff']) > 0:
                # the test output did not match the stored output file:
                # rerun the diff driver script
                base = os.path.join(prob['probdir'], 'diff-' + prob['fullname'])
            else:
                continue
            script = base + ".sh"
            if not os.path.exists(script):
                # fall back to any file sharing the base name
                script = glob.glob(base + "*")[0]
            with open(script, 'r') as fh:
                command = fh.read()
            # drop the output-redirection tail of the stored command
            print(prob['fullname'] + ': ' + 'cd ' + prob['probdir'] + ' && ' + command.split('>')[0])
    return
254
def generate_xml(testdata,directory):
    """Write testdata information into a jUnit-formatted XML file.

    The report goes to '../testresults.xml' relative to *directory*.
    Silently returns if *directory* cannot be entered.
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    # Use a context manager so the file is closed even if a write raises
    # (the original left the handle open on any exception)
    with open('../testresults.xml', 'w') as junit:
        junit.write('<?xml version="1.0" ?>\n')
        junit.write('<testsuites>\n')
        for pkg in testdata.keys():
            testsuite = testdata[pkg]
            junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
                testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
            for prob in testsuite['problems'].keys():
                p = testsuite['problems'][prob]
                junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                    p['classname'], prob, p['time']))
                if p['skipped']:
                    # the TAP output shows a skipped test
                    junit.write('      <skipped/>\n')
                elif p['failed']:
                    # the test crashed with an error:
                    # we show the stderr output under <error>
                    junit.write('      <error type="crash">\n')
                    junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                    # many times error messages also go to stdout so we print both
                    junit.write("stdout:\n")
                    if len(p['stdout'])>0:
                        for line in p['stdout']:
                            junit.write("%s\n"%line.rstrip())
                    junit.write("\nstderr:\n")
                    for line in p['stderr']:
                        junit.write("%s\n"%line.rstrip())
                    junit.write("]]>")
                    junit.write('      </error>\n')
                elif len(p['diff'])>0:
                    # the test output did not match the stored output file:
                    # we show the diff between new and old output under <failure>
                    junit.write('      <failure type="output">\n')
                    junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                    for line in p['diff']:
                        junit.write("%s\n"%line.rstrip())
                    junit.write("]]>")
                    junit.write('      </failure>\n')
                junit.write('    </testcase>\n')
            junit.write('  </testsuite>\n')
        junit.write('</testsuites>')
    os.chdir(startdir)
    return
307
def main():
    """Parse command-line options and run the requested report actions."""
    parser = optparse.OptionParser(usage="%prog [options]")
    default_directory = os.path.join(os.environ.get('PETSC_ARCH',''),
                                     'tests','counts')
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing results of PETSc test system',
                      default=default_directory)
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report on the n number expensive jobs',
                      default=0)
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    parser.add_option('-s', '--show', dest='show_results', action="store_true",
                      help='Summarize the test results')
    options, args = parser.parse_args()

    # Positional arguments are not accepted
    if args:
        parser.print_usage()
        return

    # gmakefile.test is invoked frequently for searches and in those
    # cases we want to perform actions, but we don't want to
    # generate_xml or show the summarized results.
    if not options.show_fail:
        summarize_results(options.directory, options.make, int(options.time),
                          options.elapsed_time, options.show_results)
    testresults = get_test_data(options.directory)

    if options.show_fail:
        show_fail(testresults)
    elif options.show_results:
        # Don't generate xml if doing searches
        generate_xml(testresults, options.directory)
348
if __name__ == "__main__":
    main()
351