#!/usr/bin/env python
from __future__ import print_function
import glob, os, re, stat
import optparse
import inspect

"""
Quick script for parsing the output of the test system and summarizing the results.
"""

def inInstallDir():
  """
  When PETSc is installed, this file is installed in:
       <PREFIX>/share/petsc/examples/config/report_tests.py
  otherwise the path is:
       <PETSC_DIR>/config/report_tests.py
  We use this difference to determine whether we are running from the install directory.
  """
  thisscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  dirlist=thisscriptdir.split(os.path.sep)
  if len(dirlist)>4:
    lastfour=os.path.sep.join(dirlist[len(dirlist)-4:])
    if lastfour==os.path.join('share','petsc','examples','config'):
      return True
    else:
      return False
  else:
    return False

def summarize_results(directory,make,ntime,etime,show_results):
  ''' Loop over all of the results files and summarize the results'''
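  # Each *.counts file is assumed to hold one whitespace-separated record per line,
  # e.g. (illustrative):
  #   total 5
  #   success 4
  #   failed 1
  #   failures diff-snes_tutorials-ex19_1
  #   todo 0
  #   skip 0
  #   time 1.52 3.04      (wall-clock seconds, CPU seconds)
  # Unrecognized keys are skipped below.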
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No test results in ', directory)
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  if show_results:
    print("\n# -------------")
    print("#   Summary    ")
    print("# -------------")
    if failstr.strip(): print("# FAILED " + failstr)

    for t in "success failed todo skip".split():
      # Guard against division by zero when no tests were run
      percent=summary[t]/float(summary['total'])*100 if summary['total'] else 0.0
      print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
    print("#")
    if etime:
      print("# Wall clock time for tests: %s sec"% etime)
    print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])

  if failstr.strip():
      fail_targets=(
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' '))
          )
      # Strip off characters from subtests
      fail_list=[]
      for failure in fail_targets.split():
         fail_list.append(failure.split('+')[0])
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # create simple little script
      sfile=os.path.join(os.path.dirname(os.path.abspath(os.curdir)),'echofailures.sh')
      with open(sfile,'w') as f:
          f.write('echo '+fail_targets.strip())
      st = os.stat(sfile)
      os.chmod(sfile, st.st_mode | stat.S_IEXEC)
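      # The generated script is trivial; e.g. (illustrative) it might contain just
      #   echo snes_tutorials-ex19_1 ts_tutorials-ex5_2
      # so that other tooling (presumably the test makefile's test-fail target, per the
      # hint printed below) can recover the list of failed targets.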

      # Make the message nice
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      if show_results:
        print("#\n# To rerun failed tests: ")
        print("#     "+make+" -f "+makefile+" test test-fail=1")

  if ntime>0 and show_results:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Do a double loop to sort in order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            print("#   %s: %.2f sec / %.2f sec" % (re.sub(r'\.counts$','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return

def get_test_data(directory):
    """
    Create a nested dictionary of per-package and per-problem test results
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''
        for i in range(1,len(testname)):
            probname += testname[i]
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorials)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
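        # Worked example (assumed file name): 'snes_tutorials-ex19_1.counts' yields
        #   fname='snes_tutorials-ex19_1', probname='ex19_1', pkgname='snes',
        #   testtype='tutorials', probdir='../snes/examples/tutorials/runex19_1'
        # (relative to the counts directory).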
        probfullpath=os.path.normpath(os.path.join(directory,probdir))
        # assemble the final full folder path for problem outputs and read the files
        try:
            with open('%s/diff-%s.out'%(probdir, probfolder),'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            difflines = []
        try:
            with open('%s/%s.err'%(probdir, probfolder),'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open('%s/%s.tmp'%(probdir, probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and problem type names into a "class"
        classname = pkgname
        for item in testname_list[1:]:
            classname += '.%s'%item
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata.keys():
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if l[0] == 'time':
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] in testdata[pkgname].keys():
                    # This block includes total, success, failed, skip, todo
                    num_int=int(l[1])
                    testdata[pkgname][l[0]] += num_int
                    if l[0] in ['failed']:
                        # Non-empty stderr indicates a crash even if the reported failed count is zero
                        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
                            if not num_int: num_int=1
                        if num_int:
                            testdata[pkgname]['errors'] += 1
                            testdata[pkgname]['problems'][probname][l[0]] = True
                    if l[0] in ['skip'] and num_int:
                        testdata[pkgname]['problems'][probname][l[0]] = True
                else:
                    continue
    os.chdir(startdir)  # Keep function in good state
    return testdata

def show_fail(testdata):
    """ Show the failures and commands to run them
    """
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            cdbase='cd '+p['probdir']+' && '
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                pass
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error;
                # print the command that reproduces the crash
                shbase=os.path.join(p['probdir'], p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file;
                # print the diff command so the comparison can be rerun
                shbase=os.path.join(p['probdir'], 'diff-'+p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
    return

def generate_xml(testdata,directory):
    """ Write testdata information into a JUnit-formatted XML file
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    junit = open('../testresults.xml', 'w')
    junit.write('<?xml version="1.0" ?>\n')
    junit.write('<testsuites>\n')
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
            testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                p['classname'], prob, p['time']))
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                junit.write('      <skipped/>\n')
            elif p['failed']:
                # if we got here, the test crashed with an error
                # we show the stderr output under <error>
                junit.write('      <error type="crash">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                # many times error messages also go to stdout so we print both
                junit.write("stdout:\n")
                if len(p['stdout'])>0:
                    for line in p['stdout']:
                        junit.write("%s\n"%line.rstrip())
                junit.write("\nstderr:\n")
                for line in p['stderr']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </error>\n')
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we show the diff between new output and old output under <failure>
                junit.write('      <failure type="output">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['diff']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </failure>\n')
            junit.write('    </testcase>\n')
        junit.write('  </testsuite>\n')
    junit.write('</testsuites>')
    junit.close()
    os.chdir(startdir)
    return

def main():
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing results of the PETSc test system',
                      default=os.path.join(os.environ.get('PETSC_ARCH',''),
                                           'tests','counts'))
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report on the n most expensive jobs',
                      default=0)
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    parser.add_option('-s', '--show', dest='show_results', action="store_true",
                      help='Summarize the test results')
    options, args = parser.parse_args()

    # Process arguments
    if len(args) > 0:
      parser.print_usage()
      return

    # gmakefile.test is invoked frequently for searches; in those cases we still
    # want to perform actions, but we do not want to generate the XML file or
    # show the summarized results.

    if not options.show_fail:
      summarize_results(options.directory,options.make,int(options.time),
                        options.elapsed_time,options.show_results)
    testresults=get_test_data(options.directory)

    if options.show_fail:
      show_fail(testresults)
    # Don't generate XML if doing searches
    elif options.show_results:
      generate_xml(testresults, options.directory)

if __name__ == "__main__":
    main()