#!/usr/bin/env python
from __future__ import print_function
import glob, os, re, stat
import optparse
import inspect

"""
Quick script for parsing the output of the test system and summarizing the results.
"""

def inInstallDir():
  """
  When PETSc is installed, this file is installed in:
       <PREFIX>/share/petsc/examples/config/report_tests.py
  otherwise the path is:
       <PETSC_DIR>/config/report_tests.py
  We use this difference to determine whether we are in the install directory.
  """
  thisscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  dirlist=thisscriptdir.split(os.path.sep)
  if len(dirlist)<=4:
    return False
  lastfour=os.path.sep.join(dirlist[-4:])
  return lastfour==os.path.join('share','petsc','examples','config')

def summarize_results(directory,make,ntime,etime):
  '''Loop over all of the results files and summarize the results'''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No tests run')
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
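  # Each *.counts file is parsed as whitespace-separated key/value lines.
  # An assumed example (keys not listed in 'summary' above are ignored):
  #
  #   total 3
  #   success 2
  #   failed 1
  #   todo 0
  #   skip 0
  #   failures diff-snes_tutorials-ex1_1
  #   time 1.52 2.87
  #
  # where the two numbers on the 'time' line are wall-clock and CPU seconds.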
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if not l: continue   # skip blank lines
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  print("\n# -------------")
  print("#   Summary    ")
  print("# -------------")
  if failstr.strip(): print("# FAILED " + failstr)

  # Guard against an empty run so the percentages cannot divide by zero
  total = float(summary['total']) if summary['total'] else 1.0
  for t in "success failed todo skip".split():
    percent=summary[t]/total*100
    print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
  print("#")
  if etime:
    print("# Wall clock time for tests: %s sec"% etime)
  print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])

  if failstr.strip():
      fail_targets=(
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' '))
          )
      # Strip off characters from subtests
      fail_list=[]
      for failure in fail_targets.split():
         fail_list.append(failure.split('+')[0])
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # create simple little script
      sfile=os.path.join(os.path.dirname(os.path.abspath(os.curdir)),'echofailures.sh')
      with open(sfile,'w') as f:
          f.write('echo '+fail_targets.strip())
      st = os.stat(sfile)
      os.chmod(sfile, st.st_mode | stat.S_IEXEC)
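      # echofailures.sh just echoes the failed targets; presumably the
      # test harness's test-fail=1 path (advertised below) consumes it
      # to rerun only those tests.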

      #Make the message nice
      makefile = "gmakefile.test" if not inInstallDir() else "makefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test test-fail=1")

  if ntime>0:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=min(ntime,len(timelist))
      # Double loop so entries print in descending time order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            print("#   %s: %.2f sec / %.2f sec" % (re.sub(r'\.counts$','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return

def get_test_data(directory):
    """
    Create dictionary structure with test data
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we strip the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = os.path.splitext(cfile)[0]
        testname = fname.split('-')
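        # e.g. a hypothetical 'snes_tutorials-ex1_1.counts' gives
        # fname='snes_tutorials-ex1_1' and testname=['snes_tutorials', 'ex1_1'];
        # below this becomes probname='ex1_1', pkgname='snes', testtype='tutorials'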
        probname = ''.join(testname[1:])
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorial)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        probfullpath = os.path.normpath(os.path.join(directory,probdir))
        # assemble the final full folder path for problem outputs and read the files
        try:
            with open(os.path.join(probdir, 'diff-%s.out'%probfolder), 'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            difflines = []
        try:
            with open(os.path.join(probdir, '%s.err'%probfolder), 'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open(os.path.join(probdir, '%s.tmp'%probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and problem type names into a "class"
        classname = '.'.join(testname_list)
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata:
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
            testdata[pkgname]['errors'] += 1
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if not l: continue   # skip blank lines
                if l[0] == 'time':
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] in testdata[pkgname]:
                    num_int=int(l[1])
                    testdata[pkgname][l[0]] += num_int
                    if num_int and l[0] in ('failed','skip'):
                        # note: the per-problem dict uses 'skipped', not 'skip'
                        probkey = 'skipped' if l[0] == 'skip' else l[0]
                        testdata[pkgname]['problems'][probname][probkey] = True
    os.chdir(startdir)  # Keep function in good state
    return testdata

def show_fail(testdata):
    """ Show the failures and commands to run them
    """
    for pkg in testdata:
        testsuite = testdata[pkg]
        for prob in testsuite['problems']:
            p = testsuite['problems'][prob]
            cdbase='cd '+p['probdir']+' && '
            if p['skipped']:
                # the TAP output shows a skipped test; nothing to rerun
                pass
            elif len(p['stderr'])>0:
                # the test crashed with an error: print the command that
                # reproduces the crash
                shbase=os.path.join(p['probdir'], p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
            elif len(p['diff'])>0:
                # the test output did not match the stored output file: print
                # the diff command that shows the mismatch
                shbase=os.path.join(p['probdir'], 'diff-'+p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shfile=glob.glob(shbase+"*")[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
    return

def generate_xml(testdata,directory):
    """ Write testdata information into a JUnit-formatted XML file
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    junit = open('../testresults.xml', 'w')
    junit.write('<?xml version="1.0" ?>\n')
    junit.write('<testsuites>\n')
    for pkg in testdata:
        testsuite = testdata[pkg]
        junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
            testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
        for prob in testsuite['problems']:
            p = testsuite['problems'][prob]
            junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                p['classname'], prob, p['time']))
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                junit.write('      <skipped/>\n')
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error
                # we show the stderr output under <error>
                junit.write('      <error type="crash">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                # error messages often also go to stdout, so we print both
                if len(p['stdout'])>0:
                    for line in p['stdout']:
                        junit.write("%s\n"%line.rstrip())
                for line in p['stderr']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </error>\n')
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we show the diff between new output and old output under <failure>
                junit.write('      <failure type="output">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['diff']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>")
                junit.write('      </failure>\n')
            junit.write('    </testcase>\n')
        junit.write('  </testsuite>\n')
    junit.write('</testsuites>\n')
    junit.close()
    os.chdir(startdir)
    return

def main():
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing the results of the PETSc test system',
                      default=os.path.join(os.environ.get('PETSC_ARCH',''),
                                           'tests','counts'))
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report on the n most expensive jobs',
                      default=0)
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    options, args = parser.parse_args()

    # Process arguments
    if len(args) > 0:
      parser.print_usage()
      return

    if not options.show_fail:
      summarize_results(options.directory,options.make,int(options.time),options.elapsed_time)
    testresults=get_test_data(options.directory)

    if options.show_fail:
      show_fail(testresults)
    else:
      generate_xml(testresults, options.directory)

if __name__ == "__main__":
    main()