#!/usr/bin/env python
"""
Quick script for parsing the output of the test system and summarizing the results.
"""
from __future__ import print_function
import glob, os, re
import optparse
import inspect

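# Typical invocations (illustrative; the counts directory defaults to
# $PETSC_ARCH/tests/counts and is written by the PETSc test harness):
#   ./config/report_tests.py          # print a summary and write testresults.xml
#   ./config/report_tests.py -t 5     # also list the 5 most expensive tests
#   ./config/report_tests.py -f       # show failed tests and how to rerun them
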
def inInstallDir():
  """
  When PETSc is installed, this file is installed in:
       <PREFIX>/share/petsc/examples/config/report_tests.py
  otherwise the path is:
       <PETSC_DIR>/config/report_tests.py
  We use this difference to determine if we are in the install directory.
  """
  thisscriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  dirlist=thisscriptdir.split(os.path.sep)
  if len(dirlist)>4:
    lastfour=os.path.sep.join(dirlist[-4:])
    return lastfour==os.path.join('share','petsc','examples','config')
  return False

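# Example (hypothetical prefix): /opt/petsc/share/petsc/examples/config ends with
# share/petsc/examples/config, so inInstallDir() returns True there.
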
def summarize_results(directory,make,ntime,etime):
  ''' Loop over all of the results files and summarize the results'''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No tests run')
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
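  # Each *.counts file is written by the test harness; the loop below expects
  # whitespace-separated lines of the form (illustrative values):
  #   total 3
  #   success 2
  #   failed 1
  #   failures diff-snes_tutorials-runex1_2
  #   time 1.25 2.50        (wall-clock seconds, CPU seconds)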
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if not l: continue   # tolerate blank lines
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  print("\n# -------------")
  print("#   Summary    ")
  print("# -------------")
  if failstr.strip(): print("# FAILED " + failstr)

  for t in "success failed todo skip".split():
    percent=summary[t]/float(summary['total'])*100 if summary['total'] else 0.0
    print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
  print("#")
  if etime:
    print("# Wall clock time for tests: %s sec"% etime)
  print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])

  if failstr.strip():
      fail_targets=re.sub('cmd-','',re.sub('diff-','',failstr+' '))
      # Strip off subtest suffixes so a single glob pattern covers all subtests,
      # e.g. (hypothetical name) snes_tutorials-runex1_sub_2 -> snes_tutorials-runex1_*
      fail_list=[]
      for failure in fail_targets.split():
        if failure.split('-')[1].count('_')>1:
            froot=failure.split('-')[0]
            flabel=failure.split('-')[1].split('_')[0]
            fail_list.append(froot+'-'+flabel+'_*')
        elif failure.count('-')>1:
            fail_list.append('-'.join(failure.split('-')[:-1]))
        else:
            fail_list.append(failure)
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # Make the message nice
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test globsearch='" + fail_targets.strip()+"'")

  if ntime>0:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Double loop so that tests are printed in order of decreasing time
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            print("#   %s: %.2f sec / %.2f sec" % (re.sub(r'\.counts$','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return

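# Illustrative summary output (values are made up):
#   # -------------
#   #   Summary
#   # -------------
#   # success 98/100 tests (98.0%)
#   # failed 2/100 tests (2.0%)
#   # todo 0/100 tests (0.0%)
#   # skip 0/100 tests (0.0%)
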
def get_test_data(directory):
    """
    Create dictionary structure with test data
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''.join(testname[1:])
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorial)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s'%probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        probfullpath=os.path.normpath(os.path.join(directory,probdir))
        # assemble the final full folder path for problem outputs and read the files
        try:
            with open('%s/diff-%s.out'%(probdir, probfolder),'r') as probdiff:
                difflines = probdiff.readlines()
        except IOError:
            difflines = []
        try:
            with open('%s/%s.err'%(probdir, probfolder),'r') as probstderr:
                stderrlines = probstderr.readlines()
        except IOError:
            stderrlines = []
        try:
            with open('%s/%s.tmp'%(probdir, probname), 'r') as probstdout:
                stdoutlines = probstdout.readlines()
        except IOError:
            stdoutlines = []
        # join the package, subpackage and problem type names into a "class"
        classname = pkgname
        for item in testname_list[1:]:
            classname += '.%s'%item
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata:
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
            testdata[pkgname]['errors'] += 1
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if not l:
                    continue
                if l[0] == 'failed':
                    testdata[pkgname]['problems'][probname]['failed'] = True
                    testdata[pkgname]['failed'] += 1
                elif l[0] == 'time':
                    if len(l)==1: continue
                    testdata[pkgname]['problems'][probname]['time'] = float(l[1])
                    testdata[pkgname]['time'] += float(l[1])
                elif l[0] == 'skip':
                    # the problem dict uses the key 'skipped', the package dict 'skip'
                    testdata[pkgname]['problems'][probname]['skipped'] = True
                    testdata[pkgname]['skip'] += 1
                elif l[0] not in testdata[pkgname]:
                    continue
                else:
                    testdata[pkgname][l[0]] += 1
    os.chdir(startdir)  # Keep function in good state
    return testdata

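# The returned structure is a nested dict, accessed e.g. as (hypothetical names):
#   testdata['snes']['problems']['ex1_1']['time']
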
def show_fail(testdata):
    """ Show the failures and commands to run them
    """
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            cdbase='cd '+p['probdir']+' && '
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                pass
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error
                # we print the command that reproduces the crash
                shbase=os.path.join(p['probdir'], p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shmatches=glob.glob(shbase+"*")
                    if not shmatches: continue
                    shfile=shmatches[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we print the command that reproduces the diff
                shbase=os.path.join(p['probdir'], 'diff-'+p['fullname'])
                shfile=shbase+".sh"
                if not os.path.exists(shfile):
                    shmatches=glob.glob(shbase+"*")
                    if not shmatches: continue
                    shfile=shmatches[0]
                with open(shfile, 'r') as sh:
                    cmd = sh.read()
                print(p['fullname']+': '+cdbase+cmd.split('>')[0])
    return

def generate_xml(testdata,directory):
    """ write testdata information into a jUnit formatted XML file
    """
    startdir= os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    junit = open('../testresults.xml', 'w')
    junit.write('<?xml version="1.0" ?>\n')
    junit.write('<testsuites>\n')
    for pkg in testdata.keys():
        testsuite = testdata[pkg]
        junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
            testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
        for prob in testsuite['problems'].keys():
            p = testsuite['problems'][prob]
            junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                p['classname'], prob, p['time']))
            if p['skipped']:
                # if we got here, the TAP output shows a skipped test
                junit.write('      <skipped/>\n')
            elif len(p['stderr'])>0:
                # if we got here, the test crashed with an error
                # we show the stderr output under <error>
                junit.write('      <error type="crash">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['stderr']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>\n")
                junit.write('      </error>\n')
            elif len(p['diff'])>0:
                # if we got here, the test output did not match the stored output file
                # we show the diff between new output and old output under <failure>
                junit.write('      <failure type="output">\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                for line in p['diff']:
                    junit.write("%s\n"%line.rstrip())
                junit.write("]]>\n")
                junit.write('      </failure>\n')
            elif len(p['stdout'])>0:
                # if we got here, the test succeeded so we just show the stdout
                # for manual sanity-checks (capped at 1024 lines to keep the file small)
                junit.write('      <system-out>\n')
                junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                count = 0
                for line in p['stdout']:
                    junit.write("%s\n"%line.rstrip())
                    count += 1
                    if count >= 1024:
                        break
                junit.write("]]>\n")
                junit.write('      </system-out>\n')
            junit.write('    </testcase>\n')
        junit.write('  </testsuite>\n')
    junit.write('</testsuites>\n')
    junit.close()
    os.chdir(startdir)
    return

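# Shape of the generated testresults.xml (illustrative; names are hypothetical):
#   <?xml version="1.0" ?>
#   <testsuites>
#     <testsuite errors="0" failures="1" name="snes" tests="10">
#       <testcase classname="snes.tutorials" name="ex1_1" time="0.500000">
#         ...
#       </testcase>
#     </testsuite>
#   </testsuites>
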
def main():
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory',
                      help='Directory containing results of petsc test system',
                      default=os.path.join(os.environ.get('PETSC_ARCH',''),
                                           'tests','counts'))
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time',
                      help='Report elapsed time in output',
                      default=None)
    parser.add_option('-m', '--make', dest='make',
                      help='make executable to report in summary',
                      default='make')
    parser.add_option('-t', '--time', dest='time',
                      help='-t n: Report on the n most expensive jobs',
                      default=0)
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    options, args = parser.parse_args()

    # Process arguments
    if len(args) > 0:
      parser.print_usage()
      return

    if not options.show_fail:
      summarize_results(options.directory,options.make,int(options.time),options.elapsed_time)
    testresults=get_test_data(options.directory)

    if options.show_fail:
      show_fail(testresults)
    else:
      generate_xml(testresults, options.directory)

if __name__ == "__main__":
    main()