xref: /petsc/config/report_tests.py (revision 65f8aed5f7eaa1e2ef2ddeffe666264e0669c876)
1#!/usr/bin/env python
2from __future__ import print_function
3import glob, os, re
4import optparse
5import inspect
6
7"""
8Quick script for parsing the output of the test system and summarizing the results.
9"""
10
def inInstallDir():
  """
  Return True when this script runs from an installed PETSc tree.

  After installation this file lives under:
       <PREFIX>/share/petsc/examples/config/
  whereas in a source tree it lives under:
       <PETSC_DIR>/config/
  The last four path components of the script's directory distinguish
  the two layouts.
  """
  scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  parts = scriptdir.split(os.path.sep)
  installsuffix = os.path.join('share', 'petsc', 'examples', 'config')
  # Need strictly more than four components before a four-component
  # suffix comparison is meaningful (matches the original guard).
  if len(parts) > 4 and os.path.sep.join(parts[-4:]) == installsuffix:
    return True
  return False
29
def summarize_results(directory,make,ntime,etime):
  '''Loop over all of the results files and summarize the results.

  Arguments:
    directory -- path holding the *.counts files written by the test harness
    make      -- name of the make executable to show in the rerun hint
    ntime     -- if > 0, also report the ntime most expensive tests
    etime     -- optional wall-clock time (string) to include in the report

  Prints the summary to stdout and returns None.  The caller's working
  directory is always restored, even if an error occurs mid-report.
  '''
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No tests run')
    return
  try:
    summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
             'time':0, 'cputime':0}
    timesummary={}
    cputimesummary={}
    timelist=[]
    for cfile in glob.glob('*.counts'):
      with open(cfile, 'r') as f:
        for line in f:
          l = line.split()
          if not l: continue   # tolerate blank lines in a counts file
          if l[0] == 'failures':
             # 'failures' lines carry test names, not counts; accumulate the list
             if len(l)>1:
               summary[l[0]] += l[1:]
          elif l[0] == 'time':
             if len(l)==1: continue
             summary[l[0]] += float(l[1])
             summary['cputime'] += float(l[2])
             timesummary[cfile]=float(l[1])
             cputimesummary[cfile]=float(l[2])
             timelist.append(float(l[1]))
          elif l[0] not in summary:
             continue
          else:
             summary[l[0]] += int(l[1])

    if not summary['total']:
      # Directory exists but holds no usable counts data; the percentage
      # report below would otherwise divide by zero.
      print('# No tests run')
      return

    failstr=' '.join(summary['failures'])
    print("\n# -------------")
    print("#   Summary    ")
    print("# -------------")
    if failstr.strip(): print("# FAILED " + failstr)

    for t in "success failed todo skip".split():
      percent=summary[t]/float(summary['total'])*100
      print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], summary['total'], percent))
    print("#")
    if etime:
      print("# Wall clock time for tests: %s sec"% etime)
    print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])

    if failstr.strip():
      # Turn TAP failure labels into gmake test targets (raw strings: '\w'
      # in a plain string is an invalid escape on modern Python)
      fail_targets=(
          re.sub(r'(?<=[0-9]_\w)_.*','',
          re.sub(r'cmd-','',
          re.sub(r'diff-','',failstr+' ')))
          )
      # Strip off characters from subtests
      fail_list=[]
      for failure in fail_targets.split():
        if failure.count('-')>1:
            fail_list.append('-'.join(failure.split('-')[:-1]))
        else:
            fail_list.append(failure)
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # Make the message nice: installed trees use a different makefile name
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test search='" + fail_targets.strip()+"'")

    if ntime>0:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Do a double loop to print in descending-time order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            # Anchor the pattern: previously '.counts' let '.' match any char
            print("#   %s: %.2f sec / %.2f sec" % (re.sub(r'\.counts$','',cf), timesummary[cf], cputimesummary[cf]))
  finally:
    # Always restore the caller's working directory
    os.chdir(startdir)
  return
110
def generate_xml(directory):
    """Summarize the *.counts files in directory as jUnit-formatted XML.

    Writes <directory>/../testresults.xml.  For each problem it also tries
    to read the harness output files (diff-*.out, *.err, *.tmp) from the
    example run directory so failures and errors carry their diagnostics.
    Missing output files are treated as empty.  Returns None; the caller's
    working directory is always restored.
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        print('# No tests run')
        return
    try:
        # loop over *.counts files for all the problems tested in the test suite
        testdata = {}
        for cfile in glob.glob('*.counts'):
            # first we get rid of the .counts extension, then we split the name
            # to recover the problem name and the package it belongs to
            fname = cfile.split('.')[0]
            testname = fname.split('-')
            probname = ''.join(testname[1:])
            # split the package into its subcomponents of PETSc module
            # (e.g.: snes) and test type (e.g.: tutorials)
            testname_list = testname[0].split('_')
            pkgname = testname_list[0]
            testtype = testname_list[-1]
            # assemble the folder path for problem outputs from any possible
            # subpackage names and test suffixes
            testname_short = testname_list[:-1]
            prob_subdir = os.path.join(*testname_short)
            probfolder = 'run%s'%probname
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
            if not os.path.exists(probdir):
                # fall back to the folder name without the subtest suffix
                probfolder = probfolder.split('_')[0]
                probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
            # read the harness output files; a missing file means no output
            difflines   = _readlines_if_exists('%s/diff-%s.out'%(probdir, probfolder))
            stderrlines = _readlines_if_exists('%s/%s.err'%(probdir, probfolder))
            stdoutlines = _readlines_if_exists('%s/%s.tmp'%(probdir, probname))
            # join the package, subpackage and problem type names into a "class"
            classname = '.'.join(testname_list)
            # if this is the first time we see this package, initialize its dict
            if pkgname not in testdata:
                testdata[pkgname] = {
                    'total':0,
                    'success':0,
                    'failed':0,
                    'errors':0,
                    'todo':0,
                    'skip':0,
                    'time':0,
                    'problems':{}
                }
            # add the dict for the problem into the dict for the package
            testdata[pkgname]['problems'][probname] = {
                'classname':classname,
                'time':0,
                'failed':False,
                'skipped':False,
                'diff':difflines,
                'stdout':stdoutlines,
                'stderr':stderrlines
            }
            # process the *.counts file and increment problem status trackers
            if len(testdata[pkgname]['problems'][probname]['stderr'])>0:
                testdata[pkgname]['errors'] += 1
            with open(cfile, 'r') as f:
                for line in f:
                    l = line.split()
                    if not l: continue  # tolerate blank lines
                    if l[0] == 'failed':
                        testdata[pkgname]['problems'][probname][l[0]] = True
                        testdata[pkgname][l[0]] += 1
                    elif l[0] == 'time':
                        if len(l)==1: continue
                        testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                        testdata[pkgname][l[0]] += float(l[1])
                    elif l[0] == 'skip':
                        # the problem dict key is 'skipped' (the XML emission
                        # below reads it); the old code set a dead 'skip' key
                        # so <skipped/> was never written
                        testdata[pkgname]['problems'][probname]['skipped'] = True
                        testdata[pkgname][l[0]] += 1
                    elif l[0] not in testdata[pkgname]:
                        continue
                    else:
                        testdata[pkgname][l[0]] += 1
        # at this point we have the complete test results in dictionary
        # structures; write them into a jUnit formatted XML file.  Use a
        # context manager so the handle is closed even on error.
        with open('../testresults.xml', 'w') as junit:
            junit.write('<?xml version="1.0" ?>\n')
            junit.write('<testsuites>\n')
            for pkg, testsuite in testdata.items():
                junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
                    testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
                for prob, p in testsuite['problems'].items():
                    junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                        p['classname'], prob, p['time']))
                    if p['skipped']:
                        # the TAP output shows a skipped test
                        junit.write('      <skipped/>\n')
                    elif len(p['stderr'])>0:
                        # the test crashed; show the stderr output under <error>
                        junit.write('      <error type="crash">\n')
                        _write_cdata(junit, p['stderr'])
                        junit.write('      </error>\n')
                    elif len(p['diff'])>0:
                        # output did not match the stored output file; show the
                        # diff between new output and old output under <failure>
                        junit.write('      <failure type="output">\n')
                        _write_cdata(junit, p['diff'])
                        junit.write('      </failure>\n')
                    elif len(p['stdout'])>0:
                        # success: include stdout (capped at 1024 lines) for
                        # manual sanity-checks
                        junit.write('      <system-out>\n')
                        _write_cdata(junit, p['stdout'][:1024])
                        junit.write('      </system-out>\n')
                    junit.write('    </testcase>\n')
                junit.write('  </testsuite>\n')
            junit.write('</testsuites>')
    finally:
        # always restore the caller's working directory
        os.chdir(startdir)
    return

def _readlines_if_exists(path):
    """Return the lines of path, or [] when it cannot be read."""
    try:
        with open(path, 'r') as fh:
            return fh.readlines()
    except IOError:
        return []

def _write_cdata(junit, lines):
    """Write lines into a CDATA section on junit (preserves whitespace)."""
    junit.write("<![CDATA[\n")
    for line in lines:
        junit.write("%s\n"%line.rstrip())
    junit.write("]]>")
257
def main():
    """Command-line driver: summarize test results, then emit jUnit XML."""
    default_dir = os.path.join(os.environ.get('PETSC_ARCH', ''), 'tests', 'counts')
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option('-d', '--directory', dest='directory', default=default_dir,
                      help='Directory containing results of petsc test system')
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time', default=None,
                      help='Report elapsed time in output')
    parser.add_option('-m', '--make', dest='make', default='make',
                      help='make executable to report in summary')
    parser.add_option('-t', '--time', dest='time', default=0,
                      help='-t n: Report on the n number expensive jobs')
    opts, extra_args = parser.parse_args()

    # Positional arguments are not accepted
    if extra_args:
        parser.print_usage()
        return

    summarize_results(opts.directory, opts.make, int(opts.time), opts.elapsed_time)

    generate_xml(opts.directory)
283
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
286