xref: /petsc/config/report_tests.py (revision 09b68a49ed2854d1e4985cc2aa6af33c7c4e69b3)
1df3bd252SSatish Balay#!/usr/bin/env python3
25b6bfdb9SJed Brownfrom __future__ import print_function
3feeaa4f6SScott Krugerimport glob, os, re, stat
48ccd5183SScott Krugerimport optparse
53054ff8cSScott Krugerimport inspect
63054ff8cSScott Kruger
78ccd5183SScott Kruger"""
88ccd5183SScott KrugerQuick script for parsing the output of the test system and summarizing the results.
98ccd5183SScott Kruger"""
108ccd5183SScott Kruger
def inInstallDir():
  """
  Report whether this script is running from an installed PETSc tree.

  When PETSc is installed, this file lives in:
       <PREFIX>/share/petsc/examples/config/gmakegentest.py
  otherwise the path is:
       <PETSC_DIR>/config/gmakegentest.py
  The tail of this script's directory path distinguishes the two cases.
  """
  script_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
  parts = script_dir.split(os.path.sep)
  if len(parts) <= 4:
    return False
  tail = os.path.sep.join(parts[-4:])
  return tail == os.path.join('share', 'petsc', 'examples', 'config')
293054ff8cSScott Kruger
def summarize_results(directory,make,ntime,etime,show_results):
  """
  Loop over all of the *.counts result files and summarize the results.

  Args:
    directory:    directory holding the *.counts files
    make:         name of the make executable to show in the rerun hint
    ntime:        if > 0, also report the ntime most expensive tests
    etime:        wall clock time for the whole run (printed when given)
    show_results: if True print the full summary; otherwise only failures

  Prints a human-readable summary and, when failures exist, writes an
  executable echofailures.sh helper script next to the counts directory.
  """
  startdir = os.getcwd()
  try:
    os.chdir(directory)
  except OSError:
    print('# No test results in ', directory)
    return
  summary={'total':0,'success':0,'failed':0,'failures':[],'todo':0,'skip':0,
           'time':0, 'cputime':0}
  timesummary={}
  cputimesummary={}
  timelist=[]
  for cfile in glob.glob('*.counts'):
    with open(cfile, 'r') as f:
      for line in f:
        l = line.split()
        if not l:
          continue  # tolerate blank lines in counts files (was an IndexError)
        if l[0] == 'failures':
           if len(l)>1:
             summary[l[0]] += l[1:]
        elif l[0] == 'time':
           if len(l)==1: continue
           summary[l[0]] += float(l[1])
           summary['cputime'] += float(l[2])
           timesummary[cfile]=float(l[1])
           cputimesummary[cfile]=float(l[2])
           timelist.append(float(l[1]))
        elif l[0] not in summary:
           continue
        else:
           summary[l[0]] += int(l[1])

  failstr=' '.join(summary['failures'])
  if show_results:
    print("\n# -------------")
    print("#   Summary    ")
    print("# -------------")
    if failstr.strip(): print("# FAILED " + failstr)

    # Guard against an empty run so the percentages do not divide by zero
    total = summary['total']
    for t in "success failed todo skip".split():
      percent = summary[t]/float(total)*100 if total else 0.0
      print("# %s %d/%d tests (%3.1f%%)" % (t, summary[t], total, percent))
    print("#")
    if etime:
      print("# Wall clock time for tests: %s sec"% etime)
    print("# Approximate CPU time (not incl. build time): %s sec"% summary['cputime'])
  else:
    if failstr.strip(): print("\n\n# FAILED " + failstr)

  if failstr.strip():
      # diff-/cmd- prefixes come from the harness; strip to get make targets
      fail_targets=(
          re.sub('cmd-','',
          re.sub('diff-','',failstr+' '))
          )
      # Strip off characters from subtests
      fail_list=[]
      for failure in fail_targets.split():
         fail_list.append(failure.split('+')[0])
      fail_list=list(set(fail_list))
      fail_targets=' '.join(fail_list)

      # create simple little script so CI/users can echo the failed targets
      sfile=os.path.join(os.path.dirname(os.path.abspath(os.curdir)),'echofailures.sh')
      with open(sfile,'w') as f:
          f.write('echo '+fail_targets.strip())
      st = os.stat(sfile)
      os.chmod(sfile, st.st_mode | stat.S_IEXEC)

      # Make the message nice: installed trees use gmakefile.test
      makefile="gmakefile.test" if inInstallDir() else "gmakefile"

      print("#\n# To rerun failed tests: ")
      print("#     "+make+" -f "+makefile+" test test-fail=1")

  if ntime>0 and show_results:
      print("#\n# Timing summary (actual test time / total CPU time): ")
      timelist=list(set(timelist))
      timelist.sort(reverse=True)
      nlim=(ntime if ntime<len(timelist) else len(timelist))
      # Do a double loop to sort in order
      for timelimit in timelist[0:nlim]:
        for cf in timesummary:
          if timesummary[cf] == timelimit:
            print("#   %s: %.2f sec / %.2f sec" % (re.sub('.counts','',cf), timesummary[cf], cputimesummary[cf]))
  os.chdir(startdir)
  return
116bbf1c217SScott Kruger
def _read_lines(path):
    """Return the lines of path, or an empty list if it cannot be read."""
    try:
        with open(path, 'r') as f:
            return f.readlines()
    except IOError:
        return []

def get_test_data(directory):
    """
    Create dictionary structure with test data.

    Args:
        directory: directory holding the *.counts files

    Returns:
        dict mapping package name (e.g. 'snes') to a dict of counters
        ('total', 'success', 'failed', 'errors', 'todo', 'skip', 'time')
        plus a 'problems' dict keyed by problem name; each problem entry
        carries its classname, time, failed/skipped flags, captured
        diff/stdout/stderr lines, output directory and full test name.
        Returns None if the directory cannot be entered.
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    # loop over *.counts files for all the problems tested in the test suite
    testdata = {}
    for cfile in glob.glob('*.counts'):
        # first we get rid of the .counts extension, then we split the name in two
        # to recover the problem name and the package it belongs to
        fname = cfile.split('.')[0]
        testname = fname.split('-')
        probname = ''.join(testname[1:])
        # we split the package into its subcomponents of PETSc module (e.g.: snes)
        # and test type (e.g.: tutorial)
        testname_list = testname[0].split('_')
        pkgname = testname_list[0]
        testtype = testname_list[-1]
        # in order to correctly assemble the folder path for problem outputs, we
        # iterate over any possible subpackage names and test suffixes
        testname_short = testname_list[:-1]
        prob_subdir = os.path.join('', *testname_short)
        probfolder = 'run%s' % probname
        probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        if not os.path.exists(probdir):
            # fall back to the base problem name (strip subtest suffix)
            probfolder = probfolder.split('_')[0]
            probdir = os.path.join('..', prob_subdir, 'examples', testtype, probfolder)
        probfullpath = os.path.normpath(os.path.join(directory, probdir))
        # assemble the final full folder path for problem outputs and read the
        # files; missing files simply yield empty line lists
        difflines = _read_lines('%s/diff-%s.out' % (probdir, probfolder))
        stderrlines = _read_lines('%s/%s.err' % (probdir, probfolder))
        stdoutlines = _read_lines('%s/%s.tmp' % (probdir, probname))
        # join the package, subpackage and problem type names into a "class"
        classname = '.'.join(testname_list)
        # if this is the first time we see this package, initialize its dict
        if pkgname not in testdata:
            testdata[pkgname] = {
                'total':0,
                'success':0,
                'failed':0,
                'errors':0,
                'todo':0,
                'skip':0,
                'time':0,
                'problems':{}
            }
        # add the dict for the problem into the dict for the package
        testdata[pkgname]['problems'][probname] = {
            'classname':classname,
            'time':0,
            'failed':False,
            'skipped':False,
            'diff':difflines,
            'stdout':stdoutlines,
            'stderr':stderrlines,
            'probdir':probfullpath,
            'fullname':fname
        }
        # process the *.counts file and increment problem status trackers
        with open(cfile, 'r') as f:
            for line in f:
                l = line.split()
                if len(l) < 2:
                    # tolerate blank or value-less lines (was an IndexError)
                    continue
                if l[0] == 'time':
                    testdata[pkgname]['problems'][probname][l[0]] = float(l[1])
                    testdata[pkgname][l[0]] += float(l[1])
                elif l[0] in testdata[pkgname]:
                    # This block includes total, success, failed, skip, todo
                    num_int = int(l[1])
                    testdata[pkgname][l[0]] += num_int
                    if l[0] == 'failed':
                        # If non-zero error code and non-zero stderr, something wrong
                        if len(testdata[pkgname]['problems'][probname]['stderr']) > 0:
                            if not num_int: num_int = 1
                        if num_int:
                            testdata[pkgname]['errors'] += 1
                            testdata[pkgname]['problems'][probname][l[0]] = True
                    if l[0] == 'skip' and num_int:
                        testdata[pkgname]['problems'][probname][l[0]] = True
    os.chdir(startdir)  # Keep function in good state
    return testdata
2206e5deea7SScott Kruger
def show_fail(testdata):
    """Show the failed tests and print the command needed to rerun each one."""
    def _print_rerun(prob, prefix, cd):
        # Locate the generated shell script: exact name first, else the
        # first glob match on the same base name.
        base = os.path.join(prob['probdir'], prefix + prob['fullname'])
        script = base + ".sh"
        if not os.path.exists(script):
            script = glob.glob(base + "*")[0]
        with open(script, 'r') as fh:
            contents = fh.read()
        # Drop the output-redirection part of the command line
        print(prob['fullname'] + ': ' + cd + contents.split('>')[0])

    for suite in testdata.values():
        for prob in suite['problems'].values():
            cd = 'cd ' + prob['probdir'] + ' && '
            if prob['skipped']:
                # skipped tests are already reported by the TAP output
                continue
            if len(prob['stderr']) > 0:
                # the test crashed with an error: rerun the raw command script
                _print_rerun(prob, '', cd)
            elif len(prob['diff']) > 0:
                # output did not match the stored file: rerun the diff script
                _print_rerun(prob, 'diff-', cd)
    return
2546e5deea7SScott Kruger
def generate_xml(testdata,directory):
    """
    Write testdata information into a jUnit formatted XML file.

    The report is written to ../testresults.xml relative to directory.
    Returns silently (writing nothing) if directory cannot be entered.

    Args:
        testdata:  dict as produced by get_test_data()
        directory: directory holding the *.counts files
    """
    startdir = os.getcwd()
    try:
        os.chdir(directory)
    except OSError:
        return
    try:
        # 'with' guarantees the report file is closed even if a write fails
        # (the original leaked the handle and the cwd change on error)
        with open('../testresults.xml', 'w') as junit:
            junit.write('<?xml version="1.0" ?>\n')
            junit.write('<testsuites>\n')
            for pkg in testdata.keys():
                testsuite = testdata[pkg]
                junit.write('  <testsuite errors="%i" failures="%i" name="%s" tests="%i">\n'%(
                    testsuite['errors'], testsuite['failed'], pkg, testsuite['total']))
                for prob in testsuite['problems'].keys():
                    p = testsuite['problems'][prob]
                    junit.write('    <testcase classname="%s" name="%s" time="%f">\n'%(
                        p['classname'], prob, p['time']))
                    if p['skipped']:
                        # if we got here, the TAP output shows a skipped test
                        junit.write('      <skipped/>\n')
                    elif p['failed']:
                        # if we got here, the test crashed with an error
                        # we show the stderr output under <error>
                        junit.write('      <error type="crash">\n')
                        junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                        # many times error messages also go to stdout so we print both
                        junit.write("stdout:\n")
                        if len(p['stdout'])>0:
                            for line in p['stdout']:
                                junit.write("%s\n"%line.rstrip())
                        junit.write("\nstderr:\n")
                        for line in p['stderr']:
                            junit.write("%s\n"%line.rstrip())
                        junit.write("]]>")
                        junit.write('      </error>\n')
                    elif len(p['diff'])>0:
                        # if we got here, the test output did not match the stored output file
                        # we show the diff between new output and old output under <failure>
                        junit.write('      <failure type="output">\n')
                        junit.write("<![CDATA[\n") # CDATA is necessary to preserve whitespace
                        for line in p['diff']:
                            junit.write("%s\n"%line.rstrip())
                        junit.write("]]>")
                        junit.write('      </failure>\n')
                    junit.write('    </testcase>\n')
                junit.write('  </testsuite>\n')
            junit.write('</testsuites>')
    finally:
        # always restore the caller's working directory
        os.chdir(startdir)
    return
3078ccd5183SScott Kruger
def main():
    """Command-line driver: parse options and report on PETSc test results."""
    parser = optparse.OptionParser(usage="%prog [options]")
    default_dir = os.path.join(os.environ.get('PETSC_ARCH', ''),
                               'tests', 'counts')
    parser.add_option('-d', '--directory', dest='directory', default=default_dir,
                      help='Directory containing results of PETSc test system')
    parser.add_option('-e', '--elapsed_time', dest='elapsed_time', default=None,
                      help='Report elapsed time in output')
    parser.add_option('-m', '--make', dest='make', default='make',
                      help='make executable to report in summary')
    parser.add_option('-t', '--time', dest='time', default=0,
                      help='-t n: Report on the n number expensive jobs')
    parser.add_option('-f', '--fail', dest='show_fail', action="store_true",
                      help='Show the failed tests and how to run them')
    parser.add_option('-s', '--show', dest='show_results', action="store_true",
                      help='Summarize the test results')
    opts, extra = parser.parse_args()

    # No positional arguments are accepted
    if extra:
        parser.print_usage()
        return

    # gmakefile.test is invoked frequently for searches and in those
    # cases we want to perform actions, but we don't want to
    # generate_xml or show the summarized results.
    if not opts.show_fail:
        summarize_results(opts.directory, opts.make, int(opts.time),
                          opts.elapsed_time, opts.show_results)
    results = get_test_data(opts.directory)

    if opts.show_fail:
        show_fail(results)
    # Don't generate xml if doing searches
    elif opts.show_results:
        generate_xml(results, opts.directory)

if __name__ == "__main__":
    main()
351