c
c--------------
c drvAllreduce
c--------------
c
c  Element-wise global sum of a length-m real array over all MPI
c  ranks; every rank receives the summed result.  Serial fallback
c  (numpe == 1) is a plain array copy.
c
c  eachproc : in  - this rank's contribution, dimension(m)
c  result   : out - global sum, dimension(m)
c  m        : in  - array length / MPI count
c
c  NOTE(review): the arrays are reduced as MPI_DOUBLE_PRECISION, so
c  they are assumed real*8 via implicit typing set up in common.h --
c  TODO confirm.
c
c  MPI statistics hooks (flags and counters come from common.h):
c    impistat  = 1 -> count/time as "AllReduce"       (iAllR/rAllR)
c    impistat  = 2 -> count/time as "AllReduce scalar" (iAllRScal/rAllRScal)
c    impistat2 = 1 -> barrier first, so the timer measures only the
c                     collective, not load imbalance
c  TMRC() is the wall-clock timer used for the rAllR* accumulators.
c
      subroutine drvAllreduce ( eachproc, result, m )
c
      include "common.h"
      include "mpif.h"
c
      dimension eachproc(m), result(m)
c
      if (numpe > 1) then
        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE ( eachproc, result, m,
     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr )
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
      else
c       single process: no communication needed
        result = eachproc
      endif
c
      return
      end
c
c------------------
c drvAllreducesclr
c------------------
c
c  Scalar variant of drvAllreduce: global sum of one real scalar
c  (count 1, MPI_DOUBLE_PRECISION, MPI_SUM) over all ranks.
c
c  eachproc : in  - this rank's scalar contribution
c  result   : out - global sum on every rank
c
c  Statistics/barrier/timing logic is identical to drvAllreduce
c  (see the header comment there).
c
      subroutine drvAllreducesclr ( eachproc, result )
c
      include "common.h"
      include "mpif.h"
c
      if (numpe > 1) then
        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE ( eachproc, result, 1,
     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr )
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
      else
        result = eachproc
      endif
c
      return
      end
c
c------------------
c drvAllreduceMaxInt
c------------------
c
c  Global MAX of one integer scalar over all ranks (count 1,
c  MPI_INTEGER, MPI_MAX); every rank receives the maximum.
c
c  eachproc : in  - this rank's integer value
c  result   : out - global maximum on every rank
c
c  Statistics/barrier/timing logic is identical to drvAllreduce.
c
      subroutine drvAllreduceMaxInt ( eachproc, result )
c
      include "common.h"
      include "mpif.h"
c
      integer :: eachproc, result

      if (numpe > 1) then
        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE ( eachproc, result, 1,
     &       MPI_INTEGER, MPI_MAX, MPI_COMM_WORLD, ierr )
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
      else
        result = eachproc
      endif
c
      return
      end

c------------------------------------------------------------------------
c
c sum real*8 array over all processors
c
c------------------------------------------------------------------------
c  summed = sum of ALL elements of u(nshg,n), first locally via the
c  sum() intrinsic, then across ranks with an allreduce so every rank
c  holds the global total.
c
c  u      : in  - array to sum, dimension(nshg,n); nshg from common.h
c  n      : in  - number of columns
c  summed : out - global sum (scalar)
c
c  sumvec/irecvcount are used as scalars here (local partial sum and
c  MPI count of 1); the commented-out "!SCATTER" declaration and the
c  MPI_REDUCE_SCATTER call below are a retained alternative that
c  would scatter per-rank results instead -- kept for reference.
c  NOTE(review): ilwork is declared (dimension nlwork) but never
c  referenced in this routine.
c
      subroutine sumgat (u, n, summed)

      include "common.h"
      include "mpif.h"
      include "auxmpi.h"

      dimension u(nshg,n), ilwork(nlwork)
!SCATTER      dimension sumvec(numpe), irecvcount(numpe)

c     local partial sum on this rank
      summed = sum(u)

      if (numpe > 1) then
        irecvcount = 1
        sumvec = summed
        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE (sumvec, summed, irecvcount,
     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
c        call MPI_REDUCE_SCATTER (sumvec, summed, irecvcount,
c     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)

      endif

      return
      end

c------------------------------------------------------------------------
c
c sum real*8 array of length nnp over all processors
c
c------------------------------------------------------------------------
c  Same as sumgat, but the leading dimension nnp is passed in rather
c  than taken from common.h, and a tiny constant (1.e-20) is added to
c  the local sum to protect downstream users against underflow /
c  division by an exact zero.
c
c  u      : in  - array to sum, dimension(nnp,n)
c  n      : in  - number of columns
c  summed : out - global sum (scalar), offset by +1.e-20 per rank
c  nnp    : in  - leading dimension of u
c
      subroutine sumgatN (u, n, summed, nnp)

      include "common.h"
      include "mpif.h"
      include "auxmpi.h"

      dimension u(nnp,n), ilwork(nlwork)
!         dimension sumvec(numpe), irecvcount(numpe)

c protect against underflow
c      summed = sum(u)
      summed = sum(u) + 1.e-20

      if (numpe > 1) then
        irecvcount = 1
        sumvec = summed

        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE (sumvec, summed, irecvcount,
     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
c        call MPI_REDUCE_SCATTER (sumvec, summed, irecvcount,
c     &       MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)

      endif

      return
      end

c------------------------------------------------------------------------
c
c sum integer array over all processors
c
c------------------------------------------------------------------------
c  Integer analogue of sumgat: summed = global sum of the integer
c  array u(n) over all ranks (MPI_INTEGER, MPI_SUM).  Here sumvec is
c  explicitly declared integer so the reduction buffer matches the
c  MPI datatype.
c
c  u      : in  - integer array, dimension(n)
c  n      : in  - array length
c  summed : out - global integer sum on every rank
c
      subroutine sumgatInt (u, n, summed )

      include "common.h"
      include "mpif.h"
      include "auxmpi.h"

      integer u(n), summed, sumvec
!SCATTER      integer sumvec(numpe), irecvcount(numpe)

c$$$      ttim(62) = ttim(62) - tmr()

      summed = sum(u)

      if (numpe > 1) then
        irecvcount = 1
        sumvec = summed
        if(impistat.eq.1) then
          iAllR = iAllR+1
        elseif(impistat.eq.2) then
          iAllRScal = iAllRScal+1
        endif
        if(impistat2.eq.1) call MPI_BARRIER (MPI_COMM_WORLD, ierr)
        if(impistat.gt.0) rmpitmr = TMRC()
        call MPI_ALLREDUCE (sumvec, summed, irecvcount,
     &       MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)
        if(impistat.eq.1) then
          rAllR = rAllR+TMRC()-rmpitmr
        elseif(impistat.eq.2) then
          rAllRScal = rAllRScal+TMRC()-rmpitmr
        endif
c        call MPI_REDUCE_SCATTER (sumvec, summed, irecvcount,
c     &       MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, ierr)

      endif
c$$$      ttim(62) = ttim(62) + tmr()

      return
      end