c-----------------------------------------------------------------------
c.... ctypes : build the MPI derived datatypes used by commu.f to
c     exchange boundary-node data between processors.
c
c     input:
c        ilwork(nlwork) : packed communication work array
c           ilwork(1) = numtask, the number of communication tasks.
c           for each task, starting at offset itkbeg:
c              itkbeg+2 : iacc   (0 ==> this task is a send,
c                                 1 ==> this task is a receive)
c              itkbeg+3 : other processor number (1-based on input;
c                         decremented in place here to the 0-based
c                         MPI rank)
c              itkbeg+4 : numseg, number of data segments
c              itkbeg+3+2*is : starting node of segment is
c              itkbeg+4+2*is : length (node count) of segment is
c
c     side effects (variables living in the include files):
c        maxfront   : set to the maximum number of nodes communicated
c                     by any single task
c        nshg0      : number of nodes assembled on this processor
c                     (nshg minus the nodes sent to other processors)
c        sevsegtype : committed MPI datatype handles, one row per
c                     task and 15 width variants per row
c-----------------------------------------------------------------------
      subroutine ctypes (ilwork)
c
      parameter (maxseg = 60000)
c
      include "commonAcuStat.h"
      include "mpif.h"
      include "auxmpiAcuStat.h"
c
      integer sizeofdouble
c
      dimension ilwork(nlwork)
      dimension isbegin(maxseg), lenseg(maxseg), ioffset(maxseg)
c
c.... timing instrumentation; the code using these is currently
c     disabled below, the declarations are kept so it can be
c     re-enabled without edits
c
      real*8 timer1,timer2,time_ellapsed
      integer istatRanks(2)
      real*8 rStats(4)
c
!      timer1 = TMRC()
c
c.... MPI_TYPE_EXTENT was deprecated in MPI-2.0 and removed in
c     MPI-3.0; MPI_TYPE_SIZE is the supported replacement.  For the
c     basic type MPI_DOUBLE_PRECISION, size and extent are identical,
c     so the computed byte stride is unchanged.
c
      CALL MPI_TYPE_SIZE (MPI_DOUBLE_PRECISION,sizeofdouble,ierr)
c
c.... byte stride between successive columns (degrees of freedom)
c     of a (nshg,*)-shaped double precision array
c
      lstride = nshg * sizeofdouble
c
c.... maxfront is a common variable being set in this routine
c
      maxfront = 0
      numtask  = ilwork (1)
      itkbeg   = 1
c
      if (numtask .gt. maxtask)
     &   call error('ctypes ','numtask ',numtask)
c
      nshg0 = nshg
c
c.... loop over the communication tasks
c
      do itask = 1,numtask
c
c.... iacc = 0 ==> this task is a send
c          = 1 ==> this task is a receive
c
         iacc   = ilwork (itkbeg + 2)
c
c.... numseg : number of data segments to be communicated
c
         numseg = ilwork (itkbeg + 4)
c
c.... adjust the number of the other processor, since processors
c     are numbered here starting from 0, not 1.
c
         ilwork (itkbeg + 3) = ilwork (itkbeg + 3) - 1
         if (numseg .gt. maxseg)
     &      call error('ctypes ','numseg ',numseg )
c
c.... lfront = total number of nodes involved in this task
c
         lfront = 0
         do is = 1,numseg
c
c.... isbegin(is): starting node number for each segment
c
            isbegin (is) = ilwork (itkbeg + 3 + 2*is)
c
c.... lenseg(is): length of each segment (number of nodes)
c
            lenseg (is) = ilwork (itkbeg + 4 + 2*is)
c
c.... increment the total node counter
c
            lfront = lfront + lenseg(is)
c
c.... nshg0: number of nodes to be assembled on this processor,
c     i.e. subtract the number of nodes which will be sent to
c     another processor.
c
            if (iacc .eq. 0) nshg0 = nshg0 - lenseg(is)
         enddo
c
c.... maxfront: number of nodes which will be communicated, including
c     all segments.  After the loop over tasks is complete, maxfront
c     holds the maximum number of nodes for any of the tasks.
c
         maxfront = MAX(maxfront,lfront)
c
c.... ioffset: array offset of each segment from the first node in
c     the first segment
c
         ioffset(1:numseg) = isbegin(1:numseg) - isbegin(1)
c
c.... base datatype for this task: MPI_TYPE_INDEXED describes the
c     blocks of data to be transferred in terms of segments.
c        numseg  : number of segments in this task
c        lenseg  : length of each segment (number of nodes)
c        ioffset : where each block begins relative to the first
c                  segment
c        MPI_DOUBLE_PRECISION : element type of each block
c     A handle to the new type is returned in sevsegtype(itask,1).
c
         call MPI_TYPE_INDEXED (numseg, lenseg, ioffset,
     &        MPI_DOUBLE_PRECISION, sevsegtype(itask,1), ierr)
c
c.... derive one type per array width we may wish to communicate
c     (e.g. ndof for the residual vector).  Each repeats the base
c     type sevsegtype(itask,1) with a byte stride of one column.
c     NOTE(review): MPI_TYPE_HVECTOR is likewise deprecated; moving
c     to MPI_TYPE_CREATE_HVECTOR requires an
c     INTEGER(KIND=MPI_ADDRESS_KIND) stride — left unchanged here.
c
         call MPI_TYPE_HVECTOR(nsd, 1, lstride, sevsegtype(itask,1),
     &        sevsegtype(itask,2), ierr)
c
         call MPI_TYPE_HVECTOR(ndof, 1, lstride, sevsegtype(itask,1),
     &        sevsegtype(itask,3), ierr)
c
         call MPI_TYPE_HVECTOR(nflow*nflow,1, lstride,
     &        sevsegtype(itask,1),sevsegtype(itask,4), ierr)
         call MPI_TYPE_HVECTOR((nflow-1)*nsd,1,lstride,
     &        sevsegtype(itask,1),sevsegtype(itask,5), ierr)
         call MPI_TYPE_HVECTOR(nflow,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,6), ierr)
         call MPI_TYPE_HVECTOR(24,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,7), ierr)
         call MPI_TYPE_HVECTOR(9,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,8), ierr)
         call MPI_TYPE_HVECTOR(11,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,9), ierr)
         call MPI_TYPE_HVECTOR(7,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,10), ierr)
!         call MPI_TYPE_HVECTOR(33,1,lstride,sevsegtype(itask,1),
!     &        sevsegtype(itask,11), ierr)
         call MPI_TYPE_HVECTOR(13,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,11), ierr)
         call MPI_TYPE_HVECTOR(22,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,12), ierr)
         call MPI_TYPE_HVECTOR(16,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,13), ierr)
         call MPI_TYPE_HVECTOR(10,1,lstride,sevsegtype(itask,1),
     &        sevsegtype(itask,14), ierr)
         call MPI_TYPE_HVECTOR(nflow*nsd,1,lstride,
     &        sevsegtype(itask,1),
     &        sevsegtype(itask,15), ierr)
c
c.... commit each of the datatypes just defined so MPI can use them
c
         do kdof = 1,15
            call MPI_TYPE_COMMIT (sevsegtype(itask,kdof), ierr)
         enddo
c
c.... advance the counter to the index in ilwork where the next
c     task begins
c
         itkbeg = itkbeg + 4 + 2*numseg
c
c.... end loop over tasks
c
      enddo
c
!      timer2 = TMRC()
!      time_ellapsed = timer2 - timer1
      !write(*,*) 'myrank: ', myrank, '- time in ctype: ',time_ellapsed
c      call rgetMinMaxAvg(time_ellapsed,rStats,istatRanks)
!      if(myrank == 0) then
!       write(*,811) istatRanks(1),rStats(1),istatRanks(2),rStats(2),
!     &              rStats(3),rStats(4)
!      endif
c
 811  format('Time in ctype: ',
     &       'min [',I6,',',F10.5,'], ',
     &       'max [',I6,',',F10.5,'], ',
     &       'and avg [.,',F10.5,'], (rms=',F10.5,')')
c
      return
      end