c-----------------------------------------------------------------------
c ctypes: build the MPI derived data types used by the communication
c         routines (commu.f) to exchange boundary-node data between
c         processors, driven by the packed task list ilwork.
c
c input:
c   ilwork(nlwork) : packed task list.  ilwork(1) = number of tasks;
c                    each task record holds (tag, accept flag, other
c                    rank, number of segments) followed by
c                    (start node, length) pairs for each segment.
c
c side effects (via the common blocks in the include files):
c   maxfront             set to the largest node count of any task
c   sevsegtype(task,1:15) committed MPI type handles, one per task and
c                        per array shape that may be communicated
c   ilwork(itkbeg+3)     decremented in place so that partner ranks
c                        become 0-based
c-----------------------------------------------------------------------
      subroutine ctypes (ilwork)

      parameter (maxseg = 60000)

      include "commonM2NFixBnd.h"
      include "mpif.h"
      include "auxmpiM2NFixBnd.h"

      integer sizeofdouble
c
c.... address-sized temporaries for the MPI type inquiry/creation
c     calls.  MPI_TYPE_EXTENT / MPI_TYPE_HVECTOR were deprecated in
c     MPI-2 and removed in MPI-3; their replacements take
c     INTEGER(MPI_ADDRESS_KIND) arguments, which also avoids
c     overflowing the byte stride for very large nshg.
c
      integer (kind=MPI_ADDRESS_KIND) extent8, lb8, stride8

      dimension ilwork(nlwork)
      dimension isbegin(maxseg), lenseg(maxseg), ioffset(maxseg)

      real*8 timer1,timer2,time_ellapsed
      integer istatRanks(2)
      real*8 rStats(4)

!      timer1 = TMRC()

c
c.... extent (in bytes) of one double precision value.
c     MPI_TYPE_GET_EXTENT replaces the removed MPI_TYPE_EXTENT.
c
      CALL MPI_TYPE_GET_EXTENT (MPI_DOUBLE_PRECISION, lb8, extent8,
     &                          ierr)
      sizeofdouble = extent8
c
c.... byte stride between successive columns of a nodal array,
c     i.e. between a(i,j) and a(i,j+1).  lstride keeps the original
c     default-integer value (it may be used elsewhere through the
c     common blocks); stride8 is the address-sized copy passed to the
c     type constructors.
c
      lstride = nshg * sizeofdouble
      stride8 = nshg * extent8
c
c.... maxfront is a common variable being set in this routine
c
      maxfront = 0
      numtask = ilwork (1)
      itkbeg  = 1

      if (numtask .gt. maxtask)
     &   call error('ctypes ','numtask ',numtask)

      nshg0 = nshg

      do itask = 1,numtask
c
c.... iacc = 0 ==> this task is a send
c          = 1 ==> this task is a receive
c
         iacc   = ilwork (itkbeg + 2)
c
c.... numseg : number of data segments to be communicated
c
         numseg = ilwork (itkbeg + 4)
c
c.... adjust the number of the other processor, since processors
c     are numbered here starting from 0, not 1.
c
         ilwork (itkbeg + 3) = ilwork (itkbeg + 3) - 1
         if (numseg .gt. maxseg)
     &      call error('ctypes ','numseg ',numseg )
c
c.... lfront = total number of nodes involved in this task
c
         lfront = 0
         do is = 1,numseg
c
c.... isbegin(is): starting node number for each segment
c
            isbegin (is) = ilwork (itkbeg + 3 + 2*is)
c
c.... lenseg(is): length of each segment (number of nodes)
c
            lenseg (is) = ilwork (itkbeg + 4 + 2*is)
c
c.... increment the total node counter
c
            lfront = lfront + lenseg(is)
c
c.... nshg0: number of nodes to be assembled on this processor,
c     i.e. subtract the number of nodes which will be
c     sent to another processor.
c
            if (iacc .eq. 0) nshg0 = nshg0 - lenseg(is)
         enddo
c
c.... maxfront: number of nodes which will be communicated, including
c     all segments.  Note that after the loop over tasks is complete,
c     maxfront will contain the maximum number of nodes for any of
c     the tasks.
c
         maxfront = MAX(maxfront,lfront)
c
c.... ioffset: array offset from the first node in the first segment
c
         ioffset(1:numseg) = isbegin(1:numseg) - isbegin(1)
c
c.... build the indexed type describing this task's segments:
c     numseg blocks, block i holding lenseg(i) doubles and starting
c     ioffset(i) doubles past the first segment.  This is the base
c     type for one column (one dof) of a nodal array, and the handle
c     sevsegtype(itask,1) is what all the shapes below derive from.
c
         call MPI_TYPE_INDEXED (numseg, lenseg, ioffset,
     &        MPI_DOUBLE_PRECISION, sevsegtype(itask,1), ierr)
c
c.... derive one type per array shape that may be communicated (for
c     example ndof columns for the residual vector).  Each is "count"
c     copies of the base type, successive copies stride8 bytes (one
c     nshg column) apart.  MPI_TYPE_CREATE_HVECTOR replaces the
c     removed MPI_TYPE_HVECTOR.
c
         call MPI_TYPE_CREATE_HVECTOR(nsd, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,2), ierr)
         call MPI_TYPE_CREATE_HVECTOR(ndof, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,3), ierr)
         call MPI_TYPE_CREATE_HVECTOR(nflow*nflow, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,4), ierr)
         call MPI_TYPE_CREATE_HVECTOR((nflow-1)*nsd, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,5), ierr)
         call MPI_TYPE_CREATE_HVECTOR(nflow, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,6), ierr)
c
c.... NOTE(review): the fixed counts below (24, 9, 11, 7, 13, 17,
c     16, 10) must match the column counts of the arrays passed to
c     commu.f with the corresponding type slots -- confirm against
c     the callers before changing any of them.
c
         call MPI_TYPE_CREATE_HVECTOR(24, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,7), ierr)
         call MPI_TYPE_CREATE_HVECTOR(9, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,8), ierr)
         call MPI_TYPE_CREATE_HVECTOR(11, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,9), ierr)
         call MPI_TYPE_CREATE_HVECTOR(7, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,10), ierr)
!        call MPI_TYPE_CREATE_HVECTOR(33, 1, stride8,
!    &        sevsegtype(itask,1), sevsegtype(itask,11), ierr)
         call MPI_TYPE_CREATE_HVECTOR(13, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,11), ierr)
!        call MPI_TYPE_CREATE_HVECTOR(22, 1, stride8,
!    &        sevsegtype(itask,1), sevsegtype(itask,12), ierr)
         call MPI_TYPE_CREATE_HVECTOR(17, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,12), ierr)
         call MPI_TYPE_CREATE_HVECTOR(16, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,13), ierr)
         call MPI_TYPE_CREATE_HVECTOR(10, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,14), ierr)
         call MPI_TYPE_CREATE_HVECTOR(nflow*nsd, 1, stride8,
     &        sevsegtype(itask,1), sevsegtype(itask,15), ierr)
c
c.... commit each of the derived types so MPI can use them in
c     communication calls
c
         do kdof = 1,15
            call MPI_TYPE_COMMIT (sevsegtype(itask,kdof), ierr)
         enddo
c
c.... set the counter to the index in ilwork where the next task
c     begins (4 header words + 2 words per segment)
c
         itkbeg = itkbeg + 4 + 2*numseg
c
c.... end loop over tasks
c
      enddo

!      timer2 = TMRC()
!      time_ellapsed = timer2 - timer1
      !write(*,*) 'myrank: ', myrank, '- time in ctype: ',time_ellapsed
c      call rgetMinMaxAvg(time_ellapsed,rStats,istatRanks)
!      if(myrank == 0) then
!        write(*,811) istatRanks(1),rStats(1),istatRanks(2),rStats(2),
!    &                rStats(3),rStats(4)
!      endif

 811  format('Time in ctype: ',
     &       'min [',I6,',',F10.5,'], ',
     &       'max [',I6,',',F10.5,'], ',
     &       'and avg [.,',F10.5,'], (rms=',F10.5,')')

      return
      end