xref: /phasta/M2N/src/ctypes.f (revision 16223cb9c3f88b34f2cb94151b5cf5ffc1aac5e2)
1      subroutine ctypes (ilwork)
c
c.... ctypes: build the MPI derived datatypes used for inter-processor
c     communication.  For each of the numtask communication tasks
c     described in ilwork, one MPI_TYPE_INDEXED type is created over
c     the task's node segments, then fourteen MPI_TYPE_HVECTOR types
c     are layered on top of it (one per array width that may be
c     communicated), and all fifteen are committed into
c     sevsegtype(itask,1:15).  The routine also sets the common
c     variable maxfront (largest node count over all tasks) and
c     converts the peer-processor entries of ilwork from 1-based to
c     0-based rank numbering (ilwork is modified in place).
c
2
3      parameter (maxseg = 60000)
4
5      include "commonM2N.h"
6      include "mpif.h"
7      include "auxmpiM2N.h"
8
9      integer  sizeofdouble
10
11      dimension ilwork(nlwork)
12      dimension isbegin(maxseg), lenseg(maxseg), ioffset(maxseg)
13
14      real*8 timer1,timer2,time_ellapsed
15      integer istatRanks(2)
16      real*8 rStats(4)
17
18      timer1 = TMRC()
19
c
c.... NOTE(review): MPI_TYPE_EXTENT is deprecated (removed in the
c     MPI-3.0 standard); the replacement is MPI_TYPE_GET_EXTENT, whose
c     extent argument is INTEGER(KIND=MPI_ADDRESS_KIND) rather than a
c     default integer.  sizeofdouble is a default integer here --
c     confirm against the MPI library in use before upgrading.
c
20      CALL MPI_TYPE_EXTENT (MPI_DOUBLE_PRECISION,sizeofdouble,ierr)
c
c.... lstride: byte distance between consecutive columns of an
c     (nshg,*) array; used below as the MPI_TYPE_HVECTOR stride so
c     each replication of the indexed type addresses the next column.
c
21      lstride = nshg * sizeofdouble
22c
23c.... maxfront is a common variable being set in this routine
24c
25      maxfront = 0
26      numtask = ilwork (1)
27      itkbeg  = 1
28
29      if (numtask .gt. maxtask)
30     &  call error('ctypes  ','numtask ',numtask)
31
c
c.... nshg0 starts at the total node count on this processor and is
c     decremented below by every node belonging to a send task.  It is
c     not read again in this routine; presumably it is a common
c     variable consumed elsewhere -- verify against commonM2N.h.
c
32      nshg0 = nshg
33
34      do itask = 1,numtask
35c
36c.... iacc = 0 ==> this task is a send
37c          = 1 ==> this task is a receive
38c
39        iacc   = ilwork (itkbeg + 2)
40c
41c.... numseg : number of data segments to be communicated
42c
43        numseg = ilwork (itkbeg + 4)
44c
45c.... adjust the number of the other processor, since processors
46c     are numbered here starting from 0, not 1.
47c
48        ilwork (itkbeg + 3) = ilwork (itkbeg + 3) - 1
49        if (numseg .gt. maxseg)
50     &    call error('ctypes  ','numseg  ',numseg )
51c
52c.... lfront = total number of nodes involved in this task
53c
54        lfront = 0
55        do is = 1,numseg
56c
57c.... isbegin(is): starting node number for each segment
58c
59          isbegin (is) = ilwork (itkbeg + 3 + 2*is)
60c
61c.... lenseg(is): length of each segment (number of nodes)
62c
63          lenseg  (is) = ilwork (itkbeg + 4 + 2*is)
64c
65c.... increment the total node counter
66c
67          lfront       = lfront + lenseg(is)
68c
69c.... nshg0: number of nodes to be assembled on this processor,
70c             i.e. subtract the number of nodes which will be
71c             sent to another processor.
72c
73	  if (iacc .eq. 0) nshg0 = nshg0 - lenseg(is)
74        enddo
75c
76c.... maxfront: number of nodes which will be communicated, including
77c               all segments. Note that after the loop over tasks
78c               is complete, maxfront will contain the maximum number
79c               of nodes for any of the tasks.
80c
81        maxfront = MAX(maxfront,lfront)
82c
83c.... ioffset: array offset from the first node in the first segment
84c
85        ioffset(1:numseg) = isbegin(1:numseg) - isbegin(1)
86c
87c.... now set up the MPI data types which will be used in commu.f.
88c     These data types represent the indexed sets that will be sent
89c     and received.
90c
91c
92c.... the following call to MPI_TYPE_INDEXED will create a new data
93c     type which will represent the blocks of data we wish to transfer
94c     for this task. A handle to the new type is returned
95c     (sevsegtype(itask,1)). This data type describes the blocks of
96c     data to be transferred in terms of segments.
97c     Input to this routine:
98c          numseg: number of segments in this task
99c          lenseg: length of each segment (number of nodes)
100c          ioffset: where to begin each block with respect to the
101c                   first segment
102c          MPI_DOUBLE_PRECISION: type to set for each of the blocks
103c
104        call MPI_TYPE_INDEXED (numseg, lenseg, ioffset,
105     &                  MPI_DOUBLE_PRECISION, sevsegtype(itask,1), ierr)
106c
107c.... now create a new data type for each of the types of arrays we
108c     may wish to communicate with. For example ndof will be used when
109c     communicating the residual vector. Each one of these is derived
110c     from the first data type defined above, sevsegtype(itask,1).
111c
c
c.... sevsegtype(itask,k) column widths (lstride-spaced copies of the
c     base indexed type):
c       k= 2: nsd            k= 3: ndof           k= 4: nflow*nflow
c       k= 5: (nflow-1)*nsd  k= 6: nflow          k= 7: 24
c       k= 8: 9              k= 9: 11             k=10: 7
c       k=11: 33             k=12: 22             k=13: 16
c       k=14: 10             k=15: nflow*nsd
c     The literal widths (24, 9, 11, ...) presumably match fixed array
c     widths assumed by the callers in commu.f -- confirm there before
c     changing any of them.
c
112        call MPI_TYPE_HVECTOR(nsd,    1, lstride, sevsegtype(itask,1),
113     &                                        sevsegtype(itask,2), ierr)
114c
115        call MPI_TYPE_HVECTOR(ndof,   1, lstride, sevsegtype(itask,1),
116     &                                        sevsegtype(itask,3), ierr)
117c
118        call MPI_TYPE_HVECTOR(nflow*nflow,1, lstride,
119     &                    sevsegtype(itask,1),sevsegtype(itask,4), ierr)
120        call MPI_TYPE_HVECTOR((nflow-1)*nsd,1,lstride,
121     &                    sevsegtype(itask,1),sevsegtype(itask,5), ierr)
122       call MPI_TYPE_HVECTOR(nflow,1,lstride,sevsegtype(itask,1),
123     &                                        sevsegtype(itask,6), ierr)
124       call MPI_TYPE_HVECTOR(24,1,lstride,sevsegtype(itask,1),
125     &                                        sevsegtype(itask,7), ierr)
126       call MPI_TYPE_HVECTOR(9,1,lstride,sevsegtype(itask,1),
127     &                                        sevsegtype(itask,8), ierr)
128       call MPI_TYPE_HVECTOR(11,1,lstride,sevsegtype(itask,1),
129     &                                        sevsegtype(itask,9), ierr)
130       call MPI_TYPE_HVECTOR(7,1,lstride,sevsegtype(itask,1),
131     &                                   sevsegtype(itask,10), ierr)
132       call MPI_TYPE_HVECTOR(33,1,lstride,sevsegtype(itask,1),
133     &                                   sevsegtype(itask,11), ierr)
134       call MPI_TYPE_HVECTOR(22,1,lstride,sevsegtype(itask,1),
135     &                                   sevsegtype(itask,12), ierr)
136       call MPI_TYPE_HVECTOR(16,1,lstride,sevsegtype(itask,1),
137     &                                   sevsegtype(itask,13), ierr)
138       call MPI_TYPE_HVECTOR(10,1,lstride,sevsegtype(itask,1),
139     &                                   sevsegtype(itask,14), ierr)
140       call MPI_TYPE_HVECTOR(nflow*nsd,1,lstride,sevsegtype(itask,1),
141     &                                   sevsegtype(itask,15), ierr)
142c
143c
144c.... now this must be done to make MPI recognize each of the data
145c     types that were just defined
146c
147        do kdof = 1,15
148          call MPI_TYPE_COMMIT (sevsegtype(itask,kdof), ierr)
149        enddo
150c
151c.... set the counter to the index in ilwork where the next task
152c     begins
153c
154
c
c.... (each task record occupies 4 header words plus 2 words per
c     segment, matching the ilwork reads above)
c
155        itkbeg = itkbeg + 4 + 2*numseg
156c
157c.... end loop over tasks
158c
159      enddo
160
c
c.... report the time spent in this routine: rgetMinMaxAvg gathers
c     min/max/avg/rms of time_ellapsed across ranks (with the ranks
c     attaining the min and the max in istatRanks); rank 0 prints the
c     summary line.
c
161      timer2 = TMRC()
162      time_ellapsed = timer2 - timer1
163      !write(*,*) 'myrank: ', myrank, '- time in ctype: ',time_ellapsed
164      call rgetMinMaxAvg(time_ellapsed,rStats,istatRanks)
165      if(myrank == 0) then
166         write(*,811) istatRanks(1),rStats(1),istatRanks(2),rStats(2),
167     &             rStats(3),rStats(4)
168      endif
169
170811   format('Time in ctype: ',
171     &                'min [',I6,',',F10.5,'], ',
172     &                'max [',I6,',',F10.5,'], ',
173     &                'and avg [.,',F10.5,'], (rms=',F10.5,')')
174
175
176      return
177      end
178
178