xref: /phasta/phSolver/common/ctypes.f (revision 358c77e96363e9c142e42fdcd4b6bf559a56cc61)
      subroutine ctypes (ilwork)
c
c-----------------------------------------------------------------------
c.... ctypes: build and commit the MPI derived datatypes used by the
c     communication routines (commu.f) for every send/receive task
c     described in the ilwork array.  For each task, 16 datatypes are
c     created in sevsegtype(itask,1:16): an indexed "segment" type of
c     doubles (slot 1), an indexed type of 8-byte integers (slot 16),
c     and hvector types (slots 2-15) that replicate the segment type
c     across the columns of the various solution/residual arrays.
c
c     input:
c        ilwork : communication descriptor; ilwork(1) = number of tasks,
c                 followed by per-task headers and segment lists.
c
c     side effects (set here, read elsewhere):
c        maxfront : largest number of nodes in any single task
c        nshg0    : nodes assembled on this processor (nshg minus
c                   nodes sent away)
c        ilwork(itkbeg+3) is decremented in place (0-based ranks)
c     NOTE(review): maxfront, nshg0 and sevsegtype presumably live in
c     the commons brought in by common.h / auxmpi.h -- confirm before
c     renaming any of them.
c-----------------------------------------------------------------------
c
c.... maxseg: maximum number of segments allowed in one task
c
      parameter (maxseg = 30000)

      include "common.h"
      include "mpif.h"
      include "auxmpi.h"

      integer  sizeofdouble,sizeofInt64

      dimension ilwork(nlwork)
      dimension isbegin(maxseg), lenseg(maxseg), ioffset(maxseg)
c
c.... lstride: byte distance between consecutive columns of an
c     (nshg,n) array, used as the stride of the hvector types below.
c     NOTE(review): MPI_TYPE_EXTENT is deprecated (removed in MPI-3);
c     the modern equivalent is MPI_TYPE_GET_EXTENT with an
c     integer(MPI_ADDRESS_KIND) extent -- confirm against the MPI
c     library this is built with.
c
      CALL MPI_TYPE_EXTENT (MPI_DOUBLE_PRECISION,sizeofdouble,ierr)
      lstride = nshg * sizeofdouble
c
c  you would need the next two lines if you commu-ed vectors of integers
c
c      CALL MPI_TYPE_EXTENT (MPI_LONG_LONG,sizeofInt64,ierr)
c      lstrideInt = nshg * sizeofInt64
c
c.... maxfront is a common variable being set in this routine
c
      maxfront = 0
      numtask = ilwork (1)
      itkbeg  = 1

      if (numtask .gt. maxtask)
     &  call error('ctypes  ','numtask ',numtask)

      nshg0 = nshg

      do itask = 1,numtask
c
c.... iacc = 0 ==> this task is a send
c          = 1 ==> this task is a recieve
c
        iacc   = ilwork (itkbeg + 2)
c
c.... numseg : number of data segments to be communicated
c
        numseg = ilwork (itkbeg + 4)
c
c.... adjust the number of the other processor, since processors
c     are numbered here starting from 0, not 1.
c
        ilwork (itkbeg + 3) = ilwork (itkbeg + 3) - 1
        if (numseg .gt. maxseg)
     &    call error('ctypes  ','numseg  ',numseg )
c
c.... lfront = total number of nodes involved in this task
c
        lfront = 0
        do is = 1,numseg
c
c.... isbegin(is): starting node number for each segment
c
          isbegin (is) = ilwork (itkbeg + 3 + 2*is)
c
c.... lenseg(is): length of each segment (number of nodes)
c
          lenseg  (is) = ilwork (itkbeg + 4 + 2*is)
c
c.... increment the total node counter
c
          lfront       = lfront + lenseg(is)
c
c.... nshg0: number of nodes to be assembled on this processor,
c             i.e. subtract the number of nodes which will be
c             sent to another processor.
c
	  if (iacc .eq. 0) nshg0 = nshg0 - lenseg(is)
        enddo
c
c.... maxfront: number of nodes which will be communicated, including
c               all segments. Note that after the loop over tasks
c               is complete, maxfront will contain the maximum number
c               of nodes for any of the tasks.
c
        maxfront = MAX(maxfront,lfront)
c
c.... ioffset: array offset from the first node in the first segment
c
        ioffset(1:numseg) = isbegin(1:numseg) - isbegin(1)
c
c.... now set up the MPI data types which will be used in commu.f.
c     These data types represent the indexed sets that will be sent
c     and recieved.
c
c
c.... the following call to MPI_TYPE_INDEXED will create a new data
c     type which will represent the blocks of data we wish to transfer
c     for this task. A handle to the new type is returned
c     (sevsegtype(itask,1)). This data type describes the blocks of
c     data to be transferred in terms of segments.
c     Input to this routine:
c          numseg: number of segments in this task
c          lenseg: length of each segment (number of nodes)
c          ioffset: where to begin each block with respect to the
c                   first segment
c          MPI_DOUBLE_PRECISION: type to set for each of the blocks
c
c.... slot 16 is the same indexed segment pattern but typed as 8-byte
c     integers, presumably for commu-ing integer arrays -- TODO(review)
c     confirm which commu path consumes it.
c
        call MPI_TYPE_INDEXED (numseg, lenseg, ioffset,
     &                  MPI_LONG_LONG_INT, sevsegtype(itask,16), ierr)

        call MPI_TYPE_INDEXED (numseg, lenseg, ioffset,
     &                  MPI_DOUBLE_PRECISION, sevsegtype(itask,1), ierr)
c
c.... now create a new data type for each of the types of arrays we
c     may wish to communicate with. For example ndof will be used when
c     communicating the residual vector. Each one of these is derived
c     from the first data type defined above, sevsegtype(itask,1).
c
c     NOTE(review): MPI_TYPE_HVECTOR is also deprecated in favor of
c     MPI_TYPE_CREATE_HVECTOR (address-kind stride) -- same caveat as
c     MPI_TYPE_EXTENT above.
c
        call MPI_TYPE_HVECTOR(nsd,    1, lstride, sevsegtype(itask,1),
     &                                        sevsegtype(itask,2), ierr)
c
        call MPI_TYPE_HVECTOR(ndof,   1, lstride, sevsegtype(itask,1),
     &                                        sevsegtype(itask,3), ierr)
c
c.... the hard-coded column counts below (24, 9, 11, 7, 33, 22, 16, 10)
c     must each match the second dimension of the particular array
c     commu-ed with that slot elsewhere in the code -- TODO(review)
c     confirm against the corresponding commu call sites.
c
        call MPI_TYPE_HVECTOR(nflow*nflow,1, lstride,
     &                    sevsegtype(itask,1),sevsegtype(itask,4), ierr)
        call MPI_TYPE_HVECTOR((nflow-1)*nsd,1,lstride,
     &                    sevsegtype(itask,1),sevsegtype(itask,5), ierr)
       call MPI_TYPE_HVECTOR(nflow,1,lstride,sevsegtype(itask,1),
     &                                        sevsegtype(itask,6), ierr)
       call MPI_TYPE_HVECTOR(24,1,lstride,sevsegtype(itask,1),
     &                                        sevsegtype(itask,7), ierr)
       call MPI_TYPE_HVECTOR(9,1,lstride,sevsegtype(itask,1),
     &                                        sevsegtype(itask,8), ierr)
       call MPI_TYPE_HVECTOR(11,1,lstride,sevsegtype(itask,1),
     &                                        sevsegtype(itask,9), ierr)
       call MPI_TYPE_HVECTOR(7,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,10), ierr)
       call MPI_TYPE_HVECTOR(33,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,11), ierr)
       call MPI_TYPE_HVECTOR(22,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,12), ierr)
       call MPI_TYPE_HVECTOR(16,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,13), ierr)
       call MPI_TYPE_HVECTOR(10,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,14), ierr)
       call MPI_TYPE_HVECTOR(nflow*nsd,1,lstride,sevsegtype(itask,1),
     &                                   sevsegtype(itask,15), ierr)
c
c
c.... now this must be done to make MPI recognize each of the data
c     types that were just defined
c
        do kdof = 1,16
          call MPI_TYPE_COMMIT (sevsegtype(itask,kdof), ierr)
        enddo
c
c.... set the counter to the index in ilwork where the next task
c     begins
c
c     (each task header is 4 words followed by 2 words per segment)
c
        itkbeg = itkbeg + 4 + 2*numseg
c
c.... end loop over tasks
c
      enddo

      return
      end
165
166      subroutine Dctypes(numtask) ! {
167        include "auxmpi.h"
168        integer numtask
169        do itask = 1,numtask
170          do kdof = 1,16
171            call MPI_TYPE_FREE(sevsegtype(itask,kdof), ierr)
172          enddo
173        enddo
174      end !}
175