
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/dmdaimpl.h>    /*I   "petscdmda.h"   I*/

#undef __FUNCT__
#define __FUNCT__ "DMDAGetLogicalCoordinate"
/*@C
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value.
   Currently only 2d DMDAs are supported, so KK and Z are never set; X and Y are
   broadcast so they are valid on all processes.

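   Example:
   A minimal sketch (assumes da is an existing 2d DMDA with coordinates set; xp and
   yp are illustrative names for the physical point):
.vb
   PetscInt    II,JJ,KK;
   PetscScalar X,Y,Z;
   ierr = DMDAGetLogicalCoordinate(da,xp,yp,0.0,&II,&JJ,&KK,&X,&Y,&Z);CHKERRQ(ierr);
   if (II >= 0) {
     /* this process owns the grid point closest to (xp,yp) */
   }
.ve
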
.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  PetscErrorCode ierr;
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  ierr = DMGetCoordinateDM(da,&dacoors);CHKERRQ(ierr);
  ierr = DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
  ierr = DMGetCoordinates(da,&coors);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(dacoors,coors,&c);CHKERRQ(ierr);
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart( (c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y) ));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
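  /* Find the global minimum distance; the (assumed unique) rank that attains it
     contributes rank+1 to the sum below while all others contribute 0, so the
     sum minus one identifies the broadcast root that owns the closest point */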
  ierr = MPI_Allreduce(&D,&Dv,1,MPIU_REAL,MPI_MIN,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
    rank++;
  }
  ierr = MPI_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  root--;
  ierr = MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = DMDAVecRestoreArray(dacoors,coors,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMDAGetRay"
/*@C
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.
   Currently only implemented for 1d and 2d DMDAs; DMDA_Z is not supported.

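   Example:
   A minimal sketch (assumes da is an existing 2d DMDA and v a global vector obtained
   from it; j is an illustrative row index):
.vb
   Vec        ray;
   VecScatter scat;
   ierr = DMDAGetRay(da,DMDA_Y,j,&ray,&scat);CHKERRQ(ierr);
   ierr = VecScatterBegin(scat,v,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
   ierr = VecScatterEnd(scat,v,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
   /* ray now holds row j of v on process 0 */
   ierr = VecScatterDestroy(&scat);CHKERRQ(ierr);
   ierr = VecDestroy(&ray);CHKERRQ(ierr);
.ve
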
.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);CHKERRQ(ierr);
  ierr = DMDAGetAO(da, &ao);CHKERRQ(ierr);
  if (!rank) {
    if (da->dim == 1) {
      if (dir == DMDA_X) {
        ierr = PetscMalloc1(dd->w, &indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        ierr = AOApplicationToPetsc(ao, dd->w, indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF, newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec, dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec, VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);CHKERRQ(ierr);
      } else if (dir == DMDA_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDADirection");
    } else {
      if (dir == DMDA_Y) {
        ierr       = PetscMalloc1(dd->w*dd->M,&indices);CHKERRQ(ierr);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else if (dir == DMDA_X) {
        ierr       = PetscMalloc1(dd->w*dd->N,&indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
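        /* Each grid row occupies dd->w*dd->M consecutive entries in the natural
           ordering; jump from the end of column gp's block in one row to the
           start of the same block in the next row */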
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
    }
  } else {
    ierr = VecCreateSeq(PETSC_COMM_SELF, 0, newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
  }
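  /* Build a scatter that maps the selected entries of the da's global vector
     into the sequential ray vector, which has nonzero length only on rank 0 */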
  ierr = DMGetGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec, is, *newvec, NULL, scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubset"
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.
   On ranks that do not own the given grid point, comm is set to MPI_COMM_NULL
   (the standard MPI_Comm_create() behavior for ranks outside the group).

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

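   Example:
   A minimal sketch (assumes da is an existing 3d DMDA; k is an illustrative plane index):
.vb
   MPI_Comm zcomm;
   ierr = DMDAGetProcessorSubset(da,DMDA_Z,k,&zcomm);CHKERRQ(ierr);
   if (zcomm != MPI_COMM_NULL) {
     /* only the ranks owning plane k reach here */
   }
.ve
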
.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da,DM_CLASSID,1);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp >= dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp >= dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp >= dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

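  /* Gather every rank's ownership flag and collect the owning ranks into a group,
     from which the subset communicator is created */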
  ierr = PetscMalloc2(size,&owners,size,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr       = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubsets"
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA that are adjacent along a particular dimension,
   that is, processors whose subdomains start at the same grid index in that
   direction, corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

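   Example:
   A minimal sketch (assumes da is an existing 2d DMDA):
.vb
   MPI_Comm rowcomm;
   ierr = DMDAGetProcessorSubsets(da,DMDA_Y,&rowcomm);CHKERRQ(ierr);
   /* ranks whose subdomains start at the same y index now share rowcomm */
.ve
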
.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

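  /* Gather every rank's first owned index along dir; ranks that share the same
     first index lie in the same slab and are placed in the same subcommunicator */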
  ierr = PetscMalloc2(size, &firstPoints, size, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}