/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>    /*I   "petscdmda.h"   I*/

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the closest mesh point to an x,y,z point in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

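   Example usage, a minimal sketch (assumes a 2d DMDA da with coordinates already set, inside a function that declares PetscErrorCode ierr; the point (0.5,0.25) is illustrative):
.vb
   PetscInt    i, j, k;
   PetscScalar gx, gy, gz;

   ierr = DMDAGetLogicalCoordinate(da, 0.5, 0.25, 0.0, &i, &j, &k, &gx, &gy, &gz);CHKERRQ(ierr);
.ve
   On the process that owns the closest grid point, i and j hold its logical indices; on all other processes they are -1.
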
@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  PetscErrorCode ierr;
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  ierr = DMGetCoordinateDM(da,&dacoors);CHKERRQ(ierr);
  ierr = DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
  ierr = DMGetCoordinates(da,&coors);CHKERRQ(ierr);
  ierr = DMDAVecGetArrayRead(dacoors,coors,&c);CHKERRQ(ierr);
  /* Find the locally owned grid point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart( (c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y) ));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* The process whose local minimum equals the global minimum owns the point */
  ierr = MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
    rank++;
  }
  /* Summing rank+1 from the single owning process (and 0 elsewhere) yields root+1 */
  ierr = MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  root--;
  /* Broadcast the located coordinates from the owning process to everyone */
  ierr = MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = DMDAVecRestoreArrayRead(dacoors,coors,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

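   Example usage, a minimal sketch that gathers the grid line y = 3 of a 2d DMDA vector onto process 0 (the DMDA da, the global vector v, and PetscErrorCode ierr are assumed to exist):
.vb
   Vec        ray;
   VecScatter scatter;

   ierr = DMDAGetRay(da, DM_Y, 3, &ray, &scatter);CHKERRQ(ierr);
   ierr = VecScatterBegin(scatter, v, ray, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
   ierr = VecScatterEnd(scatter, v, ray, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
   ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
   ierr = VecDestroy(&ray);CHKERRQ(ierr);
.ve
   After the scatter completes, ray on process 0 holds the values along that line; on all other processes it has length zero.
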
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);CHKERRQ(ierr);
  ierr = DMDAGetAO(da, &ao);CHKERRQ(ierr);
  if (!rank) {
    /* Process 0 builds the natural-ordering indices of the requested line and maps them to the PETSc ordering */
    if (da->dim == 1) {
      if (dir == DM_X) {
        ierr = PetscMalloc1(dd->w, &indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        ierr = AOApplicationToPetsc(ao, dd->w, indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF, newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec, dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec, VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);CHKERRQ(ierr);
      } else if (dir == DM_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    } else {
      if (dir == DM_Y) {
        /* The line y = gp: dd->M*dd->w contiguous entries in the natural ordering */
        ierr       = PetscMalloc1(dd->w*dd->M,&indices);CHKERRQ(ierr);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else if (dir == DM_X) {
        /* The line x = gp: dd->w entries taken from each of the dd->N rows, strided by dd->M*dd->w */
        ierr       = PetscMalloc1(dd->w*dd->N,&indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDirection");
    }
  } else {
    /* All other processes contribute an empty vector and index set */
    ierr = VecCreateSeq(PETSC_COMM_SELF, 0, newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
  }
  /* Scatter from the distributed global vector layout onto the sequential ray on process 0 */
  ierr = DMGetGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec, is, *newvec, NULL, scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful for computing boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

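   Example usage, a minimal sketch that builds a communicator of the processes owning the plane z = 0 of a 3d DMDA (the DMDA da and PetscErrorCode ierr are assumed to exist):
.vb
   MPI_Comm zcomm;

   ierr = DMDAGetProcessorSubset(da, DM_Z, 0, &zcomm);CHKERRQ(ierr);
   if (zcomm != MPI_COMM_NULL) {
     ierr = MPI_Comm_free(&zcomm);CHKERRQ(ierr);
   }
.ve
   Processes that do not own the plane receive MPI_COMM_NULL.
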
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da,DM_CLASSID,1,DMDA);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);CHKERRQ(ierr);
  /* Determine whether this process owns grid point gp in the requested direction */
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Gather the ownership flags and collect the ranks of the owning processes */
  ierr = PetscMalloc2(size,&owners,size,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr       = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  /* Create the subcommunicator containing only the owning ranks */
  ierr = MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA that are adjacent in a particular dimension (those whose
   subdomains begin at the same grid index in that direction), corresponding to
   a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

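   Example usage, a minimal sketch that creates, on every process, the communicator of processes whose subdomains start at the same x index (the DMDA da and PetscErrorCode ierr are assumed to exist):
.vb
   MPI_Comm xcomm;

   ierr = DMDAGetProcessorSubsets(da, DM_X, &xcomm);CHKERRQ(ierr);
   ierr = MPI_Comm_free(&xcomm);CHKERRQ(ierr);
.ve
   Every process belongs to exactly one such subcommunicator, so subcomm is valid everywhere.
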
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1,DMDA);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  /* The first locally owned index in the requested direction identifies each subset */
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* Processes that share the same first index belong to the same subcommunicator */
  ierr = PetscMalloc2(size, &firstPoints, size, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}