/* xref: /petsc/src/dm/impls/da/dasub.c (revision 2fa40bb9206b96114faa7cb222621ec184d31cd2) */

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h>    /*I   "petscdmda.h"   I*/

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinates of the mesh point closest to a given x,y,z point expressed in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
.  x  - the first physical coordinate
.  y  - the second physical coordinate
-  z  - the third physical coordinate

   Output Parameters:
+  II - the first logical coordinate (-1 on processes that do not contain that point)
.  JJ - the second logical coordinate (-1 on processes that do not contain that point)
.  KK - the third logical coordinate (-1 on processes that do not contain that point)
.  X  - (optional) the first coordinate of the located grid point
.  Y  - (optional) the second coordinate of the located grid point
-  Z  - (optional) the third coordinate of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate values.

   This routine is currently implemented only for two-dimensional DMDA; it errors out for 1d and 3d.

@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  PetscErrorCode ierr;
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  ierr = DMGetCoordinateDM(da,&dacoors);CHKERRQ(ierr);
  ierr = DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
  ierr = DMGetCoordinates(da,&coors);CHKERRQ(ierr);
  ierr = DMDAVecGetArrayRead(dacoors,coors,&c);CHKERRQ(ierr);
  /* find the locally owned grid point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* the rank whose local minimum matches the global minimum distance owns the point */
  ierr = MPIU_Allreduce(&D,&Dv,1,MPIU_REAL,MPIU_MIN,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRMPI(ierr);
    rank++;
  }
  /* the owning rank contributes rank+1, all others contribute 0; the sum minus 1 identifies
     the owning rank, which then broadcasts the located coordinates to everyone */
  ierr = MPIU_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
  root--;
  ierr = MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
  ierr = MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
  ierr = DMDAVecRestoreArrayRead(dacoors,coors,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

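/*
  Example (added for illustration, not part of the original source): a minimal usage sketch for
  DMDAGetLogicalCoordinate(), assuming a 2d DMDA named da whose coordinates have already been set
  (for instance with DMDASetUniformCoordinates()); the variable names are illustrative only.

    PetscInt    i, j, k;
    PetscScalar px, py, pz;

    ierr = DMDAGetLogicalCoordinate(da, 0.25, 0.5, 0.0, &i, &j, &k, &px, &py, &pz);CHKERRQ(ierr);
    // i,j now hold the logical indices of the closest grid point (-1 on ranks that do not own it),
    // while px,py hold that grid point's coordinates on every rank after the internal broadcast
*/
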
/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   This routine is not supported for 3d DMDA.

@*/
PetscErrorCode  DMDAGetRay(DM da,DMDirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (da->dim == 3) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject) da), &rank);CHKERRMPI(ierr);
  ierr = DMDAGetAO(da, &ao);CHKERRQ(ierr);
  if (rank == 0) {
    /* process 0 builds the list of global indices that make up the requested ray */
    if (da->dim == 1) {
      if (dir == DM_X) {
        ierr = PetscMalloc1(dd->w, &indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i-1] + 1;
        ierr = AOApplicationToPetsc(ao, dd->w, indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF, newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec, dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec, dd->w, PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec, VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is);CHKERRQ(ierr);
      } else if (dir == DM_Y) SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
      else SETERRQ(PetscObjectComm((PetscObject) da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    } else {
      if (dir == DM_Y) {
        ierr       = PetscMalloc1(dd->w*dd->M,&indices);CHKERRQ(ierr);
        indices[0] = gp*dd->M*dd->w;
        for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

        ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else if (dir == DM_X) {
        ierr       = PetscMalloc1(dd->w*dd->N,&indices);CHKERRQ(ierr);
        indices[0] = dd->w*gp;
        for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
        for (i=1; i<dd->N; i++) {
          indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
          for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
        }
        ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
        ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
        ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
        ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
        ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
      } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDirection");
    }
  } else {
    /* all other processes get an empty vector and empty index set */
    ierr = VecCreateSeq(PETSC_COMM_SELF, 0, newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
  }
  ierr = DMGetGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec, is, *newvec, NULL, scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da, &vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

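/*
  Example (added for illustration, not part of the original source): a sketch of extracting a ray
  with DMDAGetRay() and moving the data through the returned VecScatter; da and gvec (a global
  vector obtained from da) are assumed to exist, and the names are illustrative only.

    Vec        ray;
    VecScatter scatter;

    ierr = DMDAGetRay(da, DM_Y, 3, &ray, &scatter);CHKERRQ(ierr);   // the row with y index 3
    ierr = VecScatterBegin(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD);CHKERRQ(ierr);
    // ray now holds that row of gvec on process 0 (it has length zero on all other processes)
    ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
    ierr = VecDestroy(&ray);CHKERRQ(ierr);
*/
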
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   After use, comm should be freed with MPI_Comm_free(). On processes that do not own the
   grid point, comm is set to MPI_COMM_NULL and must not be freed.

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da,DM_CLASSID,1,DMDA);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);CHKERRMPI(ierr);
  /* flag is 1 on the processes whose local corner range contains the requested grid point */
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DM_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* gather the flags, build the list of owning ranks, and create the sub-communicator */
  ierr = PetscMalloc2(size,&owners,size,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr       = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);CHKERRMPI(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRMPI(ierr);
  ierr = MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);CHKERRMPI(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRMPI(ierr);
  ierr = MPI_Group_free(&group);CHKERRMPI(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

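/*
  Example (added for illustration, not part of the original source): a minimal sketch that obtains
  the communicator of the processes owning the plane z = 2 of a 3d DMDA da; names are illustrative.

    MPI_Comm zcomm;

    ierr = DMDAGetProcessorSubset(da, DM_Z, 2, &zcomm);CHKERRQ(ierr);
    if (zcomm != MPI_COMM_NULL) {
      // only the ranks whose local corners contain the grid point gp = 2 are members of zcomm;
      // collective work restricted to that plane can be performed here
      ierr = MPI_Comm_free(&zcomm);CHKERRMPI(ierr);
    }
*/
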
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1,DMDA);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRMPI(ierr);
  if (dir == DM_Z) {
    if (da->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    if (da->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* processes whose local domains start at the same grid index in the chosen direction are grouped together */
  ierr = PetscMalloc2(size, &firstPoints, size, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRMPI(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",da->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRMPI(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRMPI(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRMPI(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRMPI(ierr);
  ierr = MPI_Group_free(&group);CHKERRMPI(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
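
/*
  Example (added for illustration, not part of the original source): a sketch that groups the
  processes by their starting x index, e.g. to reduce data along the remaining directions; da is
  assumed to exist and the names are illustrative only.

    MPI_Comm xcomm;

    ierr = DMDAGetProcessorSubsets(da, DM_X, &xcomm);CHKERRQ(ierr);
    // every process belongs to exactly one such sub-communicator, shared with the processes
    // whose local domains start at the same x grid index
    ierr = MPI_Comm_free(&xcomm);CHKERRMPI(ierr);
*/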