
/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/dmdaimpl.h>    /*I   "petscdmda.h"   I*/

#undef __FUNCT__
#define __FUNCT__ "DMDAGetLogicalCoordinate"
/*@C
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinates (-1 on processes that do not contain the grid point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  ierr = DMGetCoordinateDM(da,&dacoors);CHKERRQ(ierr);
  ierr = DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
  ierr = DMGetCoordinates(da,&coors);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(dacoors,coors,&c);CHKERRQ(ierr);
  /* find the locally owned grid point closest to (x,y) */
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart( (c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y) ));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* only the process owning the globally closest point keeps its indices; the others reset them */
  ierr = MPI_Allreduce(&D,&Dv,1,MPIU_REAL,MPI_MIN,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
    rank++;
  }
  /* summing (rank+1) identifies the owning rank, since every other process contributes 0 */
  ierr = MPI_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  root--;
  ierr = MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = DMDAVecRestoreArray(dacoors,coors,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
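
/*
   A minimal usage sketch (names such as "da" are assumed, not part of this file):
   locate the grid point nearest to a physical point on a 2d DMDA that already
   has coordinates attached, for example via DMDASetUniformCoordinates().

     PetscInt    i,j,k;
     PetscScalar X,Y,Z;
     ierr = DMDASetUniformCoordinates(da,0.0,1.0,0.0,1.0,0.0,0.0);CHKERRQ(ierr);
     ierr = DMDAGetLogicalCoordinate(da,0.3,0.7,0.0,&i,&j,&k,&X,&Y,&Z);CHKERRQ(ierr);
     if (i >= 0) {
       ... the nearest grid point (i,j) with coordinates (X,Y) is owned by this process ...
     }

   Every process in the DMDA's communicator must make the call with the same
   (x,y,z) values because collective reductions are used internally.
*/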

#undef __FUNCT__
#define __FUNCT__ "DMDAGetRay"
/*@C
   DMDAGetRay - Returns a vector on process zero that can hold a row or column of the values of a DMDA vector, along with the scatter needed to fill it

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X or DMDA_Y
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 3d DMDA");
  ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
  if (!rank) {
    if (dir == DMDA_Y) {
      /* natural indices of the row y = gp: all M grid points, each with w components */
      ierr       = PetscMalloc(dd->w*dd->M*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = gp*dd->M*dd->w;
      for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

      ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else if (dir == DMDA_X) {
      /* natural indices of the column x = gp: all N grid points, each with w components */
      ierr       = PetscMalloc(dd->w*dd->N*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = dd->w*gp;
      for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
      for (i=1; i<dd->N; i++) {
        indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
        for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
      }
      ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
  } else {
    /* all other processes receive nothing */
    ierr = VecCreateSeq(PETSC_COMM_SELF,0,newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,0,NULL,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  }
  ierr = DMGetGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec,is,*newvec,NULL,scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
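
/*
   A minimal usage sketch (the names "da" and "gvec" are assumed): gather row
   gp = 5 of a 2d DMDA global vector onto process zero, use it, then release
   the pieces.

     Vec        ray;
     VecScatter scatter;
     ierr = DMDAGetRay(da,DMDA_Y,5,&ray,&scatter);CHKERRQ(ierr);
     ierr = VecScatterBegin(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = VecScatterEnd(scatter,gvec,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ... ray now holds the values of row 5 on process zero ...
     ierr = VecScatterDestroy(&scatter);CHKERRQ(ierr);
     ierr = VecDestroy(&ray);CHKERRQ(ierr);
*/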

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubset"
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value.

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da,DM_CLASSID,1);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  ierr = PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr       = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
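
/*
   A minimal usage sketch (the name "da" is assumed): obtain the subcommunicator
   of the processes that own the plane z = 0 of a 3d DMDA.  MPI_Comm_create(),
   which is used internally, returns MPI_COMM_NULL on the processes that are not
   in the subgroup, so the result must be tested before it is used or freed.

     MPI_Comm bottom;
     ierr = DMDAGetProcessorSubset(da,DMDA_Z,0,&bottom);CHKERRQ(ierr);
     if (bottom != MPI_COMM_NULL) {
       ... collective work restricted to the plane z = 0 ...
       ierr = MPI_Comm_free(&bottom);CHKERRQ(ierr);
     }
*/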

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubsets"
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode  DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  /* group together the processes whose subdomains start at the same index in direction dir */
  ierr = PetscMalloc2(size, PetscInt, &firstPoints, size, PetscMPIInt, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

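/*
   A minimal usage sketch (the name "da" is assumed): group the processes whose
   subdomains begin at the same x index, for example to reduce data within each
   such slice of the process grid.  Every process belongs to exactly one subset,
   so each receives a valid communicator that it should eventually free.

     MPI_Comm slice;
     ierr = DMDAGetProcessorSubsets(da,DMDA_X,&slice);CHKERRQ(ierr);
     ... collective work among the processes of this slice ...
     ierr = MPI_Comm_free(&slice);CHKERRQ(ierr);
*/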