/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h> /*I   "petscdmda.h"   I*/

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
.  x  - the first physical coordinate
.  y  - the second physical coordinate
-  z  - the third physical coordinate

   Output Parameters:
+  II - the first logical coordinate (-1 on processes that do not contain that point)
.  JJ - the second logical coordinate (-1 on processes that do not contain that point)
.  KK - the third logical coordinate (-1 on processes that do not contain that point)
.  X  - (optional) the first coordinate of the located grid point
.  Y  - (optional) the second coordinate of the located grid point
-  Z  - (optional) the third coordinate of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z)
{
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  PetscFunctionBegin;
  PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 1d DMDA");
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  PetscCall(DMGetCoordinateDM(da, &dacoors));
  PetscCall(DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL));
  PetscCall(DMGetCoordinates(da, &coors));
  PetscCall(DMDAVecGetArrayRead(dacoors, coors, &c));
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
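  /* Reduce to the global minimum distance; only the rank that attains it keeps its logical
     indices and supplies the located coordinates. That rank is identified by summing rank+1
     over all ranks (everyone else contributes 0) and is then used as the broadcast root. */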
  PetscCall(MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da)));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
    rank++;
  }
  PetscCall(MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da)));
  root--;
  PetscCallMPI(MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCallMPI(MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCall(DMDAVecRestoreArrayRead(dacoors, coors, &c));
  PetscFunctionReturn(0);
}
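
/*
   A minimal usage sketch (not part of this file; variable names are hypothetical): locate the
   grid point of a 2d DMDA closest to the physical point (0.5, 0.25). Every rank passes the same
   coordinates; ranks that do not own the point get -1 back in the logical indices, while the
   located physical coordinates are broadcast to all ranks.

     PetscInt    i, j, k;
     PetscScalar px, py, pz;
     PetscCall(DMDAGetLogicalCoordinate(da, 0.5, 0.25, 0.0, &i, &j, &k, &px, &py, &pz));
     if (i >= 0) PetscCall(PetscPrintf(PETSC_COMM_SELF, "Closest point is (%" PetscInt_FMT ", %" PetscInt_FMT ")\n", i, j));
*/
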
/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  PetscFunctionBegin;
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
  PetscCall(DMDAGetAO(da, &ao));
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w, &indices));
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        PetscCall(AOApplicationToPetsc(ao, dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is));
      } else {
        PetscCheck(dir != DM_Y, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      if (dir == DM_Y) {
        PetscCall(PetscMalloc1(dd->w * dd->M, &indices));
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        PetscCall(AOApplicationToPetsc(ao, dd->M * dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is));
      } else if (dir == DM_X) {
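        /* Column gp in natural ordering: for each of the N grid rows take the w dof of point gp;
           consecutive dof are contiguous and consecutive rows are dd->w * dd->M entries apart */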
        PetscCall(PetscMalloc1(dd->w * dd->N, &indices));
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        PetscCall(AOApplicationToPetsc(ao, dd->w * dd->N, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is));
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, newvec));
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
  }
  PetscCall(DMGetGlobalVector(da, &vec));
  PetscCall(VecScatterCreate(vec, is, *newvec, NULL, scatter));
  PetscCall(DMRestoreGlobalVector(da, &vec));
  PetscCall(ISDestroy(&is));
  PetscFunctionReturn(0);
}
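
/*
   A minimal usage sketch (not part of this file; names are hypothetical): pull row gp = 2 of a
   2d DMDA vector onto process 0. The scatter moves the values from the global vector into the
   ray vector; both objects must be destroyed by the caller.

     Vec        ray;
     VecScatter scatter;
     PetscCall(DMDAGetRay(da, DM_Y, 2, &ray, &scatter));
     PetscCall(VecScatterBegin(scatter, globalvec, ray, INSERT_VALUES, SCATTER_FORWARD));
     PetscCall(VecScatterEnd(scatter, globalvec, ray, INSERT_VALUES, SCATTER_FORWARD));
     PetscCall(VecScatterDestroy(&scatter));
     PetscCall(VecDestroy(&ray));
*/
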
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm)
{
  MPI_Group   group, subgroup;
  PetscInt    i, ict, flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  flag = 0;
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)da), &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    PetscCheck(gp >= 0 && gp <= dd->P, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    PetscCheck(gp >= 0 && gp <= dd->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    PetscCheck(gp >= 0 && gp <= dd->M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

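  /* Gather every rank's ownership flag for grid point gp and build the list of owning ranks,
     which then defines the subgroup from which the new communicator is created */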
  PetscCall(PetscMalloc2(size, &owners, size, &ranks));
  PetscCallMPI(MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da)));
  ict = 0;
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", i));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(PetscObjectComm((PetscObject)da), &group));
  PetscCallMPI(MPI_Group_incl(group, ict, ranks, &subgroup));
  PetscCallMPI(MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(owners, ranks));
  PetscFunctionReturn(0);
}
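
/*
   A minimal usage sketch (not part of this file; names are hypothetical): obtain a communicator
   of the ranks that own the plane z = 0 of a 3d DMDA and use it for work restricted to that
   plane. Ranks that do not own the plane receive MPI_COMM_NULL, which must not be freed.

     MPI_Comm plane;
     PetscCall(DMDAGetProcessorSubset(da, DM_Z, 0, &plane));
     if (plane != MPI_COMM_NULL) {
       ... collective operations among the owning ranks only ...
       PetscCallMPI(MPI_Comm_free(&plane));
     }
*/
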
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm    comm;
  MPI_Group   group, subgroup;
  PetscInt    subgroupSize = 0;
  PetscInt   *firstPoints;
  PetscMPIInt size, *subgroupRanks = NULL;
  PetscInt    xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  PetscCall(PetscObjectGetComm((PetscObject)da, &comm));
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

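  /* Ranks whose local subdomains start at the same index along dir belong to the same subset;
     gather all starting indices and group the matching ranks into a subcommunicator */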
  PetscCall(PetscMalloc2(size, &firstPoints, size, &subgroupRanks));
  PetscCallMPI(MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm));
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", p));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(comm, &group));
  PetscCallMPI(MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup));
  PetscCallMPI(MPI_Comm_create(comm, subgroup, subcomm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(firstPoints, subgroupRanks));
  PetscFunctionReturn(0);
}
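
/*
   A minimal usage sketch (not part of this file; names are hypothetical): group the ranks of a
   DMDA by their starting index in the x direction, so ranks that share the same x range end up
   in the same subcommunicator. Every rank receives a valid communicator and must free it.

     MPI_Comm column;
     PetscCall(DMDAGetProcessorSubsets(da, DM_X, &column));
     ... collective operations within each subset, e.g. averaging along a pencil ...
     PetscCallMPI(MPI_Comm_free(&column));
*/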