/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/

/*@
  DMDAGetLogicalCoordinate - Returns the `i`, `j`, `k` logical coordinate of the mesh point closest to an `x`, `y`, `z` point in the coordinates of the `DMDA`

  Collective

  Input Parameters:
+ da - the `DMDA`
. x  - the first physical coordinate
. y  - the second physical coordinate
- z  - the third physical coordinate
  Output Parameters:
+ II - the first logical coordinate (-1 on processes that do not contain that point)
. JJ - the second logical coordinate (-1 on processes that do not contain that point)
. KK - the third logical coordinate (unused, since only 2d `DMDA` are currently supported)
. X  - the first coordinate of the located grid point
. Y  - the second coordinate of the located grid point
- Z  - the third coordinate of the located grid point (unused, since only 2d `DMDA` are currently supported)

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same coordinate values
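
  Example Usage:
  A minimal sketch, assuming `da` is a 2d `DMDA` whose coordinates have been set; the query point (0.5, 0.5) is arbitrary:
.vb
  PetscInt    i, j, k;
  PetscScalar X, Y, Z;

  /* da: a 2d DMDA with coordinates set (assumed) */
  PetscCall(DMDAGetLogicalCoordinate(da, 0.5, 0.5, 0.0, &i, &j, &k, &X, &Y, &Z));
  if (i >= 0) {
    /* this process owns the grid point closest to (0.5, 0.5); its coordinates are (X, Y) */
  }
.ve
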
.seealso: [](sec_struct), `DM`, `DMDA`
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z)
{
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  PetscFunctionBegin;
  PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 1d DMDA");
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  PetscCall(DMGetCoordinateDM(da, &dacoors));
  PetscCall(DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL));
  PetscCall(DMGetCoordinates(da, &coors));
  PetscCall(DMDAVecGetArrayRead(dacoors, coors, &c));
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  PetscCallMPI(MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da)));
  if (D != Dv) {
    /* some other process holds the closest point; contribute 0 to the root computation below */
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
    rank++; /* shift by one so the owning process contributes rank+1 while all others contribute 0 */
  }
  PetscCallMPI(MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da)));
  root--; /* undo the shift to recover the rank that owns the closest point */
  PetscCallMPI(MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCallMPI(MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCall(DMDAVecRestoreArrayRead(dacoors, coors, &c));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a `DMDA` vector

  Collective

  Input Parameters:
+ da  - the `DMDA`
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameters:
+ newvec  - the new vector that can hold the values (size zero on all processes except MPI rank 0)
- scatter - the `VecScatter` that will map from the original vector to the ray

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same `gp` value
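
  Example Usage:
  A minimal sketch, assuming `da` is a 2d `DMDA` and `gvec` is a global vector obtained from it; the scatter moves row `gp` onto MPI rank 0:
.vb
  Vec        ray;
  VecScatter scatter;

  /* gvec: a global vector of da (assumed) */
  PetscCall(DMDAGetRay(da, DM_Y, gp, &ray, &scatter));
  PetscCall(VecScatterBegin(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
  /* ... use the entries of ray on rank 0 ... */
  PetscCall(VecScatterDestroy(&scatter));
  PetscCall(VecDestroy(&ray));
.ve
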
.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `Vec`, `VecScatter`
@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  PetscFunctionBegin;
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
  PetscCall(DMDAGetAO(da, &ao));
  if (rank == 0) {
    /* rank 0 builds the list of global indices along the requested line and a vector to receive them */
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w, &indices));
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        PetscCall(AOApplicationToPetsc(ao, dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is));
      } else {
        PetscCheck(dir != DM_Y, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      if (dir == DM_Y) {
        /* a row is contiguous in the application ordering: M points with w components each */
        PetscCall(PetscMalloc1(dd->w * dd->M, &indices));
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        PetscCall(AOApplicationToPetsc(ao, dd->M * dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is));
      } else if (dir == DM_X) {
        /* a column is strided: w contiguous components, then a jump of one full row */
        PetscCall(PetscMalloc1(dd->w * dd->N, &indices));
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        PetscCall(AOApplicationToPetsc(ao, dd->w * dd->N, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is));
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    /* all other ranks contribute an empty vector and index set */
    PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, newvec));
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
  }
  PetscCall(DMGetGlobalVector(da, &vec));
  PetscCall(VecScatterCreate(vec, is, *newvec, NULL, scatter));
  PetscCall(DMRestoreGlobalVector(da, &vec));
  PetscCall(ISDestroy(&is));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  DMDAGetProcessorSubset - Returns a communicator consisting only of the
  processors in a `DMDA` that own a particular global x, y, or z grid point
  (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

  Collective; No Fortran Support

  Input Parameters:
+ da  - the `DMDA`
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameter:
. comm - new communicator

  Level: advanced

  Notes:
  All processors that share the `DMDA` must call this with the same `gp` value

  On processes that do not own the grid point, `comm` is set to `MPI_COMM_NULL`; on the
  other processes, `comm` should be freed with `MPI_Comm_free()` after use

  This routine is particularly useful to compute boundary conditions
  or other application-specific calculations that require manipulating
  sets of data throughout a logical plane of grid points.
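
  Example Usage:
  A minimal sketch, assuming a 3d `DMDA` and hypothetical `PetscReal` variables `local` and `global`; only the processes owning plane `gp` take part in the reduction:
.vb
  MPI_Comm subset;

  PetscCall(DMDAGetProcessorSubset(da, DM_Z, gp, &subset));
  if (subset != MPI_COMM_NULL) {
    /* local and global are placeholder PetscReal values (assumed) */
    PetscCallMPI(MPIU_Allreduce(&local, &global, 1, MPIU_REAL, MPIU_SUM, subset));
    PetscCallMPI(MPI_Comm_free(&subset));
  }
.ve
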
.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `DM_X`, `DM_Y`, `DM_Z`, `DMDAGetProcessorSubsets()`
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm)
{
  MPI_Group   group, subgroup;
  PetscMPIInt ict;
  PetscInt    flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  flag = 0;
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)da), &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    PetscCheck(gp >= 0 && gp < dd->P, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    PetscCheck(gp >= 0 && gp < dd->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    PetscCheck(gp >= 0 && gp < dd->M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  /* gather every process's ownership flag, then list the ranks that own the grid point */
  PetscCall(PetscMalloc2(size, &owners, size, &ranks));
  PetscCallMPI(MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da)));
  ict = 0;
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir)); /* checkbadSource \n */
  for (PetscMPIInt i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscCall(PetscInfo(da, "%d", i)); /* checkbadSource \n */
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(PetscObjectComm((PetscObject)da), &group));
  PetscCallMPI(MPI_Group_incl(group, ict, ranks, &subgroup));
  PetscCallMPI(MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(owners, ranks));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  DMDAGetProcessorSubsets - Returns communicators consisting only of the
  processors in a `DMDA` adjacent in a particular dimension,
  corresponding to a logical plane in a 3D grid or a line in a 2D grid.

  Collective; No Fortran Support

  Input Parameters:
+ da  - the `DMDA`
- dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`

  Output Parameter:
. subcomm - new communicator

  Level: advanced

  Notes:
  This routine is useful for distributing one-dimensional data in a tensor product grid.

  After use, `subcomm` should be freed with `MPI_Comm_free()`
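
  Example Usage:
  A minimal sketch; each process obtains a communicator over the processes whose subdomains start at the same x grid point:
.vb
  MPI_Comm    lineComm;
  PetscMPIInt lineRank;

  PetscCall(DMDAGetProcessorSubsets(da, DM_X, &lineComm));
  PetscCallMPI(MPI_Comm_rank(lineComm, &lineRank));
  /* ... communicate among these processes with lineComm ... */
  PetscCallMPI(MPI_Comm_free(&lineComm));
.ve
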
.seealso: [](sec_struct), `DM`, `DMDA`, `DMDirection`, `DMDAGetProcessorSubset()`, `DM_X`, `DM_Y`, `DM_Z`
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm     comm;
  MPI_Group    group, subgroup;
  PetscMPIInt  subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt  size, *subgroupRanks = NULL;
  PetscInt     xs, xm, ys, ym, zs, zm, firstPoint;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  PetscCall(PetscObjectGetComm((PetscObject)da, &comm));
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  /* processes whose subdomains begin at the same grid point in this direction form one subgroup */
  PetscCall(PetscMalloc2(size, &firstPoints, size, &subgroupRanks));
  PetscCallMPI(MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm));
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir)); /* checkbadSource \n */
  for (PetscMPIInt p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscCall(PetscInfo(da, "%d", p)); /* checkbadSource \n */
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(comm, &group));
  PetscCallMPI(MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup));
  PetscCallMPI(MPI_Comm_create(comm, subgroup, subcomm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(firstPoints, subgroupRanks));
  PetscFunctionReturn(PETSC_SUCCESS);
}