/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/

/*@
  DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate for the closest mesh point to an x,y,z point in the coordinates of the `DMDA`

  Collective on da

  Input Parameters:
+ da - the distributed array
. x  - the first physical coordinate
. y  - the second physical coordinate
- z  - the third physical coordinate

  Output Parameters:
+ II - the first logical coordinate (-1 on processes that do not contain that point)
. JJ - the second logical coordinate (-1 on processes that do not contain that point)
. KK - the third logical coordinate (-1 on processes that do not contain that point)
. X  - (optional) the first coordinate of the located grid point
. Y  - (optional) the second coordinate of the located grid point
- Z  - (optional) the third coordinate of the located grid point

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same coordinate value

.seealso: `DM`, `DMDA`
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z)
{
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  PetscFunctionBegin;
  PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 1d DMDA");
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  PetscCall(DMGetCoordinateDM(da, &dacoors));
  PetscCall(DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL));
  PetscCall(DMGetCoordinates(da, &coors));
  PetscCall(DMDAVecGetArrayRead(dacoors, coors, &c));
  /* find the locally owned grid point closest to (x,y) */
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  /* the global minimum distance determines which process owns the closest point */
  PetscCall(MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da)));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
    rank++;
  }
  /* the owning process contributes rank+1 and all others contribute 0, so the sum identifies the broadcast root */
  PetscCall(MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da)));
  root--;
  PetscCallMPI(MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCallMPI(MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCall(DMDAVecRestoreArrayRead(dacoors, coors, &c));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a `DMDA` vector

  Collective on da

  Input Parameters:
+ da - the distributed array
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameters:
+ newvec  - the new vector that can hold the values (size zero on all processes except process 0)
- scatter - the `VecScatter` that will map from the original vector to the slice

  Level: advanced

  Note:
  All processors that share the `DMDA` must call this with the same gp value

.seealso: `DM`, `DMDA`, `DMDirection`, `Vec`, `VecScatter`
@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter)
{
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  PetscFunctionBegin;
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
  PetscCall(DMDAGetAO(da, &ao));
  /* Process 0 builds the natural-ordering indices of the requested ray; all other processes contribute an empty piece */
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w, &indices));
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        /* convert from the natural (application) ordering to the PETSc ordering */
        PetscCall(AOApplicationToPetsc(ao, dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is));
      } else {
        PetscCheck(dir != DM_Y, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      if (dir == DM_Y) {
        /* row y = gp: its M*w entries are contiguous in the natural ordering */
        PetscCall(PetscMalloc1(dd->w * dd->M, &indices));
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        PetscCall(AOApplicationToPetsc(ao, dd->M * dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is));
      } else if (dir == DM_X) {
        /* column x = gp: w dof at each of the N rows, with a stride of M*w between rows in the natural ordering */
        PetscCall(PetscMalloc1(dd->w * dd->N, &indices));
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        PetscCall(AOApplicationToPetsc(ao, dd->w * dd->N, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is));
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, newvec));
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
  }
  /* scatter from the distributed global vector onto the sequential ray vector owned by process 0 */
  PetscCall(DMGetGlobalVector(da, &vec));
  PetscCall(VecScatterCreate(vec, is, *newvec, NULL, scatter));
  PetscCall(DMRestoreGlobalVector(da, &vec));
  PetscCall(ISDestroy(&is));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  DMDAGetProcessorSubset - Returns a communicator consisting only of the
  processors in a `DMDA` that own a particular global x, y, or z grid point
  (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

  Collective on da

  Input Parameters:
+ da  - the distributed array
. dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`
- gp  - global grid point number in this direction

  Output Parameter:
. comm - new communicator

  Level: advanced

  Notes:
  All processors that share the `DMDA` must call this with the same gp value

  After use, comm should be freed with `MPI_Comm_free()`

  This routine is particularly useful to compute boundary conditions
  or other application-specific calculations that require manipulating
  sets of data throughout a logical plane of grid points.

  Fortran Note:
  Not supported from Fortran

.seealso: `DM`, `DMDA`, `DMDirection`
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm)
{
  MPI_Group   group, subgroup;
  PetscInt    i, ict, flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  flag = 0;
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)da), &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    PetscCheck(gp >= 0 && gp <= dd->P, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    PetscCheck(gp >= 0 && gp <= dd->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    PetscCheck(gp >= 0 && gp <= dd->M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &owners, size, &ranks));
  PetscCallMPI(MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da)));
  ict = 0;
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", i));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(PetscObjectComm((PetscObject)da), &group));
  PetscCallMPI(MPI_Group_incl(group, ict, ranks, &subgroup));
  PetscCallMPI(MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(owners, ranks));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  DMDAGetProcessorSubsets - Returns communicators consisting only of the
  processors in a `DMDA` adjacent in a particular dimension,
  corresponding to a logical plane in a 3D grid or a line in a 2D grid.

  Collective on da

  Input Parameters:
+ da  - the distributed array
- dir - Cartesian direction, either `DM_X`, `DM_Y`, or `DM_Z`

  Output Parameter:
. subcomm - new communicator

  Level: advanced

  Notes:
  This routine is useful for distributing one-dimensional data in a tensor product grid.

  After use, subcomm should be freed with `MPI_Comm_free()`

  Fortran Note:
  Not supported from Fortran

.seealso: `DM`, `DMDA`, `DMDirection`
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm)
{
  MPI_Comm     comm;
  MPI_Group    group, subgroup;
  PetscInt     subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt  size, *subgroupRanks = NULL;
  PetscInt     xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  PetscCall(PetscObjectGetComm((PetscObject)da, &comm));
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &firstPoints, size, &subgroupRanks));
  PetscCallMPI(MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm));
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", p));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(comm, &group));
  PetscCallMPI(MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup));
  PetscCallMPI(MPI_Comm_create(comm, subgroup, subcomm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(firstPoints, subgroupRanks));
  PetscFunctionReturn(PETSC_SUCCESS);
}
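
/*
  Usage sketch (illustration only, not part of the library): assuming a 2d `DMDA` named da
  already exists, the fragment below shows how DMDAGetRay() and the returned VecScatter
  might be combined to gather the row y = gp of a global vector onto process zero. The
  variable names (gvec, ray, scatter, gp) are hypothetical.

    Vec        gvec, ray;
    VecScatter scatter;
    PetscInt   gp = 0;

    PetscCall(DMGetGlobalVector(da, &gvec));
    // ... fill gvec with data ...
    PetscCall(DMDAGetRay(da, DM_Y, gp, &ray, &scatter));
    PetscCall(VecScatterBegin(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecScatterEnd(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
    // ray now holds the requested row on process zero (and has length zero elsewhere)
    PetscCall(VecScatterDestroy(&scatter));
    PetscCall(VecDestroy(&ray));
    PetscCall(DMRestoreGlobalVector(da, &gvec));
*/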