/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/

/*@
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on da

   Input Parameters:
+  da - the distributed array
.  x  - the first physical coordinate
.  y  - the second physical coordinate
-  z  - the third physical coordinate

   Output Parameters:
+  II - the first logical coordinate (-1 on processes that do not contain that point)
.  JJ - the second logical coordinate (-1 on processes that do not contain that point)
.  KK - the third logical coordinate (-1 on processes that do not contain that point)
.  X  - (optional) the first coordinate of the located grid point
.  Y  - (optional) the second coordinate of the located grid point
-  Z  - (optional) the third coordinate of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate values

@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da, PetscScalar x, PetscScalar y, PetscScalar z, PetscInt *II, PetscInt *JJ, PetscInt *KK, PetscScalar *X, PetscScalar *Y, PetscScalar *Z) {
  Vec          coors;
  DM           dacoors;
  DMDACoor2d **c;
  PetscInt     i, j, xs, xm, ys, ym;
  PetscReal    d, D = PETSC_MAX_REAL, Dv;
  PetscMPIInt  rank, root;

  PetscFunctionBegin;
  PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 1d DMDA");
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  PetscCall(DMGetCoordinateDM(da, &dacoors));
  PetscCall(DMDAGetCorners(dacoors, &xs, &ys, NULL, &xm, &ym, NULL));
  PetscCall(DMGetCoordinates(da, &coors));
  PetscCall(DMDAVecGetArrayRead(dacoors, coors, &c));
  for (j = ys; j < ys + ym; j++) {
    for (i = xs; i < xs + xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x) * (c[j][i].x - x) + (c[j][i].y - y) * (c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  PetscCall(MPIU_Allreduce(&D, &Dv, 1, MPIU_REAL, MPIU_MIN, PetscObjectComm((PetscObject)da)));
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
    rank++;
  }
  PetscCall(MPIU_Allreduce(&rank, &root, 1, MPI_INT, MPI_SUM, PetscObjectComm((PetscObject)da)));
  root--;
  PetscCallMPI(MPI_Bcast(X, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCallMPI(MPI_Bcast(Y, 1, MPIU_SCALAR, root, PetscObjectComm((PetscObject)da)));
  PetscCall(DMDAVecRestoreArrayRead(dacoors, coors, &c));
  PetscFunctionReturn(0);
}
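
/*
   Example usage (an illustrative sketch, not taken from this file: `da` is assumed to be a
   2d DMDA with coordinates already set, and (px, py) is a user-chosen physical point; note
   that the current 2d-only implementation leaves KK and Z untouched):

     PetscInt    i, j, k;
     PetscScalar px = 0.5, py = 0.25, X, Y, Z;

     PetscCall(DMDAGetLogicalCoordinate(da, px, py, 0.0, &i, &j, &k, &X, &Y, &Z));
     if (i >= 0) {
       // this process owns the closest grid point (i,j); X and Y hold its physical
       // coordinates and are also broadcast to every other process
     }
*/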

/*@
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da  - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp  - global grid point number in this direction

   Output Parameters:
+  newvec  - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

@*/
PetscErrorCode DMDAGetRay(DM da, DMDirection dir, PetscInt gp, Vec *newvec, VecScatter *scatter) {
  PetscMPIInt rank;
  DM_DA      *dd = (DM_DA *)da->data;
  IS          is;
  AO          ao;
  Vec         vec;
  PetscInt   *indices, i, j;

  PetscFunctionBegin;
  PetscCheck(da->dim != 3, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get slice from 3d DMDA");
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)da), &rank));
  PetscCall(DMDAGetAO(da, &ao));
  if (rank == 0) {
    if (da->dim == 1) {
      if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w, &indices));
        indices[0] = dd->w * gp;
        for (i = 1; i < dd->w; ++i) indices[i] = indices[i - 1] + 1;
        PetscCall(AOApplicationToPetsc(ao, dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w, indices, PETSC_OWN_POINTER, &is));
      } else {
        PetscCheck(dir != DM_Y, PetscObjectComm((PetscObject)da), PETSC_ERR_SUP, "Cannot get Y slice from 1d DMDA");
        SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
      }
    } else {
      if (dir == DM_Y) {
        PetscCall(PetscMalloc1(dd->w * dd->M, &indices));
        indices[0] = gp * dd->M * dd->w;
        for (i = 1; i < dd->M * dd->w; i++) indices[i] = indices[i - 1] + 1;

        PetscCall(AOApplicationToPetsc(ao, dd->M * dd->w, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->M * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->M, indices, PETSC_OWN_POINTER, &is));
      } else if (dir == DM_X) {
        PetscCall(PetscMalloc1(dd->w * dd->N, &indices));
        indices[0] = dd->w * gp;
        for (j = 1; j < dd->w; j++) indices[j] = indices[j - 1] + 1;
        for (i = 1; i < dd->N; i++) {
          indices[i * dd->w] = indices[i * dd->w - 1] + dd->w * dd->M - dd->w + 1;
          for (j = 1; j < dd->w; j++) indices[i * dd->w + j] = indices[i * dd->w + j - 1] + 1;
        }
        PetscCall(AOApplicationToPetsc(ao, dd->w * dd->N, indices));
        PetscCall(VecCreate(PETSC_COMM_SELF, newvec));
        PetscCall(VecSetBlockSize(*newvec, dd->w));
        PetscCall(VecSetSizes(*newvec, dd->N * dd->w, PETSC_DETERMINE));
        PetscCall(VecSetType(*newvec, VECSEQ));
        PetscCall(ISCreateGeneral(PETSC_COMM_SELF, dd->w * dd->N, indices, PETSC_OWN_POINTER, &is));
      } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Unknown DMDirection");
    }
  } else {
    PetscCall(VecCreateSeq(PETSC_COMM_SELF, 0, newvec));
    PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
  }
  PetscCall(DMGetGlobalVector(da, &vec));
  PetscCall(VecScatterCreate(vec, is, *newvec, NULL, scatter));
  PetscCall(DMRestoreGlobalVector(da, &vec));
  PetscCall(ISDestroy(&is));
  PetscFunctionReturn(0);
}
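
/*
   Example usage (an illustrative sketch, not taken from this file: `da` is assumed to be a
   2d DMDA and `gvec` one of its global vectors; the grid point index 3 is arbitrary):

     Vec        ray;
     VecScatter scatter;

     PetscCall(DMDAGetRay(da, DM_X, 3, &ray, &scatter));
     PetscCall(VecScatterBegin(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
     PetscCall(VecScatterEnd(scatter, gvec, ray, INSERT_VALUES, SCATTER_FORWARD));
     // on process 0, ray now holds the values at all grid points with x index 3
     PetscCall(VecScatterDestroy(&scatter));
     PetscCall(VecDestroy(&ray));
*/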

/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on da

   Input Parameters:
+  da  - the distributed array
.  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z
-  gp  - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   After use, comm should be freed with MPI_Comm_free()

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubset(DM da, DMDirection dir, PetscInt gp, MPI_Comm *comm) {
  MPI_Group   group, subgroup;
  PetscInt    i, ict, flag, *owners, xs, xm, ys, ym, zs, zm;
  PetscMPIInt size, *ranks = NULL;
  DM_DA      *dd = (DM_DA *)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  flag = 0;
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)da), &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    PetscCheck(gp >= 0 && gp <= dd->P, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= zs && gp < zs + zm) flag = 1;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    PetscCheck(gp >= 0 && gp <= dd->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= ys && gp < ys + ym) flag = 1;
  } else if (dir == DM_X) {
    PetscCheck(gp >= 0 && gp <= dd->M, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "invalid grid point");
    if (gp >= xs && gp < xs + xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da), PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &owners, size, &ranks));
  PetscCallMPI(MPI_Allgather(&flag, 1, MPIU_INT, owners, 1, MPIU_INT, PetscObjectComm((PetscObject)da)));
  ict = 0;
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubset: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (i = 0; i < size; i++) {
    if (owners[i]) {
      ranks[ict] = i;
      ict++;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", i));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(PetscObjectComm((PetscObject)da), &group));
  PetscCallMPI(MPI_Group_incl(group, ict, ranks, &subgroup));
  PetscCallMPI(MPI_Comm_create(PetscObjectComm((PetscObject)da), subgroup, comm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(owners, ranks));
  PetscFunctionReturn(0);
}
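
/*
   Example usage (an illustrative sketch, not taken from this file: `da` is assumed to be a
   2d or 3d DMDA and the grid point index 0 is arbitrary; on processes that do not own the
   given grid point, MPI_Comm_create() leaves the communicator as MPI_COMM_NULL):

     MPI_Comm subset;

     PetscCall(DMDAGetProcessorSubset(da, DM_Y, 0, &subset));
     if (subset != MPI_COMM_NULL) {
       // only processes owning grid points with y index 0 are members, e.g. for
       // cooperating on a boundary condition along that line or plane
       PetscCallMPI(MPI_Barrier(subset));
       PetscCallMPI(MPI_Comm_free(&subset));
     }
*/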

/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on da

   Input Parameters:
+  da  - the distributed array
-  dir - Cartesian direction, either DM_X, DM_Y, or DM_Z

   Output Parameter:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

   After use, subcomm should be freed with MPI_Comm_free()

   Not supported from Fortran

@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDirection dir, MPI_Comm *subcomm) {
  MPI_Comm     comm;
  MPI_Group    group, subgroup;
  PetscInt     subgroupSize = 0;
  PetscInt    *firstPoints;
  PetscMPIInt  size, *subgroupRanks = NULL;
  PetscInt     xs, xm, ys, ym, zs, zm, firstPoint, p;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(da, DM_CLASSID, 1, DMDA);
  PetscCall(PetscObjectGetComm((PetscObject)da, &comm));
  PetscCall(DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (dir == DM_Z) {
    PetscCheck(da->dim >= 3, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DM_Y) {
    PetscCheck(da->dim != 1, comm, PETSC_ERR_ARG_OUTOFRANGE, "DM_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DM_X) {
    firstPoint = xs;
  } else SETERRQ(comm, PETSC_ERR_ARG_OUTOFRANGE, "Invalid direction");

  PetscCall(PetscMalloc2(size, &firstPoints, size, &subgroupRanks));
  PetscCallMPI(MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm));
  PetscCall(PetscInfo(da, "DMDAGetProcessorSubsets: dim=%" PetscInt_FMT ", direction=%d, procs: ", da->dim, (int)dir));
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      PetscCall(PetscInfo(da, "%" PetscInt_FMT " ", p));
    }
  }
  PetscCall(PetscInfo(da, "\n"));
  PetscCallMPI(MPI_Comm_group(comm, &group));
  PetscCallMPI(MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup));
  PetscCallMPI(MPI_Comm_create(comm, subgroup, subcomm));
  PetscCallMPI(MPI_Group_free(&subgroup));
  PetscCallMPI(MPI_Group_free(&group));
  PetscCall(PetscFree2(firstPoints, subgroupRanks));
  PetscFunctionReturn(0);
}
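
/*
   Example usage (an illustrative sketch, not taken from this file: `da` and the local
   contribution `lsum` are placeholders):

     MPI_Comm  line;
     PetscReal lsum = 1.0, sum;

     PetscCall(DMDAGetProcessorSubsets(da, DM_X, &line));
     // every process obtains the communicator grouping the processes whose subdomains
     // start at the same x index, so the reduction acts within each such line/plane
     PetscCall(MPIU_Allreduce(&lsum, &sum, 1, MPIU_REAL, MPIU_SUM, line));
     PetscCallMPI(MPI_Comm_free(&line));
*/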