/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include <petsc-private/dmdaimpl.h>    /*I  "petscdmda.h"   I*/

#undef __FUNCT__
#define __FUNCT__ "DMDAGetLogicalCoordinate"
/*@C
   DMDAGetLogicalCoordinate - Returns the i,j,k logical coordinate of the mesh point closest to an x,y,z point given in the coordinates of the DMDA

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  x,y,z - the physical coordinates

   Output Parameters:
+  II, JJ, KK - the logical coordinate (-1 on processes that do not contain that point)
-  X, Y, Z - (optional) the coordinates of the located grid point

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same coordinate value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetLogicalCoordinate(DM da,PetscScalar x,PetscScalar y,PetscScalar z,PetscInt *II,PetscInt *JJ,PetscInt *KK,PetscScalar *X,PetscScalar *Y,PetscScalar *Z)
{
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  Vec            coors;
  DM             dacoors;
  DMDACoor2d     **c;
  PetscInt       i,j,xs,xm,ys,ym;
  PetscReal      d,D = PETSC_MAX_REAL,Dv;
  PetscMPIInt    rank,root;

  PetscFunctionBegin;
  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get point from 3d DMDA");

  *II = -1;
  *JJ = -1;

  ierr = DMGetCoordinateDM(da,&dacoors);CHKERRQ(ierr);
  ierr = DMDAGetCorners(dacoors,&xs,&ys,NULL,&xm,&ym,NULL);CHKERRQ(ierr);
  ierr = DMGetCoordinates(da,&coors);CHKERRQ(ierr);
  ierr = DMDAVecGetArray(dacoors,coors,&c);CHKERRQ(ierr);
  for (j=ys; j<ys+ym; j++) {
    for (i=xs; i<xs+xm; i++) {
      d = PetscSqrtReal(PetscRealPart((c[j][i].x - x)*(c[j][i].x - x) + (c[j][i].y - y)*(c[j][i].y - y)));
      if (d < D) {
        D   = d;
        *II = i;
        *JJ = j;
      }
    }
  }
  ierr = MPI_Allreduce(&D,&Dv,1,MPIU_REAL,MPI_MIN,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  if (D != Dv) {
    *II  = -1;
    *JJ  = -1;
    rank = 0;
  } else {
    *X = c[*JJ][*II].x;
    *Y = c[*JJ][*II].y;

    ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
    rank++;
  }
  ierr = MPI_Allreduce(&rank,&root,1,MPI_INT,MPI_SUM,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  root--;
  ierr = MPI_Bcast(X,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = MPI_Bcast(Y,1,MPIU_SCALAR,root,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ierr = DMDAVecRestoreArray(dacoors,coors,&c);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
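
/*
   A minimal usage sketch for DMDAGetLogicalCoordinate (an illustration, not part of the
   library source).  It assumes "da" is a 2d DMDA whose coordinates have already been
   set, for example with DMDASetUniformCoordinates.  Every rank passes the same physical
   point; the coordinates of the nearest grid point are broadcast to all ranks, while
   only the owning rank receives non-negative logical indices.  For a 2d DMDA the KK
   and Z arguments are not written to.

      PetscInt    i,j,k;
      PetscScalar X,Y,Z;

      ierr = DMDASetUniformCoordinates(da,0.0,1.0,0.0,1.0,0.0,0.0);CHKERRQ(ierr);
      ierr = DMDAGetLogicalCoordinate(da,0.25,0.5,0.0,&i,&j,&k,&X,&Y,&Z);CHKERRQ(ierr);
      if (i >= 0 && j >= 0) {
        ierr = PetscPrintf(PETSC_COMM_SELF,"closest grid point is (%D,%D)\n",i,j);CHKERRQ(ierr);
      }
*/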

#undef __FUNCT__
#define __FUNCT__ "DMDAGetRay"
/*@C
   DMDAGetRay - Returns a vector on process zero that contains a row or column of the values in a DMDA vector

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
+  newvec - the new vector that can hold the values (size zero on all processes except process 0)
-  scatter - the VecScatter that will map from the original vector to the slice

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetRay(DM da,DMDADirection dir,PetscInt gp,Vec *newvec,VecScatter *scatter)
{
  PetscMPIInt    rank;
  DM_DA          *dd = (DM_DA*)da->data;
  PetscErrorCode ierr;
  IS             is;
  AO             ao;
  Vec            vec;
  PetscInt       *indices,i,j;

  PetscFunctionBegin;
  if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 1d DMDA");
  if (dd->dim == 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Cannot get slice from 3d DMDA");
  ierr = DMDAGetAO(da,&ao);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);
  if (!rank) {
    if (dir == DMDA_Y) {
      ierr       = PetscMalloc(dd->w*dd->M*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = gp*dd->M*dd->w;
      for (i=1; i<dd->M*dd->w; i++) indices[i] = indices[i-1] + 1;

      ierr = AOApplicationToPetsc(ao,dd->M*dd->w,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->M*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->M,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else if (dir == DMDA_X) {
      ierr       = PetscMalloc(dd->w*dd->N*sizeof(PetscInt),&indices);CHKERRQ(ierr);
      indices[0] = dd->w*gp;
      for (j=1; j<dd->w; j++) indices[j] = indices[j-1] + 1;
      for (i=1; i<dd->N; i++) {
        indices[i*dd->w] = indices[i*dd->w-1] + dd->w*dd->M - dd->w + 1;
        for (j=1; j<dd->w; j++) indices[i*dd->w + j] = indices[i*dd->w + j - 1] + 1;
      }
      ierr = AOApplicationToPetsc(ao,dd->w*dd->N,indices);CHKERRQ(ierr);
      ierr = VecCreate(PETSC_COMM_SELF,newvec);CHKERRQ(ierr);
      ierr = VecSetBlockSize(*newvec,dd->w);CHKERRQ(ierr);
      ierr = VecSetSizes(*newvec,dd->N*dd->w,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = VecSetType(*newvec,VECSEQ);CHKERRQ(ierr);
      ierr = ISCreateGeneral(PETSC_COMM_SELF,dd->w*dd->N,indices,PETSC_OWN_POINTER,&is);CHKERRQ(ierr);
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Unknown DMDADirection");
  } else {
    ierr = VecCreateSeq(PETSC_COMM_SELF,0,newvec);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,0,NULL,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  }
  ierr = DMGetGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = VecScatterCreate(vec,is,*newvec,NULL,scatter);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(da,&vec);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
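
/*
   A minimal usage sketch for DMDAGetRay (an illustration, not part of the library
   source).  It assumes "da" is a 2d DMDA and "global" is a global vector obtained
   from it.  The ray vector has nonzero length only on rank 0; the returned
   VecScatter moves the requested slice (here grid point 2 in the y direction)
   from the global vector into it.

      Vec        ray;
      VecScatter scat;

      ierr = DMDAGetRay(da,DMDA_Y,2,&ray,&scat);CHKERRQ(ierr);
      ierr = VecScatterBegin(scat,global,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(scat,global,ray,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterDestroy(&scat);CHKERRQ(ierr);
      ierr = VecDestroy(&ray);CHKERRQ(ierr);
*/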

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubset"
/*@C
   DMDAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DMDA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z
-  gp - global grid point number in this direction

   Output Parameters:
.  comm - new communicator

   Level: advanced

   Notes:
   All processors that share the DMDA must call this with the same gp value

   This routine is particularly useful to compute boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetProcessorSubset(DM da,DMDADirection dir,PetscInt gp,MPI_Comm *comm)
{
  MPI_Group      group,subgroup;
  PetscErrorCode ierr;
  PetscInt       i,ict,flag,*owners,xs,xm,ys,ym,zs,zm;
  PetscMPIInt    size,*ranks = NULL;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da,DM_CLASSID,1);
  flag = 0;
  ierr = DMDAGetCorners(da,&xs,&ys,&zs,&xm,&ym,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)da),&size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    if (gp < 0 || gp > dd->P) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    if (gp < 0 || gp > dd->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DMDA_X) {
    if (gp < 0 || gp > dd->M) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  ierr = PetscMalloc2(size,PetscInt,&owners,size,PetscMPIInt,&ranks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&flag,1,MPIU_INT,owners,1,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
  ict  = 0;
  ierr = PetscInfo2(da,"DMDAGetProcessorSubset: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      ierr       = PetscInfo1(da,"%D ",i);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da,"\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(PetscObjectComm((PetscObject)da),&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(PetscObjectComm((PetscObject)da),subgroup,comm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(owners,ranks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
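
/*
   A minimal usage sketch for DMDAGetProcessorSubset (an illustration, not part of
   the library source).  It assumes "da" is a 3d DMDA.  Ranks that do not own the
   requested plane receive MPI_COMM_NULL from the underlying MPI_Comm_create and
   must not use the communicator; in this sketch the member ranks free it when done.

      MPI_Comm    plane;
      PetscMPIInt prank;

      ierr = DMDAGetProcessorSubset(da,DMDA_Z,0,&plane);CHKERRQ(ierr);
      if (plane != MPI_COMM_NULL) {
        ierr = MPI_Comm_rank(plane,&prank);CHKERRQ(ierr);
        ierr = MPI_Comm_free(&plane);CHKERRQ(ierr);
      }
*/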

#undef __FUNCT__
#define __FUNCT__ "DMDAGetProcessorSubsets"
/*@C
   DMDAGetProcessorSubsets - Returns communicators consisting only of the
   processors in a DMDA adjacent in a particular dimension,
   corresponding to a logical plane in a 3D grid or a line in a 2D grid.

   Collective on DMDA

   Input Parameters:
+  da - the distributed array
-  dir - Cartesian direction, either DMDA_X, DMDA_Y, or DMDA_Z

   Output Parameters:
.  subcomm - new communicator

   Level: advanced

   Notes:
   This routine is useful for distributing one-dimensional data in a tensor product grid.

.keywords: distributed array, get, processor subset
@*/
PetscErrorCode DMDAGetProcessorSubsets(DM da, DMDADirection dir, MPI_Comm *subcomm)
{
  MPI_Comm       comm;
  MPI_Group      group, subgroup;
  PetscInt       subgroupSize = 0;
  PetscInt       *firstPoints;
  PetscMPIInt    size, *subgroupRanks = NULL;
  PetscInt       xs, xm, ys, ym, zs, zm, firstPoint, p;
  PetscErrorCode ierr;
  DM_DA          *dd = (DM_DA*)da->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(da, DM_CLASSID, 1);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = DMDAGetCorners(da, &xs, &ys, &zs, &xm, &ym, &zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  if (dir == DMDA_Z) {
    if (dd->dim < 3) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Z invalid for DMDA dim < 3");
    firstPoint = zs;
  } else if (dir == DMDA_Y) {
    if (dd->dim == 1) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"DMDA_Y invalid for DMDA dim = 1");
    firstPoint = ys;
  } else if (dir == DMDA_X) {
    firstPoint = xs;
  } else SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

  ierr = PetscMalloc2(size, PetscInt, &firstPoints, size, PetscMPIInt, &subgroupRanks);CHKERRQ(ierr);
  ierr = MPI_Allgather(&firstPoint, 1, MPIU_INT, firstPoints, 1, MPIU_INT, comm);CHKERRQ(ierr);
  ierr = PetscInfo2(da,"DMDAGetProcessorSubsets: dim=%D, direction=%d, procs: ",dd->dim,(int)dir);CHKERRQ(ierr);
  for (p = 0; p < size; ++p) {
    if (firstPoints[p] == firstPoint) {
      subgroupRanks[subgroupSize++] = p;
      ierr = PetscInfo1(da, "%D ", p);CHKERRQ(ierr);
    }
  }
  ierr = PetscInfo(da, "\n");CHKERRQ(ierr);
  ierr = MPI_Comm_group(comm, &group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group, subgroupSize, subgroupRanks, &subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(comm, subgroup, subcomm);CHKERRQ(ierr);
  ierr = MPI_Group_free(&subgroup);CHKERRQ(ierr);
  ierr = MPI_Group_free(&group);CHKERRQ(ierr);
  ierr = PetscFree2(firstPoints, subgroupRanks);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
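
/*
   A minimal usage sketch for DMDAGetProcessorSubsets (an illustration, not part of
   the library source).  Unlike DMDAGetProcessorSubset, every rank belongs to exactly
   one of the resulting communicators: ranks whose local subdomains start at the same
   grid index in the chosen direction are grouped together, which is convenient for
   reducing or broadcasting one-dimensional data along that direction of a tensor
   product grid.  Freeing the communicator at the end is an assumption of this sketch.

      MPI_Comm    column;
      PetscMPIInt crank;

      ierr = DMDAGetProcessorSubsets(da,DMDA_X,&column);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(column,&crank);CHKERRQ(ierr);
      ierr = MPI_Comm_free(&column);CHKERRQ(ierr);
*/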