/*
   Code for manipulating distributed regular 1d arrays in parallel.
   This file was created by Peter Mell  6/30/95
*/

#include <petsc-private/daimpl.h>     /*I  "petscdmda.h"  I*/

const char *const DMDABoundaryTypes[] = {"BOUNDARY_NONE","BOUNDARY_GHOSTED","BOUNDARY_PERIODIC","DMDA_",0};

#undef __FUNCT__
#define __FUNCT__ "DMView_DA_1d"
PetscErrorCode DMView_DA_1d(DM da,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscBool      iascii,isdraw,isbinary;
  DM_DA          *dd = (DM_DA*)da->data;
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  PetscBool      ismatlab;
#endif

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(((PetscObject)da)->comm,&rank);CHKERRQ(ierr);

  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERMATLAB,&ismatlab);CHKERRQ(ierr);
#endif
  if (iascii) {
    PetscViewerFormat format;

    ierr = PetscViewerGetFormat(viewer, &format);CHKERRQ(ierr);
    if (format != PETSC_VIEWER_ASCII_VTK && format != PETSC_VIEWER_ASCII_VTK_CELL) {
      DMDALocalInfo info;
      ierr = DMDAGetLocalInfo(da,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %D m %D w %D s %D\n",rank,dd->M,dd->m,dd->w,dd->s);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %D %D\n",info.xs,info.xs+info.xm);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);
    } else {
      ierr = DMView_DA_VTK(da, viewer);CHKERRQ(ierr);
    }
  } else if (isdraw) {
    PetscDraw draw;
    double    ymin = -1,ymax = 1,xmin = -1,xmax = dd->M,x;
    PetscInt  base;
    char      node[10];
    PetscBool isnull;

    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);

    ierr = PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);CHKERRQ(ierr);
    ierr = PetscDrawSynchronizedClear(draw);CHKERRQ(ierr);

    /* first processor draws all node lines */
    if (!rank) {
      PetscInt xmin_tmp;
      ymin = 0.0; ymax = 0.3;

      /* ADIC doesn't like doubles in a for loop */
      for (xmin_tmp=0; xmin_tmp < dd->M; xmin_tmp++) {
        ierr = PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);CHKERRQ(ierr);
      }

      xmin = 0.0; xmax = dd->M - 1;
      ierr = PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);CHKERRQ(ierr);
      ierr = PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);CHKERRQ(ierr);
    }

    ierr = PetscDrawSynchronizedFlush(draw);CHKERRQ(ierr);
    ierr = PetscDrawPause(draw);CHKERRQ(ierr);

    /* draw my box */
    ymin = 0; ymax = 0.3; xmin = dd->xs / dd->w; xmax = (dd->xe / dd->w) - 1;
    ierr = PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);

    /* Put in index numbers */
    base = dd->base / dd->w;
    for (x=xmin; x<=xmax; x++) {
      sprintf(node,"%d",(int)base++);
      ierr = PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);CHKERRQ(ierr);
    }

    ierr = PetscDrawSynchronizedFlush(draw);CHKERRQ(ierr);
    ierr = PetscDrawPause(draw);CHKERRQ(ierr);
  } else if (isbinary) {
    ierr = DMView_DA_Binary(da,viewer);CHKERRQ(ierr);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  } else if (ismatlab) {
    ierr = DMView_DA_Matlab(da,viewer);CHKERRQ(ierr);
#endif
  } else SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_SUP,"Viewer type %s not supported for DMDA 1d",((PetscObject)viewer)->type_name);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "DMView_DA_Private"
/*
   Processes command line options to determine if/how a DMDA
   is to be viewed. Called by DMDACreateXX()
*/
PetscErrorCode DMView_DA_Private(DM da)
{
  PetscErrorCode ierr;
  PetscBool      flg1 = PETSC_FALSE;
  PetscViewer    view;

  PetscFunctionBegin;
  ierr = PetscOptionsBegin(((PetscObject)da)->comm,((PetscObject)da)->prefix,"DMDA viewing options","DMDA");CHKERRQ(ierr);
  ierr = PetscOptionsBool("-da_view","Print information about the DMDA's distribution","DMView",PETSC_FALSE,&flg1,PETSC_NULL);CHKERRQ(ierr);
  if (flg1) {
    ierr = PetscViewerASCIIGetStdout(((PetscObject)da)->comm,&view);CHKERRQ(ierr);
    ierr = DMView(da,view);CHKERRQ(ierr);
  }
  flg1 = PETSC_FALSE;
  ierr = PetscOptionsBool("-da_view_draw","Draw how the DMDA is distributed","DMView",PETSC_FALSE,&flg1,PETSC_NULL);CHKERRQ(ierr);
  if (flg1) {ierr = DMView(da,PETSC_VIEWER_DRAW_(((PetscObject)da)->comm));CHKERRQ(ierr);}
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
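
/*
   For example, a program that creates a DMDA can display its layout from
   the command line (./myapp is a hypothetical executable name):

       mpiexec -n 2 ./myapp -da_view -da_view_draw

   -da_view prints each process's ownership information to stdout and
   -da_view_draw sketches the distribution in a draw window.
*/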

#undef __FUNCT__
#define __FUNCT__ "DMSetUp_DA_1D"
PetscErrorCode DMSetUp_DA_1D(DM da)
{
  DM_DA                  *dd    = (DM_DA*)da->data;
  const PetscInt         M      = dd->M;
  const PetscInt         dof    = dd->w;
  const PetscInt         s      = dd->s;
  const PetscInt         o      = dd->overlap;
  const PetscInt         sDist  = s*dof;  /* absolute stencil distance */
  const PetscInt         oDist  = o*dof;
  const PetscInt         *lx    = dd->lx;
  const DMDABoundaryType bx     = dd->bx;
  MPI_Comm               comm;
  Vec                    local, global;
  VecScatter             ltog, gtol;
  IS                     to, from;
  PetscBool              flg1 = PETSC_FALSE, flg2 = PETSC_FALSE;
  PetscMPIInt            rank, size;
  PetscInt               i,*idx,nn,left,xs,xe,x,Xs,Xe,start,end,m,IXs,IXe;
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  if (dof < 1) SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Must have 1 or more degrees of freedom per node: %D",dof);
  if (s < 0) SETERRQ1(((PetscObject)da)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Stencil width cannot be negative: %D",s);

  dd->dim = 1;
  ierr = PetscMalloc(dof*sizeof(char*),&dd->fieldname);CHKERRQ(ierr);
  ierr = PetscMemzero(dd->fieldname,dof*sizeof(char*));CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject) da, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  dd->m = size;
  m     = dd->m;

  if (s > 0) {
    /* if no data is communicated it should be ok to have nothing on some processes */
    if (M < m) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"More processes than data points! %D %D",m,M);
    if ((M-1) < s) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %D %D",M-1,s);
  }

  /*
     Determine locally owned region
     xs is the first local node number, x is the number of local nodes
  */
  if (!lx) {
    ierr = PetscMalloc(m*sizeof(PetscInt), &dd->lx);CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(PETSC_NULL,"-da_partition_blockcomm",&flg1,PETSC_NULL);CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(PETSC_NULL,"-da_partition_nodes_at_end",&flg2,PETSC_NULL);CHKERRQ(ierr);
    if (flg1) {        /* block comm type distribution */
      xs = rank*M/m;
      x  = (rank + 1)*M/m - xs;
    } else if (flg2) { /* the extra nodes are evenly distributed across the last processes */
      x = (M + rank)/m;
      if (M/m == x) { xs = rank*x; }
      else          { xs = rank*(x-1) + (M+rank)%(x*m); }
    } else {           /* the extra nodes are evenly distributed across the first k processes */
      /* regular PETSc distribution */
      x = M/m + ((M % m) > rank);
      if (rank >= (M % m)) { xs = (rank * (PetscInt)(M/m) + M % m); }
      else                 { xs = rank * (PetscInt)(M/m) + rank; }
    }
    ierr = MPI_Allgather(&xs,1,MPIU_INT,dd->lx,1,MPIU_INT,comm);CHKERRQ(ierr);
    for (i=0; i<m-1; i++) dd->lx[i] = dd->lx[i+1] - dd->lx[i];
    dd->lx[m-1] = M - dd->lx[m-1];
  } else {
    x  = lx[rank];
    xs = 0;
    for (i=0; i<rank; i++) {
      xs += lx[i];
    }
    /* verify that the data the user provided is consistent */
    left = xs;
    for (i=rank; i<size; i++) {
      left += lx[i];
    }
    if (left != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Sum of lx across processors not equal to M %D %D",left,M);
  }
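
  /*
     For example (illustrative numbers only), with the default distribution
     and M = 10 nodes on m = 3 processes: M/m = 3 and M % m = 1, so rank 0
     gets x = 4 nodes starting at xs = 0 while ranks 1 and 2 get x = 3 nodes
     starting at xs = 4 and xs = 7; the gathered ownership array dd->lx
     becomes {4,3,3}.
  */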

  /*
     check if the scatter requires more than one process neighbor or wraps around
     the domain more than once
  */
  if ((x < s+o) & ((M > 1) | (bx == DMDA_BOUNDARY_PERIODIC))) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local x-width of domain x %D is smaller than stencil width s %D",x,s+o);

  /* From now on x,xs,xe,Xs,Xe are the exact location in the array */
  x  *= dof;
  xs *= dof;
  xe  = xs + x;

  /* determine ghost region (Xs) and region scattered into (IXs) */
  if (xs-sDist-oDist > 0) {
    Xs  = xs - sDist - oDist;
    IXs = xs - sDist - oDist;
  } else {
    if (bx) {
      Xs = xs - sDist;
    } else {
      Xs = 0;
    }
    IXs = 0;
  }
  if (xe+sDist+oDist <= M*dof) {
    Xe  = xe + sDist + oDist;
    IXe = xe + sDist + oDist;
  } else {
    if (bx) {
      Xe = xe + sDist;
    } else {
      Xe = M*dof;
    }
    IXe = M*dof;
  }

  if (bx == DMDA_BOUNDARY_PERIODIC) {
    Xs  = xs - sDist - oDist;
    Xe  = xe + sDist + oDist;
    IXs = xs - sDist - oDist;
    IXe = xe + sDist + oDist;
  }
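
  /*
     For example (illustrative numbers only): with M = 10, dof = 1, s = 2,
     o = 0 and bx == DMDA_BOUNDARY_NONE, a process owning xs = 4, xe = 7
     gets Xs = IXs = 2 and Xe = IXe = 9, while rank 0 with xs = 0 is clipped
     to Xs = IXs = 0; with DMDA_BOUNDARY_GHOSTED rank 0 instead keeps ghost
     space (Xs = -2) but still scatters only into IXs = 0.
  */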

  /* allocate the base parallel and sequential vectors */
  dd->Nlocal = x;
  ierr = VecCreateMPIWithArray(comm,dof,dd->Nlocal,PETSC_DECIDE,0,&global);CHKERRQ(ierr);
  dd->nlocal = (Xe-Xs);
  ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->nlocal,0,&local);CHKERRQ(ierr);

  /* Create Local to Global Vector Scatter Context */
  /* local to global inserts non-ghost point region into global */
  ierr = VecGetOwnershipRange(global,&start,&end);CHKERRQ(ierr);
  ierr = ISCreateStride(comm,x,start,1,&to);CHKERRQ(ierr);
  ierr = ISCreateStride(comm,x,xs-Xs,1,&from);CHKERRQ(ierr);
  ierr = VecScatterCreate(local,from,global,to,&ltog);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(da,ltog);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);

  /* Create Global to Local Vector Scatter Context */
  /* global to local must retrieve ghost points */
  ierr = ISCreateStride(comm,(IXe-IXs),IXs-Xs,1,&to);CHKERRQ(ierr);

  ierr = PetscMalloc((x+2*(sDist+oDist))*sizeof(PetscInt),&idx);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(da,(x+2*(sDist+oDist))*sizeof(PetscInt));CHKERRQ(ierr);

  for (i=0; i<IXs-Xs; i++) {idx[i] = -1;} /* prepend with -1s if needed for ghosted case */

  nn = IXs-Xs;
  if (bx == DMDA_BOUNDARY_PERIODIC) { /* handle all cases with wrap first */
    for (i=0; i<sDist+oDist; i++) {   /* left ghost points */
      if ((xs-sDist-oDist+i) >= 0) { idx[nn++] = xs-sDist-oDist+i; }
      else                         { idx[nn++] = M*dof+(xs-sDist-oDist+i); }
    }

    for (i=0; i<x; i++) { idx[nn++] = xs + i; } /* non-ghost points */

    for (i=0; i<sDist+oDist; i++) {   /* right ghost points */
      if ((xe+i) < M*dof) { idx[nn++] = xe+i; }
      else                { idx[nn++] = (xe+i) - M*dof; }
    }
  } else { /* now do all cases with no wrapping */
    if (0 <= xs-sDist-oDist) {for (i=0; i<sDist+oDist; i++) {idx[nn++] = xs - sDist - oDist + i;}}
    else                     {for (i=0; i<xs; i++)          {idx[nn++] = i;}}

    for (i=0; i<x; i++) { idx[nn++] = xs + i; }

    if ((xe+sDist+oDist) <= M*dof) {for (i=0; i<sDist+oDist; i++) {idx[nn++] = xe+i;}}
    else                           {for (i=xe; i<(M*dof); i++)    {idx[nn++] = i;}}
  }

  ierr = ISCreateGeneral(comm,nn-IXs+Xs,&idx[IXs-Xs],PETSC_COPY_VALUES,&from);CHKERRQ(ierr);
  ierr = VecScatterCreate(global,from,local,to,&gtol);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(da,to);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(da,from);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(da,gtol);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = VecDestroy(&local);CHKERRQ(ierr);
  ierr = VecDestroy(&global);CHKERRQ(ierr);

  dd->xs = xs; dd->xe = xe; dd->ys = 0; dd->ye = 1; dd->zs = 0; dd->ze = 1;
  dd->Xs = Xs; dd->Xe = Xe; dd->Ys = 0; dd->Ye = 1; dd->Zs = 0; dd->Ze = 1;

  dd->gtol      = gtol;
  dd->ltog      = ltog;
  dd->base      = xs;
  da->ops->view = DMView_DA_1d;

  /*
     Set the local to global ordering in the global vector, this allows use
     of VecSetValuesLocal().
  */
  for (i=0; i<Xe-IXe; i++) {idx[nn++] = -1;} /* pad with -1s if needed for ghosted case */

  ierr = ISLocalToGlobalMappingCreate(comm,nn,idx,PETSC_COPY_VALUES,&da->ltogmap);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingBlock(da->ltogmap,dd->w,&da->ltogmapb);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(da,da->ltogmap);CHKERRQ(ierr);

  dd->idx = idx;
  dd->Nl  = nn;

  PetscFunctionReturn(0);
}


#undef __FUNCT__
#define __FUNCT__ "DMDACreate1d"
/*@C
   DMDACreate1d - Creates an object that will manage the communication of one-dimensional
   regular array data that is distributed across some processors.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  bx - type of ghost cells at the boundary the array should have, if any. Use
        DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_GHOSTED, or DMDA_BOUNDARY_PERIODIC.
.  M - global dimension of the array (use -M to indicate that it may be set to a different value
       from the command line with -da_grid_x <M>)
.  dof - number of degrees of freedom per node
.  s - stencil width
-  lx - array containing the number of nodes in the X direction on each processor,
        or PETSC_NULL. If non-null, it must be of the same length as the number of processes in the MPI_Comm.

   Output Parameter:
.  da - the resulting distributed array object

   Options Database Key:
+  -da_view - Calls DMView() at the conclusion of DMDACreate1d()
.  -da_grid_x <nx> - number of grid points in x direction; can set if M < 0
.  -da_refine_x <rx> - refinement factor
-  -da_refine <n> - refine the DMDA n times before creating it, if M < 0

   Level: beginner

   Notes:
   The array data itself is NOT stored in the DMDA, it is stored in Vec objects;
   The appropriate vector objects can be obtained with calls to DMCreateGlobalVector()
   and DMCreateLocalVector() and calls to VecDuplicate() if more are needed.
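
   Example:
   A minimal sketch of typical usage (the grid size and stencil width are
   arbitrary illustrative values; error checking with CHKERRQ() omitted):
.vb
      DM  da;
      Vec g,l;
      DMDACreate1d(PETSC_COMM_WORLD,DMDA_BOUNDARY_NONE,-8,1,1,PETSC_NULL,&da);
      DMCreateGlobalVector(da,&g);
      DMCreateLocalVector(da,&l);
      /* ... fill g, then update the ghosted local vector ... */
      DMGlobalToLocalBegin(da,g,INSERT_VALUES,l);
      DMGlobalToLocalEnd(da,g,INSERT_VALUES,l);
      VecDestroy(&l);
      VecDestroy(&g);
      DMDestroy(&da);
.ve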

.keywords: distributed array, create, one-dimensional

.seealso: DMDestroy(), DMView(), DMDACreate2d(), DMDACreate3d(), DMGlobalToLocalBegin(), DMDASetRefinementFactor(),
          DMGlobalToLocalEnd(), DMLocalToGlobalBegin(), DMDALocalToLocalBegin(), DMDALocalToLocalEnd(), DMDAGetRefinementFactor(),
          DMDAGetInfo(), DMCreateGlobalVector(), DMCreateLocalVector(), DMDACreateNaturalVector(), DMLoad(), DMDAGetOwnershipRanges()

@*/
PetscErrorCode DMDACreate1d(MPI_Comm comm, DMDABoundaryType bx, PetscInt M, PetscInt dof, PetscInt s, const PetscInt lx[], DM *da)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = DMDACreate(comm, da);CHKERRQ(ierr);
  ierr = DMDASetDim(*da, 1);CHKERRQ(ierr);
  ierr = DMDASetSizes(*da, M, 1, 1);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = DMDASetNumProcs(*da, size, PETSC_DECIDE, PETSC_DECIDE);CHKERRQ(ierr);
  ierr = DMDASetBoundaryType(*da, bx, DMDA_BOUNDARY_NONE, DMDA_BOUNDARY_NONE);CHKERRQ(ierr);
  ierr = DMDASetDof(*da, dof);CHKERRQ(ierr);
  ierr = DMDASetStencilWidth(*da, s);CHKERRQ(ierr);
  ierr = DMDASetOwnershipRanges(*da, lx, PETSC_NULL, PETSC_NULL);CHKERRQ(ierr);
  /* This violates the behavior for other classes, but right now users expect negative dimensions to be handled this way */
  ierr = DMSetFromOptions(*da);CHKERRQ(ierr);
  ierr = DMSetUp(*da);CHKERRQ(ierr);
  ierr = DMView_DA_Private(*da);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}