
/*
   Code for manipulating distributed regular 1d arrays in parallel.
   This file was created by Peter Mell   6/30/95
*/

#include <petsc/private/dmdaimpl.h>     /*I  "petscdmda.h"   I*/

#include <petscdraw.h>
static PetscErrorCode DMView_DA_1d(DM da,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscBool      iascii,isdraw,isbinary;
  DM_DA          *dd = (DM_DA*)da->data;
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  PetscBool      ismatlab;
#endif

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)da),&rank);CHKERRQ(ierr);

  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERMATLAB,&ismatlab);CHKERRQ(ierr);
#endif
  if (iascii) {
    PetscViewerFormat format;

    ierr = PetscViewerGetFormat(viewer, &format);CHKERRQ(ierr);
    if (format != PETSC_VIEWER_ASCII_VTK && format != PETSC_VIEWER_ASCII_VTK_CELL) {
      DMDALocalInfo info;
      ierr = DMDAGetLocalInfo(da,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushSynchronized(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"Processor [%d] M %D m %D w %D s %D\n",rank,dd->M,dd->m,dd->w,dd->s);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"X range of indices: %D %D\n",info.xs,info.xs+info.xm);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopSynchronized(viewer);CHKERRQ(ierr);
    } else {
      ierr = DMView_DA_VTK(da, viewer);CHKERRQ(ierr);
    }
  } else if (isdraw) {
    PetscDraw draw;
    double    ymin = -1,ymax = 1,xmin = -1,xmax = dd->M,x;
    PetscInt  base;
    char      node[10];
    PetscBool isnull;

    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
    if (isnull) PetscFunctionReturn(0);

    ierr = PetscDrawCheckResizedWindow(draw);CHKERRQ(ierr);
    ierr = PetscDrawClear(draw);CHKERRQ(ierr);
    ierr = PetscDrawSetCoordinates(draw,xmin,ymin,xmax,ymax);CHKERRQ(ierr);

    ierr = PetscDrawCollectiveBegin(draw);CHKERRQ(ierr);
    /* first processor draws all node lines */
    if (!rank) {
      PetscInt xmin_tmp;
      ymin = 0.0; ymax = 0.3;
      for (xmin_tmp=0; xmin_tmp < dd->M; xmin_tmp++) {
        ierr = PetscDrawLine(draw,(double)xmin_tmp,ymin,(double)xmin_tmp,ymax,PETSC_DRAW_BLACK);CHKERRQ(ierr);
      }
      xmin = 0.0; xmax = dd->M - 1;
      ierr = PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_BLACK);CHKERRQ(ierr);
      ierr = PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_BLACK);CHKERRQ(ierr);
    }
    ierr = PetscDrawCollectiveEnd(draw);CHKERRQ(ierr);
    ierr = PetscDrawFlush(draw);CHKERRQ(ierr);
    ierr = PetscDrawPause(draw);CHKERRQ(ierr);

    ierr = PetscDrawCollectiveBegin(draw);CHKERRQ(ierr);
    /* draw my box */
    ymin = 0; ymax = 0.3; xmin = dd->xs / dd->w; xmax = (dd->xe / dd->w) - 1;
    ierr = PetscDrawLine(draw,xmin,ymin,xmax,ymin,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmin,ymin,xmin,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmin,ymax,xmax,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);
    ierr = PetscDrawLine(draw,xmax,ymin,xmax,ymax,PETSC_DRAW_RED);CHKERRQ(ierr);
    /* Put in index numbers */
    base = dd->base / dd->w;
    for (x=xmin; x<=xmax; x++) {
      ierr = PetscSNPrintf(node,sizeof(node),"%d",(int)base++);CHKERRQ(ierr);
      ierr = PetscDrawString(draw,x,ymin,PETSC_DRAW_RED,node);CHKERRQ(ierr);
    }
    ierr = PetscDrawCollectiveEnd(draw);CHKERRQ(ierr);
    ierr = PetscDrawFlush(draw);CHKERRQ(ierr);
    ierr = PetscDrawPause(draw);CHKERRQ(ierr);
    ierr = PetscDrawSave(draw);CHKERRQ(ierr);
  } else if (isbinary) {
    ierr = DMView_DA_Binary(da,viewer);CHKERRQ(ierr);
#if defined(PETSC_HAVE_MATLAB_ENGINE)
  } else if (ismatlab) {
    ierr = DMView_DA_Matlab(da,viewer);CHKERRQ(ierr);
#endif
  }
  PetscFunctionReturn(0);
}


PetscErrorCode DMSetUp_DA_1D(DM da)
{
  DM_DA            *dd    = (DM_DA*)da->data;
  const PetscInt   M      = dd->M;
  const PetscInt   dof    = dd->w;
  const PetscInt   s      = dd->s;
  const PetscInt   sDist  = s;  /* stencil distance in points */
  const PetscInt   *lx    = dd->lx;
  DMBoundaryType   bx     = dd->bx;
  MPI_Comm         comm;
  Vec              local, global;
  VecScatter       gtol;
  IS               to, from;
  PetscBool        flg1 = PETSC_FALSE, flg2 = PETSC_FALSE;
  PetscMPIInt      rank, size;
  PetscInt         i,*idx,nn,left,xs,xe,x,Xs,Xe,start,m,IXs,IXe;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject) da, &comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  dd->p = 1;
  dd->n = 1;
  dd->m = size;
  m     = dd->m;

  if (s > 0) {
    /* if not communicating data then should be ok to have nothing on some processes */
    if (M < m) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"More processes than data points! %D %D",m,M);
    if ((M-1) < s && size > 1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Array is too small for stencil! %D %D",M-1,s);
  }

  /*
     Determine locally owned region
     xs is the first local node number, x is the number of local nodes
  */
  if (!lx) {
    ierr = PetscMalloc1(m, &dd->lx);CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(((PetscObject)da)->options,((PetscObject)da)->prefix,"-da_partition_blockcomm",&flg1,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsGetBool(((PetscObject)da)->options,((PetscObject)da)->prefix,"-da_partition_nodes_at_end",&flg2,NULL);CHKERRQ(ierr);
    if (flg1) {        /* Block Comm type Distribution */
      xs = rank*M/m;
      x  = (rank + 1)*M/m - xs;
    } else if (flg2) { /* The odd nodes are evenly distributed across last nodes */
      x = (M + rank)/m;
      if (M/m == x) xs = rank*x;
      else          xs = rank*(x-1) + (M+rank)%(x*m);
    } else {           /* The odd nodes are evenly distributed across the first k nodes */
      /* Regular PETSc Distribution */
      x = M/m + ((M % m) > rank);
      if (rank >= (M % m)) xs = (rank * (PetscInt)(M/m) + M % m);
      else                 xs = rank * (PetscInt)(M/m) + rank;
    }
    ierr = MPI_Allgather(&xs,1,MPIU_INT,dd->lx,1,MPIU_INT,comm);CHKERRQ(ierr);
    for (i=0; i<m-1; i++) dd->lx[i] = dd->lx[i+1] - dd->lx[i];
    dd->lx[m-1] = M - dd->lx[m-1];
  } else {
    x  = lx[rank];
    xs = 0;
    for (i=0; i<rank; i++) xs += lx[i];
    /* verify that data user provided is consistent */
    left = xs;
    for (i=rank; i<size; i++) left += lx[i];
    if (left != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Sum of lx across processors not equal to M %D %D",left,M);
  }
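
  /*
     Worked example of the default ("Regular PETSc") distribution above: with M = 10 grid
     points on m = 4 processes, x = M/m + ((M % m) > rank) gives local sizes 3,3,2,2 and
     starting indices xs = 0,3,6,8, i.e. the M % m leftover points go to the lowest ranks.
  */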

  /*
     check if the scatter requires more than one process neighbor or wraps around
     the domain more than once
  */
  if ((x < s) & ((M > 1) | (bx == DM_BOUNDARY_PERIODIC))) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local x-width of domain x %D is smaller than stencil width s %D",x,s);

  xe = xs + x;

  /* determine ghost region (Xs) and region scattered into (IXs) */
  if (xs-sDist > 0) {
    Xs  = xs - sDist;
    IXs = xs - sDist;
  } else {
    if (bx) Xs = xs - sDist;
    else Xs = 0;
    IXs = 0;
  }
  if (xe+sDist <= M) {
    Xe  = xe + sDist;
    IXe = xe + sDist;
  } else {
    if (bx) Xe = xe + sDist;
    else Xe = M;
    IXe = M;
  }

  if (bx == DM_BOUNDARY_PERIODIC || bx == DM_BOUNDARY_MIRROR) {
    Xs  = xs - sDist;
    Xe  = xe + sDist;
    IXs = xs - sDist;
    IXe = xe + sDist;
  }
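
  /*
     For example, with M = 10, s = 2, and DM_BOUNDARY_NONE, the process owning [3,6) gets
     Xs = IXs = 1 and Xe = IXe = 8, while the process owning [0,3) gets Xs = IXs = 0 because
     the ghost region is clipped at the physical boundary.  With DM_BOUNDARY_GHOSTED the
     same process keeps Xs = -2 (ghost slots exist past the boundary) but IXs remains 0,
     so nothing is scattered into those slots.
  */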

  /* allocate the base parallel and sequential vectors */
  dd->Nlocal = dof*x;
  ierr       = VecCreateMPIWithArray(comm,dof,dd->Nlocal,PETSC_DECIDE,NULL,&global);CHKERRQ(ierr);
  dd->nlocal = dof*(Xe-Xs);
  ierr       = VecCreateSeqWithArray(PETSC_COMM_SELF,dof,dd->nlocal,NULL,&local);CHKERRQ(ierr);

  ierr = VecGetOwnershipRange(global,&start,NULL);CHKERRQ(ierr);

  /* Create Global to Local Vector Scatter Context */
  /* global to local must retrieve ghost points */
  ierr = ISCreateStride(comm,dof*(IXe-IXs),dof*(IXs-Xs),1,&to);CHKERRQ(ierr);

  ierr = PetscMalloc1(x+2*sDist,&idx);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory((PetscObject)da,(x+2*(sDist))*sizeof(PetscInt));CHKERRQ(ierr);

  for (i=0; i<IXs-Xs; i++) idx[i] = -1; /* prepend with -1s if needed for ghosted case */

  nn = IXs-Xs;
  if (bx == DM_BOUNDARY_PERIODIC) { /* Handle all cases with periodic first */
    for (i=0; i<sDist; i++) {  /* Left ghost points */
      if ((xs-sDist+i)>=0) idx[nn++] = xs-sDist+i;
      else                 idx[nn++] = M+(xs-sDist+i);
    }

    for (i=0; i<x; i++) idx[nn++] = xs + i;  /* Non-ghost points */

    for (i=0; i<sDist; i++) { /* Right ghost points */
      if ((xe+i)<M) idx[nn++] = xe+i;
      else          idx[nn++] = (xe+i) - M;
    }
  } else if (bx == DM_BOUNDARY_MIRROR) { /* Handle the mirror-boundary case */
    for (i=0; i<(sDist); i++) {  /* Left ghost points */
      if ((xs-sDist+i)>=0) idx[nn++] = xs-sDist+i;
      else                 idx[nn++] = sDist - i;
    }

    for (i=0; i<x; i++) idx[nn++] = xs + i;  /* Non-ghost points */

    for (i=0; i<(sDist); i++) { /* Right ghost points */
      if ((xe+i)<M) idx[nn++] = xe+i;
      else          idx[nn++] = M - (i + 2);
    }
  } else {      /* Now do all cases with no periodicity */
    if (0 <= xs-sDist) {
      for (i=0; i<sDist; i++) idx[nn++] = xs - sDist + i;
    } else {
      for (i=0; i<xs; i++) idx[nn++] = i;
    }

    for (i=0; i<x; i++) idx[nn++] = xs + i;

    if ((xe+sDist)<=M) {
      for (i=0; i<sDist; i++) idx[nn++] = xe+i;
    } else {
      for (i=xe; i<M; i++) idx[nn++] = i;
    }
  }
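
  /*
     For example, with M = 10, s = 2, and DM_BOUNDARY_PERIODIC, the process owning [0,3)
     builds idx = {8, 9, 0, 1, 2, 3, 4}: the left ghost points wrap around to the end of
     the domain and the right ghost points continue past xe.
  */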

  ierr = ISCreateBlock(comm,dof,nn-IXs+Xs,&idx[IXs-Xs],PETSC_USE_POINTER,&from);CHKERRQ(ierr);
  ierr = VecScatterCreate(global,from,local,to,&gtol);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)da,(PetscObject)gtol);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = VecDestroy(&local);CHKERRQ(ierr);
  ierr = VecDestroy(&global);CHKERRQ(ierr);

  dd->xs = dof*xs; dd->xe = dof*xe; dd->ys = 0; dd->ye = 1; dd->zs = 0; dd->ze = 1;
  dd->Xs = dof*Xs; dd->Xe = dof*Xe; dd->Ys = 0; dd->Ye = 1; dd->Zs = 0; dd->Ze = 1;

  dd->gtol      = gtol;
  dd->base      = dof*xs;
  da->ops->view = DMView_DA_1d;

  /*
     Set the local to global ordering in the global vector, this allows use
     of VecSetValuesLocal().
  */
  for (i=0; i<Xe-IXe; i++) idx[nn++] = -1; /* pad with -1s if needed for ghosted case */

  ierr = ISLocalToGlobalMappingCreate(comm,dof,nn,idx,PETSC_OWN_POINTER,&da->ltogmap);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)da,(PetscObject)da->ltogmap);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}


/*@C
   DMDACreate1d - Creates an object that will manage the communication of one-dimensional
   regular array data that is distributed across some processors.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  bx - type of ghost cells at the boundary the array should have, if any. Use
          DM_BOUNDARY_NONE, DM_BOUNDARY_GHOSTED, or DM_BOUNDARY_PERIODIC.
.  M - global dimension of the array (if M is negative it may be set
          from the command line with -da_grid_x <M>)
.  dof - number of degrees of freedom per node
.  s - stencil width
-  lx - array containing the number of nodes in the X direction on each processor,
        or NULL. If non-null, its length must equal the number of processes in the MPI_Comm.

   Output Parameter:
.  da - the resulting distributed array object

   Options Database Key:
+  -dm_view - Calls DMView() at the conclusion of DMDACreate1d()
.  -da_grid_x <nx> - number of grid points in x direction; can set if M < 0
.  -da_refine_x <rx> - refinement factor
-  -da_refine <n> - refine the DMDA n times before creating it, if M < 0

   Level: beginner

   Notes:
   The array data itself is NOT stored in the DMDA; it is stored in Vec objects.
   The appropriate vector objects can be obtained with calls to DMCreateGlobalVector()
   and DMCreateLocalVector(), and calls to VecDuplicate() if more are needed.

   You must call DMSetUp() after this call before using this DM.

   If you wish to use the options database to change values in the DMDA, call DMSetFromOptions()
   after this call but before DMSetUp().

.keywords: distributed array, create, one-dimensional

.seealso: DMDestroy(), DMView(), DMDACreate2d(), DMDACreate3d(), DMGlobalToLocalBegin(), DMDASetRefinementFactor(),
          DMGlobalToLocalEnd(), DMLocalToGlobalBegin(), DMLocalToLocalBegin(), DMLocalToLocalEnd(), DMDAGetRefinementFactor(),
          DMDAGetInfo(), DMCreateGlobalVector(), DMCreateLocalVector(), DMDACreateNaturalVector(), DMLoad(), DMDAGetOwnershipRanges()

@*/
PetscErrorCode DMDACreate1d(MPI_Comm comm, DMBoundaryType bx, PetscInt M, PetscInt dof, PetscInt s, const PetscInt lx[], DM *da)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = DMDACreate(comm, da);CHKERRQ(ierr);
  ierr = DMSetDimension(*da, 1);CHKERRQ(ierr);
  ierr = DMDASetSizes(*da, M, 1, 1);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = DMDASetNumProcs(*da, size, PETSC_DECIDE, PETSC_DECIDE);CHKERRQ(ierr);
  ierr = DMDASetBoundaryType(*da, bx, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE);CHKERRQ(ierr);
  ierr = DMDASetDof(*da, dof);CHKERRQ(ierr);
  ierr = DMDASetStencilWidth(*da, s);CHKERRQ(ierr);
  ierr = DMDASetOwnershipRanges(*da, lx, NULL, NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
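
/*
   A minimal usage sketch for DMDACreate1d (illustrative only; the communicator, grid size 128,
   dof 1 and stencil width 1 are arbitrary choices, and the usual CHKERRQ error checking is
   assumed).  Per the notes above, DMSetFromOptions() is optional but DMSetUp() must be called
   before the DM is used:

     DM  da;
     Vec g;

     ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,128,1,1,NULL,&da);CHKERRQ(ierr);
     ierr = DMSetFromOptions(da);CHKERRQ(ierr);
     ierr = DMSetUp(da);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(da,&g);CHKERRQ(ierr);
     ierr = VecDestroy(&g);CHKERRQ(ierr);
     ierr = DMDestroy(&da);CHKERRQ(ierr);
*/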