/*
  Code for interpolating between grids represented by DMDAs
*/

/*
   For linear elements there are two branches of code that compute the interpolation; in current tests they produce
   identical results. The "new version" does not work for periodic domains, while the old one does, so the old version
   is compiled in by default. Change NEWVERSION to 1 to compile in the new version, which some consider cleaner.
   Once we are sure the two always produce identical results, the new version will be removed or merged in.
*/
#define NEWVERSION 0

#include <petsc/private/dmdaimpl.h>    /*I "petscdmda.h" I*/

/*@
   DMCreateInterpolationScale - Forms the scaling vector scale = 1/(R*1), the reciprocal of the row sums of the
     restriction R; with it, L = diag(scale)*R, and L*v is a coarse grid average of the nearby fine grid values of v.

  Input Parameters:
+      dac - DM that defines a coarse mesh
.      daf - DM that defines a fine mesh
-      mat - the restriction (or interpolation operator) from fine to coarse

  Output Parameter:
.    scale - the scaling vector

  Level: developer

.seealso: DMCreateInterpolation()

@*/
PetscErrorCode  DMCreateInterpolationScale(DM dac,DM daf,Mat mat,Vec *scale)
{
  PetscErrorCode ierr;
  Vec            fine;
  PetscScalar    one = 1.0;

  PetscFunctionBegin;
  ierr = DMCreateGlobalVector(daf,&fine);CHKERRQ(ierr);
  ierr = DMCreateGlobalVector(dac,scale);CHKERRQ(ierr);
  ierr = VecSet(fine,one);CHKERRQ(ierr);
  ierr = MatRestrict(mat,fine,*scale);CHKERRQ(ierr);
  ierr = VecDestroy(&fine);CHKERRQ(ierr);
  ierr = VecReciprocal(*scale);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
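/*
   A minimal usage sketch for DMCreateInterpolationScale() (illustration only, not compiled as part of this file;
   the matrix and vector names are placeholders). After building the interpolation between a coarse and a fine DMDA,
   a coarse grid average of a fine grid vector is obtained by restricting and then applying the pointwise scaling.
   DMCreateInterpolation() can also return this scaling vector directly through its final argument.

     Mat            R;
     Vec            scale,vfine,vcoarse;
     PetscErrorCode ierr;

     ierr = DMCreateInterpolation(dac,daf,&R,NULL);CHKERRQ(ierr);
     ierr = DMCreateInterpolationScale(dac,daf,R,&scale);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(daf,&vfine);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(dac,&vcoarse);CHKERRQ(ierr);
     ierr = MatRestrict(R,vfine,vcoarse);CHKERRQ(ierr);               // vcoarse = R*vfine
     ierr = VecPointwiseMult(vcoarse,vcoarse,scale);CHKERRQ(ierr);    // vcoarse = diag(1/(R*1))*R*vfine
*/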
/*
    Since the interpolation uses MATMAIJ for dof > 0 we convert requests for non-MATAIJ based matrices to MATAIJ.
    This is a bit of a hack; it is needed partly because -dm_mat_type sets the matrix type for both the operator
    matrices and the interpolation matrices, so that users can select MATAIJ-based matrix types for accelerators.
*/
static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype)
{
  PetscErrorCode ierr;
  PetscInt       i;
  char const     *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ};
  PetscBool      flg;

  PetscFunctionBegin;
  *outtype = MATAIJ;
  for (i=0; i<3; i++) {
    ierr = PetscStrbeginswith(intype,types[i],&flg);CHKERRQ(ierr);
    if (flg) {
      *outtype = intype;
      PetscFunctionReturn(0);
    }
  }
  PetscFunctionReturn(0);
}

PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A)
{
  PetscErrorCode         ierr;
  PetscInt               i,i_start,m_f,Mx;
  const PetscInt         *idx_f,*idx_c;
  PetscInt               m_ghost,m_ghost_c;
  PetscInt               row,col,i_start_ghost,mx,m_c,nc,ratio;
  PetscInt               i_c,i_start_c,i_start_ghost_c,cols[2],dof;
  PetscScalar            v[2],x;
  Mat                    mat;
  DMBoundaryType         bx;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  MatType                mattype;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);CHKERRQ(ierr);
  ierr = DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr);
  if (bx == DM_BOUNDARY_PERIODIC) {
    ratio = mx/Mx;
    if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    ratio = (mx-1)/(Mx-1);
    if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }

  ierr = DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMapping(daf,&ltog_f);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr);

  ierr = DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMapping(dac,&ltog_c);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr);

  /* create interpolation matrix */
  ierr = MatCreate(PetscObjectComm((PetscObject)dac),&mat);CHKERRQ(ierr);
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr);
  }
#endif
  ierr = MatSetSizes(mat,m_f,m_c,mx,Mx);CHKERRQ(ierr);
  ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr);
  ierr = MatSetType(mat,mattype);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(mat,2,NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL);CHKERRQ(ierr);

  /* loop over local fine grid nodes setting interpolation for those */
  if (!NEWVERSION) {

    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[i-i_start_ghost];

      i_c = (i/ratio);    /* coarse grid node to left of fine grid node */
      if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /*
         Only include those
interpolation points that are truly 139 nonzero. Note this is very important for final grid lines 140 in x direction; since they have no right neighbor 141 */ 142 x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio); 143 nc = 0; 144 /* one left and below; or we are right on it */ 145 col = (i_c-i_start_ghost_c); 146 cols[nc] = idx_c[col]; 147 v[nc++] = -x + 1.0; 148 /* one right? */ 149 if (i_c*ratio != i) { 150 cols[nc] = idx_c[col+1]; 151 v[nc++] = x; 152 } 153 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 154 } 155 156 } else { 157 PetscScalar *xi; 158 PetscInt li,nxi,n; 159 PetscScalar Ni[2]; 160 161 /* compute local coordinate arrays */ 162 nxi = ratio + 1; 163 ierr = PetscMalloc1(nxi,&xi);CHKERRQ(ierr); 164 for (li=0; li<nxi; li++) { 165 xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1)); 166 } 167 168 for (i=i_start; i<i_start+m_f; i++) { 169 /* convert to local "natural" numbering and then to PETSc global numbering */ 170 row = idx_f[(i-i_start_ghost)]; 171 172 i_c = (i/ratio); /* coarse grid node to left of fine grid node */ 173 if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 174 i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c); 175 176 /* remainders */ 177 li = i - ratio * (i/ratio); 178 if (i==mx-1) li = nxi-1; 179 180 /* corners */ 181 col = (i_c-i_start_ghost_c); 182 cols[0] = idx_c[col]; 183 Ni[0] = 1.0; 184 if ((li==0) || (li==nxi-1)) { 185 ierr = MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);CHKERRQ(ierr); 186 continue; 187 } 188 189 /* edges + interior */ 190 /* remainders */ 191 if (i==mx-1) i_c--; 192 193 col = (i_c-i_start_ghost_c); 194 cols[0] = idx_c[col]; /* one left and below; or we are right on it */ 195 cols[1] = idx_c[col+1]; 196 197 Ni[0] = 0.5*(1.0-xi[li]); 198 Ni[1] = 0.5*(1.0+xi[li]); 199 for (n=0; n<2; n++) { 200 if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1; 201 } 202 ierr = MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES);CHKERRQ(ierr); 203 } 204 ierr = PetscFree(xi);CHKERRQ(ierr); 205 } 206 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 207 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 208 ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 209 ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 210 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 211 ierr = MatDestroy(&mat);CHKERRQ(ierr); 212 PetscFunctionReturn(0); 213 } 214 215 PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A) 216 { 217 PetscErrorCode ierr; 218 PetscInt i,i_start,m_f,Mx; 219 const PetscInt *idx_f,*idx_c; 220 ISLocalToGlobalMapping ltog_f,ltog_c; 221 PetscInt m_ghost,m_ghost_c; 222 PetscInt row,col,i_start_ghost,mx,m_c,nc,ratio; 223 PetscInt i_c,i_start_c,i_start_ghost_c,cols[2],dof; 224 PetscScalar v[2],x; 225 Mat mat; 226 DMBoundaryType bx; 227 MatType mattype; 228 229 PetscFunctionBegin; 230 ierr = DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);CHKERRQ(ierr); 231 ierr = DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 232 if (bx == DM_BOUNDARY_PERIODIC) { 233 if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx); 234 ratio = mx/Mx; 235 if (ratio*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 236 } else { 237 if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be 
greater than 1",Mx); 238 ratio = (mx-1)/(Mx-1); 239 if (ratio*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 240 } 241 242 ierr = DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);CHKERRQ(ierr); 243 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);CHKERRQ(ierr); 244 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 245 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 246 247 ierr = DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);CHKERRQ(ierr); 248 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);CHKERRQ(ierr); 249 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 250 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 251 252 /* create interpolation matrix */ 253 ierr = MatCreate(PetscObjectComm((PetscObject)dac),&mat);CHKERRQ(ierr); 254 #if defined(PETSC_HAVE_CUDA) 255 /* 256 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 257 we don't want the original unconverted matrix copied to the GPU 258 */ 259 if (dof > 1) { 260 ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr); 261 } 262 #endif 263 ierr = MatSetSizes(mat,m_f,m_c,mx,Mx);CHKERRQ(ierr); 264 ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr); 265 ierr = MatSetType(mat,mattype);CHKERRQ(ierr); 266 ierr = MatSeqAIJSetPreallocation(mat,2,NULL);CHKERRQ(ierr); 267 ierr = MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL);CHKERRQ(ierr); 268 269 /* loop over local fine grid nodes setting interpolation for those*/ 270 for (i=i_start; i<i_start+m_f; i++) { 271 /* convert to local "natural" numbering and then to PETSc global numbering */ 272 row = idx_f[(i-i_start_ghost)]; 273 274 i_c = (i/ratio); /* coarse grid node to left of fine grid node */ 275 276 /* 277 Only include those interpolation points that are truly 278 nonzero. Note this is very important for final grid lines 279 in x direction; since they have no right neighbor 280 */ 281 x = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio); 282 nc = 0; 283 /* one left and below; or we are right on it */ 284 col = (i_c-i_start_ghost_c); 285 cols[nc] = idx_c[col]; 286 v[nc++] = -x + 1.0; 287 /* one right? 
*/ 288 if (i_c*ratio != i) { 289 cols[nc] = idx_c[col+1]; 290 v[nc++] = x; 291 } 292 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 293 } 294 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 295 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 296 ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 297 ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 298 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 299 ierr = MatDestroy(&mat);CHKERRQ(ierr); 300 ierr = PetscLogFlops(5.0*m_f);CHKERRQ(ierr); 301 PetscFunctionReturn(0); 302 } 303 304 PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A) 305 { 306 PetscErrorCode ierr; 307 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof; 308 const PetscInt *idx_c,*idx_f; 309 ISLocalToGlobalMapping ltog_f,ltog_c; 310 PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz; 311 PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj; 312 PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale; 313 PetscMPIInt size_c,size_f,rank_f; 314 PetscScalar v[4],x,y; 315 Mat mat; 316 DMBoundaryType bx,by; 317 MatType mattype; 318 319 PetscFunctionBegin; 320 ierr = DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);CHKERRQ(ierr); 321 ierr = DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 322 if (bx == DM_BOUNDARY_PERIODIC) { 323 if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx); 324 ratioi = mx/Mx; 325 if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 326 } else { 327 if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx); 328 ratioi = (mx-1)/(Mx-1); 329 if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 330 } 331 if (by == DM_BOUNDARY_PERIODIC) { 332 if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My); 333 ratioj = my/My; 334 if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My); 335 } else { 336 if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My); 337 ratioj = (my-1)/(My-1); 338 if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My); 339 } 340 341 ierr = DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);CHKERRQ(ierr); 342 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);CHKERRQ(ierr); 343 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 344 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 345 346 ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);CHKERRQ(ierr); 347 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);CHKERRQ(ierr); 348 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 349 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 350 351 /* 352 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 
     The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
     processors). Its effective length is hence 4 times its normal length; this is
     why col_scale multiplies the interpolation matrix column sizes.
     col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
     copy of the coarse vector. A bit of a hack, but hard to do better.

     In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
  */
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);CHKERRQ(ierr);
  col_scale = size_f/size_c;
  col_shift = Mx*My*(rank_f/size_c);

  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);CHKERRQ(ierr);
  for (j=j_start; j<j_start+n_f; j++) {
    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

      i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
      j_c = (j/ratioj);    /* coarse grid node below fine grid node */

      if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
      if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /*
         Only include those interpolation points that are truly
         nonzero.
Note this is very important for final grid lines 384 in x and y directions; since they have no right/top neighbors 385 */ 386 nc = 0; 387 /* one left and below; or we are right on it */ 388 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 389 cols[nc++] = col_shift + idx_c[col]; 390 /* one right and below */ 391 if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1]; 392 /* one left and above */ 393 if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c]; 394 /* one right and above */ 395 if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)]; 396 ierr = MatPreallocateSet(row,nc,cols,dnz,onz);CHKERRQ(ierr); 397 } 398 } 399 ierr = MatCreate(PetscObjectComm((PetscObject)daf),&mat);CHKERRQ(ierr); 400 #if defined(PETSC_HAVE_CUDA) 401 /* 402 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 403 we don't want the original unconverted matrix copied to the GPU 404 */ 405 if (dof > 1) { 406 ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr); 407 } 408 #endif 409 ierr = MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);CHKERRQ(ierr); 410 ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr); 411 ierr = MatSetType(mat,mattype);CHKERRQ(ierr); 412 ierr = MatSeqAIJSetPreallocation(mat,0,dnz);CHKERRQ(ierr); 413 ierr = MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);CHKERRQ(ierr); 414 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 415 416 /* loop over local fine grid nodes setting interpolation for those*/ 417 if (!NEWVERSION) { 418 419 for (j=j_start; j<j_start+n_f; j++) { 420 for (i=i_start; i<i_start+m_f; i++) { 421 /* convert to local "natural" numbering and then to PETSc global numbering */ 422 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 423 424 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 425 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 426 427 /* 428 Only include those interpolation points that are truly 429 nonzero. 
Note this is very important for final grid lines 430 in x and y directions; since they have no right/top neighbors 431 */ 432 x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi); 433 y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj); 434 435 nc = 0; 436 /* one left and below; or we are right on it */ 437 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 438 cols[nc] = col_shift + idx_c[col]; 439 v[nc++] = x*y - x - y + 1.0; 440 /* one right and below */ 441 if (i_c*ratioi != i) { 442 cols[nc] = col_shift + idx_c[col+1]; 443 v[nc++] = -x*y + x; 444 } 445 /* one left and above */ 446 if (j_c*ratioj != j) { 447 cols[nc] = col_shift + idx_c[col+m_ghost_c]; 448 v[nc++] = -x*y + y; 449 } 450 /* one right and above */ 451 if (j_c*ratioj != j && i_c*ratioi != i) { 452 cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)]; 453 v[nc++] = x*y; 454 } 455 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 456 } 457 } 458 459 } else { 460 PetscScalar Ni[4]; 461 PetscScalar *xi,*eta; 462 PetscInt li,nxi,lj,neta; 463 464 /* compute local coordinate arrays */ 465 nxi = ratioi + 1; 466 neta = ratioj + 1; 467 ierr = PetscMalloc1(nxi,&xi);CHKERRQ(ierr); 468 ierr = PetscMalloc1(neta,&eta);CHKERRQ(ierr); 469 for (li=0; li<nxi; li++) { 470 xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1)); 471 } 472 for (lj=0; lj<neta; lj++) { 473 eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1)); 474 } 475 476 /* loop over local fine grid nodes setting interpolation for those*/ 477 for (j=j_start; j<j_start+n_f; j++) { 478 for (i=i_start; i<i_start+m_f; i++) { 479 /* convert to local "natural" numbering and then to PETSc global numbering */ 480 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 481 482 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 483 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 484 485 /* remainders */ 486 li = i - ratioi * (i/ratioi); 487 if (i==mx-1) li = nxi-1; 488 lj = j - ratioj * (j/ratioj); 489 if (j==my-1) lj = neta-1; 490 491 /* corners */ 492 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 493 cols[0] = col_shift + idx_c[col]; /* left, below */ 494 Ni[0] = 1.0; 495 if ((li==0) || (li==nxi-1)) { 496 if ((lj==0) || (lj==neta-1)) { 497 ierr = MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);CHKERRQ(ierr); 498 continue; 499 } 500 } 501 502 /* edges + interior */ 503 /* remainders */ 504 if (i==mx-1) i_c--; 505 if (j==my-1) j_c--; 506 507 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 508 cols[0] = col_shift + idx_c[col]; /* left, below */ 509 cols[1] = col_shift + idx_c[col+1]; /* right, below */ 510 cols[2] = col_shift + idx_c[col+m_ghost_c]; /* left, above */ 511 cols[3] = col_shift + idx_c[col+(m_ghost_c+1)]; /* right, above */ 512 513 Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]); 514 Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]); 515 Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]); 516 Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]); 517 518 nc = 0; 519 if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1; 520 if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1; 521 if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1; 522 if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1; 523 524 ierr = MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES);CHKERRQ(ierr); 525 } 526 } 527 ierr = PetscFree(xi);CHKERRQ(ierr); 528 ierr = PetscFree(eta);CHKERRQ(ierr); 529 } 530 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 531 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 532 
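  /*
     Worked example of the interpolation weights above (added for clarity; it assumes the common refinement ratio
     ratioi == ratioj == 2, so x and y take only the values 0 and 0.5). The four values inserted per row factor as
         x*y - x - y + 1 = (1-x)*(1-y),   -x*y + x = x*(1-y),   -x*y + y = (1-x)*y,   x*y
     giving
         fine node coincident with a coarse node  (x=0,   y=0  ): weight 1.0 on that coarse node
         fine node on a coarse grid line in x     (x=0.5, y=0  ): weights 0.5, 0.5 on its two coarse neighbors
         fine node in the middle of a coarse cell (x=0.5, y=0.5): weights 0.25 on each of the four corners
  */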
ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 533 ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 534 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 535 ierr = MatDestroy(&mat);CHKERRQ(ierr); 536 PetscFunctionReturn(0); 537 } 538 539 /* 540 Contributed by Andrei Draganescu <aidraga@sandia.gov> 541 */ 542 PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A) 543 { 544 PetscErrorCode ierr; 545 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof; 546 const PetscInt *idx_c,*idx_f; 547 ISLocalToGlobalMapping ltog_f,ltog_c; 548 PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz; 549 PetscInt row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj; 550 PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale; 551 PetscMPIInt size_c,size_f,rank_f; 552 PetscScalar v[4]; 553 Mat mat; 554 DMBoundaryType bx,by; 555 MatType mattype; 556 557 PetscFunctionBegin; 558 ierr = DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);CHKERRQ(ierr); 559 ierr = DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 560 if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx); 561 if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My); 562 ratioi = mx/Mx; 563 ratioj = my/My; 564 if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x"); 565 if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y"); 566 if (ratioi != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 2"); 567 if (ratioj != 2) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 2"); 568 569 ierr = DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);CHKERRQ(ierr); 570 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);CHKERRQ(ierr); 571 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 572 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 573 574 ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);CHKERRQ(ierr); 575 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);CHKERRQ(ierr); 576 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 577 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 578 579 /* 580 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 581 The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the 582 processors). It's effective length is hence 4 times its normal length, this is 583 why the col_scale is multiplied by the interpolation matrix column sizes. 584 sol_shift allows each set of 1/4 processors do its own interpolation using ITS 585 copy of the coarse vector. A bit of a hack but you do better. 
586 587 In the standard case when size_f == size_c col_scale == 1 and col_shift == 0 588 */ 589 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);CHKERRQ(ierr); 590 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);CHKERRQ(ierr); 591 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);CHKERRQ(ierr); 592 col_scale = size_f/size_c; 593 col_shift = Mx*My*(rank_f/size_c); 594 595 ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);CHKERRQ(ierr); 596 for (j=j_start; j<j_start+n_f; j++) { 597 for (i=i_start; i<i_start+m_f; i++) { 598 /* convert to local "natural" numbering and then to PETSc global numbering */ 599 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 600 601 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 602 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 603 604 if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 605 j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c); 606 if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 607 i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c); 608 609 /* 610 Only include those interpolation points that are truly 611 nonzero. Note this is very important for final grid lines 612 in x and y directions; since they have no right/top neighbors 613 */ 614 nc = 0; 615 /* one left and below; or we are right on it */ 616 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 617 cols[nc++] = col_shift + idx_c[col]; 618 ierr = MatPreallocateSet(row,nc,cols,dnz,onz);CHKERRQ(ierr); 619 } 620 } 621 ierr = MatCreate(PetscObjectComm((PetscObject)daf),&mat);CHKERRQ(ierr); 622 #if defined(PETSC_HAVE_CUDA) 623 /* 624 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 625 we don't want the original unconverted matrix copied to the GPU 626 */ 627 if (dof > 1) { 628 ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr); 629 } 630 #endif 631 ierr = MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My);CHKERRQ(ierr); 632 ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr); 633 ierr = MatSetType(mat,mattype);CHKERRQ(ierr); 634 ierr = MatSeqAIJSetPreallocation(mat,0,dnz);CHKERRQ(ierr); 635 ierr = MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);CHKERRQ(ierr); 636 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 637 638 /* loop over local fine grid nodes setting interpolation for those*/ 639 for (j=j_start; j<j_start+n_f; j++) { 640 for (i=i_start; i<i_start+m_f; i++) { 641 /* convert to local "natural" numbering and then to PETSc global numbering */ 642 row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 643 644 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 645 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 646 nc = 0; 647 /* one left and below; or we are right on it */ 648 col = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 649 cols[nc] = col_shift + idx_c[col]; 650 v[nc++] = 1.0; 651 652 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 653 } 654 } 655 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 656 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 657 ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 658 ierr = 
MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 659 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 660 ierr = MatDestroy(&mat);CHKERRQ(ierr); 661 ierr = PetscLogFlops(13.0*m_f*n_f);CHKERRQ(ierr); 662 PetscFunctionReturn(0); 663 } 664 665 /* 666 Contributed by Jianming Yang <jianming-yang@uiowa.edu> 667 */ 668 PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A) 669 { 670 PetscErrorCode ierr; 671 PetscInt i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof; 672 const PetscInt *idx_c,*idx_f; 673 ISLocalToGlobalMapping ltog_f,ltog_c; 674 PetscInt m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz; 675 PetscInt row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol; 676 PetscInt i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale; 677 PetscMPIInt size_c,size_f,rank_f; 678 PetscScalar v[8]; 679 Mat mat; 680 DMBoundaryType bx,by,bz; 681 MatType mattype; 682 683 PetscFunctionBegin; 684 ierr = DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);CHKERRQ(ierr); 685 if (!Mx) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx); 686 if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My); 687 if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz); 688 ierr = DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 689 ratioi = mx/Mx; 690 ratioj = my/My; 691 ratiol = mz/Mz; 692 if (ratioi*Mx != mx) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x"); 693 if (ratioj*My != my) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y"); 694 if (ratiol*Mz != mz) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in z"); 695 if (ratioi != 2 && ratioi != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 1 or 2"); 696 if (ratioj != 2 && ratioj != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 1 or 2"); 697 if (ratiol != 2 && ratiol != 1) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in z must be 1 or 2"); 698 699 ierr = DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);CHKERRQ(ierr); 700 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);CHKERRQ(ierr); 701 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 702 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 703 704 ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);CHKERRQ(ierr); 705 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);CHKERRQ(ierr); 706 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 707 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 708 709 /* 710 Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA. 711 The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the 712 processors). It's effective length is hence 4 times its normal length, this is 713 why the col_scale is multiplied by the interpolation matrix column sizes. 
     col_shift allows each set of 1/4 of the processors to do its own interpolation using ITS
     copy of the coarse vector. A bit of a hack, but hard to do better.

     In the standard case, when size_f == size_c, col_scale == 1 and col_shift == 0.
  */
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f);CHKERRQ(ierr);
  col_scale = size_f/size_c;
  col_shift = Mx*My*Mz*(rank_f/size_c);

  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz);CHKERRQ(ierr);
  for (l=l_start; l<l_start+p_f; l++) {
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

        i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
        j_c = (j/ratioj);    /* coarse grid node below fine grid node */
        l_c = (l/ratiol);

        if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
        if (j_c < j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
        if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
    i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

        /*
           Only include those interpolation points that are truly
           nonzero.
Note this is very important for final grid lines 746 in x and y directions; since they have no right/top neighbors 747 */ 748 nc = 0; 749 /* one left and below; or we are right on it */ 750 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 751 cols[nc++] = col_shift + idx_c[col]; 752 ierr = MatPreallocateSet(row,nc,cols,dnz,onz);CHKERRQ(ierr); 753 } 754 } 755 } 756 ierr = MatCreate(PetscObjectComm((PetscObject)daf),&mat);CHKERRQ(ierr); 757 #if defined(PETSC_HAVE_CUDA) 758 /* 759 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 760 we don't want the original unconverted matrix copied to the GPU 761 */ 762 if (dof > 1) { 763 ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr); 764 } 765 #endif 766 ierr = MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz);CHKERRQ(ierr); 767 ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr); 768 ierr = MatSetType(mat,mattype);CHKERRQ(ierr); 769 ierr = MatSeqAIJSetPreallocation(mat,0,dnz);CHKERRQ(ierr); 770 ierr = MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);CHKERRQ(ierr); 771 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 772 773 /* loop over local fine grid nodes setting interpolation for those*/ 774 for (l=l_start; l<l_start+p_f; l++) { 775 for (j=j_start; j<j_start+n_f; j++) { 776 for (i=i_start; i<i_start+m_f; i++) { 777 /* convert to local "natural" numbering and then to PETSc global numbering */ 778 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 779 780 i_c = (i/ratioi); /* coarse grid node to left of fine grid node */ 781 j_c = (j/ratioj); /* coarse grid node below fine grid node */ 782 l_c = (l/ratiol); 783 nc = 0; 784 /* one left and below; or we are right on it */ 785 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 786 cols[nc] = col_shift + idx_c[col]; 787 v[nc++] = 1.0; 788 789 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 790 } 791 } 792 } 793 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 794 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 795 ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 796 ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 797 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 798 ierr = MatDestroy(&mat);CHKERRQ(ierr); 799 ierr = PetscLogFlops(13.0*m_f*n_f*p_f);CHKERRQ(ierr); 800 PetscFunctionReturn(0); 801 } 802 803 PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A) 804 { 805 PetscErrorCode ierr; 806 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l; 807 const PetscInt *idx_c,*idx_f; 808 ISLocalToGlobalMapping ltog_f,ltog_c; 809 PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz; 810 PetscInt row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok; 811 PetscInt i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c; 812 PetscInt l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c; 813 PetscInt l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz; 814 PetscScalar v[8],x,y,z; 815 Mat mat; 816 DMBoundaryType bx,by,bz; 817 MatType mattype; 818 819 PetscFunctionBegin; 820 ierr = DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);CHKERRQ(ierr); 821 ierr = DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 822 if (mx == Mx) { 823 ratioi = 1; 824 } else if (bx == DM_BOUNDARY_PERIODIC) { 825 if (!Mx) 
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx); 826 ratioi = mx/Mx; 827 if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 828 } else { 829 if (Mx < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx); 830 ratioi = (mx-1)/(Mx-1); 831 if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 832 } 833 if (my == My) { 834 ratioj = 1; 835 } else if (by == DM_BOUNDARY_PERIODIC) { 836 if (!My) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My); 837 ratioj = my/My; 838 if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My); 839 } else { 840 if (My < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My); 841 ratioj = (my-1)/(My-1); 842 if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My); 843 } 844 if (mz == Mz) { 845 ratiok = 1; 846 } else if (bz == DM_BOUNDARY_PERIODIC) { 847 if (!Mz) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz); 848 ratiok = mz/Mz; 849 if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz); 850 } else { 851 if (Mz < 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be greater than 1",Mz); 852 ratiok = (mz-1)/(Mz-1); 853 if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz); 854 } 855 856 ierr = DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);CHKERRQ(ierr); 857 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);CHKERRQ(ierr); 858 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 859 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 860 861 ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);CHKERRQ(ierr); 862 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);CHKERRQ(ierr); 863 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 864 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 865 866 /* create interpolation matrix, determining exact preallocation */ 867 ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);CHKERRQ(ierr); 868 /* loop over local fine grid nodes counting interpolating points */ 869 for (l=l_start; l<l_start+p_f; l++) { 870 for (j=j_start; j<j_start+n_f; j++) { 871 for (i=i_start; i<i_start+m_f; i++) { 872 /* convert to local "natural" numbering and then to PETSc global numbering */ 873 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 874 i_c = (i/ratioi); 875 j_c = (j/ratioj); 876 l_c = (l/ratiok); 877 if (l_c < l_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 878 l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c); 879 if (j_c < 
j_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 880 j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c); 881 if (i_c < i_start_ghost_c) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 882 i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c); 883 884 /* 885 Only include those interpolation points that are truly 886 nonzero. Note this is very important for final grid lines 887 in x and y directions; since they have no right/top neighbors 888 */ 889 nc = 0; 890 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 891 cols[nc++] = idx_c[col]; 892 if (i_c*ratioi != i) { 893 cols[nc++] = idx_c[col+1]; 894 } 895 if (j_c*ratioj != j) { 896 cols[nc++] = idx_c[col+m_ghost_c]; 897 } 898 if (l_c*ratiok != l) { 899 cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c]; 900 } 901 if (j_c*ratioj != j && i_c*ratioi != i) { 902 cols[nc++] = idx_c[col+(m_ghost_c+1)]; 903 } 904 if (j_c*ratioj != j && l_c*ratiok != l) { 905 cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; 906 } 907 if (i_c*ratioi != i && l_c*ratiok != l) { 908 cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; 909 } 910 if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) { 911 cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; 912 } 913 ierr = MatPreallocateSet(row,nc,cols,dnz,onz);CHKERRQ(ierr); 914 } 915 } 916 } 917 ierr = MatCreate(PetscObjectComm((PetscObject)dac),&mat);CHKERRQ(ierr); 918 #if defined(PETSC_HAVE_CUDA) 919 /* 920 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 921 we don't want the original unconverted matrix copied to the GPU 922 */ 923 if (dof > 1) { 924 ierr = MatPinToCPU(mat,PETSC_TRUE);CHKERRQ(ierr); 925 } 926 #endif 927 ierr = MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz);CHKERRQ(ierr); 928 ierr = ConvertToAIJ(dac->mattype,&mattype);CHKERRQ(ierr); 929 ierr = MatSetType(mat,mattype);CHKERRQ(ierr); 930 ierr = MatSeqAIJSetPreallocation(mat,0,dnz);CHKERRQ(ierr); 931 ierr = MatMPIAIJSetPreallocation(mat,0,dnz,0,onz);CHKERRQ(ierr); 932 ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr); 933 934 /* loop over local fine grid nodes setting interpolation for those*/ 935 if (!NEWVERSION) { 936 937 for (l=l_start; l<l_start+p_f; l++) { 938 for (j=j_start; j<j_start+n_f; j++) { 939 for (i=i_start; i<i_start+m_f; i++) { 940 /* convert to local "natural" numbering and then to PETSc global numbering */ 941 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 942 943 i_c = (i/ratioi); 944 j_c = (j/ratioj); 945 l_c = (l/ratiok); 946 947 /* 948 Only include those interpolation points that are truly 949 nonzero. Note this is very important for final grid lines 950 in x and y directions; since they have no right/top neighbors 951 */ 952 x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi); 953 y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj); 954 z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok); 955 956 nc = 0; 957 /* one left and below; or we are right on it */ 958 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c)); 959 960 cols[nc] = idx_c[col]; 961 v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.)); 962 963 if (i_c*ratioi != i) { 964 cols[nc] = idx_c[col+1]; 965 v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. 
- (2.0*z-1.)); 966 } 967 968 if (j_c*ratioj != j) { 969 cols[nc] = idx_c[col+m_ghost_c]; 970 v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.)); 971 } 972 973 if (l_c*ratiok != l) { 974 cols[nc] = idx_c[col+m_ghost_c*n_ghost_c]; 975 v[nc++] = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.)); 976 } 977 978 if (j_c*ratioj != j && i_c*ratioi != i) { 979 cols[nc] = idx_c[col+(m_ghost_c+1)]; 980 v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.)); 981 } 982 983 if (j_c*ratioj != j && l_c*ratiok != l) { 984 cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; 985 v[nc++] = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.)); 986 } 987 988 if (i_c*ratioi != i && l_c*ratiok != l) { 989 cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; 990 v[nc++] = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.)); 991 } 992 993 if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) { 994 cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; 995 v[nc++] = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.)); 996 } 997 ierr = MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES);CHKERRQ(ierr); 998 } 999 } 1000 } 1001 1002 } else { 1003 PetscScalar *xi,*eta,*zeta; 1004 PetscInt li,nxi,lj,neta,lk,nzeta,n; 1005 PetscScalar Ni[8]; 1006 1007 /* compute local coordinate arrays */ 1008 nxi = ratioi + 1; 1009 neta = ratioj + 1; 1010 nzeta = ratiok + 1; 1011 ierr = PetscMalloc1(nxi,&xi);CHKERRQ(ierr); 1012 ierr = PetscMalloc1(neta,&eta);CHKERRQ(ierr); 1013 ierr = PetscMalloc1(nzeta,&zeta);CHKERRQ(ierr); 1014 for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1)); 1015 for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1)); 1016 for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1)); 1017 1018 for (l=l_start; l<l_start+p_f; l++) { 1019 for (j=j_start; j<j_start+n_f; j++) { 1020 for (i=i_start; i<i_start+m_f; i++) { 1021 /* convert to local "natural" numbering and then to PETSc global numbering */ 1022 row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))]; 1023 1024 i_c = (i/ratioi); 1025 j_c = (j/ratioj); 1026 l_c = (l/ratiok); 1027 1028 /* remainders */ 1029 li = i - ratioi * (i/ratioi); 1030 if (i==mx-1) li = nxi-1; 1031 lj = j - ratioj * (j/ratioj); 1032 if (j==my-1) lj = neta-1; 1033 lk = l - ratiok * (l/ratiok); 1034 if (l==mz-1) lk = nzeta-1; 1035 1036 /* corners */ 1037 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c)); 1038 cols[0] = idx_c[col]; 1039 Ni[0] = 1.0; 1040 if ((li==0) || (li==nxi-1)) { 1041 if ((lj==0) || (lj==neta-1)) { 1042 if ((lk==0) || (lk==nzeta-1)) { 1043 ierr = MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES);CHKERRQ(ierr); 1044 continue; 1045 } 1046 } 1047 } 1048 1049 /* edges + interior */ 1050 /* remainders */ 1051 if (i==mx-1) i_c--; 1052 if (j==my-1) j_c--; 1053 if (l==mz-1) l_c--; 1054 1055 col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c)); 1056 cols[0] = idx_c[col]; /* one left and below; or we are right on it */ 1057 cols[1] = idx_c[col+1]; /* one right and below */ 1058 cols[2] = idx_c[col+m_ghost_c]; /* one left and above */ 1059 cols[3] = idx_c[col+(m_ghost_c+1)]; /* one right and above */ 1060 1061 cols[4] = idx_c[col+m_ghost_c*n_ghost_c]; /* one left and below and front; or we are right on it */ 1062 cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)]; /* one right and below, 
and front */ 1063 cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)]; /* one left and above and front*/ 1064 cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */ 1065 1066 Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]); 1067 Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]); 1068 Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]); 1069 Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]); 1070 1071 Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]); 1072 Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]); 1073 Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]); 1074 Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]); 1075 1076 for (n=0; n<8; n++) { 1077 if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1; 1078 } 1079 ierr = MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES);CHKERRQ(ierr); 1080 1081 } 1082 } 1083 } 1084 ierr = PetscFree(xi);CHKERRQ(ierr); 1085 ierr = PetscFree(eta);CHKERRQ(ierr); 1086 ierr = PetscFree(zeta);CHKERRQ(ierr); 1087 } 1088 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 1089 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 1090 ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1091 ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1092 1093 ierr = MatCreateMAIJ(mat,dof,A);CHKERRQ(ierr); 1094 ierr = MatDestroy(&mat);CHKERRQ(ierr); 1095 PetscFunctionReturn(0); 1096 } 1097 1098 PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale) 1099 { 1100 PetscErrorCode ierr; 1101 PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf; 1102 DMBoundaryType bxc,byc,bzc,bxf,byf,bzf; 1103 DMDAStencilType stc,stf; 1104 DM_DA *ddc = (DM_DA*)dac->data; 1105 1106 PetscFunctionBegin; 1107 PetscValidHeaderSpecific(dac,DM_CLASSID,1); 1108 PetscValidHeaderSpecific(daf,DM_CLASSID,2); 1109 PetscValidPointer(A,3); 1110 if (scale) PetscValidPointer(scale,4); 1111 1112 ierr = DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);CHKERRQ(ierr); 1113 ierr = DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);CHKERRQ(ierr); 1114 if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf); 1115 if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff); 1116 if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf); 1117 if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs"); 1118 if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs"); 1119 if (Mc < 2 && Mf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction"); 1120 if (dimc > 1 && Nc < 2 && Nf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction"); 1121 if (dimc > 2 && Pc < 2 && Pf > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction"); 1122 1123 if (ddc->interptype == DMDA_Q1) { 1124 if (dimc == 1) { 1125 ierr = DMCreateInterpolation_DA_1D_Q1(dac,daf,A);CHKERRQ(ierr); 1126 } else if (dimc == 2) { 1127 ierr = DMCreateInterpolation_DA_2D_Q1(dac,daf,A);CHKERRQ(ierr); 1128 } else if (dimc 
== 3) { 1129 ierr = DMCreateInterpolation_DA_3D_Q1(dac,daf,A);CHKERRQ(ierr); 1130 } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype); 1131 } else if (ddc->interptype == DMDA_Q0) { 1132 if (dimc == 1) { 1133 ierr = DMCreateInterpolation_DA_1D_Q0(dac,daf,A);CHKERRQ(ierr); 1134 } else if (dimc == 2) { 1135 ierr = DMCreateInterpolation_DA_2D_Q0(dac,daf,A);CHKERRQ(ierr); 1136 } else if (dimc == 3) { 1137 ierr = DMCreateInterpolation_DA_3D_Q0(dac,daf,A);CHKERRQ(ierr); 1138 } else SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype); 1139 } 1140 if (scale) { 1141 ierr = DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale);CHKERRQ(ierr); 1142 } 1143 PetscFunctionReturn(0); 1144 } 1145 1146 PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject) 1147 { 1148 PetscErrorCode ierr; 1149 PetscInt i,i_start,m_f,Mx,dof; 1150 const PetscInt *idx_f; 1151 ISLocalToGlobalMapping ltog_f; 1152 PetscInt m_ghost,m_ghost_c; 1153 PetscInt row,i_start_ghost,mx,m_c,nc,ratioi; 1154 PetscInt i_start_c,i_start_ghost_c; 1155 PetscInt *cols; 1156 DMBoundaryType bx; 1157 Vec vecf,vecc; 1158 IS isf; 1159 1160 PetscFunctionBegin; 1161 ierr = DMDAGetInfo(dac,0,&Mx,0,0,0,0,0,0,0,&bx,0,0,0);CHKERRQ(ierr); 1162 ierr = DMDAGetInfo(daf,0,&mx,0,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 1163 if (bx == DM_BOUNDARY_PERIODIC) { 1164 ratioi = mx/Mx; 1165 if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 1166 } else { 1167 ratioi = (mx-1)/(Mx-1); 1168 if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 1169 } 1170 1171 ierr = DMDAGetCorners(daf,&i_start,0,0,&m_f,0,0);CHKERRQ(ierr); 1172 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,0,0,&m_ghost,0,0);CHKERRQ(ierr); 1173 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 1174 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 1175 1176 ierr = DMDAGetCorners(dac,&i_start_c,0,0,&m_c,0,0);CHKERRQ(ierr); 1177 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,0,0,&m_ghost_c,0,0);CHKERRQ(ierr); 1178 1179 1180 /* loop over local fine grid nodes setting interpolation for those*/ 1181 nc = 0; 1182 ierr = PetscMalloc1(m_f,&cols);CHKERRQ(ierr); 1183 1184 1185 for (i=i_start_c; i<i_start_c+m_c; i++) { 1186 PetscInt i_f = i*ratioi; 1187 1188 if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost); 1189 1190 row = idx_f[(i_f-i_start_ghost)]; 1191 cols[nc++] = row; 1192 } 1193 1194 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 1195 ierr = ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);CHKERRQ(ierr); 1196 ierr = DMGetGlobalVector(dac,&vecc);CHKERRQ(ierr); 1197 ierr = DMGetGlobalVector(daf,&vecf);CHKERRQ(ierr); 1198 ierr = VecScatterCreate(vecf,isf,vecc,NULL,inject);CHKERRQ(ierr); 1199 ierr = DMRestoreGlobalVector(dac,&vecc);CHKERRQ(ierr); 1200 ierr = DMRestoreGlobalVector(daf,&vecf);CHKERRQ(ierr); 1201 ierr = ISDestroy(&isf);CHKERRQ(ierr); 1202 PetscFunctionReturn(0); 1203 } 1204 1205 PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM 
daf,VecScatter *inject) 1206 { 1207 PetscErrorCode ierr; 1208 PetscInt i,j,i_start,j_start,m_f,n_f,Mx,My,dof; 1209 const PetscInt *idx_c,*idx_f; 1210 ISLocalToGlobalMapping ltog_f,ltog_c; 1211 PetscInt m_ghost,n_ghost,m_ghost_c,n_ghost_c; 1212 PetscInt row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj; 1213 PetscInt i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c; 1214 PetscInt *cols; 1215 DMBoundaryType bx,by; 1216 Vec vecf,vecc; 1217 IS isf; 1218 1219 PetscFunctionBegin; 1220 ierr = DMDAGetInfo(dac,0,&Mx,&My,0,0,0,0,0,0,&bx,&by,0,0);CHKERRQ(ierr); 1221 ierr = DMDAGetInfo(daf,0,&mx,&my,0,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr); 1222 if (bx == DM_BOUNDARY_PERIODIC) { 1223 ratioi = mx/Mx; 1224 if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 1225 } else { 1226 ratioi = (mx-1)/(Mx-1); 1227 if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 1228 } 1229 if (by == DM_BOUNDARY_PERIODIC) { 1230 ratioj = my/My; 1231 if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My); 1232 } else { 1233 ratioj = (my-1)/(My-1); 1234 if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My); 1235 } 1236 1237 ierr = DMDAGetCorners(daf,&i_start,&j_start,0,&m_f,&n_f,0);CHKERRQ(ierr); 1238 ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,0,&m_ghost,&n_ghost,0);CHKERRQ(ierr); 1239 ierr = DMGetLocalToGlobalMapping(daf,<og_f);CHKERRQ(ierr); 1240 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 1241 1242 ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,0,&m_c,&n_c,0);CHKERRQ(ierr); 1243 ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,0,&m_ghost_c,&n_ghost_c,0);CHKERRQ(ierr); 1244 ierr = DMGetLocalToGlobalMapping(dac,<og_c);CHKERRQ(ierr); 1245 ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 1246 1247 /* loop over local fine grid nodes setting interpolation for those*/ 1248 nc = 0; 1249 ierr = PetscMalloc1(n_f*m_f,&cols);CHKERRQ(ierr); 1250 for (j=j_start_c; j<j_start_c+n_c; j++) { 1251 for (i=i_start_c; i<i_start_c+m_c; i++) { 1252 PetscInt i_f = i*ratioi,j_f = j*ratioj; 1253 if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 1254 j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost); 1255 if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\ 1256 i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost); 1257 row = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))]; 1258 cols[nc++] = row; 1259 } 1260 } 1261 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr); 1262 ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr); 1263 1264 ierr = ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);CHKERRQ(ierr); 1265 ierr = DMGetGlobalVector(dac,&vecc);CHKERRQ(ierr); 1266 ierr = DMGetGlobalVector(daf,&vecf);CHKERRQ(ierr); 1267 ierr = VecScatterCreate(vecf,isf,vecc,NULL,inject);CHKERRQ(ierr); 1268 ierr = 
PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject)
{
  PetscErrorCode         ierr;
  PetscInt               i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz;
  PetscInt               m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c;
  PetscInt               i_start_ghost,j_start_ghost,k_start_ghost;
  PetscInt               mx,my,mz,ratioi,ratioj,ratiok;
  PetscInt               i_start_c,j_start_c,k_start_c;
  PetscInt               m_c,n_c,p_c;
  PetscInt               i_start_ghost_c,j_start_ghost_c,k_start_ghost_c;
  PetscInt               row,nc,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               *cols;
  DMBoundaryType         bx,by,bz;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(dac,0,&Mx,&My,&Mz,0,0,0,0,0,&bx,&by,&bz,0);CHKERRQ(ierr);
  ierr = DMDAGetInfo(daf,0,&mx,&my,&mz,0,0,0,&dof,0,0,0,0,0);CHKERRQ(ierr);

  if (bx == DM_BOUNDARY_PERIODIC) {
    ratioi = mx/Mx;
    if (ratioi*Mx != mx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    ratioi = (mx-1)/(Mx-1);
    if (ratioi*(Mx-1) != mx-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }
  if (by == DM_BOUNDARY_PERIODIC) {
    ratioj = my/My;
    if (ratioj*My != my) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
  } else {
    ratioj = (my-1)/(My-1);
    if (ratioj*(My-1) != my-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
  }
  if (bz == DM_BOUNDARY_PERIODIC) {
    ratiok = mz/Mz;
    if (ratiok*Mz != mz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
  } else {
    ratiok = (mz-1)/(Mz-1);
    if (ratiok*(Mz-1) != mz-1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
  }

  ierr = DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMapping(daf,&ltog_f);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr);

  ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);CHKERRQ(ierr);
  ierr = DMGetLocalToGlobalMapping(dac,&ltog_c);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr);

  /* loop over local coarse grid nodes setting the injection for those */
  nc   = 0;
  ierr = PetscMalloc1(n_f*m_f*p_f,&cols);CHKERRQ(ierr);
  for (k=k_start_c; k<k_start_c+p_c; k++) {
    for (j=j_start_c; j<j_start_c+n_c; j++) {
      for (i=i_start_c; i<i_start_c+m_c; i++) {
        PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok;
        if (k_f < k_start_ghost || k_f >= k_start_ghost+p_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
                                                                           "k_c %D k_f %D fine ghost range [%D,%D]",k,k_f,k_start_ghost,k_start_ghost+p_ghost);
        if (j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
                                                                           "j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
        if (i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost) SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA "
                                                                           "i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
        row        = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
        cols[nc++] = row;
      }
    }
  }
  ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c);CHKERRQ(ierr);

  ierr = ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf);CHKERRQ(ierr);
  ierr = DMGetGlobalVector(dac,&vecc);CHKERRQ(ierr);
  ierr = DMGetGlobalVector(daf,&vecf);CHKERRQ(ierr);
  ierr = VecScatterCreate(vecf,isf,vecc,NULL,inject);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(dac,&vecc);CHKERRQ(ierr);
  ierr = DMRestoreGlobalVector(daf,&vecf);CHKERRQ(ierr);
  ierr = ISDestroy(&isf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat)
{
  PetscErrorCode  ierr;
  PetscInt        dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType  bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType stc,stf;
  VecScatter      inject = NULL;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dac,DM_CLASSID,1);
  PetscValidHeaderSpecific(daf,DM_CLASSID,2);
  PetscValidPointer(mat,3);

  ierr = DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);CHKERRQ(ierr);
  ierr = DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);CHKERRQ(ierr);
  if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
  if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
  if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf);
  if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
  if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
  if (Mc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
  if (dimc > 1 && Nc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
  if (dimc > 2 && Pc < 2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");

  if (dimc == 1) {
    ierr = DMCreateInjection_DA_1D(dac,daf,&inject);CHKERRQ(ierr);
  } else if (dimc == 2) {
    ierr = DMCreateInjection_DA_2D(dac,daf,&inject);CHKERRQ(ierr);
  } else if (dimc == 3) {
    ierr = DMCreateInjection_DA_3D(dac,daf,&inject);CHKERRQ(ierr);
  }
  ierr = MatCreateScatter(PetscObjectComm((PetscObject)inject),inject,mat);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&inject);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
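/*
   Usage sketch (editorial addition, not part of the library; grid sizes are hypothetical):
   the injection matrix produced above is normally obtained through DMCreateInjection() and
   applied with MatRestrict() to sample a fine grid vector at the coarse grid node locations.
   Cleanup calls are omitted for brevity.

     DM  dac,daf;
     Mat inject;
     Vec xf,xc;

     ierr = DMDACreate1d(PETSC_COMM_WORLD,DM_BOUNDARY_NONE,5,1,1,NULL,&dac);CHKERRQ(ierr);
     ierr = DMSetUp(dac);CHKERRQ(ierr);
     ierr = DMRefine(dac,PETSC_COMM_WORLD,&daf);CHKERRQ(ierr);        -- fine grid has 9 points
     ierr = DMCreateInjection(dac,daf,&inject);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(daf,&xf);CHKERRQ(ierr);
     ierr = DMCreateGlobalVector(dac,&xc);CHKERRQ(ierr);
     ierr = MatRestrict(inject,xf,xc);CHKERRQ(ierr);                  -- xc <- fine values at coarse nodes
*/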
/*@
   DMCreateAggregates - Deprecated, see DMDACreateAggregates.
@*/
PetscErrorCode DMCreateAggregates(DM dm1,DM dm2,Mat *mat)
{
  return DMDACreateAggregates(dm1,dm2,mat);
}

/*@
   DMDACreateAggregates - Gets the aggregates that map between
   grids associated with two DMDAs.

   Collective on dac

   Input Parameters:
+  dac - the coarse grid DMDA
-  daf - the fine grid DMDA

   Output Parameter:
.  rest - the restriction matrix (transpose of the projection matrix)

   Level: intermediate

   Note: This routine is not used by PETSc.
   It is not clear what its use case is and it may be removed in a future release.
   Users should contact petsc-maint@mcs.anl.gov if they plan to use it.

.seealso: DMRefine(), DMCreateInjection(), DMCreateInterpolation()
@*/
PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
{
  PetscErrorCode         ierr;
  PetscInt               dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
  PetscInt               dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType         bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType        stc,stf;
  PetscInt               i,j,l;
  PetscInt               i_start,j_start,l_start,m_f,n_f,p_f;
  PetscInt               i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
  const PetscInt         *idx_f;
  PetscInt               i_c,j_c,l_c;
  PetscInt               i_start_c,j_start_c,l_start_c,m_c,n_c,p_c;
  PetscInt               i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
  const PetscInt         *idx_c;
  PetscInt               d;
  PetscInt               a;
  PetscInt               max_agg_size;
  PetscInt               *fine_nodes;
  PetscScalar            *one_vec;
  PetscInt               fn_idx;
  ISLocalToGlobalMapping ltogmf,ltogmc;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(dac,DM_CLASSID,1,DMDA);
  PetscValidHeaderSpecificType(daf,DM_CLASSID,2,DMDA);
  PetscValidPointer(rest,3);

  ierr = DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc);CHKERRQ(ierr);
  ierr = DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf);CHKERRQ(ierr);
  if (dimc != dimf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
  if (dofc != doff) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
  if (sc != sf) SETERRQ2(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf);
  if (bxc != bxf || byc != byf || bzc != bzf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
  if (stc != stf) SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");

  if (Mf < Mc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Mc %D, Mf %D",Mc,Mf);
  if (Nf < Nc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Nc %D, Nf %D",Nc,Nf);
  if (Pf < Pc) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Pc %D, Pf %D",Pc,Pf);

  if (Pc < 0) Pc = 1;
  if (Pf < 0) Pf = 1;
  if (Nc < 0) Nc = 1;
  if (Nf < 0) Nf = 1;

  ierr = DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost);CHKERRQ(ierr);

  ierr = DMGetLocalToGlobalMapping(daf,&ltogmf);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f);CHKERRQ(ierr);

  ierr = DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c);CHKERRQ(ierr);

  ierr = DMGetLocalToGlobalMapping(dac,&ltogmc);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c);CHKERRQ(ierr);

  /*
     Basic idea is as follows. Here is a 2D example: suppose r_x, r_y are the ratios
     for dimension 1 and 2 respectively.
     Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1)
     and r_y*j and r_y*(j+1) will be grouped into the same coarse grid aggregate.
     Each specific dof on the fine grid is mapped to one dof on the coarse grid.
  */

  max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1);

  /* create the matrix that will contain the restriction operator */
  ierr = MatCreateAIJ(PetscObjectComm((PetscObject)daf),m_c*n_c*p_c*dofc,m_f*n_f*p_f*doff,Mc*Nc*Pc*dofc,Mf*Nf*Pf*doff,
                      max_agg_size,NULL,max_agg_size,NULL,rest);CHKERRQ(ierr);

  /* store nodes in the fine grid here */
  ierr = PetscMalloc2(max_agg_size,&one_vec,max_agg_size,&fine_nodes);CHKERRQ(ierr);
  for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0;

  /* loop over all coarse nodes */
  for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) {
    for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) {
      for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) {
        for (d=0; d<dofc; d++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d;

          fn_idx = 0;
          /* Corresponding fine points are all points (i_f, j_f, l_f) such that
             i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc
             (same for other dimensions)
          */
          for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) {
            for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) {
              for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) {
                fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d;
                fn_idx++;
              }
            }
          }
          /* add all these points to one aggregate */
          ierr = MatSetValues(*rest,1,&a,fn_idx,fine_nodes,one_vec,INSERT_VALUES);CHKERRQ(ierr);
        }
      }
    }
  }
  ierr = ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c);CHKERRQ(ierr);
  ierr = PetscFree2(one_vec,fine_nodes);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*rest,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*rest,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
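/*
   Illustrative sketch (editorial addition, not part of the library; sizes are hypothetical):
   how the aggregation loop above groups fine nodes.  In 1D with Mf = 9 fine points and
   Mc = 3 coarse points, the bounds i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc give

     coarse node 0 -> fine nodes 0,1,2
     coarse node 1 -> fine nodes 3,4,5
     coarse node 2 -> fine nodes 6,7,8

   so each row of the restriction matrix returned by DMDACreateAggregates() contains a 1.0
   in the columns of the fine nodes belonging to that coarse aggregate.
*/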