/*
  Code for interpolating between grids represented by DMDAs
*/

/*
   For linear elements there are two branches of code to compute the interpolation. They should compute the same results but may not. The "new version" does
   not work for periodic domains, the old does. Change NEWVERSION to 1 to compile in the new version. Eventually when we are sure the two produce identical results
   we will remove/merge the new version. Based on current tests, these both produce the same results. We are leaving NEWVERSION for now in the code since some
   consider it cleaner, but old version is turned on since it handles periodic case.
*/
#define NEWVERSION 0

#include <petsc/private/dmdaimpl.h> /*I "petscdmda.h" I*/

/*
   Since the interpolation uses MATMAIJ for dof > 0 we convert request for non-MATAIJ based matrices to MATAIJ.
   This is a bit of a hack, the reason for it is partially because -dm_mat_type defines the
   matrix type for both the operator matrices and the interpolation matrices so that users
   can select matrix types of base MATAIJ for accelerators
*/
/*
   ConvertToAIJ - maps a requested matrix type onto an AIJ flavor

   Input Parameter:
.  intype - the requested matrix type

   Output Parameter:
.  outtype - intype itself when it already begins with MATAIJ/MATSEQAIJ/MATMPIAIJ
             (prefix match, so GPU variants such as "aijcusparse" pass through);
             otherwise the generic MATAIJ
*/
static PetscErrorCode ConvertToAIJ(MatType intype,MatType *outtype)
{
  PetscInt   i;
  char const *types[3] = {MATAIJ,MATSEQAIJ,MATMPIAIJ};
  PetscBool  flg;

  PetscFunctionBegin;
  *outtype = MATAIJ; /* default when no prefix matches */
  for (i=0; i<3; i++) {
    PetscCall(PetscStrbeginswith(intype,types[i],&flg));
    if (flg) {
      *outtype = intype;
      PetscFunctionReturn(0);
    }
  }
  PetscFunctionReturn(0);
}

/*
   DMCreateInterpolation_DA_1D_Q1 - builds the linear (Q1) interpolation matrix from the
   coarse DMDA dac to the fine DMDA daf; the per-node matrix is wrapped as a MAIJ matrix
   for dof components and returned in A.
*/
PetscErrorCode DMCreateInterpolation_DA_1D_Q1(DM dac,DM daf,Mat *A)
{
  PetscInt               i,i_start,m_f,Mx;
  const PetscInt         *idx_f,*idx_c;
  PetscInt               m_ghost,m_ghost_c;
  PetscInt               row,col,i_start_ghost,mx,m_c,nc,ratio;
  PetscInt               i_c,i_start_c,i_start_ghost_c,cols[2],dof;
  PetscScalar            v[2],x;
  Mat                    mat;
  DMBoundaryType         bx;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  MatType                mattype;

  PetscFunctionBegin;
PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL)); 55 PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 56 if (bx == DM_BOUNDARY_PERIODIC) { 57 ratio = mx/Mx; 58 PetscCheckFalse(ratio*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 59 } else { 60 ratio = (mx-1)/(Mx-1); 61 PetscCheckFalse(ratio*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 62 } 63 64 PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL)); 65 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL)); 66 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 67 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 68 69 PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL)); 70 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL)); 71 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 72 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 73 74 /* create interpolation matrix */ 75 PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat)); 76 #if defined(PETSC_HAVE_CUDA) 77 /* 78 Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU 79 we don't want the original unconverted matrix copied to the GPU 80 */ 81 if (dof > 1) { 82 PetscCall(MatBindToCPU(mat,PETSC_TRUE)); 83 } 84 #endif 85 PetscCall(MatSetSizes(mat,m_f,m_c,mx,Mx)); 86 PetscCall(ConvertToAIJ(dac->mattype,&mattype)); 87 PetscCall(MatSetType(mat,mattype)); 88 PetscCall(MatSeqAIJSetPreallocation(mat,2,NULL)); 89 PetscCall(MatMPIAIJSetPreallocation(mat,2,NULL,1,NULL)); 90 91 /* loop over local fine grid nodes setting interpolation for those*/ 92 if (!NEWVERSION) { 93 94 for (i=i_start; i<i_start+m_f; i++) { 95 /* convert to local "natural" numbering and then to PETSc global numbering */ 96 
      row = idx_f[i-i_start_ghost];

      i_c = (i/ratio);    /* coarse grid node to left of fine grid node */
      PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /*
         Only include those interpolation points that are truly
         nonzero. Note this is very important for final grid lines
         in x direction; since they have no right neighbor
      */
      x  = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio); /* fractional offset in [0,1) within the coarse cell */
      nc = 0;
      /* one left and below; or we are right on it */
      col      = (i_c-i_start_ghost_c);
      cols[nc] = idx_c[col];
      v[nc++]  = -x + 1.0;
      /* one right? */
      if (i_c*ratio != i) {
        cols[nc] = idx_c[col+1];
        v[nc++]  = x;
      }
      PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
    }

  } else {
    /* NEWVERSION: evaluate the 1D linear nodal basis on the reference element [-1,1] */
    PetscScalar *xi;
    PetscInt    li,nxi,n;
    PetscScalar Ni[2];

    /* compute local coordinate arrays */
    nxi = ratio + 1;
    PetscCall(PetscMalloc1(nxi,&xi));
    for (li=0; li<nxi; li++) {
      xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
    }

    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[(i-i_start_ghost)];

      i_c = (i/ratio);    /* coarse grid node to left of fine grid node */
      PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /* remainders */
      li = i - ratio * (i/ratio);
      if (i==mx-1) li = nxi-1;

      /* corners: fine node coincides with a coarse node, weight is exactly 1 */
      col     = (i_c-i_start_ghost_c);
      cols[0] = idx_c[col];
      Ni[0]   = 1.0;
      if ((li==0) || (li==nxi-1)) {
        PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES));
        continue;
      }

      /* edges + interior */
      /* remainders */
      if (i==mx-1) i_c--;

      col     = (i_c-i_start_ghost_c);
      cols[0] = idx_c[col]; /* one left and below; or we are right on it */
      cols[1] = idx_c[col+1];

      Ni[0] = 0.5*(1.0-xi[li]);
      Ni[1] = 0.5*(1.0+xi[li]);
      for (n=0; n<2; n++) {
        /* a negative column index tells MatSetValues to ignore the (zero) entry */
        if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
      }
      PetscCall(MatSetValues(mat,1,&row,2,cols,Ni,INSERT_VALUES));
    }
    PetscCall(PetscFree(xi));
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatCreateMAIJ(mat,dof,A)); /* expand the scalar stencil to dof interleaved components */
  PetscCall(MatDestroy(&mat));
  PetscFunctionReturn(0);
}

/*
   DMCreateInterpolation_DA_1D_Q0 - builds the interpolation matrix from the coarse DMDA dac
   to the fine DMDA daf for piecewise-constant (Q0) elements, returned in A as a MAIJ matrix
   for dof components.

   NOTE(review): the fill loop below uses the same distance-weighted (linear) coefficients as
   the Q1 version rather than a single unit weight per row — presumably intentional, but worth
   confirming against the Q0 variants for 2D/3D in this file, which insert v = 1.0 only.
*/
PetscErrorCode DMCreateInterpolation_DA_1D_Q0(DM dac,DM daf,Mat *A)
{
  PetscInt               i,i_start,m_f,Mx;
  const PetscInt         *idx_f,*idx_c;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,m_ghost_c;
  PetscInt               row,col,i_start_ghost,mx,m_c,nc,ratio;
  PetscInt               i_c,i_start_c,i_start_ghost_c,cols[2],dof;
  PetscScalar            v[2],x;
  Mat                    mat;
  DMBoundaryType         bx;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  if (bx == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
    ratio = mx/Mx;
    PetscCheckFalse(ratio*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    PetscCheckFalse(Mx < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
    ratio = (mx-1)/(Mx-1);
    PetscCheckFalse(ratio*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /* create interpolation matrix */
  PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f,m_c,mx,Mx));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,2,NULL));
  PetscCall(MatMPIAIJSetPreallocation(mat,2,NULL,0,NULL));

  /* loop over local fine grid nodes setting interpolation for those*/
  for (i=i_start; i<i_start+m_f; i++) {
    /* convert to local "natural" numbering and then to PETSc global numbering */
    row = idx_f[(i-i_start_ghost)];

    i_c = (i/ratio);    /* coarse grid node to left of fine grid node */

    /*
       Only include those interpolation points that are truly
       nonzero. Note this is very important for final grid lines
       in x direction; since they have no right neighbor
    */
    x  = ((PetscReal)(i - i_c*ratio))/((PetscReal)ratio);
    nc = 0;
    /* one left and below; or we are right on it */
    col      = (i_c-i_start_ghost_c);
    cols[nc] = idx_c[col];
    v[nc++]  = -x + 1.0;
    /* one right? */
    if (i_c*ratio != i) {
      cols[nc] = idx_c[col+1];
      v[nc++]  = x;
    }
    PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscCall(PetscLogFlops(5.0*m_f));
  PetscFunctionReturn(0);
}

/*
   DMCreateInterpolation_DA_2D_Q1 - builds the bilinear (Q1) interpolation matrix from the
   coarse DMDA dac to the fine DMDA daf, returned in A as a MAIJ matrix for dof components.
   Supports the coarse DMDA living on a subset (size_f/size_c) of the fine DMDA's processes.
*/
PetscErrorCode DMCreateInterpolation_DA_2D_Q1(DM dac,DM daf,Mat *A)
{
  PetscErrorCode         ierr;
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
  PetscInt               row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
  PetscInt               i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
  PetscMPIInt            size_c,size_f,rank_f;
  PetscScalar            v[4],x,y;
  Mat                    mat;
  DMBoundaryType         bx,by;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  if (bx == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
    ratioi = mx/Mx;
    PetscCheckFalse(ratioi*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    PetscCheckFalse(Mx < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
    ratioi = (mx-1)/(Mx-1);
    PetscCheckFalse(ratioi*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }
  if (by == DM_BOUNDARY_PERIODIC) {
    PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
    ratioj = my/My;
    PetscCheckFalse(ratioj*My != my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
  } else {
    PetscCheckFalse(My < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
    ratioj = (my-1)/(My-1);
    PetscCheckFalse(ratioj*(My-1) != my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /*
     Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
     The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
     processors). It's effective length is hence 4 times its normal length, this is
     why the col_scale is multiplied by the interpolation matrix column sizes.
     col_shift allows each set of 1/4 processors do its own interpolation using ITS
     copy of the coarse vector. A bit of a hack but you do better.

     In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
  */
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f));
  col_scale = size_f/size_c;
  col_shift = Mx*My*(rank_f/size_c);

  /* first pass: count nonzeros per row so the matrix can be preallocated exactly */
  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);PetscCall(ierr);
  for (j=j_start; j<j_start+n_f; j++) {
    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

      i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
      j_c = (j/ratioj);    /* coarse grid node below fine grid node */

      PetscCheckFalse(j_c < j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
      PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /*
         Only include those interpolation points that are truly
         nonzero. Note this is very important for final grid lines
         in x and y directions; since they have no right/top neighbors
      */
      nc = 0;
      /* one left and below; or we are right on it */
      col        = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
      cols[nc++] = col_shift + idx_c[col];
      /* one right and below */
      if (i_c*ratioi != i) cols[nc++] = col_shift + idx_c[col+1];
      /* one left and above */
      if (j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+m_ghost_c];
      /* one right and above */
      if (i_c*ratioi != i && j_c*ratioj != j) cols[nc++] = col_shift + idx_c[col+(m_ghost_c+1)];
      PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz));
    }
  }
  PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz));
  PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz));
  ierr = MatPreallocateFinalize(dnz,onz);PetscCall(ierr);

  /* loop over local fine grid nodes setting interpolation for those*/
  if (!NEWVERSION) {

    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

        i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
        j_c = (j/ratioj);    /* coarse grid node below fine grid node */

        /*
           Only include those interpolation points that are truly
           nonzero. Note this is very important for final grid lines
           in x and y directions; since they have no right/top neighbors
        */
        x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
        y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);

        nc = 0;
        /* one left and below; or we are right on it */
        col      = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[nc] = col_shift + idx_c[col];
        v[nc++]  = x*y - x - y + 1.0; /* bilinear weight (1-x)(1-y) expanded */
        /* one right and below */
        if (i_c*ratioi != i) {
          cols[nc] = col_shift + idx_c[col+1];
          v[nc++]  = -x*y + x;
        }
        /* one left and above */
        if (j_c*ratioj != j) {
          cols[nc] = col_shift + idx_c[col+m_ghost_c];
          v[nc++]  = -x*y + y;
        }
        /* one right and above */
        if (j_c*ratioj != j && i_c*ratioi != i) {
          cols[nc] = col_shift + idx_c[col+(m_ghost_c+1)];
          v[nc++]  = x*y;
        }
        PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
      }
    }

  } else {
    /* NEWVERSION: evaluate the bilinear nodal basis on the reference element [-1,1]^2 */
    PetscScalar Ni[4];
    PetscScalar *xi,*eta;
    PetscInt    li,nxi,lj,neta;

    /* compute local coordinate arrays */
    nxi  = ratioi + 1;
    neta = ratioj + 1;
    PetscCall(PetscMalloc1(nxi,&xi));
    PetscCall(PetscMalloc1(neta,&eta));
    for (li=0; li<nxi; li++) {
      xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
    }
    for (lj=0; lj<neta; lj++) {
      eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
    }

    /* loop over local fine grid nodes setting interpolation for those*/
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

        i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
        j_c = (j/ratioj);    /* coarse grid node below fine grid node */

        /* remainders */
        li = i - ratioi * (i/ratioi);
        if (i==mx-1) li = nxi-1;
        lj = j - ratioj * (j/ratioj);
        if (j==my-1) lj = neta-1;

        /* corners: fine node coincides with a coarse node, weight is exactly 1 */
        col     = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[0] = col_shift + idx_c[col]; /* left, below */
        Ni[0]   = 1.0;
        if ((li==0) || (li==nxi-1)) {
          if ((lj==0) || (lj==neta-1)) {
            PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES));
            continue;
          }
        }

        /* edges + interior */
        /* remainders */
        if (i==mx-1) i_c--;
        if (j==my-1) j_c--;

        col     = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[0] = col_shift + idx_c[col];                /* left, below */
        cols[1] = col_shift + idx_c[col+1];              /* right, below */
        cols[2] = col_shift + idx_c[col+m_ghost_c];      /* left, above */
        cols[3] = col_shift + idx_c[col+(m_ghost_c+1)];  /* right, above */

        Ni[0] = 0.25*(1.0-xi[li])*(1.0-eta[lj]);
        Ni[1] = 0.25*(1.0+xi[li])*(1.0-eta[lj]);
        Ni[2] = 0.25*(1.0-xi[li])*(1.0+eta[lj]);
        Ni[3] = 0.25*(1.0+xi[li])*(1.0+eta[lj]);

        nc = 0;
        /* a negative column index tells MatSetValues to ignore the (zero) entry */
        if (PetscAbsScalar(Ni[0])<1.0e-32) cols[0]=-1;
        if (PetscAbsScalar(Ni[1])<1.0e-32) cols[1]=-1;
        if (PetscAbsScalar(Ni[2])<1.0e-32) cols[2]=-1;
        if (PetscAbsScalar(Ni[3])<1.0e-32) cols[3]=-1;

        PetscCall(MatSetValues(mat,1,&row,4,cols,Ni,INSERT_VALUES));
      }
    }
    PetscCall(PetscFree(xi));
    PetscCall(PetscFree(eta));
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscFunctionReturn(0);
}

/*
   Contributed by Andrei Draganescu <aidraga@sandia.gov>
*/
/*
   DMCreateInterpolation_DA_2D_Q0 - builds the piecewise-constant (Q0) injection matrix
   from the coarse DMDA dac to the fine DMDA daf (coarsening factor fixed at 2 in each
   direction), returned in A as a MAIJ matrix for dof components.
*/
PetscErrorCode DMCreateInterpolation_DA_2D_Q0(DM dac,DM daf,Mat *A)
{
  PetscErrorCode         ierr;
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c,*dnz,*onz;
  PetscInt               row,col,i_start_ghost,j_start_ghost,cols[4],mx,m_c,my,nc,ratioi,ratioj;
  PetscInt               i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c,col_shift,col_scale;
  PetscMPIInt            size_c,size_f,rank_f;
  PetscScalar            v[4];
  Mat                    mat;
  DMBoundaryType         bx,by;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* this Q0 variant only supports an exact 2:1 refinement in each direction */
  PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
  PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
  ratioi = mx/Mx;
  ratioj = my/My;
  PetscCheckFalse(ratioi*Mx != mx,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
  PetscCheckFalse(ratioj*My != my,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
  PetscCheckFalse(ratioi != 2,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 2");
  PetscCheckFalse(ratioj != 2,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 2");

  /* owned and ghosted corners plus local-to-global maps for the fine grid */
  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  /* same information for the coarse grid */
  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /*
     Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
     The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
     processors). It's effective length is hence 4 times its normal length, this is
     why the col_scale is multiplied by the interpolation matrix column sizes.
     col_shift allows each set of 1/4 processors do its own interpolation using ITS
     copy of the coarse vector. A bit of a hack but you do better.

     In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
  */
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f));
  col_scale = size_f/size_c;
  col_shift = Mx*My*(rank_f/size_c);

  /* first pass: count nonzeros per row (exactly one here) for exact preallocation */
  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f,col_scale*m_c*n_c,dnz,onz);PetscCall(ierr);
  for (j=j_start; j<j_start+n_f; j++) {
    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

      i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
      j_c = (j/ratioj);    /* coarse grid node below fine grid node */

      PetscCheckFalse(j_c < j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
      PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

      /*
         Only include those interpolation points that are truly
         nonzero. Note this is very important for final grid lines
         in x and y directions; since they have no right/top neighbors
      */
      nc = 0;
      /* one left and below; or we are right on it */
      col        = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
      cols[nc++] = col_shift + idx_c[col];
      PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz));
    }
  }
  PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f*n_f,col_scale*m_c*n_c,mx*my,col_scale*Mx*My));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz));
  PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz));
  ierr = MatPreallocateFinalize(dnz,onz);PetscCall(ierr);

  /* loop over local fine grid nodes setting interpolation for those*/
  for (j=j_start; j<j_start+n_f; j++) {
    for (i=i_start; i<i_start+m_f; i++) {
      /* convert to local "natural" numbering and then to PETSc global numbering */
      row = idx_f[(m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

      i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
      j_c = (j/ratioj);    /* coarse grid node below fine grid node */
      nc  = 0;
      /* one left and below; or we are right on it */
      col      = (m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
      cols[nc] = col_shift + idx_c[col];
      v[nc++]  = 1.0; /* Q0: each fine cell simply copies the value of its coarse parent */

      PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
    }
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscCall(PetscLogFlops(13.0*m_f*n_f));
  PetscFunctionReturn(0);
}

/*
   Contributed by Jianming Yang <jianming-yang@uiowa.edu>
*/
/*
   DMCreateInterpolation_DA_3D_Q0 - builds the piecewise-constant (Q0) injection matrix
   from the coarse DMDA dac to the fine DMDA daf in 3D (coarsening factor 1 or 2 per
   direction), returned in A as a MAIJ matrix for dof components.
*/
PetscErrorCode DMCreateInterpolation_DA_3D_Q0(DM dac,DM daf,Mat *A)
{
  PetscErrorCode         ierr;
  PetscInt               i,j,l,i_start,j_start,l_start,m_f,n_f,p_f,Mx,My,Mz,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c,nc,*dnz,*onz;
  PetscInt               row,col,i_start_ghost,j_start_ghost,l_start_ghost,cols[8],mx,m_c,my,n_c,mz,p_c,ratioi,ratioj,ratiol;
  PetscInt               i_c,j_c,l_c,i_start_c,j_start_c,l_start_c,i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,col_shift,col_scale;
  PetscMPIInt            size_c,size_f,rank_f;
  PetscScalar            v[8];
  Mat                    mat;
  DMBoundaryType         bx,by,bz;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL));
  PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
  PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
  PetscCheck(Mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  ratioi = mx/Mx;
  ratioj = my/My;
  ratiol = mz/Mz;
  PetscCheckFalse(ratioi*Mx != mx,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in x");
  PetscCheckFalse(ratioj*My != my,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in y");
  PetscCheckFalse(ratiol*Mz != mz,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Fine grid points must be multiple of coarse grid points in z");
  PetscCheckFalse(ratioi != 2 && ratioi != 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in x must be 1 or 2");
  PetscCheckFalse(ratioj != 2 && ratioj != 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in y must be 1 or 2");
  PetscCheckFalse(ratiol != 2 && ratiol != 1,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_WRONG,"Coarsening factor in z must be 1 or 2");

  /* owned and ghosted corners plus local-to-global maps for the fine grid */
  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  /* same information for the coarse grid */
  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /*
     Used for handling a coarse DMDA that lives on 1/4 the processors of the fine DMDA.
     The coarse vector is then duplicated 4 times (each time it lives on 1/4 of the
     processors). It's effective length is hence 4 times its normal length, this is
     why the col_scale is multiplied by the interpolation matrix column sizes.
     col_shift allows each set of 1/4 processors do its own interpolation using ITS
     copy of the coarse vector. A bit of a hack but you do better.

     In the standard case when size_f == size_c col_scale == 1 and col_shift == 0
  */
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)dac),&size_c));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)daf),&size_f));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)daf),&rank_f));
  col_scale = size_f/size_c;
  col_shift = Mx*My*Mz*(rank_f/size_c);

  /* first pass: count nonzeros per row (exactly one here) for exact preallocation */
  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)daf),m_f*n_f*p_f,col_scale*m_c*n_c*p_c,dnz,onz);PetscCall(ierr);
  for (l=l_start; l<l_start+p_f; l++) {
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

        i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
        j_c = (j/ratioj);    /* coarse grid node below fine grid node */
        l_c = (l/ratiol);

        PetscCheckFalse(l_c < l_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  l_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
        PetscCheckFalse(j_c < j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  j_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
        PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\n\
  i_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

        /*
           Only include those interpolation points that are truly
           nonzero. Note this is very important for final grid lines
           in x and y directions; since they have no right/top neighbors
        */
        nc = 0;
        /* one left and below; or we are right on it */
        col        = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[nc++] = col_shift + idx_c[col];
        PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz));
      }
    }
  }
  PetscCall(MatCreate(PetscObjectComm((PetscObject)daf),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f*n_f*p_f,col_scale*m_c*n_c*p_c,mx*my*mz,col_scale*Mx*My*Mz));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz));
  PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz));
  ierr = MatPreallocateFinalize(dnz,onz);PetscCall(ierr);

  /* loop over local fine grid nodes setting interpolation for those*/
  for (l=l_start; l<l_start+p_f; l++) {
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

        i_c = (i/ratioi);    /* coarse grid node to left of fine grid node */
        j_c = (j/ratioj);    /* coarse grid node below fine grid node */
        l_c = (l/ratiol);
        nc  = 0;
        /* one left and below; or we are right on it */
        col      = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[nc] = col_shift + idx_c[col];
        v[nc++]  = 1.0; /* Q0: each fine cell copies the value of its coarse parent */

        PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
      }
    }
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));
  /* wrap the scalar interpolation in a MAIJ matrix so the same stencil is applied to all dof components */
  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscCall(PetscLogFlops(13.0*m_f*n_f*p_f));
  PetscFunctionReturn(0);
}

/*
   DMCreateInterpolation_DA_3D_Q1 - Builds the 3d trilinear (Q1) interpolation matrix
   from the coarse DMDA dac to the fine DMDA daf.

   The scalar (one dof) interpolation is assembled first, with exact preallocation,
   and is then wrapped with MatCreateMAIJ() so the returned *A applies the same
   stencil to every dof.  See the file header: NEWVERSION selects between two
   implementations that are believed to produce identical results; the old one
   (NEWVERSION == 0) is active because it also handles periodic domains.
*/
PetscErrorCode DMCreateInterpolation_DA_3D_Q1(DM dac,DM daf,Mat *A)
{
  PetscErrorCode         ierr;
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof,l;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c,Mz,mz;
  PetscInt               row,col,i_start_ghost,j_start_ghost,cols[8],mx,m_c,my,nc,ratioi,ratioj,ratiok;
  PetscInt               i_c,j_c,i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
  PetscInt               l_start,p_f,l_start_ghost,p_ghost,l_start_c,p_c;
  PetscInt               l_start_ghost_c,p_ghost_c,l_c,*dnz,*onz;
  PetscScalar            v[8],x,y,z;
  Mat                    mat;
  DMBoundaryType         bx,by,bz;
  MatType                mattype;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratio in x: periodic domains have no duplicated end node, so mx/Mx must divide evenly;
     otherwise the shared boundary nodes give (mx-1)/(Mx-1) */
  if (mx == Mx) {
    ratioi = 1;
  } else if (bx == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be positive",Mx);
    ratioi = mx/Mx;
    PetscCheckFalse(ratioi*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    PetscCheckFalse(Mx < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of x coarse grid points %D must be greater than 1",Mx);
    ratioi = (mx-1)/(Mx-1);
    PetscCheckFalse(ratioi*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }
  /* refinement ratio in y */
  if (my == My) {
    ratioj = 1;
  } else if (by == DM_BOUNDARY_PERIODIC) {
    PetscCheck(My,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be positive",My);
    ratioj = my/My;
    PetscCheckFalse(ratioj*My != my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
  } else {
    PetscCheckFalse(My < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of y coarse grid points %D must be greater than 1",My);
    ratioj = (my-1)/(My-1);
    PetscCheckFalse(ratioj*(My-1) != my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
  }
  /* refinement ratio in z */
  if (mz == Mz) {
    ratiok = 1;
  } else if (bz == DM_BOUNDARY_PERIODIC) {
    PetscCheck(Mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be positive",Mz);
    ratiok = mz/Mz;
    PetscCheckFalse(ratiok*Mz != mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D Mz %D",mz,Mz);
  } else {
    PetscCheckFalse(Mz < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Number of z coarse grid points %D must be greater than 1",Mz);
    ratiok = (mz-1)/(Mz-1);
    PetscCheckFalse(ratiok*(Mz-1) != mz-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /* create interpolation matrix, determining exact preallocation */
  ierr = MatPreallocateInitialize(PetscObjectComm((PetscObject)dac),m_f*n_f*p_f,m_c*n_c*p_c,dnz,onz);PetscCall(ierr);
  /* loop over local fine grid nodes counting interpolating points */
  for (l=l_start; l<l_start+p_f; l++) {
    for (j=j_start; j<j_start+n_f; j++) {
      for (i=i_start; i<i_start+m_f; i++) {
        /* convert to local "natural" numbering and then to PETSc global numbering */
        row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];
        /* coarse grid node at or to the "lower-left" of the fine node in each direction */
        i_c = (i/ratioi);
        j_c = (j/ratioj);
        l_c = (l/ratiok);
        PetscCheckFalse(l_c < l_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\nl_start %D l_c %D l_start_ghost_c %D",l_start,l_c,l_start_ghost_c);
        PetscCheckFalse(j_c < j_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\nj_start %D j_c %D j_start_ghost_c %D",j_start,j_c,j_start_ghost_c);
        PetscCheckFalse(i_c < i_start_ghost_c,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_start %D i_c %D i_start_ghost_c %D",i_start,i_c,i_start_ghost_c);

        /*
           Only include those interpolation points that are truly
           nonzero. Note this is very important for final grid lines
           in x and y directions; since they have no right/top neighbors
        */
        nc  = 0;
        col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
        cols[nc++] = idx_c[col];
        if (i_c*ratioi != i) {
          cols[nc++] = idx_c[col+1];
        }
        if (j_c*ratioj != j) {
          cols[nc++] = idx_c[col+m_ghost_c];
        }
        if (l_c*ratiok != l) {
          cols[nc++] = idx_c[col+m_ghost_c*n_ghost_c];
        }
        if (j_c*ratioj != j && i_c*ratioi != i) {
          cols[nc++] = idx_c[col+(m_ghost_c+1)];
        }
        if (j_c*ratioj != j && l_c*ratiok != l) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
        }
        if (i_c*ratioi != i && l_c*ratiok != l) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
        }
        if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
          cols[nc++] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
        }
        PetscCall(MatPreallocateSet(row,nc,cols,dnz,onz));
      }
    }
  }
  PetscCall(MatCreate(PetscObjectComm((PetscObject)dac),&mat));
#if defined(PETSC_HAVE_CUDA)
  /*
     Temporary hack: Since the MAIJ matrix must be converted to AIJ before being used by the GPU
     we don't want the original unconverted matrix copied to the GPU
  */
  if (dof > 1) {
    PetscCall(MatBindToCPU(mat,PETSC_TRUE));
  }
#endif
  PetscCall(MatSetSizes(mat,m_f*n_f*p_f,m_c*n_c*p_c,mx*my*mz,Mx*My*Mz));
  PetscCall(ConvertToAIJ(dac->mattype,&mattype));
  PetscCall(MatSetType(mat,mattype));
  PetscCall(MatSeqAIJSetPreallocation(mat,0,dnz));
  PetscCall(MatMPIAIJSetPreallocation(mat,0,dnz,0,onz));
  ierr = MatPreallocateFinalize(dnz,onz);PetscCall(ierr);

  /* loop over local fine grid nodes setting interpolation for those*/
  if (!NEWVERSION) {

    for (l=l_start; l<l_start+p_f; l++) {
      for (j=j_start; j<j_start+n_f; j++) {
        for (i=i_start; i<i_start+m_f; i++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

          i_c = (i/ratioi);
          j_c = (j/ratioj);
          l_c = (l/ratiok);

          /*
             Only include those interpolation points that are truly
             nonzero. Note this is very important for final grid lines
             in x and y directions; since they have no right/top neighbors
          */
          /* (x,y,z) in [0,1) is the fine node's offset within its coarse cell */
          x = ((PetscReal)(i - i_c*ratioi))/((PetscReal)ratioi);
          y = ((PetscReal)(j - j_c*ratioj))/((PetscReal)ratioj);
          z = ((PetscReal)(l - l_c*ratiok))/((PetscReal)ratiok);

          nc = 0;
          /* one left and below; or we are right on it */
          col = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));

          /* standard trilinear shape-function weights on [-1,1]^3, 2*x-1 maps x into [-1,1) */
          cols[nc] = idx_c[col];
          v[nc++]  = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));

          if (i_c*ratioi != i) {
            cols[nc] = idx_c[col+1];
            v[nc++]  = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (j_c*ratioj != j) {
            cols[nc] = idx_c[col+m_ghost_c];
            v[nc++]  = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (l_c*ratiok != l) {
            cols[nc] = idx_c[col+m_ghost_c*n_ghost_c];
            v[nc++]  = .125*(1. - (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (j_c*ratioj != j && i_c*ratioi != i) {
            cols[nc] = idx_c[col+(m_ghost_c+1)];
            v[nc++]  = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. - (2.0*z-1.));
          }

          if (j_c*ratioj != j && l_c*ratiok != l) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];
            v[nc++]  = .125*(1. - (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (i_c*ratioi != i && l_c*ratiok != l) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+1)];
            v[nc++]  = .125*(1. + (2.0*x-1.))*(1. - (2.0*y-1.))*(1. + (2.0*z-1.));
          }

          if (i_c*ratioi != i && l_c*ratiok != l && j_c*ratioj != j) {
            cols[nc] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)];
            v[nc++]  = .125*(1. + (2.0*x-1.))*(1. + (2.0*y-1.))*(1. + (2.0*z-1.));
          }
          PetscCall(MatSetValues(mat,1,&row,nc,cols,v,INSERT_VALUES));
        }
      }
    }

  } else {
    PetscScalar *xi,*eta,*zeta;
    PetscInt    li,nxi,lj,neta,lk,nzeta,n;
    PetscScalar Ni[8];

    /* compute local coordinate arrays: nodal positions in [-1,1] within a coarse cell */
    nxi   = ratioi + 1;
    neta  = ratioj + 1;
    nzeta = ratiok + 1;
    PetscCall(PetscMalloc1(nxi,&xi));
    PetscCall(PetscMalloc1(neta,&eta));
    PetscCall(PetscMalloc1(nzeta,&zeta));
    for (li=0; li<nxi; li++) xi[li] = -1.0 + (PetscScalar)li*(2.0/(PetscScalar)(nxi-1));
    for (lj=0; lj<neta; lj++) eta[lj] = -1.0 + (PetscScalar)lj*(2.0/(PetscScalar)(neta-1));
    for (lk=0; lk<nzeta; lk++) zeta[lk] = -1.0 + (PetscScalar)lk*(2.0/(PetscScalar)(nzeta-1));

    for (l=l_start; l<l_start+p_f; l++) {
      for (j=j_start; j<j_start+n_f; j++) {
        for (i=i_start; i<i_start+m_f; i++) {
          /* convert to local "natural" numbering and then to PETSc global numbering */
          row = idx_f[(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))];

          i_c = (i/ratioi);
          j_c = (j/ratioj);
          l_c = (l/ratiok);

          /* remainders: index of the fine node within its coarse cell; the last
             fine grid line is treated as the far face of the previous cell */
          li = i - ratioi * (i/ratioi);
          if (i==mx-1) li = nxi-1;
          lj = j - ratioj * (j/ratioj);
          if (j==my-1) lj = neta-1;
          lk = l - ratiok * (l/ratiok);
          if (l==mz-1) lk = nzeta-1;

          /* corners: a fine node coinciding with a coarse node gets weight 1 from it alone */
          col     = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c)+m_ghost_c*(j_c-j_start_ghost_c)+(i_c-i_start_ghost_c));
          cols[0] = idx_c[col];
          Ni[0]   = 1.0;
          if ((li==0) || (li==nxi-1)) {
            if ((lj==0) || (lj==neta-1)) {
              if ((lk==0) || (lk==nzeta-1)) {
                PetscCall(MatSetValue(mat,row,cols[0],Ni[0],INSERT_VALUES));
                continue;
              }
            }
          }

          /* edges + interior */
          /* remainders */
          if (i==mx-1) i_c--;
          if (j==my-1) j_c--;
          if (l==mz-1) l_c--;

          col     = (m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c));
          cols[0] = idx_c[col];                                   /* one left and below; or we are right on it */
          cols[1] = idx_c[col+1];                                 /* one right and below */
          cols[2] = idx_c[col+m_ghost_c];                         /* one left and above */
          cols[3] = idx_c[col+(m_ghost_c+1)];                     /* one right and above */

          cols[4] = idx_c[col+m_ghost_c*n_ghost_c];               /* one left and below and front; or we are right on it */
          cols[5] = idx_c[col+(m_ghost_c*n_ghost_c+1)];           /* one right and below, and front */
          cols[6] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c)];   /* one left and above and front*/
          cols[7] = idx_c[col+(m_ghost_c*n_ghost_c+m_ghost_c+1)]; /* one right and above and front */

          /* trilinear shape functions evaluated at the local coordinates */
          Ni[0] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
          Ni[1] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0-zeta[lk]);
          Ni[2] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);
          Ni[3] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0-zeta[lk]);

          Ni[4] = 0.125*(1.0-xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
          Ni[5] = 0.125*(1.0+xi[li])*(1.0-eta[lj])*(1.0+zeta[lk]);
          Ni[6] = 0.125*(1.0-xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);
          Ni[7] = 0.125*(1.0+xi[li])*(1.0+eta[lj])*(1.0+zeta[lk]);

          for (n=0; n<8; n++) {
            /* a negative column index makes MatSetValues() ignore that entry */
            if (PetscAbsScalar(Ni[n])<1.0e-32) cols[n]=-1;
          }
          PetscCall(MatSetValues(mat,1,&row,8,cols,Ni,INSERT_VALUES));

        }
      }
    }
    PetscCall(PetscFree(xi));
    PetscCall(PetscFree(eta));
    PetscCall(PetscFree(zeta));
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));
  PetscCall(MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY));

  PetscCall(MatCreateMAIJ(mat,dof,A));
  PetscCall(MatDestroy(&mat));
  PetscFunctionReturn(0);
}
/*
   DMCreateInterpolation_DA - Validates that the two DMDAs are compatible and
   dispatches to the dimension- and element-type-specific (Q0 or Q1) routine
   that builds the interpolation matrix; optionally also builds the scaling
   vector via DMCreateInterpolationScale().
*/
PetscErrorCode DMCreateInterpolation_DA(DM dac,DM daf,Mat *A,Vec *scale)
{
  PetscInt        dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType  bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType stc,stf;
  DM_DA           *ddc = (DM_DA*)dac->data;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(dac,DM_CLASSID,1);
  PetscValidHeaderSpecific(daf,DM_CLASSID,2);
  PetscValidPointer(A,3);
  if (scale) PetscValidPointer(scale,4);

  PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc));
  PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf));
  /* the coarse and fine DMDA must agree in everything except the grid sizes */
  PetscCheckFalse(dimc != dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf);
  PetscCheckFalse(dofc != doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff);
  PetscCheckFalse(sc != sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf);
  PetscCheckFalse(bxc != bxf || byc != byf || bzc != bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs");
  PetscCheckFalse(stc != stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs");
  PetscCheckFalse(Mc < 2 && Mf > 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction");
  PetscCheckFalse(dimc > 1 && Nc < 2 && Nf > 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction");
  PetscCheckFalse(dimc > 2 && Pc < 2 && Pf > 1,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction");

  if (ddc->interptype == DMDA_Q1) {
    if (dimc == 1) {
      PetscCall(DMCreateInterpolation_DA_1D_Q1(dac,daf,A));
    } else if (dimc == 2) {
      PetscCall(DMCreateInterpolation_DA_2D_Q1(dac,daf,A));
    } else if (dimc == 3) {
      PetscCall(DMCreateInterpolation_DA_3D_Q1(dac,daf,A));
    } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
  } else if (ddc->interptype == DMDA_Q0) {
    if (dimc == 1) {
      PetscCall(DMCreateInterpolation_DA_1D_Q0(dac,daf,A));
    } else if (dimc == 2) {
      PetscCall(DMCreateInterpolation_DA_2D_Q0(dac,daf,A));
    } else if (dimc == 3) {
      PetscCall(DMCreateInterpolation_DA_3D_Q0(dac,daf,A));
    } else SETERRQ(PetscObjectComm((PetscObject)daf),PETSC_ERR_SUP,"No support for this DMDA dimension %D for interpolation type %d",dimc,(int)ddc->interptype);
  }
  if (scale) {
    PetscCall(DMCreateInterpolationScale((DM)dac,(DM)daf,*A,scale));
  }
  PetscFunctionReturn(0);
}

/*
   DMCreateInjection_DA_1D - Builds a VecScatter that injects fine grid values
   onto the coarse grid of a 1d DMDA: each coarse node takes the value of the
   fine grid node that lies directly on it (node i_f = i_c*ratioi).
*/
PetscErrorCode DMCreateInjection_DA_1D(DM dac,DM daf,VecScatter *inject)
{
  PetscInt               i,i_start,m_f,Mx,dof;
  const PetscInt         *idx_f;
  ISLocalToGlobalMapping ltog_f;
  PetscInt               m_ghost,m_ghost_c;
  PetscInt               row,i_start_ghost,mx,m_c,nc,ratioi;
  PetscInt               i_start_c,i_start_ghost_c;
  PetscInt               *cols;
  DMBoundaryType         bx;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&bx,NULL,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,NULL,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratio; periodic domains have no duplicated end node so mx/Mx must divide evenly */
  if (bx == DM_BOUNDARY_PERIODIC) {
    ratioi = mx/Mx;
    PetscCheckFalse(ratioi*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    ratioi = (mx-1)/(Mx-1);
    PetscCheckFalse(ratioi*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,NULL,NULL,&m_f,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,NULL,NULL,&m_ghost,NULL,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,NULL,NULL,&m_c,NULL,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,NULL,NULL,&m_ghost_c,NULL,NULL));

  /* loop over local fine grid nodes setting interpolation for those*/
  nc = 0;
  PetscCall(PetscMalloc1(m_f,&cols));

  for (i=i_start_c; i<i_start_c+m_c; i++) {
    PetscInt i_f = i*ratioi;   /* the fine grid node that sits on coarse node i */

    PetscCheckFalse(i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);

    row        = idx_f[(i_f-i_start_ghost)];
    cols[nc++] = row;
  }

  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  /* ownership of cols passes to the IS (PETSC_OWN_POINTER); it must not be freed here */
  PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf));
  PetscCall(DMGetGlobalVector(dac,&vecc));
  PetscCall(DMGetGlobalVector(daf,&vecf));
  PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject));
  PetscCall(DMRestoreGlobalVector(dac,&vecc));
  PetscCall(DMRestoreGlobalVector(daf,&vecf));
  PetscCall(ISDestroy(&isf));
  PetscFunctionReturn(0);
}

/*
   DMCreateInjection_DA_2D - 2d analogue of DMCreateInjection_DA_1D: builds a
   VecScatter that copies the fine grid value sitting on each coarse node into
   the coarse grid vector.
*/
PetscErrorCode DMCreateInjection_DA_2D(DM dac,DM daf,VecScatter *inject)
{
  PetscInt               i,j,i_start,j_start,m_f,n_f,Mx,My,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               m_ghost,n_ghost,m_ghost_c,n_ghost_c;
  PetscInt               row,i_start_ghost,j_start_ghost,mx,m_c,my,nc,ratioi,ratioj;
  PetscInt               i_start_c,j_start_c,n_c,i_start_ghost_c,j_start_ghost_c;
  PetscInt               *cols;
  DMBoundaryType         bx,by;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,NULL,NULL,NULL,NULL,NULL,NULL,&bx,&by,NULL,NULL));
  PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,NULL,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL));
  /* refinement ratio in x; periodic domains have no duplicated end node so mx/Mx must divide evenly */
  if (bx == DM_BOUNDARY_PERIODIC) {
    ratioi = mx/Mx;
    PetscCheckFalse(ratioi*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx);
  } else {
    ratioi = (mx-1)/(Mx-1);
    PetscCheckFalse(ratioi*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx);
  }
  /* refinement ratio in y */
  if (by == DM_BOUNDARY_PERIODIC) {
    ratioj = my/My;
    PetscCheckFalse(ratioj*My != my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My);
  } else {
    ratioj = (my-1)/(My-1);
    PetscCheckFalse(ratioj*(My-1) != my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My);
  }

  PetscCall(DMDAGetCorners(daf,&i_start,&j_start,NULL,&m_f,&n_f,NULL));
  PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,NULL,&m_ghost,&n_ghost,NULL));
  PetscCall(DMGetLocalToGlobalMapping(daf,&ltog_f));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f));

  PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,NULL,&m_c,&n_c,NULL));
  PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,NULL,&m_ghost_c,&n_ghost_c,NULL));
  PetscCall(DMGetLocalToGlobalMapping(dac,&ltog_c));
  PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c));

  /* loop over local fine grid nodes setting interpolation for those*/
  nc = 0;
  PetscCall(PetscMalloc1(n_f*m_f,&cols));
  for (j=j_start_c; j<j_start_c+n_c; j++) {
    for (i=i_start_c; i<i_start_c+m_c; i++) {
      /* (i_f,j_f) is the fine grid node sitting on coarse node (i,j) */
      PetscInt i_f = i*ratioi,j_f = j*ratioj;
      PetscCheckFalse(j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\nj_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost);
      PetscCheckFalse(i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA\ni_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost);
      row        = idx_f[(m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))];
      cols[nc++] = row;
    }
  }
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f));
  PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c));

  /* ownership of cols passes to the IS (PETSC_OWN_POINTER) */
  PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf));
  PetscCall(DMGetGlobalVector(dac,&vecc));
  PetscCall(DMGetGlobalVector(daf,&vecf));
  PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject));
  PetscCall(DMRestoreGlobalVector(dac,&vecc));
  PetscCall(DMRestoreGlobalVector(daf,&vecf));
  PetscCall(ISDestroy(&isf));
  PetscFunctionReturn(0);
}

/*
   DMCreateInjection_DA_3D - 3d analogue of DMCreateInjection_DA_1D: builds a
   VecScatter that copies the fine grid value sitting on each coarse node into
   the coarse grid vector.
*/
PetscErrorCode DMCreateInjection_DA_3D(DM dac,DM daf,VecScatter *inject)
{
  PetscInt               i,j,k,i_start,j_start,k_start,m_f,n_f,p_f,Mx,My,Mz;
  PetscInt               m_ghost,n_ghost,p_ghost,m_ghost_c,n_ghost_c,p_ghost_c;
  PetscInt               i_start_ghost,j_start_ghost,k_start_ghost;
  PetscInt               mx,my,mz,ratioi,ratioj,ratiok;
  PetscInt               i_start_c,j_start_c,k_start_c;
  PetscInt               m_c,n_c,p_c;
  PetscInt               i_start_ghost_c,j_start_ghost_c,k_start_ghost_c;
  PetscInt               row,nc,dof;
  const PetscInt         *idx_c,*idx_f;
  ISLocalToGlobalMapping ltog_f,ltog_c;
  PetscInt               *cols;
  DMBoundaryType         bx,by,bz;
  Vec                    vecf,vecc;
  IS                     isf;

  PetscFunctionBegin;
  PetscCall(DMDAGetInfo(dac,NULL,&Mx,&My,&Mz,NULL,NULL,NULL,NULL,NULL,&bx,&by,&bz,NULL));
PetscCall(DMDAGetInfo(daf,NULL,&mx,&my,&mz,NULL,NULL,NULL,&dof,NULL,NULL,NULL,NULL,NULL)); 1253 1254 if (bx == DM_BOUNDARY_PERIODIC) { 1255 ratioi = mx/Mx; 1256 PetscCheckFalse(ratioi*Mx != mx,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mx/Mx must be integer: mx %D Mx %D",mx,Mx); 1257 } else { 1258 ratioi = (mx-1)/(Mx-1); 1259 PetscCheckFalse(ratioi*(Mx-1) != mx-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mx - 1)/(Mx - 1) must be integer: mx %D Mx %D",mx,Mx); 1260 } 1261 if (by == DM_BOUNDARY_PERIODIC) { 1262 ratioj = my/My; 1263 PetscCheckFalse(ratioj*My != my,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: my/My must be integer: my %D My %D",my,My); 1264 } else { 1265 ratioj = (my-1)/(My-1); 1266 PetscCheckFalse(ratioj*(My-1) != my-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (my - 1)/(My - 1) must be integer: my %D My %D",my,My); 1267 } 1268 if (bz == DM_BOUNDARY_PERIODIC) { 1269 ratiok = mz/Mz; 1270 PetscCheckFalse(ratiok*Mz != mz,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: mz/Mz must be integer: mz %D My %D",mz,Mz); 1271 } else { 1272 ratiok = (mz-1)/(Mz-1); 1273 PetscCheckFalse(ratiok*(Mz-1) != mz-1,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Ratio between levels: (mz - 1)/(Mz - 1) must be integer: mz %D Mz %D",mz,Mz); 1274 } 1275 1276 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&k_start,&m_f,&n_f,&p_f)); 1277 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&k_start_ghost,&m_ghost,&n_ghost,&p_ghost)); 1278 PetscCall(DMGetLocalToGlobalMapping(daf,<og_f)); 1279 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_f,&idx_f)); 1280 1281 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&k_start_c,&m_c,&n_c,&p_c)); 1282 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&k_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c)); 1283 PetscCall(DMGetLocalToGlobalMapping(dac,<og_c)); 1284 PetscCall(ISLocalToGlobalMappingGetBlockIndices(ltog_c,&idx_c)); 1285 1286 
/* loop over local fine grid nodes setting interpolation for those*/ 1287 nc = 0; 1288 PetscCall(PetscMalloc1(n_f*m_f*p_f,&cols)); 1289 for (k=k_start_c; k<k_start_c+p_c; k++) { 1290 for (j=j_start_c; j<j_start_c+n_c; j++) { 1291 for (i=i_start_c; i<i_start_c+m_c; i++) { 1292 PetscInt i_f = i*ratioi,j_f = j*ratioj,k_f = k*ratiok; 1293 PetscCheckFalse(k_f < k_start_ghost || k_f >= k_start_ghost+p_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1294 "k_c %D k_f %D fine ghost range [%D,%D]",k,k_f,k_start_ghost,k_start_ghost+p_ghost); 1295 PetscCheckFalse(j_f < j_start_ghost || j_f >= j_start_ghost+n_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1296 "j_c %D j_f %D fine ghost range [%D,%D]",j,j_f,j_start_ghost,j_start_ghost+n_ghost); 1297 PetscCheckFalse(i_f < i_start_ghost || i_f >= i_start_ghost+m_ghost,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Processor's coarse DMDA must lie over fine DMDA " 1298 "i_c %D i_f %D fine ghost range [%D,%D]",i,i_f,i_start_ghost,i_start_ghost+m_ghost); 1299 row = idx_f[(m_ghost*n_ghost*(k_f-k_start_ghost) + m_ghost*(j_f-j_start_ghost) + (i_f-i_start_ghost))]; 1300 cols[nc++] = row; 1301 } 1302 } 1303 } 1304 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_f,&idx_f)); 1305 PetscCall(ISLocalToGlobalMappingRestoreBlockIndices(ltog_c,&idx_c)); 1306 1307 PetscCall(ISCreateBlock(PetscObjectComm((PetscObject)daf),dof,nc,cols,PETSC_OWN_POINTER,&isf)); 1308 PetscCall(DMGetGlobalVector(dac,&vecc)); 1309 PetscCall(DMGetGlobalVector(daf,&vecf)); 1310 PetscCall(VecScatterCreate(vecf,isf,vecc,NULL,inject)); 1311 PetscCall(DMRestoreGlobalVector(dac,&vecc)); 1312 PetscCall(DMRestoreGlobalVector(daf,&vecf)); 1313 PetscCall(ISDestroy(&isf)); 1314 PetscFunctionReturn(0); 1315 } 1316 1317 PetscErrorCode DMCreateInjection_DA(DM dac,DM daf,Mat *mat) 1318 { 1319 PetscInt dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc,dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf; 1320 DMBoundaryType 
bxc,byc,bzc,bxf,byf,bzf; 1321 DMDAStencilType stc,stf; 1322 VecScatter inject = NULL; 1323 1324 PetscFunctionBegin; 1325 PetscValidHeaderSpecific(dac,DM_CLASSID,1); 1326 PetscValidHeaderSpecific(daf,DM_CLASSID,2); 1327 PetscValidPointer(mat,3); 1328 1329 PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc)); 1330 PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf)); 1331 PetscCheckFalse(dimc != dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf); 1332 PetscCheckFalse(dofc != doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff); 1333 PetscCheckFalse(sc != sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf); 1334 PetscCheckFalse(bxc != bxf || byc != byf || bzc != bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs"); 1335 PetscCheckFalse(stc != stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs"); 1336 PetscCheckFalse(Mc < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in x direction"); 1337 PetscCheckFalse(dimc > 1 && Nc < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in y direction"); 1338 PetscCheckFalse(dimc > 2 && Pc < 2,PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Coarse grid requires at least 2 points in z direction"); 1339 1340 if (dimc == 1) { 1341 PetscCall(DMCreateInjection_DA_1D(dac,daf,&inject)); 1342 } else if (dimc == 2) { 1343 PetscCall(DMCreateInjection_DA_2D(dac,daf,&inject)); 1344 } else if (dimc == 3) { 1345 PetscCall(DMCreateInjection_DA_3D(dac,daf,&inject)); 1346 } 1347 PetscCall(MatCreateScatter(PetscObjectComm((PetscObject)inject), inject, mat)); 1348 PetscCall(VecScatterDestroy(&inject)); 1349 PetscFunctionReturn(0); 1350 } 1351 1352 /*@ 1353 
   DMCreateAggregates - Deprecated, see DMDACreateAggregates.

   Level: intermediate
@*/
PetscErrorCode DMCreateAggregates(DM dac,DM daf,Mat *mat)
{
  /* thin deprecated shim: forwards directly to DMDACreateAggregates() */
  return DMDACreateAggregates(dac,daf,mat);
}

/*@
   DMDACreateAggregates - Gets the aggregates that map between
   grids associated with two DMDAs.

   Collective on dmc

   Input Parameters:
+  dmc - the coarse grid DMDA
-  dmf - the fine grid DMDA

   Output Parameters:
.  rest - the restriction matrix (transpose of the projection matrix)

   Level: intermediate

   Note: This routine is not used by PETSc.
   It is not clear what its use case is and it may be removed in a future release.
   Users should contact petsc-maint@mcs.anl.gov if they plan to use it.

.seealso: DMRefine(), DMCreateInjection(), DMCreateInterpolation()
@*/
PetscErrorCode DMDACreateAggregates(DM dac,DM daf,Mat *rest)
{
  PetscErrorCode         ierr;
  PetscInt               dimc,Mc,Nc,Pc,mc,nc,pc,dofc,sc;
  PetscInt               dimf,Mf,Nf,Pf,mf,nf,pf,doff,sf;
  DMBoundaryType         bxc,byc,bzc,bxf,byf,bzf;
  DMDAStencilType        stc,stf;
  PetscInt               i,j,l;
  PetscInt               i_start,j_start,l_start, m_f,n_f,p_f;
  PetscInt               i_start_ghost,j_start_ghost,l_start_ghost,m_ghost,n_ghost,p_ghost;
  const PetscInt         *idx_f;
  PetscInt               i_c,j_c,l_c;
  PetscInt               i_start_c,j_start_c,l_start_c, m_c,n_c,p_c;
  PetscInt               i_start_ghost_c,j_start_ghost_c,l_start_ghost_c,m_ghost_c,n_ghost_c,p_ghost_c;
  const PetscInt         *idx_c;
  PetscInt               d;
  PetscInt               a;
  PetscInt               max_agg_size;
  PetscInt               *fine_nodes;
  PetscScalar            *one_vec;
  PetscInt               fn_idx;
  ISLocalToGlobalMapping ltogmf,ltogmc;

  PetscFunctionBegin;
  PetscValidHeaderSpecificType(dac,DM_CLASSID,1,DMDA);
  PetscValidHeaderSpecificType(daf,DM_CLASSID,2,DMDA);
  PetscValidPointer(rest,3);

PetscCall(DMDAGetInfo(dac,&dimc,&Mc,&Nc,&Pc,&mc,&nc,&pc,&dofc,&sc,&bxc,&byc,&bzc,&stc)); 1412 PetscCall(DMDAGetInfo(daf,&dimf,&Mf,&Nf,&Pf,&mf,&nf,&pf,&doff,&sf,&bxf,&byf,&bzf,&stf)); 1413 PetscCheckFalse(dimc != dimf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Dimensions of DMDA do not match %D %D",dimc,dimf); 1414 PetscCheckFalse(dofc != doff,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"DOF of DMDA do not match %D %D",dofc,doff); 1415 PetscCheckFalse(sc != sf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil width of DMDA do not match %D %D",sc,sf); 1416 PetscCheckFalse(bxc != bxf || byc != byf || bzc != bzf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Boundary type different in two DMDAs"); 1417 PetscCheckFalse(stc != stf,PetscObjectComm((PetscObject)daf),PETSC_ERR_ARG_INCOMP,"Stencil type different in two DMDAs"); 1418 1419 PetscCheckFalse(Mf < Mc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Mc %D, Mf %D", Mc, Mf); 1420 PetscCheckFalse(Nf < Nc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Nc %D, Nf %D", Nc, Nf); 1421 PetscCheckFalse(Pf < Pc,PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Coarse grid has more points than fine grid, Pc %D, Pf %D", Pc, Pf); 1422 1423 if (Pc < 0) Pc = 1; 1424 if (Pf < 0) Pf = 1; 1425 if (Nc < 0) Nc = 1; 1426 if (Nf < 0) Nf = 1; 1427 1428 PetscCall(DMDAGetCorners(daf,&i_start,&j_start,&l_start,&m_f,&n_f,&p_f)); 1429 PetscCall(DMDAGetGhostCorners(daf,&i_start_ghost,&j_start_ghost,&l_start_ghost,&m_ghost,&n_ghost,&p_ghost)); 1430 1431 PetscCall(DMGetLocalToGlobalMapping(daf,<ogmf)); 1432 PetscCall(ISLocalToGlobalMappingGetIndices(ltogmf,&idx_f)); 1433 1434 PetscCall(DMDAGetCorners(dac,&i_start_c,&j_start_c,&l_start_c,&m_c,&n_c,&p_c)); 1435 PetscCall(DMDAGetGhostCorners(dac,&i_start_ghost_c,&j_start_ghost_c,&l_start_ghost_c,&m_ghost_c,&n_ghost_c,&p_ghost_c)); 1436 1437 PetscCall(DMGetLocalToGlobalMapping(dac,<ogmc)); 1438 
PetscCall(ISLocalToGlobalMappingGetIndices(ltogmc,&idx_c)); 1439 1440 /* 1441 Basic idea is as follows. Here's a 2D example, suppose r_x, r_y are the ratios 1442 for dimension 1 and 2 respectively. 1443 Let (i,j) be a coarse grid node. All the fine grid nodes between r_x*i and r_x*(i+1) 1444 and r_y*j and r_y*(j+1) will be grouped into the same coarse grid agregate. 1445 Each specific dof on the fine grid is mapped to one dof on the coarse grid. 1446 */ 1447 1448 max_agg_size = (Mf/Mc+1)*(Nf/Nc+1)*(Pf/Pc+1); 1449 1450 /* create the matrix that will contain the restriction operator */ 1451 ierr = MatCreateAIJ(PetscObjectComm((PetscObject)daf), m_c*n_c*p_c*dofc, m_f*n_f*p_f*doff, Mc*Nc*Pc*dofc, Mf*Nf*Pf*doff, 1452 max_agg_size, NULL, max_agg_size, NULL, rest);PetscCall(ierr); 1453 1454 /* store nodes in the fine grid here */ 1455 PetscCall(PetscMalloc2(max_agg_size, &one_vec,max_agg_size, &fine_nodes)); 1456 for (i=0; i<max_agg_size; i++) one_vec[i] = 1.0; 1457 1458 /* loop over all coarse nodes */ 1459 for (l_c=l_start_c; l_c<l_start_c+p_c; l_c++) { 1460 for (j_c=j_start_c; j_c<j_start_c+n_c; j_c++) { 1461 for (i_c=i_start_c; i_c<i_start_c+m_c; i_c++) { 1462 for (d=0; d<dofc; d++) { 1463 /* convert to local "natural" numbering and then to PETSc global numbering */ 1464 a = idx_c[dofc*(m_ghost_c*n_ghost_c*(l_c-l_start_ghost_c) + m_ghost_c*(j_c-j_start_ghost_c) + (i_c-i_start_ghost_c))] + d; 1465 1466 fn_idx = 0; 1467 /* Corresponding fine points are all points (i_f, j_f, l_f) such that 1468 i_c*Mf/Mc <= i_f < (i_c+1)*Mf/Mc 1469 (same for other dimensions) 1470 */ 1471 for (l=l_c*Pf/Pc; l<PetscMin((l_c+1)*Pf/Pc,Pf); l++) { 1472 for (j=j_c*Nf/Nc; j<PetscMin((j_c+1)*Nf/Nc,Nf); j++) { 1473 for (i=i_c*Mf/Mc; i<PetscMin((i_c+1)*Mf/Mc,Mf); i++) { 1474 fine_nodes[fn_idx] = idx_f[doff*(m_ghost*n_ghost*(l-l_start_ghost) + m_ghost*(j-j_start_ghost) + (i-i_start_ghost))] + d; 1475 fn_idx++; 1476 } 1477 } 1478 } 1479 /* add all these points to one aggregate */ 1480 
PetscCall(MatSetValues(*rest, 1, &a, fn_idx, fine_nodes, one_vec, INSERT_VALUES)); 1481 } 1482 } 1483 } 1484 } 1485 PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmf,&idx_f)); 1486 PetscCall(ISLocalToGlobalMappingRestoreIndices(ltogmc,&idx_c)); 1487 PetscCall(PetscFree2(one_vec,fine_nodes)); 1488 PetscCall(MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY)); 1489 PetscCall(MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY)); 1490 PetscFunctionReturn(0); 1491 } 1492