/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>   /*I "petscpcmg.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>

#include <math.h>
EXTERN_C_BEGIN
/* The HAVE_CONFIG_H flag is required by the ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;              /* PETSc matrix in AIJ format */
  Mat         Aloc;           /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;         /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;              /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;         /* ML matrix associated with A */
  Vec         y,work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal    Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool    SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool    reuse_interpolation;
  PetscMPIInt  size;          /* size of communicator for pc->pmat */
} PC_ML;
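/*
   Level numbering convention (an editorial sketch, not code from the original
   source): ML numbers levels from the finest (0) upward, while PCMG numbers
   them from the coarsest (0) upward. The loops below that decrement "level"
   while incrementing "mllevel" implement the mapping

     petsc_level = (Nlevels - 1) - ml_level

   so ML level 0 (finest) corresponds to PCMG level Nlevels-1.
*/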
#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A*x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y,&shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
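/*
   For orientation (a sketch restating calls made in PCSetUp_ML() below): the
   three callbacks above are handed to ML as the fine-grid operator, where m is
   the local row count of Aloc and nlocal_allcols its local column count:

     ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
     ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
     ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
*/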
/* newtype is ignored because "ml" is not a registered PETSc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k] = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci  = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
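/*
   Worked example of the column remapping above (an editorial illustration,
   not code from the original source). Suppose a process owns rows 0..1 and
   columns 0..1 (an = 2) of a 4x4 MPIAIJ matrix, with one ghost entry per row
   in global columns 2 and 3. The diagonal block keeps its local column
   indices 0..1; the off-diagonal block's compact local indices 0,1 are
   shifted by an, so Aloc uses local columns

     owned: 0,1                (diagonal block of A)
     ghost: 2,3 = an+0, an+1   (ordered as in mpimat->garray)

   giving a local matrix with am rows and an+Bn columns that ML can treat as a
   purely local operator, with PetscML_comm() filling the ghost segment.
*/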
extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into a SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  if (reuse) {
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

    ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
    nz_max = 1;
    for (i=0; i<m; i++) {
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nnz[i] > nz_max) nz_max = nnz[i];
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    PetscInt ncols;
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
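/*
   Worked example of ML's MSR storage read above (an editorial illustration,
   not code from the original source). For row i the diagonal value sits in
   ml_vals[i], and the off-diagonal entries of row i occupy positions
   ml_cols[i] .. ml_cols[i+1]-1 of both arrays. For the 2x2 matrix
   [d0 a01; a10 d1] with m = 2:

     ml_cols = { 3,  4,  5,   1,   0 }
     ml_vals = { d0, d1, *, a01, a10 }    (* = unused)

   Row 0: diagonal d0, off-diagonal at position 3 -> column 1, value a01.
   Row 1: diagonal d1, off-diagonal at position 4 -> column 0, value a10.
   Hence nnz[i] = ml_cols[i+1] - ml_cols[i] + 1, as computed above.
*/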
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = PETSC_NULL;
  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  if (reuse) {
    A = *newmat;
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

    nz_max = 0;
    for (i=0; i<m; i++){
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nz_max < nnz[i]) nz_max = nnz[i];
      nnzA[i] = 1; /* diag */
      for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
        if (ml_cols[j] < m) nnzA[i]++;
      }
      nnzB[i] = nnz[i] - nnzA[i];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];
    k = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ML_free(gordering);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
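/*
   Usage sketch for the wrappers above (restating the pattern used in
   PCSetUp_ML() below; P here stands for one of the wrapped prolongators):

     Mat P;
     ierr = MatWrapML_SHELL(&ml_object->Pmat[mllevel],MAT_INITIAL_MATRIX,&P);CHKERRQ(ierr);
     ierr = MatMult(P,xc,xf);CHKERRQ(ierr);

   The MatMult() dispatches to MatMult_ML(), which calls ML_Operator_Apply()
   on the underlying ML_Operator.
*/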
/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A)  {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P)  {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R)  {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x)  {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b)  {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCReset_MG(PC);
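/*
   Setup pipeline (an editorial overview of the function below, not code from
   the original source):
     1. convert pc->pmat into a local AIJ matrix Aloc (MatConvert_MPIAIJ_ML);
     2. hand Aloc to ML via the getrow/comm/matvec callbacks and let ML build
        the aggregation hierarchy;
     3. wrap ML's coarse operators back as PETSc matrices (SeqAIJ, MPIAIJ, or
        shell) and install them into the inherited PCMG structure;
     4. call PCSetUp_MG() to finish the multigrid setup.
*/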
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool      isSeq,isMPI;
  KSP            smoother;
  PC             subpc;
  PetscInt       mesh_level,old_mesh_level;

  PetscFunctionBegin;
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
        Reuse the interpolation instead of recomputing the aggregates and updating the whole hierarchy.
        This is less expensive for multiple solves in which the matrix is not changing too quickly.
      */
      ml_object             = pc_ml->ml_object;
      gridctx               = pc_ml->gridctx;
      Nlevels               = pc_ml->Nlevels;
      fine_level            = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
      if (isMPI){
        ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object,old_mesh_level,mesh_level);
      }

      level = fine_level - 1;
      if (size == 1) { /* convert the regenerated ML A into SeqAIJ format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert the regenerated ML A into MPIAIJ format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0){
          ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }
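  /*
     The reuse branch above is triggered at runtime by, e.g. (a sketch of the
     relevant options; the solver and matrix are up to the application):

       -ksp_type cg -pc_type ml -pc_ml_reuse_interpolation

     combined with KSPSetOperators(...,SAME_NONZERO_PATTERN) on subsequent solves.
  */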
  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
  if (isMPI){
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
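  /*
     The Richardson/SOR defaults installed below can be overridden at runtime
     through the usual PCMG smoother prefixes, e.g. (a sketch; the exact
     smoother choice is up to the application):

       -mg_levels_ksp_type richardson -mg_levels_ksp_max_it 2 -mg_levels_pc_type sor
       -mg_coarse_ksp_type preonly -mg_coarse_pc_type lu
  */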
  /* set default smoothers */
  for (level=1; level<=fine_level; level++){
    /* the serial and parallel defaults are currently identical */
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices with PETSc shell matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into SeqAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into MPIAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm = ((PetscObject)pc)->comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical stuff should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the
    functionality in the old function, so some users may still want to use it. Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use the ML algebraic multigrid preconditioner. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   ML options:
.  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
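/*
   Example usage (an editorial sketch, not part of the original source;
   assumes an assembled AIJ matrix A and vectors b and x from the application):

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

   or select it entirely from the command line of an existing code, e.g.

     ./app -ksp_type cg -pc_type ml -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS
*/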
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, it must be deleted here */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END