#define PETSCKSP_DLL

/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include "private/pcimpl.h"                    /*I "petscpc.h" I*/
#include "../src/ksp/pc/impls/mg/mgimpl.h"     /*I "petscmg.h" I*/
#include "../src/mat/impls/aij/seq/aij.h"
#include "../src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
/* The HAVE_CONFIG_H flag is required by the ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                 /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;             /* PETSc matrix in AIJ format */
  Mat         Aloc;          /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;        /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;             /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;        /* ML matrix associated with A */
  Vec         y,work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal    Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscTruth   SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscMPIInt  size;         /* size of communicator for pc->pmat */
} PC_ML;

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a  = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= m-1)) { /* was ||, which is always true; only rows in [0,m-1] are valid */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}
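/*
   Editorial note on the callback contract above (inferred from how this file
   uses ML, not from additional ML documentation): ML hands PetscML_getrow()
   a workspace of `allocated_space` entries; the callback returns 0 to tell ML
   that the workspace is too small (ML then enlarges it and retries) and 1 once
   all requested rows have been copied into `columns`/`values` with their
   lengths recorded in `row_lengths`.
*/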
#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_data;
  Mat            A   = ml->A;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length = A->rmap->n,out_length = ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A = ml->A,Aloc = ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork = ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A*x.
   It is possible that w == y, but not that x == y. */
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y,&shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
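/*
   Editorial note: ML_Operator_Apply() overwrites its output array, so when the
   caller aliases y and w the product A*x must first be formed in the lazily
   created shell->work vector and then accumulated with VecAXPY(); in the
   unaliased branch the product can be written straight into y and w added
   afterwards.
*/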
/* newtype is ignored because "ml" is not listed as a PETSc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a = (Mat_SeqAIJ*)(mpimat->A)->data,*b = (Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai = a->i,*aj = a->j,*bi = b->i,*bj = b->j;
  PetscScalar    *aa = a->a,*ba = b->a,*ca;
  PetscInt       am = A->rmap->n,an = A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n + mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays.
       Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci  = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
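/*
   Editorial sketch of the local layout produced by MatConvert_MPIAIJ_ML()
   (a hypothetical 2-process example, not taken from the original source):
   with an = 4 local columns per process, a diagonal-block entry in local
   column 2 keeps index 2, while the first off-diagonal (ghost) entry is
   stored at index an + 0 = 4.  The resulting sequential matrix is therefore
   am x (an + number-of-ghost-columns), matching the combined width
   mpimat->A->cmap->n + mpimat->B->cmap->n used above, and PetscML_comm()
   fills the trailing ghost segment of the work array in that same order.
*/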
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m = mlmat->outvec_leng,n = mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols = matdata->columns,*ml_rowptr = matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals = matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i]; /* was nz_max += nnz[i], which overallocated the work arrays */
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL; /* was newmat = PETSC_NULL, which only cleared the local pointer */
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = PETSC_NULL;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
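/*
   Editorial note on ML's MSR storage, as consumed above and in
   MatWrapML_MPIAIJ() below (inferred from the copy loops, not from extra ML
   documentation): for a square m x m operator, ml_vals[i] holds the diagonal
   entry of row i, while ml_cols[0..m] acts as a row-pointer array whose range
   ml_cols[i]..ml_cols[i+1]-1 indexes the off-diagonal column/value pairs.
   Hence each row has ml_cols[i+1]-ml_cols[i]+1 nonzeros, the "+1" being the
   diagonal.
*/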
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscInt              *ml_cols = matdata->columns,*aj;
  PetscScalar           *ml_vals = matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m = mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diagonal */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML_Private"
PetscErrorCode PCDestroy_ML_Private(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level = pc_ml->Nlevels - 1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    if (pc_ml->size > 1) {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
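/*
   Editorial note: PCDestroy_ML_Private() is shared by PCDestroy_ML() and by
   PCSetUp_ML() itself, which calls it on repeated setup (see below) because
   ML may change the sizes of the level matrices and vectors, so the whole
   hierarchy has to be rebuilt from scratch rather than updated in place.
*/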
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCDestroy_MG_Private(PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscTruth     isSeq,isMPI;
  KSP            smoother;
  PC             subpc;

  PetscFunctionBegin;
  if (pc->setupcalled){
    /* since ML can change the size of vectors/matrices at any level we must destroy everything */
    ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
    ierr = PCDestroy_MG_Private(pc);CHKERRQ(ierr);
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A    = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
  if (isMPI){
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
  } else SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Invalid matrix type for ML. ML can only handle AIJ matrices.");

  /* create and initialize the struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires the input of the fine-grid matrix; it determines nlevels */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
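  /*
     Editorial note on the registration above (inferred from this file's own
     setup, not from extra ML documentation): the fine grid is installed as
     ML level 0 with local dimensions m x m, while nlocal_allcols counts the
     local columns of Aloc including ghost columns, which is the workspace
     width the getrow/comm callbacks require.
  */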
  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0); /* removed a stray CHKERRQ(ierr) that checked an unset ierr */
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
  /* set default smoothers (the serial and parallel defaults were identical, so the duplicate branches are collapsed) */
  for (level=1; level<=fine_level; level++){
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices with PETSc matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
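  /*
     Editorial sketch of the level numbering implied by the loops below
     (a hypothetical Nlevels = 3 case): ML level 0 (finest) maps to PETSc
     level 2, ML level 1 to PETSc level 1, and ML level 2 (coarsest) to PETSc
     level 0, i.e. petsc_level = Nlevels - 1 - ml_level, which is why the
     loops walk mllevel upward while level counts down from fine_level - 1.
  */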
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into SeqAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into MPIAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create the interpolation between the levels and set the operators */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); /* was gridctx[level].A, relying on level == fine_level after the loop */

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
                  that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm = ((PetscObject)pc)->comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will
    take over. This is suboptimal because it amounts to calling exit(1), so we check for the most common
    conditions ourselves.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a
    working combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsTruth("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsTruth("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsTruth("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation(), which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of
    the functionality of the old function, so some users may still want to use it. Note that many options are
    ignored in this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsTruth("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kaskade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
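/*
   Editorial usage sketch (a hypothetical command line, not from the original
   source): an application that already calls KSPSetFromOptions() can select
   this preconditioner and tune the hierarchy entirely from the options
   database, e.g.

     mpiexec -n 4 ./myapp -ksp_type cg -pc_type ml \
         -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS -pc_mg_type multiplicative

   or programmatically with PCSetType(pc,PCML) followed by PCSetFromOptions(pc).
*/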
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  /* override the function pointers inherited from PCMG with the PCML versions */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END