#define PETSCKSP_DLL

/*
   Provides an interface to the ML smoothed Aggregation
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include "private/pcimpl.h"   /*I "petscpc.h" I*/
#include "../src/ksp/pc/impls/mg/mgimpl.h"                    /*I "petscmg.h" I*/
#include "../src/mat/impls/aij/seq/aij.h"
#include "../src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;           /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context associating an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal    Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool    SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscMPIInt  size; /* size of communicator for pc->pmat */
} PC_ML;

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ( (row >= 0) && (row <= (m-1)) ) { /* was ||, which is always true; the bounds check needs && */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0); /* was a bare "return 0", which unbalances the PetscFunctionBegin stack */

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
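/*
   Illustrative sketch (not compiled into the build): the zero-copy idiom used by
   PetscML_comm() above and PetscML_matvec() below. A raw array owned by ML is
   temporarily installed as the storage of a pre-sized sequential Vec with
   VecPlaceArray(), used in PETSc operations, then detached with VecResetArray().
   The names "ZeroCopySketch", "v", and "buf" are hypothetical.
*/
#if 0
static PetscErrorCode ZeroCopySketch(Vec v,PetscScalar *buf)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecPlaceArray(v,buf);CHKERRQ(ierr); /* v now aliases buf; no data is copied */
  /* use v in VecScatter/MatMult calls; results land directly in buf */
  ierr = VecResetArray(v);CHKERRQ(ierr);     /* restore v's original storage before v is reused */
  PetscFunctionReturn(0);
}
#endif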
#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr); /* the return value was previously ignored */
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A*x
   It is possible that w == y, but not that x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
  if (y == w) {
    /* y aliases w, so apply the operator into a work vector, then add */
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
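/*
   Illustrative sketch (not compiled): the shell-matrix pattern that makes the two
   routines above callable through MatMult()/MatMultAdd(). The context pointer given
   to MatCreateShell() is returned verbatim by MatShellGetContext(), which is how
   MatMult_ML() recovers its Mat_MLShell. The names "S" and the sizes m,n are
   hypothetical; MatWrapML_SHELL() below does this for real.
*/
#if 0
{
  Mat         S;
  Mat_MLShell *ctx;
  PetscNew(Mat_MLShell,&ctx);
  MatCreateShell(PETSC_COMM_WORLD,m,n,PETSC_DETERMINE,PETSC_DETERMINE,ctx,&S);
  MatShellSetOperation(S,MATOP_MULT,(void(*)(void))MatMult_ML);
  MatShellSetOperation(S,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
  /* MatMult(S,x,y) now dispatches to MatMult_ML(S,x,y), which recovers ctx via MatShellGetContext() */
}
#endif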
/* newtype is ignored since "ml" is not a registered PETSc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n + mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays; */
    /* since these are PETSc arrays, change the flags so they are freed as necessary.      */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
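/*
   Illustrative sketch (not compiled): the column numbering produced by
   MatConvert_MPIAIJ_ML() above. Diagonal-block entries keep their local column
   indices 0..an-1; off-diagonal-block entries are appended after them, shifted
   by an, so the merged local matrix has width an plus the number of off-process
   columns. A hypothetical one-row example with an = 2:
*/
#if 0
{
  Mat         Aloc;
  PetscInt    ci[] = {0,3};           /* one row, three nonzeros */
  PetscInt    cj[] = {0,1,2};         /* columns 0,1 from the diagonal block; 2 = an+0 from the off-diagonal block */
  PetscScalar ca[] = {2.0,-1.0,-1.0};
  MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,1,3,ci,cj,ca,&Aloc);
}
#endif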
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into a SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i]; /* was "nz_max += nnz[i]"; only the longest row length is needed */
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL; /* was "newmat = PETSC_NULL", which only changed the local pointer */
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = PETSC_NULL;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
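/*
   Illustrative sketch (not compiled): how ML's MSR storage is laid out, matching
   the copy loop in MatWrapML_SeqAIJ() above. For an m x m operator, vals[0..m-1]
   hold the diagonal, and cols[i]..cols[i+1]-1 index the off-diagonal entries of
   row i, whose columns/values are cols[j]/vals[j]. Variable names are hypothetical.
*/
#if 0
for (i=0; i<m; i++) {
  diag = vals[i];                   /* diagonal entry of row i */
  for (j=cols[i]; j<cols[i+1]; j++) {
    col = cols[j]; val = vals[j];   /* off-diagonal entry (i,col) = val */
  }
}
#endif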
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  /* create global row numbering for an ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ML_free(gordering);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML_Private"
PetscErrorCode PCDestroy_ML_Private(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    if (pc_ml->size > 1) {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCDestroy_MG_Private(PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool      isSeq, isMPI;
  KSP            smoother;
  PC             subpc;

  PetscFunctionBegin;
  if (pc->setupcalled){
    /* since ML can change the size of vectors/matrices at any level we must destroy everything */
    ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
    ierr = PCDestroy_MG_Private(pc);CHKERRQ(ierr);
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI){
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
  } else SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Invalid matrix type for ML. ML can only handle AIJ matrices.");

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires the fine-grid matrix as input; it determines the number of levels itself */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0); /* the stray CHKERRQ(ierr) here checked a stale error code */
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
  /* set default smoothers (the sequential and parallel branches were identical, so they are merged) */
  for (level=1; level<=fine_level; level++){
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
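  /*
     Note (illustrative): these defaults are ordinary PCMG smoothers, so they can be
     overridden at runtime with the standard PCMG option prefixes; the exact prefixes
     depend on the PETSc version, e.g.
         -mg_coarse_ksp_type preonly -mg_coarse_pc_type lu
         -mg_levels_1_ksp_type richardson -mg_levels_1_pc_type jacobi
  */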
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices with PETSc shell matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* set the interpolation/restriction between levels and the operators on each level */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); /* was gridctx[level].A; equivalent here since level == fine_level, but explicit is clearer */

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
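/*
   Illustrative sketch (not compiled): the level-index mapping used by the wrap
   loops in PCSetUp_ML() above. ML counts level 0 as the finest grid, while PCMG
   counts level 0 as the coarsest, so "mllevel" walks upward while "level" walks
   downward:
*/
#if 0
for (mllevel=0; mllevel<Nlevels; mllevel++) {
  level = Nlevels - 1 - mllevel; /* ML level 0 (finest) <-> PETSc level Nlevels-1 (finest) */
}
#endif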
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm = ((PetscObject)pc)->comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions. If we let these slip by, ML's error handling would take over,
    which is suboptimal because it amounts to calling exit(1), so we check for the most common conditions here.

    We also try to set some sane defaults when energy minimization is activated, otherwise it is hard to find a
    working combination of options, and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation(), which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of
    the functionality in the old function, so some users may still want to use it. Note that many options are
    ignored in this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
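/*
   Illustrative usage sketch (not compiled): selecting PCML from application code.
   The matrix A and vectors b,x are assumed to exist already; the KSPSetOperators()
   calling sequence follows the one used elsewhere in this file.
*/
#if 0
{
  KSP ksp;
  PC  pc;
  KSPCreate(PETSC_COMM_WORLD,&ksp);
  KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
  KSPGetPC(ksp,&pc);
  PCSetType(pc,PCML);     /* equivalent to the runtime option -pc_type ml */
  KSPSetFromOptions(ksp); /* picks up -pc_ml_* and inherited -pc_mg_* options */
  KSPSolve(ksp,b,x);
}
#endif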
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END