xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision d736bfeb4d37a01fcbdf00fe73fb60d6f0ba2142)
1 #define PETSCKSP_DLL
2 
3 /*
4    Provides an interface to the ML smoothed Aggregation
5    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
6                                     Jed Brown, see [PETSC #18321, #18449].
7 */
8 #include "private/pcimpl.h"   /*I "petscpc.h" I*/
9 #include "../src/ksp/pc/impls/mg/mgimpl.h"                    /*I "petscmg.h" I*/
10 #include "../src/mat/impls/aij/seq/aij.h"
11 #include "../src/mat/impls/aij/mpi/mpiaij.h"
12 
13 #include <math.h>
14 EXTERN_C_BEGIN
15 /* HAVE_CONFIG_H flag is required by ML include files */
16 #if !defined(HAVE_CONFIG_H)
17 #define HAVE_CONFIG_H
18 #endif
19 #include "ml_include.h"
20 EXTERN_C_END
21 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global solution, right-hand side, and residual vectors for this level */
  Mat        A,P,R;           /* level operator, prolongation (coarse->fine), and restriction (fine->coarse) */
  KSP        ksp;             /* smoother on this level (coarse solver on level 0) */
} GridCtx;
28 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* PETSc matrix in aij format (the fine-grid pmat) */
  Mat          Aloc;   /* local portion of A (diag + off-diag columns) to be used by ML */
  Vec          x,y;    /* work vectors sized to Aloc's columns/rows; arrays are placed, not owned */
  ML_Operator  *mlmat; /* ML's handle for the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() to hold ghosted values */
} FineGridCtx;
37 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y;       /* work vector for mat-vec results */
} Mat_MLShell;
44 
/* Private context for the ML preconditioner; stored as the innerctx of a PC_MG */
typedef struct {
  ML             *ml_object;        /* ML hierarchy handle */
  ML_Aggregate   *agg_object;       /* ML aggregation context */
  GridCtx        *gridctx;          /* per-level data, indexed 0 (coarsest) .. Nlevels-1 (finest) */
  FineGridCtx    *PetscMLdata;      /* fine-grid PETSc<->ML adapter */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscTruth     SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
56 
57 #undef __FUNCT__
58 #define __FUNCT__ "PetscML_getrow"
59 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
60 {
61   PetscErrorCode ierr;
62   PetscInt       m,i,j,k=0,row,*aj;
63   PetscScalar    *aa;
64   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
65   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
66 
67 
68   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
69   for (i = 0; i<N_requested_rows; i++) {
70     row   = requested_rows[i];
71     row_lengths[i] = a->ilen[row];
72     if (allocated_space < k+row_lengths[i]) return(0);
73     if ( (row >= 0) || (row <= (m-1)) ) {
74       aj = a->j + a->i[row];
75       aa = a->a + a->i[row];
76       for (j=0; j<row_lengths[i]; j++){
77         columns[k]  = aj[j];
78         values[k++] = aa[j];
79       }
80     }
81   }
82   return(1);
83 }
84 
85 #undef __FUNCT__
86 #define __FUNCT__ "PetscML_comm"
87 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
88 {
89   PetscErrorCode ierr;
90   FineGridCtx    *ml=(FineGridCtx*)ML_data;
91   Mat            A=ml->A;
92   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
93   PetscMPIInt    size;
94   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
95   PetscScalar    *array;
96 
97   PetscFunctionBegin;
98   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
99   if (size == 1) return 0;
100 
101   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
102   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
103   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
104   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
105   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
106   for (i=in_length; i<out_length; i++){
107     p[i] = array[i-in_length];
108   }
109   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
110   PetscFunctionReturn(0);
111 }
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "PetscML_matvec"
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A=ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
126   if (size == 1){
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     PetscML_comm(pwork,ml);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 #undef __FUNCT__
141 #define __FUNCT__ "MatMult_ML"
142 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
143 {
144   PetscErrorCode   ierr;
145   Mat_MLShell      *shell;
146   PetscScalar      *xarray,*yarray;
147   PetscInt         x_length,y_length;
148 
149   PetscFunctionBegin;
150   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
151   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
152   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
153   x_length = shell->mlmat->invec_leng;
154   y_length = shell->mlmat->outvec_leng;
155   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
156   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
157   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/*
   MatMultAdd_ML - MATOP_MULT_ADD implementation for the ML shell matrix:
   y = w + mlmat * x.

   ML_Operator_Apply overwrites y's array with mlmat*x, then w is added with
   VecAXPY; the order matters because the apply does not accumulate.
   NOTE(review): assumes y does not alias x (ML_Operator_Apply works in-place
   on distinct in/out arrays) -- confirm callers never pass x == y.
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode    ierr;
  Mat_MLShell       *shell;
  PetscScalar       *xarray,*yarray;
  PetscInt          x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  /* y currently holds mlmat*x; add w to complete y = w + mlmat*x */
  ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
182 
183 /* newtype is ignored because "ml" is not listed under Petsc MatType */
184 #undef __FUNCT__
185 #define __FUNCT__ "MatConvert_MPIAIJ_ML"
186 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
187 {
188   PetscErrorCode  ierr;
189   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
190   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
191   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
192   PetscScalar     *aa=a->a,*ba=b->a,*ca;
193   PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
194   PetscInt        *ci,*cj,ncols;
195 
196   PetscFunctionBegin;
197   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
198 
199   if (scall == MAT_INITIAL_MATRIX){
200     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
201     ci[0] = 0;
202     for (i=0; i<am; i++){
203       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
204     }
205     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
206     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
207 
208     k = 0;
209     for (i=0; i<am; i++){
210       /* diagonal portion of A */
211       ncols = ai[i+1] - ai[i];
212       for (j=0; j<ncols; j++) {
213         cj[k]   = *aj++;
214         ca[k++] = *aa++;
215       }
216       /* off-diagonal portion of A */
217       ncols = bi[i+1] - bi[i];
218       for (j=0; j<ncols; j++) {
219         cj[k]   = an + (*bj); bj++;
220         ca[k++] = *ba++;
221       }
222     }
223     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
224 
225     /* put together the new matrix */
226     an = mpimat->A->cmap->n+mpimat->B->cmap->n;
227     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
228 
229     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
230     /* Since these are PETSc arrays, change flags to free them as necessary. */
231     mat = (Mat_SeqAIJ*)(*Aloc)->data;
232     mat->free_a       = PETSC_TRUE;
233     mat->free_ij      = PETSC_TRUE;
234 
235     mat->nonew    = 0;
236   } else if (scall == MAT_REUSE_MATRIX){
237     mat=(Mat_SeqAIJ*)(*Aloc)->data;
238     ci = mat->i; cj = mat->j; ca = mat->a;
239     for (i=0; i<am; i++) {
240       /* diagonal portion of A */
241       ncols = ai[i+1] - ai[i];
242       for (j=0; j<ncols; j++) *ca++ = *aa++;
243       /* off-diagonal portion of A */
244       ncols = bi[i+1] - bi[i];
245       for (j=0; j<ncols; j++) *ca++ = *ba++;
246     }
247   } else {
248     SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
249   }
250   PetscFunctionReturn(0);
251 }
252 
253 extern PetscErrorCode MatDestroy_Shell(Mat);
254 #undef __FUNCT__
255 #define __FUNCT__ "MatDestroy_ML"
256 static PetscErrorCode MatDestroy_ML(Mat A)
257 {
258   PetscErrorCode ierr;
259   Mat_MLShell    *shell;
260 
261   PetscFunctionBegin;
262   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
263   ierr = VecDestroy(shell->y);CHKERRQ(ierr);
264   ierr = PetscFree(shell);CHKERRQ(ierr);
265   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
266   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
267   PetscFunctionReturn(0);
268 }
269 
270 #undef __FUNCT__
271 #define __FUNCT__ "MatWrapML_SeqAIJ"
272 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
273 {
274   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
275   PetscErrorCode        ierr;
276   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
277   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
278   PetscScalar           *ml_vals=matdata->values,*aa;
279 
280   PetscFunctionBegin;
281   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
282   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
283     if (reuse){
284       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
285       aij->i = ml_rowptr;
286       aij->j = ml_cols;
287       aij->a = ml_vals;
288     } else {
289       /* sort ml_cols and ml_vals */
290       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
291       for (i=0; i<m; i++) {
292         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
293       }
294       aj = ml_cols; aa = ml_vals;
295       for (i=0; i<m; i++){
296         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
297         aj += nnz[i]; aa += nnz[i];
298       }
299       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
300       ierr = PetscFree(nnz);CHKERRQ(ierr);
301     }
302     PetscFunctionReturn(0);
303   }
304 
305   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
306   ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
307   ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
308   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
309 
310   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
311   nz_max = 1;
312   for (i=0; i<m; i++) {
313     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
314     if (nnz[i] > nz_max) nz_max += nnz[i];
315   }
316 
317   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
318   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
319   for (i=0; i<m; i++){
320     k = 0;
321     /* diagonal entry */
322     aj[k] = i; aa[k++] = ml_vals[i];
323     /* off diagonal entries */
324     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
325       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
326     }
327     /* sort aj and aa */
328     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
329     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
330   }
331   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
332   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
333 
334   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
335   ierr = PetscFree(nnz);CHKERRQ(ierr);
336   PetscFunctionReturn(0);
337 }
338 
339 #undef __FUNCT__
340 #define __FUNCT__ "MatWrapML_SHELL"
341 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
342 {
343   PetscErrorCode ierr;
344   PetscInt       m,n;
345   ML_Comm        *MLcomm;
346   Mat_MLShell    *shellctx;
347 
348   PetscFunctionBegin;
349   m = mlmat->outvec_leng;
350   n = mlmat->invec_leng;
351   if (!m || !n){
352     newmat = PETSC_NULL;
353     PetscFunctionReturn(0);
354   }
355 
356   if (reuse){
357     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
358     shellctx->mlmat = mlmat;
359     PetscFunctionReturn(0);
360   }
361 
362   MLcomm = mlmat->comm;
363   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
364   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
365   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
366   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
367   shellctx->A         = *newmat;
368   shellctx->mlmat     = mlmat;
369   ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
370   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
371   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
372   (*newmat)->ops->destroy = MatDestroy_ML;
373   PetscFunctionReturn(0);
374 }
375 
376 #undef __FUNCT__
377 #define __FUNCT__ "MatWrapML_MPIAIJ"
378 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
379 {
380   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
381   PetscInt              *ml_cols=matdata->columns,*aj;
382   PetscScalar           *ml_vals=matdata->values,*aa;
383   PetscErrorCode        ierr;
384   PetscInt              i,j,k,*gordering;
385   PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
386   Mat                   A;
387 
388   PetscFunctionBegin;
389   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
390   n = mlmat->invec_leng;
391   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
392 
393   ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
394   ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
395   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
396   ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
397 
398   nz_max = 0;
399   for (i=0; i<m; i++){
400     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
401     if (nz_max < nnz[i]) nz_max = nnz[i];
402     nnzA[i] = 1; /* diag */
403     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
404       if (ml_cols[j] < m) nnzA[i]++;
405     }
406     nnzB[i] = nnz[i] - nnzA[i];
407   }
408   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
409 
410   /* insert mat values -- remap row and column indices */
411   nz_max++;
412   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
413   /* create global row numbering for a ML_Operator */
414   ML_build_global_numbering(mlmat,&gordering,"rows");
415   for (i=0; i<m; i++){
416     row = gordering[i];
417     k = 0;
418     /* diagonal entry */
419     aj[k] = row; aa[k++] = ml_vals[i];
420     /* off diagonal entries */
421     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
422       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
423     }
424     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
425   }
426   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
427   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
428   *newmat = A;
429 
430   ierr = PetscFree3(nnzA,nnzB,nnz);
431   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
432   PetscFunctionReturn(0);
433 }
434 
435 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML_Private"
/*
   PCDestroy_ML_Private - frees everything PCSetUp_ML created: the ML
   hierarchy and aggregation contexts, the fine-grid adapter, and the
   per-level matrices/vectors. Called both from PCDestroy_ML and from
   PCSetUp_ML on re-setup (ML may change sizes between setups).
*/
PetscErrorCode PCDestroy_ML_Private(void *ptr)
{
  PetscErrorCode  ierr;
  PC_ML           *pc_ml = (PC_ML*)ptr;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    /* Aloc is a separate matrix only in parallel; sequentially it aliases pc->pmat */
    if (pc_ml->size > 1)      {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  /* levels 0..fine_level-1 own A,P,R,x,b; residual vectors live at level+1
     (level 0 has no r, the fine level's A is pc->pmat and is not destroyed here) */
  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
467 /* -------------------------------------------------------------------------- */
468 /*
469    PCSetUp_ML - Prepares for the use of the ML preconditioner
470                     by setting data structures and options.
471 
472    Input Parameter:
473 .  pc - the preconditioner context
474 
475    Application Interface Routine: PCSetUp()
476 
477    Notes:
478    The interface routine PCSetUp() is not usually called directly by
479    the user, but instead is called by PCApply() if necessary.
480 */
481 extern PetscErrorCode PCSetFromOptions_MG(PC);
482 extern PetscErrorCode PCDestroy_MG_Private(PC);
483 
484 #undef __FUNCT__
485 #define __FUNCT__ "PCSetUp_ML"
486 PetscErrorCode PCSetUp_ML(PC pc)
487 {
488   PetscErrorCode  ierr;
489   PetscMPIInt     size;
490   FineGridCtx     *PetscMLdata;
491   ML              *ml_object;
492   ML_Aggregate    *agg_object;
493   ML_Operator     *mlmat;
494   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
495   Mat             A,Aloc;
496   GridCtx         *gridctx;
497   PC_MG           *mg = (PC_MG*)pc->data;
498   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
499   PetscTruth      isSeq, isMPI;
500   KSP             smoother;
501   PC              subpc;
502 
503   PetscFunctionBegin;
504   if (pc->setupcalled){
505     /* since ML can change the size of vectors/matrices at any level we must destroy everything */
506     ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
507     ierr = PCDestroy_MG_Private(pc);CHKERRQ(ierr);
508   }
509 
510   /* setup special features of PCML */
511   /*--------------------------------*/
512   /* covert A to Aloc to be used by ML at fine grid */
513   A = pc->pmat;
514   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
515   pc_ml->size = size;
516   ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
517   ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
518   if (isMPI){
519     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
520   } else if (isSeq) {
521     Aloc = A;
522   } else SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Invalid matrix type for ML. ML can only handle AIJ matrices.");
523 
524   /* create and initialize struct 'PetscMLdata' */
525   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
526   pc_ml->PetscMLdata = PetscMLdata;
527   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
528 
529   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
530   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
531   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
532 
533   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
534   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
535   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
536   PetscMLdata->A    = A;
537   PetscMLdata->Aloc = Aloc;
538 
539   /* create ML discretization matrix at fine grid */
540   /* ML requires input of fine-grid matrix. It determines nlevels. */
541   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
542   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
543   ML_Create(&ml_object,pc_ml->MaxNlevels);
544   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
545   pc_ml->ml_object = ml_object;
546   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
547   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
548   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
549 
550   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
551 
552   /* aggregation */
553   ML_Aggregate_Create(&agg_object);
554   pc_ml->agg_object = agg_object;
555 
556   ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
557   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
558   /* set options */
559   switch (pc_ml->CoarsenScheme) {
560   case 1:
561     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
562   case 2:
563     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
564   case 3:
565     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
566   }
567   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
568   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
569   if (pc_ml->SpectralNormScheme_Anorm){
570     ML_Set_SpectralNormScheme_Anorm(ml_object);
571   }
572   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
573   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
574   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
575   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
576   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
577   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
578 
579   if (pc_ml->OldHierarchy) {
580     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
581   } else {
582     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
583   }
584   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
585   pc_ml->Nlevels = Nlevels;
586   fine_level = Nlevels - 1;
587 
588   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
589   /* set default smoothers */
590   for (level=1; level<=fine_level; level++){
591     if (size == 1){
592       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
593       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
594       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
595       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
596     } else {
597       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
598       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
599       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
600       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
601     }
602   }
603   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
604 
605   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
606   pc_ml->gridctx = gridctx;
607 
608   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
609      Level 0 is the finest grid for ML, but coarsest for PETSc! */
610   gridctx[fine_level].A = A;
611 
612   level = fine_level - 1;
613   if (size == 1){ /* convert ML P, R and A into seqaij format */
614     for (mllevel=1; mllevel<Nlevels; mllevel++){
615       mlmat = &(ml_object->Pmat[mllevel]);
616       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
617       mlmat = &(ml_object->Rmat[mllevel-1]);
618       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
619 
620       mlmat = &(ml_object->Amat[mllevel]);
621       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
622       level--;
623     }
624   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
625     for (mllevel=1; mllevel<Nlevels; mllevel++){
626       mlmat  = &(ml_object->Pmat[mllevel]);
627       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
628       mlmat  = &(ml_object->Rmat[mllevel-1]);
629       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
630 
631       mlmat  = &(ml_object->Amat[mllevel]);
632       ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
633       level--;
634     }
635   }
636 
637   /* create vectors and ksp at all levels */
638   for (level=0; level<fine_level; level++){
639     level1 = level + 1;
640     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
641     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
642     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
643     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
644 
645     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
646     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
647     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
648     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
649 
650     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
651     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
652     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
653     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
654 
655     if (level == 0){
656       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
657     } else {
658       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
659     }
660   }
661   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
662 
663   /* create coarse level and the interpolation between the levels */
664   for (level=0; level<fine_level; level++){
665     level1 = level + 1;
666     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
667     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
668     if (level > 0){
669       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
670     }
671     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
672   }
673   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
674   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
675 
676   /* setupcalled is set to 0 so that MG is setup from scratch */
677   pc->setupcalled = 0;
678   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
679   PetscFunctionReturn(0);
680 }
681 
682 /* -------------------------------------------------------------------------- */
683 /*
684    PCDestroy_ML - Destroys the private context for the ML preconditioner
685    that was created with PCCreate_ML().
686 
687    Input Parameter:
688 .  pc - the preconditioner context
689 
690    Application Interface Routine: PCDestroy()
691 */
692 #undef __FUNCT__
693 #define __FUNCT__ "PCDestroy_ML"
694 PetscErrorCode PCDestroy_ML(PC pc)
695 {
696   PetscErrorCode  ierr;
697   PC_MG           *mg = (PC_MG*)pc->data;
698   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
699 
700   PetscFunctionBegin;
701   ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
702   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
703   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
704   PetscFunctionReturn(0);
705 }
706 
707 #undef __FUNCT__
708 #define __FUNCT__ "PCSetFromOptions_ML"
709 PetscErrorCode PCSetFromOptions_ML(PC pc)
710 {
711   PetscErrorCode  ierr;
712   PetscInt        indx,PrintLevel;
713   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
714   PC_MG           *mg = (PC_MG*)pc->data;
715   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
716   PetscMPIInt     size;
717   MPI_Comm        comm = ((PetscObject)pc)->comm;
718 
719   PetscFunctionBegin;
720   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
721   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
722   PrintLevel    = 0;
723   indx          = 0;
724   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
725   ML_Set_PrintLevel(PrintLevel);
726   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
727   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
728   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
729   pc_ml->CoarsenScheme = indx;
730   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
731   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
732   ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
733   ierr = PetscOptionsTruth("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
734   ierr = PetscOptionsTruth("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
735   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
736   /*
737     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
738     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
739 
740     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
741     combination of options and ML's exit(1) explanations don't help matters.
742   */
743   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
744   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
745   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
746   if (pc_ml->EnergyMinimization) {
747     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
748   }
749   if (pc_ml->EnergyMinimization == 2) {
750     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
751     ierr = PetscOptionsTruth("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
752   }
753   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
754   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
755   ierr = PetscOptionsTruth("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
756   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
757   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
758   ierr = PetscOptionsTruth("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
759   /*
760     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
761     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
762     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
763     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
764     this context, but ML doesn't provide a way to find out which ones.
765    */
766   ierr = PetscOptionsTruth("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
767   ierr = PetscOptionsTail();CHKERRQ(ierr);
768   PetscFunctionReturn(0);
769 }
770 
771 /* -------------------------------------------------------------------------- */
772 /*
773    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
774    and sets this as the private data within the generic preconditioning
775    context, PC, that was created within PCCreate().
776 
777    Input Parameter:
778 .  pc - the preconditioner context
779 
780    Application Interface Routine: PCCreate()
781 */
782 
/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires you to provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.
788 
   Options Database Keys:
   Multigrid options (inherited)
791 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
792 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
793 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
794 -  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
795 
796    ML options:
797 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
798 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
799 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
800 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
801 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
802 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
803 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
804 
805    Level: intermediate
806 
807   Concepts: multigrid
808 
809 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
811            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
812            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
813            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
814 M*/
815 
816 EXTERN_C_BEGIN
817 #undef __FUNCT__
818 #define __FUNCT__ "PCCreate_ML"
819 PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
820 {
821   PetscErrorCode  ierr;
822   PC_ML           *pc_ml;
823   PC_MG           *mg;
824 
825   PetscFunctionBegin;
826   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
827   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
828   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
829 
830   /* create a supporting struct and attach it to pc */
831   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
832   mg = (PC_MG*)pc->data;
833   mg->innerctx = pc_ml;
834 
835   pc_ml->ml_object     = 0;
836   pc_ml->agg_object    = 0;
837   pc_ml->gridctx       = 0;
838   pc_ml->PetscMLdata   = 0;
839   pc_ml->Nlevels       = -1;
840   pc_ml->MaxNlevels    = 10;
841   pc_ml->MaxCoarseSize = 1;
842   pc_ml->CoarsenScheme = 1;
843   pc_ml->Threshold     = 0.0;
844   pc_ml->DampingFactor = 4.0/3.0;
845   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
846   pc_ml->size          = 0;
847 
848   /* overwrite the pointers of PCMG by the functions of PCML */
849   pc->ops->setfromoptions = PCSetFromOptions_ML;
850   pc->ops->setup          = PCSetUp_ML;
851   pc->ops->destroy        = PCDestroy_ML;
852   PetscFunctionReturn(0);
853 }
854 EXTERN_C_END
855