xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision f22f69f01ec1ca0bb29797ff3752b65f1e1c47db)
1 #define PETSCKSP_DLL
2 
3 /*
4    Provides an interface to the ML smoothed Aggregation
5    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
6                                     Jed Brown, see [PETSC #18321, #18449].
7 */
8 #include "private/pcimpl.h"   /*I "petscpc.h" I*/
9 #include "../src/ksp/pc/impls/mg/mgimpl.h"                    /*I "petscmg.h" I*/
10 #include "../src/mat/impls/aij/seq/aij.h"
11 #include "../src/mat/impls/aij/mpi/mpiaij.h"
12 
13 #include <math.h>
14 EXTERN_C_BEGIN
15 /* HAVE_CONFIG_H flag is required by ML include files */
16 #if !defined(HAVE_CONFIG_H)
17 #define HAVE_CONFIG_H
18 #endif
19 #include "ml_include.h"
20 EXTERN_C_END
21 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global solution, right-hand-side and residual work vectors for this level */
  Mat        A,P,R;           /* level operator, prolongation (coarse->fine) and restriction (fine->coarse) */
  KSP        ksp;             /* smoother on this level (coarse-grid solver on level 0, see PCSetUp_ML) */
} GridCtx;
28 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML (equals A itself in the sequential case) */
  Vec          x,y;    /* sequential work vectors sized to Aloc's columns/A's rows (see PCSetUp_ML) */
  ML_Operator  *mlmat; /* ML operator associated with A */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() to hold local values plus ghost values */
} FineGridCtx;
37 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y;       /* work vector with the same local row size as A (created in MatWrapML_SHELL) */
} Mat_MLShell;
44 
/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;    /* ML multigrid hierarchy */
  ML_Aggregate   *agg_object;   /* ML aggregation context */
  GridCtx        *gridctx;      /* per-level PETSc objects; entry [Nlevels-1] is the finest level */
  FineGridCtx    *PetscMLdata;  /* fine-grid matrix interface handed to ML's getrow/matvec callbacks */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscTruth     SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
56 
57 #undef __FUNCT__
58 #define __FUNCT__ "PetscML_getrow"
59 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
60 {
61   PetscErrorCode ierr;
62   PetscInt       m,i,j,k=0,row,*aj;
63   PetscScalar    *aa;
64   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
65   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
66 
67 
68   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
69   for (i = 0; i<N_requested_rows; i++) {
70     row   = requested_rows[i];
71     row_lengths[i] = a->ilen[row];
72     if (allocated_space < k+row_lengths[i]) return(0);
73     if ( (row >= 0) || (row <= (m-1)) ) {
74       aj = a->j + a->i[row];
75       aa = a->a + a->i[row];
76       for (j=0; j<row_lengths[i]; j++){
77         columns[k]  = aj[j];
78         values[k++] = aa[j];
79       }
80     }
81   }
82   return(1);
83 }
84 
85 #undef __FUNCT__
86 #define __FUNCT__ "PetscML_comm"
87 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
88 {
89   PetscErrorCode ierr;
90   FineGridCtx    *ml=(FineGridCtx*)ML_data;
91   Mat            A=ml->A;
92   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
93   PetscMPIInt    size;
94   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
95   PetscScalar    *array;
96 
97   PetscFunctionBegin;
98   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
99   if (size == 1) return 0;
100 
101   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
102   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
103   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
104   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
105   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
106   for (i=in_length; i<out_length; i++){
107     p[i] = array[i-in_length];
108   }
109   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
110   PetscFunctionReturn(0);
111 }
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "PetscML_matvec"
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A=ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
126   if (size == 1){
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     PetscML_comm(pwork,ml);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 #undef __FUNCT__
141 #define __FUNCT__ "MatMult_ML"
142 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
143 {
144   PetscErrorCode   ierr;
145   Mat_MLShell      *shell;
146   PetscScalar      *xarray,*yarray;
147   PetscInt         x_length,y_length;
148 
149   PetscFunctionBegin;
150   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
151   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
152   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
153   x_length = shell->mlmat->invec_leng;
154   y_length = shell->mlmat->outvec_leng;
155   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
156   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
157   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
161 #undef __FUNCT__
162 #define __FUNCT__ "MatMultAdd_ML"
163 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
164 {
165   PetscErrorCode    ierr;
166   Mat_MLShell       *shell;
167   PetscScalar       *xarray,*yarray;
168   PetscInt          x_length,y_length;
169 
170   PetscFunctionBegin;
171   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
172   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
173   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
174   x_length = shell->mlmat->invec_leng;
175   y_length = shell->mlmat->outvec_leng;
176   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
177   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
178   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
179   ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
180   PetscFunctionReturn(0);
181 }
182 
/* newtype is ignored because "ml" is not listed under Petsc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Merge the diagonal (mpimat->A) and off-diagonal (mpimat->B) blocks of a
   local MPIAIJ matrix into a single SeqAIJ matrix for ML.  Off-diagonal
   column indices are shifted by an (the local column count) so they follow
   the diagonal block; this "local columns then ghost columns" ordering
   matches what PetscML_comm()/PetscML_matvec() produce.
   With MAT_REUSE_MATRIX only the numerical values are refreshed; the
   sparsity pattern of *Aloc is assumed unchanged.
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* row pointer of the merged matrix: row i holds the union of A's and B's row i */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    /* walk both blocks row by row, advancing the aj/aa and bj/ba cursors */
    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;  /* shift ghost columns past the local block */
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* same pattern: overwrite values only, in the same interleaved order as above */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
252 
253 extern PetscErrorCode MatDestroy_Shell(Mat);
254 #undef __FUNCT__
255 #define __FUNCT__ "MatDestroy_ML"
256 static PetscErrorCode MatDestroy_ML(Mat A)
257 {
258   PetscErrorCode ierr;
259   Mat_MLShell    *shell;
260 
261   PetscFunctionBegin;
262   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
263   ierr = VecDestroy(shell->y);CHKERRQ(ierr);
264   ierr = PetscFree(shell);CHKERRQ(ierr);
265   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
266   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
267   PetscFunctionReturn(0);
268 }
269 
270 #undef __FUNCT__
271 #define __FUNCT__ "MatWrapML_SeqAIJ"
272 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
273 {
274   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
275   PetscErrorCode        ierr;
276   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
277   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
278   PetscScalar           *ml_vals=matdata->values,*aa;
279 
280   PetscFunctionBegin;
281   if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
282   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
283     if (reuse){
284       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
285       aij->i = ml_rowptr;
286       aij->j = ml_cols;
287       aij->a = ml_vals;
288     } else {
289       /* sort ml_cols and ml_vals */
290       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
291       for (i=0; i<m; i++) {
292         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
293       }
294       aj = ml_cols; aa = ml_vals;
295       for (i=0; i<m; i++){
296         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
297         aj += nnz[i]; aa += nnz[i];
298       }
299       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
300       ierr = PetscFree(nnz);CHKERRQ(ierr);
301     }
302     PetscFunctionReturn(0);
303   }
304 
305   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
306   ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
307   ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
308   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
309 
310   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
311   nz_max = 1;
312   for (i=0; i<m; i++) {
313     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
314     if (nnz[i] > nz_max) nz_max += nnz[i];
315   }
316 
317   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
318   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
319   for (i=0; i<m; i++){
320     k = 0;
321     /* diagonal entry */
322     aj[k] = i; aa[k++] = ml_vals[i];
323     /* off diagonal entries */
324     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
325       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
326     }
327     /* sort aj and aa */
328     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
329     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
330   }
331   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
332   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
333 
334   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
335   ierr = PetscFree(nnz);CHKERRQ(ierr);
336   PetscFunctionReturn(0);
337 }
338 
339 #undef __FUNCT__
340 #define __FUNCT__ "MatWrapML_SHELL"
341 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
342 {
343   PetscErrorCode ierr;
344   PetscInt       m,n;
345   ML_Comm        *MLcomm;
346   Mat_MLShell    *shellctx;
347 
348   PetscFunctionBegin;
349   m = mlmat->outvec_leng;
350   n = mlmat->invec_leng;
351   if (!m || !n){
352     newmat = PETSC_NULL;
353     PetscFunctionReturn(0);
354   }
355 
356   if (reuse){
357     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
358     shellctx->mlmat = mlmat;
359     PetscFunctionReturn(0);
360   }
361 
362   MLcomm = mlmat->comm;
363   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
364   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
365   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
366   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
367   shellctx->A         = *newmat;
368   shellctx->mlmat     = mlmat;
369   ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
370   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
371   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
372   (*newmat)->ops->destroy = MatDestroy_ML;
373   PetscFunctionReturn(0);
374 }
375 
376 #undef __FUNCT__
377 #define __FUNCT__ "MatWrapML_MPIAIJ"
378 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
379 {
380   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
381   PetscInt              *ml_cols=matdata->columns,*aj;
382   PetscScalar           *ml_vals=matdata->values,*aa;
383   PetscErrorCode        ierr;
384   PetscInt              i,j,k,*gordering;
385   PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
386   Mat                   A;
387 
388   PetscFunctionBegin;
389   if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
390   n = mlmat->invec_leng;
391   if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
392 
393   ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
394   ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
395   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
396   ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
397 
398   nz_max = 0;
399   for (i=0; i<m; i++){
400     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
401     if (nz_max < nnz[i]) nz_max = nnz[i];
402     nnzA[i] = 1; /* diag */
403     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
404       if (ml_cols[j] < m) nnzA[i]++;
405     }
406     nnzB[i] = nnz[i] - nnzA[i];
407   }
408   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
409 
410   /* insert mat values -- remap row and column indices */
411   nz_max++;
412   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
413   /* create global row numbering for a ML_Operator */
414   ML_build_global_numbering(mlmat,&gordering,"rows");
415   for (i=0; i<m; i++){
416     row = gordering[i];
417     k = 0;
418     /* diagonal entry */
419     aj[k] = row; aa[k++] = ml_vals[i];
420     /* off diagonal entries */
421     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
422       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
423     }
424     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
425   }
426   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
427   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
428   *newmat = A;
429 
430   ierr = PetscFree3(nnzA,nnzB,nnz);
431   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
432   PetscFunctionReturn(0);
433 }
434 
435 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML_Private"
/*
   Free everything PCSetUp_ML() built: the ML hierarchy, the fine-grid
   interface context, and the per-level PETSc matrices/vectors/KSPs stored
   in gridctx.  Safe to call on a partially set-up context: the loop bound
   fine_level is negative when Nlevels==0, and PetscFree(NULL) is a no-op.
*/
PetscErrorCode PCDestroy_ML_Private(void *ptr)
{
  PetscErrorCode  ierr;
  PC_ML           *pc_ml = (PC_ML*)ptr;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    /* Aloc is a separate matrix only in parallel; in serial it aliases
       pc->pmat (see PCSetUp_ML) and must not be destroyed here */
    if (pc_ml->size > 1)      {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  /* gridctx[fine_level].A is pc->pmat (owned by the caller) and
     gridctx[0].r is never created, hence the index pattern below */
  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
467 /* -------------------------------------------------------------------------- */
468 /*
469    PCSetUp_ML - Prepares for the use of the ML preconditioner
470                     by setting data structures and options.
471 
472    Input Parameter:
473 .  pc - the preconditioner context
474 
475    Application Interface Routine: PCSetUp()
476 
477    Notes:
478    The interface routine PCSetUp() is not usually called directly by
479    the user, but instead is called by PCApply() if necessary.
480 */
481 extern PetscErrorCode PCSetFromOptions_MG(PC);
482 extern PetscErrorCode PCDestroy_MG_Private(PC);
483 
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscMPIInt     size;
  FineGridCtx     *PetscMLdata;
  ML              *ml_object;
  ML_Aggregate    *agg_object;
  ML_Operator     *mlmat;
  PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat             A,Aloc;
  GridCtx         *gridctx;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscTruth      isSeq, isMPI;
  KSP             smoother;
  PC              subpc;

  PetscFunctionBegin;
  if (pc->setupcalled){
    /* since ML can change the size of vectors/matrices at any level we must destroy everything */
    ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
    ierr = PCDestroy_MG_Private(pc);CHKERRQ(ierr);
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at fine grid */
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI){
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    /* sequential: use A directly; PCDestroy_ML_Private knows not to destroy it */
    Aloc = A;
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid matrix type for ML. ML can only handle AIJ matrices.");
  }

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  /* pwork holds local values plus ghost values for PetscML_comm() */
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  /* x spans Aloc's columns (local + ghost), y spans A's local rows */
  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create ML discretization matrix at fine grid */
  /* ML requires input of fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  /* NOTE(review): this CHKERRQ tests the stale ierr from the previous PETSc
     call; the return value of ML_Aggregate_Set_NullSpace itself is dropped */
  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options (0 = Uncoupled is ML's default, hence no case for it) */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  /* forward the remaining PC_ML options into ML's aggregate struct */
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
  /* set default smoothers (both branches are currently identical:
     Richardson + SOR on every level above the coarsest) */
  for (level=1; level<=fine_level; level++){
    if (size == 1){
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    }
  }
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat  = &(ml_object->Pmat[mllevel]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat  = &(ml_object->Rmat[mllevel-1]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat  = &(ml_object->Amat[mllevel]);
      ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    /* residual vectors live on levels 1..fine_level only */
    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  /* NOTE(review): after the loop, level == fine_level so gridctx[level].A is
     the fine-grid matrix; writing gridctx[fine_level].A would be clearer */
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is setup from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
682 
683 /* -------------------------------------------------------------------------- */
684 /*
685    PCDestroy_ML - Destroys the private context for the ML preconditioner
686    that was created with PCCreate_ML().
687 
688    Input Parameter:
689 .  pc - the preconditioner context
690 
691    Application Interface Routine: PCDestroy()
692 */
693 #undef __FUNCT__
694 #define __FUNCT__ "PCDestroy_ML"
695 PetscErrorCode PCDestroy_ML(PC pc)
696 {
697   PetscErrorCode  ierr;
698   PC_MG           *mg = (PC_MG*)pc->data;
699   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
700 
701   PetscFunctionBegin;
702   ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
703   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
704   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
705   PetscFunctionReturn(0);
706 }
707 
708 #undef __FUNCT__
709 #define __FUNCT__ "PCSetFromOptions_ML"
710 PetscErrorCode PCSetFromOptions_ML(PC pc)
711 {
712   PetscErrorCode  ierr;
713   PetscInt        indx,PrintLevel;
714   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
715   PC_MG           *mg = (PC_MG*)pc->data;
716   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
717   PetscMPIInt     size;
718 
719   PetscFunctionBegin;
720   ierr = MPI_Comm_size(((PetscObject)pc)->comm,&size);CHKERRQ(ierr);
721   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
722   PrintLevel    = 0;
723   indx          = 0;
724   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
725   ML_Set_PrintLevel(PrintLevel);
726   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
727   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
728   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
729   pc_ml->CoarsenScheme = indx;
730   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
731   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
732   ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
733   ierr = PetscOptionsTruth("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
734   ierr = PetscOptionsTruth("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
735   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
736   /*
737     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
738     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
739 
740     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
741     combination of options and ML's exit(1) explanations don't help matters.
742   */
743   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
744   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
745   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
746   if (pc_ml->EnergyMinimization) {
747     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
748   }
749   if (pc_ml->EnergyMinimization == 2) {
750     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
751     ierr = PetscOptionsTruth("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
752   }
753   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
754   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
755   ierr = PetscOptionsTruth("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
756   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
757   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
758   ierr = PetscOptionsTruth("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
759   /*
760     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
761     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
762     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
763     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
764     this context, but ML doesn't provide a way to find out which ones.
765    */
766   ierr = PetscOptionsTruth("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
767   ierr = PetscOptionsTail();CHKERRQ(ierr);
768   PetscFunctionReturn(0);
769 }
770 
771 /* -------------------------------------------------------------------------- */
772 /*
773    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
774    and sets this as the private data within the generic preconditioning
775    context, PC, that was created within PCCreate().
776 
777    Input Parameter:
778 .  pc - the preconditioner context
779 
780    Application Interface Routine: PCCreate()
781 */
782 
783 /*MC
784      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
785        a fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
786        operators are computed by ML, with the matrices converted to PETSc matrices in aij format
787        and the restriction/interpolation operators wrapped as PETSc shell matrices.
788 
789    Options Database Keys:
790    Multigrid options(inherited)
791 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
792 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
793 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
794 -  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
795 
796    ML options:
797 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
798 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
799 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
800 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
801 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
802 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
803 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
804 
805    Level: intermediate
806 
807   Concepts: multigrid
808 
809 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
810            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
811            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
812            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
813            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
814 M*/
815 
816 EXTERN_C_BEGIN
817 #undef __FUNCT__
818 #define __FUNCT__ "PCCreate_ML"
819 PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
820 {
821   PetscErrorCode  ierr;
822   PC_ML           *pc_ml;  /* private ML context, attached to the PCMG data below */
823   PC_MG           *mg;     /* the PC_MG data installed by PCSetType(pc,PCMG) */
824 
825   PetscFunctionBegin;
826   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  /* NOTE(review): the type name is set to PCML *before* PCSetType(pc,PCMG); this relies on
     the name surviving (or being restored after) PCSetType() -- confirm against PCSetType() */
827   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
828   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
829 
830   /* create a supporting struct and attach it to pc */
831   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  /* PetscNewLog() presumably zeroes the allocation, so PC_ML fields not explicitly set below
     (EnergyMinimization, BlockScaling, KeepAggInfo, Reusable, ...) start at 0/PETSC_FALSE -- TODO confirm */
832   mg = (PC_MG*)pc->data;   /* valid only after PCSetType() above has installed the PC_MG data */
833   mg->innerctx = pc_ml;    /* stash PC_ML in PCMG's inner-context slot; presumably freed by PCDestroy_ML() -- verify */
834 
835   pc_ml->ml_object     = 0;
836   pc_ml->agg_object    = 0;
837   pc_ml->gridctx       = 0;
838   pc_ml->PetscMLdata   = 0;
839   pc_ml->Nlevels       = -1;   /* -1: hierarchy not built yet; presumably set during PCSetUp_ML() -- confirm */
840   pc_ml->MaxNlevels    = 10;   /* default for -pc_ml_maxNlevels */
841   pc_ml->MaxCoarseSize = 1;    /* default for -pc_ml_maxCoarseSize */
  /* NOTE(review): PCSetFromOptions_ML defaults its EList to scheme[0] and overwrites this field
     unconditionally; verify whether index 0 or 1 is the intended CoarsenScheme default */
842   pc_ml->CoarsenScheme = 1;
843   pc_ml->Threshold     = 0.0;       /* default for -pc_ml_Threshold */
844   pc_ml->DampingFactor = 4.0/3.0;   /* default for -pc_ml_DampingFactor (the 1.33333 in the manpage) */
845   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
846   pc_ml->size          = 0;   /* presumably the communicator size, cached later -- verify against PCSetUp_ML() */
847 
848   /* overwrite the pointers of PCMG by the functions of PCML */
849   pc->ops->setfromoptions = PCSetFromOptions_ML;
850   pc->ops->setup          = PCSetUp_ML;
851   pc->ops->destroy        = PCDestroy_ML;
852   PetscFunctionReturn(0);
853 }
854 EXTERN_C_END
855