xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 78e224df856aa9ec3fb1842e2d0dab3c0cee0b7a)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 EXTERN_C_END
20 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution, rhs, residual at this level */
  Mat        A,P,R;           /* level operator, prolongation, restriction */
  KSP        ksp;             /* smoother at this level (coarse solver at level 0) */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;    /* work vectors wrapped around ML-supplied raw arrays */
  ML_Operator  *mlmat; /* ML's handle for the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* work is created lazily by MatMultAdd_ML() when y == w */
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;   /* ML hierarchy handle */
  ML_Aggregate   *agg_object;  /* ML aggregation options/state */
  GridCtx        *gridctx;     /* per-level PETSc objects, indexed coarse (0) to fine (Nlevels-1) */
  FineGridCtx    *PetscMLdata; /* fine-grid callback context handed to ML */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool      SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool      reuse_interpolation;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
56 
57 #undef __FUNCT__
58 #define __FUNCT__ "PetscML_getrow"
59 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
60 {
61   PetscErrorCode ierr;
62   PetscInt       m,i,j,k=0,row,*aj;
63   PetscScalar    *aa;
64   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
65   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
66 
67 
68   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
69   for (i = 0; i<N_requested_rows; i++) {
70     row   = requested_rows[i];
71     row_lengths[i] = a->ilen[row];
72     if (allocated_space < k+row_lengths[i]) return(0);
73     if ( (row >= 0) || (row <= (m-1)) ) {
74       aj = a->j + a->i[row];
75       aa = a->a + a->i[row];
76       for (j=0; j<row_lengths[i]; j++){
77         columns[k]  = aj[j];
78         values[k++] = aa[j];
79       }
80     }
81   }
82   return(1);
83 }
84 
#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
/*
   Communication callback for ML: given the locally-owned entries of a vector
   in p[] (first in_length = A->rmap->n entries), gathers the ghost
   (off-process) values required by ml->Aloc and appends them so that p[]
   holds out_length = Aloc->cmap->n entries on return.  No-op when the
   communicator has a single process.
*/
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  /* use p directly as the backing array of ml->y, then scatter ghosts into a->lvec */
  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  /* append the gathered ghost values after the local entries of p */
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "PetscML_matvec"
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A=ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
126   if (size == 1){
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     PetscML_comm(pwork,ml);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 #undef __FUNCT__
141 #define __FUNCT__ "MatMult_ML"
142 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
143 {
144   PetscErrorCode   ierr;
145   Mat_MLShell      *shell;
146   PetscScalar      *xarray,*yarray;
147   PetscInt         x_length,y_length;
148 
149   PetscFunctionBegin;
150   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
151   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
152   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
153   x_length = shell->mlmat->invec_leng;
154   y_length = shell->mlmat->outvec_leng;
155   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
156   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
157   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x through the wrapped ML operator.
   It is possible that w == y, but not x == y.
   When w == y, ML_Operator_Apply() cannot accumulate in place, so the ML
   result is written into a lazily-created scratch vector (shell->work)
   and then added to y; otherwise the result goes directly into y and w
   is added afterwards. */
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell   *shell;
  PetscScalar   *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      /* first use: allocate the scratch vector with y's layout */
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
199 
/* newtype is ignored because "ml" is not listed under Petsc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Merges the diagonal (mpimat->A) and off-diagonal (mpimat->B) SeqAIJ parts
   of an MPIAIJ matrix into a single local SeqAIJ matrix *Aloc, the layout ML
   expects: off-diagonal columns are shifted past the local columns (an + j).
   With MAT_REUSE_MATRIX only the numerical values are refreshed; the pattern
   built by a prior MAT_INITIAL_MATRIX call is assumed unchanged.
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* row pointers: row i gets the nonzeros of both the diag and off-diag parts */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: shift column indices past the local block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* pattern unchanged: copy only the numerical values, in the same order
       (diag part first, then off-diag part, per row) as the initial build */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
269 
270 extern PetscErrorCode MatDestroy_Shell(Mat);
271 #undef __FUNCT__
272 #define __FUNCT__ "MatDestroy_ML"
273 static PetscErrorCode MatDestroy_ML(Mat A)
274 {
275   PetscErrorCode ierr;
276   Mat_MLShell    *shell;
277 
278   PetscFunctionBegin;
279   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
280   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
281   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
282   ierr = PetscFree(shell);CHKERRQ(ierr);
283   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
284   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
285   PetscFunctionReturn(0);
286 }
287 
288 #undef __FUNCT__
289 #define __FUNCT__ "MatWrapML_SeqAIJ"
290 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
291 {
292   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
293   PetscErrorCode        ierr;
294   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
295   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
296   PetscScalar           *ml_vals=matdata->values,*aa;
297 
298   PetscFunctionBegin;
299   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
300   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
301     if (reuse){
302       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
303       aij->i = ml_rowptr;
304       aij->j = ml_cols;
305       aij->a = ml_vals;
306     } else {
307       /* sort ml_cols and ml_vals */
308       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
309       for (i=0; i<m; i++) {
310         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
311       }
312       aj = ml_cols; aa = ml_vals;
313       for (i=0; i<m; i++){
314         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
315         aj += nnz[i]; aa += nnz[i];
316       }
317       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
318       ierr = PetscFree(nnz);CHKERRQ(ierr);
319     }
320     PetscFunctionReturn(0);
321   }
322 
323   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
324   ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
325   ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
326   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
327 
328   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
329   nz_max = 1;
330   for (i=0; i<m; i++) {
331     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
332     if (nnz[i] > nz_max) nz_max += nnz[i];
333   }
334 
335   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
336   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
337   for (i=0; i<m; i++){
338     k = 0;
339     /* diagonal entry */
340     aj[k] = i; aa[k++] = ml_vals[i];
341     /* off diagonal entries */
342     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
343       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
344     }
345     /* sort aj and aa */
346     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
347     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
348   }
349   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
350   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
351 
352   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
353   ierr = PetscFree(nnz);CHKERRQ(ierr);
354   PetscFunctionReturn(0);
355 }
356 
357 #undef __FUNCT__
358 #define __FUNCT__ "MatWrapML_SHELL"
359 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
360 {
361   PetscErrorCode ierr;
362   PetscInt       m,n;
363   ML_Comm        *MLcomm;
364   Mat_MLShell    *shellctx;
365 
366   PetscFunctionBegin;
367   m = mlmat->outvec_leng;
368   n = mlmat->invec_leng;
369   if (!m || !n){
370     newmat = PETSC_NULL;
371     PetscFunctionReturn(0);
372   }
373 
374   if (reuse){
375     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
376     shellctx->mlmat = mlmat;
377     PetscFunctionReturn(0);
378   }
379 
380   MLcomm = mlmat->comm;
381   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
382   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
383   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
384   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
385   shellctx->A         = *newmat;
386   shellctx->mlmat     = mlmat;
387   shellctx->work      = PETSC_NULL;
388   ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
389   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
390   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
391   (*newmat)->ops->destroy = MatDestroy_ML;
392   PetscFunctionReturn(0);
393 }
394 
395 #undef __FUNCT__
396 #define __FUNCT__ "MatWrapML_MPIAIJ"
397 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
398 {
399   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
400   PetscInt              *ml_cols=matdata->columns,*aj;
401   PetscScalar           *ml_vals=matdata->values,*aa;
402   PetscErrorCode        ierr;
403   PetscInt              i,j,k,*gordering;
404   PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
405   Mat                   A;
406 
407   PetscFunctionBegin;
408   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
409   n = mlmat->invec_leng;
410   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
411 
412   ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
413   ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
414   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
415   ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
416 
417   nz_max = 0;
418   for (i=0; i<m; i++){
419     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
420     if (nz_max < nnz[i]) nz_max = nnz[i];
421     nnzA[i] = 1; /* diag */
422     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
423       if (ml_cols[j] < m) nnzA[i]++;
424     }
425     nnzB[i] = nnz[i] - nnzA[i];
426   }
427   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
428 
429   /* insert mat values -- remap row and column indices */
430   nz_max++;
431   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
432   /* create global row numbering for a ML_Operator */
433   ML_build_global_numbering(mlmat,&gordering,"rows");
434   for (i=0; i<m; i++){
435     row = gordering[i];
436     k = 0;
437     /* diagonal entry */
438     aj[k] = row; aa[k++] = ml_vals[i];
439     /* off diagonal entries */
440     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
441       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
442     }
443     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
444   }
445   ML_free(gordering);
446   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
447   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
448   *newmat = A;
449 
450   ierr = PetscFree3(nnzA,nnzB,nnz);
451   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
452   PetscFunctionReturn(0);
453 }
454 
455 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/*
   PCReset_ML - Destroys everything PCSetUp_ML() built: the ML hierarchy,
   the fine-grid callback context, and the per-level PETSc objects.
   Safe to call when setup never ran (guards on NULL pointers).
*/
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    /* Aloc is a separate matrix only in parallel; sequentially it aliases pc->pmat */
    if (pc_ml->size > 1)      {ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* loop stops at fine_level-1: the fine-level A is the user's pc->pmat;
       r lives at level+1 since the coarsest level (0) stores no residual */
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
490 /* -------------------------------------------------------------------------- */
491 /*
492    PCSetUp_ML - Prepares for the use of the ML preconditioner
493                     by setting data structures and options.
494 
495    Input Parameter:
496 .  pc - the preconditioner context
497 
498    Application Interface Routine: PCSetUp()
499 
500    Notes:
501    The interface routine PCSetUp() is not usually called directly by
502    the user, but instead is called by PCApply() if necessary.
503 */
504 extern PetscErrorCode PCSetFromOptions_MG(PC);
505 extern PetscErrorCode PCReset_MG(PC);
506 
507 #undef __FUNCT__
508 #define __FUNCT__ "PCSetUp_ML"
509 PetscErrorCode PCSetUp_ML(PC pc)
510 {
511   PetscErrorCode  ierr;
512   PetscMPIInt     size;
513   FineGridCtx     *PetscMLdata;
514   ML              *ml_object;
515   ML_Aggregate    *agg_object;
516   ML_Operator     *mlmat;
517   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
518   Mat             A,Aloc;
519   GridCtx         *gridctx;
520   PC_MG           *mg = (PC_MG*)pc->data;
521   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
522   PetscBool       isSeq, isMPI;
523   KSP             smoother;
524   PC              subpc;
525   PetscInt        mesh_level, old_mesh_level;
526 
527 
528   PetscFunctionBegin;
529   /* Since PCMG tries to use DM assocated with PC must delete it */
530   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
531 
532   A = pc->pmat;
533   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
534 
535   if (pc->setupcalled) {
536     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
537       /*
538        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
539        multiple solves in which the matrix is not changing too quickly.
540        */
541       ml_object = pc_ml->ml_object;
542       gridctx = pc_ml->gridctx;
543       Nlevels = pc_ml->Nlevels;
544       fine_level = Nlevels - 1;
545       gridctx[fine_level].A = A;
546 
547       ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
548       ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
549       if (isMPI){
550         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
551       } else if (isSeq) {
552         Aloc = A;
553       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
554 
555       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
556       PetscMLdata = pc_ml->PetscMLdata;
557       PetscMLdata->A    = A;
558       PetscMLdata->Aloc = Aloc;
559       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
560       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
561 
562       mesh_level = ml_object->ML_finest_level;
563       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
564         old_mesh_level = mesh_level;
565         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
566 
567         /* clean and regenerate A */
568         mlmat = &(ml_object->Amat[mesh_level]);
569         ML_Operator_Clean(mlmat);
570         ML_Operator_Init(mlmat,ml_object->comm);
571         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
572       }
573 
574       level = fine_level - 1;
575       if (size == 1) { /* convert ML P, R and A into seqaij format */
576         for (mllevel=1; mllevel<Nlevels; mllevel++){
577           mlmat = &(ml_object->Amat[mllevel]);
578           ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
579           level--;
580         }
581       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
582         for (mllevel=1; mllevel<Nlevels; mllevel++){
583           mlmat  = &(ml_object->Amat[mllevel]);
584           ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
585           level--;
586         }
587       }
588 
589       for (level=0; level<fine_level; level++) {
590         if (level > 0){
591           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
592         }
593         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
594       }
595       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
596       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
597 
598       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
599       PetscFunctionReturn(0);
600     } else {
601       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
602       ierr = PCReset_ML(pc);CHKERRQ(ierr);
603       ierr = PCReset_MG(pc);CHKERRQ(ierr);
604     }
605   }
606 
607   /* setup special features of PCML */
608   /*--------------------------------*/
609   /* covert A to Aloc to be used by ML at fine grid */
610   pc_ml->size = size;
611   ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
612   ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
613   if (isMPI){
614     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
615   } else if (isSeq) {
616     Aloc = A;
617   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
618 
619   /* create and initialize struct 'PetscMLdata' */
620   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
621   pc_ml->PetscMLdata = PetscMLdata;
622   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
623 
624   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
625   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
626   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
627 
628   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
629   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
630   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
631   PetscMLdata->A    = A;
632   PetscMLdata->Aloc = Aloc;
633 
634   /* create ML discretization matrix at fine grid */
635   /* ML requires input of fine-grid matrix. It determines nlevels. */
636   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
637   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
638   ML_Create(&ml_object,pc_ml->MaxNlevels);
639   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
640   pc_ml->ml_object = ml_object;
641   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
642   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
643   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
644 
645   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
646 
647   /* aggregation */
648   ML_Aggregate_Create(&agg_object);
649   pc_ml->agg_object = agg_object;
650 
651   ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
652   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
653   /* set options */
654   switch (pc_ml->CoarsenScheme) {
655   case 1:
656     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
657   case 2:
658     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
659   case 3:
660     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
661   }
662   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
663   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
664   if (pc_ml->SpectralNormScheme_Anorm){
665     ML_Set_SpectralNormScheme_Anorm(ml_object);
666   }
667   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
668   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
669   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
670   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
671   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
672   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
673 
674   if (pc_ml->OldHierarchy) {
675     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
676   } else {
677     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
678   }
679   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
680   pc_ml->Nlevels = Nlevels;
681   fine_level = Nlevels - 1;
682 
683   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
684   /* set default smoothers */
685   for (level=1; level<=fine_level; level++){
686     if (size == 1){
687       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
688       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
689       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
690       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
691     } else {
692       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
693       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
694       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
695       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
696     }
697   }
698   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
699 
700   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
701   pc_ml->gridctx = gridctx;
702 
703   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
704      Level 0 is the finest grid for ML, but coarsest for PETSc! */
705   gridctx[fine_level].A = A;
706 
707   level = fine_level - 1;
708   if (size == 1){ /* convert ML P, R and A into seqaij format */
709     for (mllevel=1; mllevel<Nlevels; mllevel++){
710       mlmat = &(ml_object->Pmat[mllevel]);
711       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
712       mlmat = &(ml_object->Rmat[mllevel-1]);
713       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
714 
715       mlmat = &(ml_object->Amat[mllevel]);
716       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
717       level--;
718     }
719   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
720     for (mllevel=1; mllevel<Nlevels; mllevel++){
721       mlmat  = &(ml_object->Pmat[mllevel]);
722       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
723       mlmat  = &(ml_object->Rmat[mllevel-1]);
724       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
725 
726       mlmat  = &(ml_object->Amat[mllevel]);
727       ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
728       level--;
729     }
730   }
731 
732   /* create vectors and ksp at all levels */
733   for (level=0; level<fine_level; level++){
734     level1 = level + 1;
735     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
736     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
737     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
738     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
739 
740     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
741     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
742     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
743     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
744 
745     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
746     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
747     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
748     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
749 
750     if (level == 0){
751       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
752     } else {
753       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
754     }
755   }
756   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
757 
758   /* create coarse level and the interpolation between the levels */
759   for (level=0; level<fine_level; level++){
760     level1 = level + 1;
761     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
762     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
763     if (level > 0){
764       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
765     }
766     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
767   }
768   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
769   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
770 
771   /* setupcalled is set to 0 so that MG is setup from scratch */
772   pc->setupcalled = 0;
773   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
774   PetscFunctionReturn(0);
775 }
776 
777 /* -------------------------------------------------------------------------- */
778 /*
779    PCDestroy_ML - Destroys the private context for the ML preconditioner
780    that was created with PCCreate_ML().
781 
782    Input Parameter:
783 .  pc - the preconditioner context
784 
785    Application Interface Routine: PCDestroy()
786 */
787 #undef __FUNCT__
788 #define __FUNCT__ "PCDestroy_ML"
789 PetscErrorCode PCDestroy_ML(PC pc)
790 {
791   PetscErrorCode  ierr;
792   PC_MG           *mg = (PC_MG*)pc->data;
793   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
794 
795   PetscFunctionBegin;
796   ierr = PCReset_ML(pc);CHKERRQ(ierr);
797   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
798   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
799   PetscFunctionReturn(0);
800 }
801 
802 #undef __FUNCT__
803 #define __FUNCT__ "PCSetFromOptions_ML"
804 PetscErrorCode PCSetFromOptions_ML(PC pc)
805 {
806   PetscErrorCode  ierr;
807   PetscInt        indx,PrintLevel;
808   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
809   PC_MG           *mg = (PC_MG*)pc->data;
810   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
811   PetscMPIInt     size;
812   MPI_Comm        comm = ((PetscObject)pc)->comm;
813 
814   PetscFunctionBegin;
815   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
816   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
817   PrintLevel    = 0;
818   indx          = 0;
819   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
820   ML_Set_PrintLevel(PrintLevel);
821   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
822   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
823   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
824   pc_ml->CoarsenScheme = indx;
825   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
826   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
827   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
828   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
829   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
830   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
831   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
832   /*
833     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
834     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
835 
836     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
837     combination of options and ML's exit(1) explanations don't help matters.
838   */
839   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
840   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
841   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
842   if (pc_ml->EnergyMinimization) {
843     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
844   }
845   if (pc_ml->EnergyMinimization == 2) {
846     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
847     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
848   }
849   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
850   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
851   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
852   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
853   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
854   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
855   /*
856     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
857     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
858     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
859     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
860     this context, but ML doesn't provide a way to find out which ones.
861    */
862   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
863   ierr = PetscOptionsTail();CHKERRQ(ierr);
864   PetscFunctionReturn(0);
865 }
866 
867 /* -------------------------------------------------------------------------- */
868 /*
869    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
870    and sets this as the private data within the generic preconditioning
871    context, PC, that was created within PCCreate().
872 
873    Input Parameter:
874 .  pc - the preconditioner context
875 
876    Application Interface Routine: PCCreate()
877 */
878 
879 /*MC
880      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
881        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
882        operators are computed by ML, with the matrices coverted to PETSc matrices in aij format
883        and the restriction/interpolation operators wrapped as PETSc shell matrices.
884 
885    Options Database Key:
886    Multigrid options(inherited)
887 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
888 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
889 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
891    ML options:
892 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
893 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
894 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
895 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
896 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
897 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
898 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
899 
900    Level: intermediate
901 
902   Concepts: multigrid
903 
904 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
906            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
907            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
908            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
909 M*/
910 
911 EXTERN_C_BEGIN
912 #undef __FUNCT__
913 #define __FUNCT__ "PCCreate_ML"
914 PetscErrorCode  PCCreate_ML(PC pc)
915 {
916   PetscErrorCode  ierr;
917   PC_ML           *pc_ml;
918   PC_MG           *mg;
919 
920   PetscFunctionBegin;
921   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
922   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
923   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
924   /* Since PCMG tries to use DM assocated with PC must delete it */
925   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
926   mg = (PC_MG*)pc->data;
927   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
928 
929   /* create a supporting struct and attach it to pc */
930   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
931   mg->innerctx = pc_ml;
932 
933   pc_ml->ml_object     = 0;
934   pc_ml->agg_object    = 0;
935   pc_ml->gridctx       = 0;
936   pc_ml->PetscMLdata   = 0;
937   pc_ml->Nlevels       = -1;
938   pc_ml->MaxNlevels    = 10;
939   pc_ml->MaxCoarseSize = 1;
940   pc_ml->CoarsenScheme = 1;
941   pc_ml->Threshold     = 0.0;
942   pc_ml->DampingFactor = 4.0/3.0;
943   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
944   pc_ml->size          = 0;
945 
946   /* overwrite the pointers of PCMG by the functions of PCML */
947   pc->ops->setfromoptions = PCSetFromOptions_ML;
948   pc->ops->setup          = PCSetUp_ML;
949   pc->ops->reset          = PCReset_ML;
950   pc->ops->destroy        = PCDestroy_ML;
951   PetscFunctionReturn(0);
952 }
953 EXTERN_C_END
954