xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 84df9cb40eca90ea9b18a456fab7a4ecc7f6c1a4)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 EXTERN_C_END
20 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution, right-hand side, and residual at this level */
  Mat        A,P,R;           /* level operator A, prolongation P (coarse->fine), restriction R (fine->coarse) */
  KSP        ksp;             /* smoother at this level (coarse-grid solver on level 0) */
} GridCtx;
27 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;    /* sequential work vectors; wrap raw ML arrays via VecPlaceArray() in the callbacks */
  ML_Operator  *mlmat; /* ML's handle for the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;
36 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* y: output-sized work vector; work: created lazily by MatMultAdd_ML() when y == w */
} Mat_MLShell;
43 
/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;   /* ML multigrid hierarchy handle */
  ML_Aggregate   *agg_object;  /* ML (smoothed) aggregation options and state */
  GridCtx        *gridctx;     /* per-level PETSc objects; gridctx[Nlevels-1] is the finest level */
  FineGridCtx    *PetscMLdata; /* glue context handed to the ML getrow/comm/matvec callbacks */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool      SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool      reuse_interpolation; /* reuse interpolation when re-setup sees SAME_NONZERO_PATTERN */
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
56 
57 #undef __FUNCT__
58 #define __FUNCT__ "PetscML_getrow"
59 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
60 {
61   PetscErrorCode ierr;
62   PetscInt       m,i,j,k=0,row,*aj;
63   PetscScalar    *aa;
64   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
65   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
66 
67 
68   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
69   for (i = 0; i<N_requested_rows; i++) {
70     row   = requested_rows[i];
71     row_lengths[i] = a->ilen[row];
72     if (allocated_space < k+row_lengths[i]) return(0);
73     if ( (row >= 0) || (row <= (m-1)) ) {
74       aj = a->j + a->i[row];
75       aa = a->a + a->i[row];
76       for (j=0; j<row_lengths[i]; j++){
77         columns[k]  = aj[j];
78         values[k++] = aa[j];
79       }
80     }
81   }
82   return(1);
83 }
84 
85 #undef __FUNCT__
86 #define __FUNCT__ "PetscML_comm"
87 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
88 {
89   PetscErrorCode ierr;
90   FineGridCtx    *ml=(FineGridCtx*)ML_data;
91   Mat            A=ml->A;
92   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
93   PetscMPIInt    size;
94   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
95   PetscScalar    *array;
96 
97   PetscFunctionBegin;
98   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
99   if (size == 1) return 0;
100 
101   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
102   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
103   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
104   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
105   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
106   for (i=in_length; i<out_length; i++){
107     p[i] = array[i-in_length];
108   }
109   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
110   PetscFunctionReturn(0);
111 }
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "PetscML_matvec"
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A=ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
126   if (size == 1){
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     PetscML_comm(pwork,ml);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 #undef __FUNCT__
141 #define __FUNCT__ "MatMult_ML"
142 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
143 {
144   PetscErrorCode   ierr;
145   Mat_MLShell      *shell;
146   PetscScalar      *xarray,*yarray;
147   PetscInt         x_length,y_length;
148 
149   PetscFunctionBegin;
150   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
151   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
152   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
153   x_length = shell->mlmat->invec_leng;
154   y_length = shell->mlmat->outvec_leng;
155   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
156   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
157   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
161 #undef __FUNCT__
162 #define __FUNCT__ "MatMultAdd_ML"
163 /* Computes y = w + A * x
164    It is possible that w == y, but not x == y
165 */
166 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
167 {
168   Mat_MLShell   *shell;
169   PetscScalar   *xarray,*yarray;
170   PetscInt       x_length,y_length;
171   PetscErrorCode ierr;
172 
173   PetscFunctionBegin;
174   ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
175   if (y == w) {
176     if (!shell->work) {
177       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
178     }
179     ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
180     ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
181     x_length = shell->mlmat->invec_leng;
182     y_length = shell->mlmat->outvec_leng;
183     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
184     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
185     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
186     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
187   } else {
188     ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
189     ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
190     x_length = shell->mlmat->invec_leng;
191     y_length = shell->mlmat->outvec_leng;
192     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
193     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
194     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
195     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
196   }
197   PetscFunctionReturn(0);
198 }
199 
/* newtype is ignored because "ml" is not listed under Petsc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/* Merge the diagonal (mpimat->A) and off-diagonal (mpimat->B) SeqAIJ parts of
   an MPIAIJ matrix into a single local SeqAIJ matrix *Aloc for ML.  Columns of
   the off-diagonal part are shifted by an (the local column count) so ghost
   columns occupy the range [an, an + B->cmap->n).  With MAT_REUSE_MATRIX only
   the numerical values are refreshed (nonzero pattern assumed unchanged). */
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* row pointers of the merged matrix: row i holds diag + offdiag entries */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: shift column index past the local columns */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix; total local column space = diag + ghost columns */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* same pattern as before: overwrite values in place, row by row */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
269 
270 extern PetscErrorCode MatDestroy_Shell(Mat);
271 #undef __FUNCT__
272 #define __FUNCT__ "MatDestroy_ML"
273 static PetscErrorCode MatDestroy_ML(Mat A)
274 {
275   PetscErrorCode ierr;
276   Mat_MLShell    *shell;
277 
278   PetscFunctionBegin;
279   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
280   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
281   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
282   ierr = PetscFree(shell);CHKERRQ(ierr);
283   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
284   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
285   PetscFunctionReturn(0);
286 }
287 
288 #undef __FUNCT__
289 #define __FUNCT__ "MatWrapML_SeqAIJ"
290 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
291 {
292   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
293   PetscErrorCode        ierr;
294   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
295   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
296   PetscScalar           *ml_vals=matdata->values,*aa;
297 
298   PetscFunctionBegin;
299   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
300   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
301     if (reuse){
302       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
303       aij->i = ml_rowptr;
304       aij->j = ml_cols;
305       aij->a = ml_vals;
306     } else {
307       /* sort ml_cols and ml_vals */
308       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
309       for (i=0; i<m; i++) {
310         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
311       }
312       aj = ml_cols; aa = ml_vals;
313       for (i=0; i<m; i++){
314         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
315         aj += nnz[i]; aa += nnz[i];
316       }
317       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
318       ierr = PetscFree(nnz);CHKERRQ(ierr);
319     }
320     PetscFunctionReturn(0);
321   }
322 
323   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
324   if (reuse) {
325     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
326   } else {
327     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
328     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
329     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
330 
331     ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
332     nz_max = 1;
333     for (i=0; i<m; i++) {
334       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
335       if (nnz[i] > nz_max) nz_max = nnz[i];
336     }
337     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
338   }
339   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
340   for (i=0; i<m; i++) {
341     PetscInt ncols;
342     k = 0;
343     /* diagonal entry */
344     aj[k] = i; aa[k++] = ml_vals[i];
345     /* off diagonal entries */
346     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
347       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
348     }
349     ncols = ml_cols[i+1] - ml_cols[i] + 1;
350     /* sort aj and aa */
351     ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr);
352     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
353   }
354   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
355   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
356 
357   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
358   ierr = PetscFree(nnz);CHKERRQ(ierr);
359   PetscFunctionReturn(0);
360 }
361 
362 #undef __FUNCT__
363 #define __FUNCT__ "MatWrapML_SHELL"
364 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
365 {
366   PetscErrorCode ierr;
367   PetscInt       m,n;
368   ML_Comm        *MLcomm;
369   Mat_MLShell    *shellctx;
370 
371   PetscFunctionBegin;
372   m = mlmat->outvec_leng;
373   n = mlmat->invec_leng;
374   if (!m || !n){
375     newmat = PETSC_NULL;
376     PetscFunctionReturn(0);
377   }
378 
379   if (reuse){
380     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
381     shellctx->mlmat = mlmat;
382     PetscFunctionReturn(0);
383   }
384 
385   MLcomm = mlmat->comm;
386   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
387   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
388   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
389   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
390   shellctx->A         = *newmat;
391   shellctx->mlmat     = mlmat;
392   shellctx->work      = PETSC_NULL;
393   ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
394   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
395   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
396   (*newmat)->ops->destroy = MatDestroy_ML;
397   PetscFunctionReturn(0);
398 }
399 
400 #undef __FUNCT__
401 #define __FUNCT__ "MatWrapML_MPIAIJ"
402 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
403 {
404   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
405   PetscInt              *ml_cols=matdata->columns,*aj;
406   PetscScalar           *ml_vals=matdata->values,*aa;
407   PetscErrorCode        ierr;
408   PetscInt              i,j,k,*gordering;
409   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
410   Mat                   A;
411 
412   PetscFunctionBegin;
413   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
414   n = mlmat->invec_leng;
415   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
416 
417   if (reuse) {
418     A = *newmat;
419     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
420   } else {
421     PetscInt *nnzA,*nnzB,*nnz;
422     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
423     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
424     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
425     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
426 
427     nz_max = 0;
428     for (i=0; i<m; i++){
429       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
430       if (nz_max < nnz[i]) nz_max = nnz[i];
431       nnzA[i] = 1; /* diag */
432       for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
433         if (ml_cols[j] < m) nnzA[i]++;
434       }
435       nnzB[i] = nnz[i] - nnzA[i];
436     }
437     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
438     ierr = PetscFree3(nnzA,nnzB,nnz);
439   }
440 
441   /* insert mat values -- remap row and column indices */
442   nz_max++;
443   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
444   /* create global row numbering for a ML_Operator */
445   ML_build_global_numbering(mlmat,&gordering,"rows");
446   for (i=0; i<m; i++) {
447     PetscInt ncols;
448     row = gordering[i];
449     k = 0;
450     /* diagonal entry */
451     aj[k] = row; aa[k++] = ml_vals[i];
452     /* off diagonal entries */
453     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
454       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
455     }
456     ncols = ml_cols[i+1] - ml_cols[i] + 1;
457     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
458   }
459   ML_free(gordering);
460   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
461   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
462   *newmat = A;
463 
464   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
465   PetscFunctionReturn(0);
466 }
467 
468 /* -----------------------------------------------------------------------------*/
469 #undef __FUNCT__
470 #define __FUNCT__ "PCReset_ML"
471 PetscErrorCode PCReset_ML(PC pc)
472 {
473   PetscErrorCode  ierr;
474   PC_MG           *mg = (PC_MG*)pc->data;
475   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
476   PetscInt        level,fine_level=pc_ml->Nlevels-1;
477 
478   PetscFunctionBegin;
479   ML_Aggregate_Destroy(&pc_ml->agg_object);
480   ML_Destroy(&pc_ml->ml_object);
481 
482   if (pc_ml->PetscMLdata) {
483     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
484     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
485     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
486     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
487   }
488   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
489 
490   if (pc_ml->gridctx) {
491     for (level=0; level<fine_level; level++){
492       if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
493       if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
494       if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
495       if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
496       if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
497       if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
498     }
499   }
500   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
501   PetscFunctionReturn(0);
502 }
503 /* -------------------------------------------------------------------------- */
504 /*
505    PCSetUp_ML - Prepares for the use of the ML preconditioner
506                     by setting data structures and options.
507 
508    Input Parameter:
509 .  pc - the preconditioner context
510 
511    Application Interface Routine: PCSetUp()
512 
513    Notes:
514    The interface routine PCSetUp() is not usually called directly by
515    the user, but instead is called by PCApply() if necessary.
516 */
517 extern PetscErrorCode PCSetFromOptions_MG(PC);
518 extern PetscErrorCode PCReset_MG(PC);
519 
520 #undef __FUNCT__
521 #define __FUNCT__ "PCSetUp_ML"
522 PetscErrorCode PCSetUp_ML(PC pc)
523 {
524   PetscErrorCode  ierr;
525   PetscMPIInt     size;
526   FineGridCtx     *PetscMLdata;
527   ML              *ml_object;
528   ML_Aggregate    *agg_object;
529   ML_Operator     *mlmat;
530   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
531   Mat             A,Aloc;
532   GridCtx         *gridctx;
533   PC_MG           *mg = (PC_MG*)pc->data;
534   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
535   PetscBool       isSeq, isMPI;
536   KSP             smoother;
537   PC              subpc;
538   PetscInt        mesh_level, old_mesh_level;
539 
540 
541   PetscFunctionBegin;
542   /* Since PCMG tries to use DM assocated with PC must delete it */
543   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
544 
545   A = pc->pmat;
546   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
547 
548   if (pc->setupcalled) {
549     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
550       /*
551        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
552        multiple solves in which the matrix is not changing too quickly.
553        */
554       ml_object = pc_ml->ml_object;
555       gridctx = pc_ml->gridctx;
556       Nlevels = pc_ml->Nlevels;
557       fine_level = Nlevels - 1;
558       gridctx[fine_level].A = A;
559 
560       ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
561       ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
562       if (isMPI){
563         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
564       } else if (isSeq) {
565         Aloc = A;
566         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
567       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
568 
569       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
570       PetscMLdata = pc_ml->PetscMLdata;
571       ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
572       PetscMLdata->A    = A;
573       PetscMLdata->Aloc = Aloc;
574       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
575       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
576 
577       mesh_level = ml_object->ML_finest_level;
578       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
579         old_mesh_level = mesh_level;
580         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
581 
582         /* clean and regenerate A */
583         mlmat = &(ml_object->Amat[mesh_level]);
584         ML_Operator_Clean(mlmat);
585         ML_Operator_Init(mlmat,ml_object->comm);
586         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
587       }
588 
589       level = fine_level - 1;
590       if (size == 1) { /* convert ML P, R and A into seqaij format */
591         for (mllevel=1; mllevel<Nlevels; mllevel++){
592           mlmat = &(ml_object->Amat[mllevel]);
593           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
594           level--;
595         }
596       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
597         for (mllevel=1; mllevel<Nlevels; mllevel++){
598           mlmat  = &(ml_object->Amat[mllevel]);
599           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
600           level--;
601         }
602       }
603 
604       for (level=0; level<fine_level; level++) {
605         if (level > 0){
606           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
607         }
608         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
609       }
610       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
611       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
612 
613       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
614       PetscFunctionReturn(0);
615     } else {
616       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
617       ierr = PCReset_ML(pc);CHKERRQ(ierr);
618       ierr = PCReset_MG(pc);CHKERRQ(ierr);
619     }
620   }
621 
622   /* setup special features of PCML */
623   /*--------------------------------*/
624   /* covert A to Aloc to be used by ML at fine grid */
625   pc_ml->size = size;
626   ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
627   ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
628   if (isMPI){
629     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
630   } else if (isSeq) {
631     Aloc = A;
632     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
633   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
634 
635   /* create and initialize struct 'PetscMLdata' */
636   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
637   pc_ml->PetscMLdata = PetscMLdata;
638   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
639 
640   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
641   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
642   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
643 
644   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
645   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
646   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
647   PetscMLdata->A    = A;
648   PetscMLdata->Aloc = Aloc;
649 
650   /* create ML discretization matrix at fine grid */
651   /* ML requires input of fine-grid matrix. It determines nlevels. */
652   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
653   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
654   ML_Create(&ml_object,pc_ml->MaxNlevels);
655   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
656   pc_ml->ml_object = ml_object;
657   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
658   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
659   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
660 
661   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
662 
663   /* aggregation */
664   ML_Aggregate_Create(&agg_object);
665   pc_ml->agg_object = agg_object;
666 
667   ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
668   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
669   /* set options */
670   switch (pc_ml->CoarsenScheme) {
671   case 1:
672     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
673   case 2:
674     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
675   case 3:
676     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
677   }
678   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
679   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
680   if (pc_ml->SpectralNormScheme_Anorm){
681     ML_Set_SpectralNormScheme_Anorm(ml_object);
682   }
683   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
684   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
685   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
686   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
687   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
688   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
689 
690   if (pc_ml->OldHierarchy) {
691     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
692   } else {
693     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
694   }
695   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
696   pc_ml->Nlevels = Nlevels;
697   fine_level = Nlevels - 1;
698 
699   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
700   /* set default smoothers */
701   for (level=1; level<=fine_level; level++){
702     if (size == 1){
703       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
704       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
705       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
706       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
707     } else {
708       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
709       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
710       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
711       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
712     }
713   }
714   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
715 
716   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
717   pc_ml->gridctx = gridctx;
718 
719   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
720      Level 0 is the finest grid for ML, but coarsest for PETSc! */
721   gridctx[fine_level].A = A;
722 
723   level = fine_level - 1;
724   if (size == 1){ /* convert ML P, R and A into seqaij format */
725     for (mllevel=1; mllevel<Nlevels; mllevel++){
726       mlmat = &(ml_object->Pmat[mllevel]);
727       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
728       mlmat = &(ml_object->Rmat[mllevel-1]);
729       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
730 
731       mlmat = &(ml_object->Amat[mllevel]);
732       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
733       level--;
734     }
735   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
736     for (mllevel=1; mllevel<Nlevels; mllevel++){
737       mlmat  = &(ml_object->Pmat[mllevel]);
738       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
739       mlmat  = &(ml_object->Rmat[mllevel-1]);
740       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
741 
742       mlmat  = &(ml_object->Amat[mllevel]);
743       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
744       level--;
745     }
746   }
747 
748   /* create vectors and ksp at all levels */
749   for (level=0; level<fine_level; level++){
750     level1 = level + 1;
751     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
752     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
753     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
754     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
755 
756     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
757     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
758     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
759     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
760 
761     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
762     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
763     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
764     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
765 
766     if (level == 0){
767       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
768     } else {
769       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
770     }
771   }
772   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
773 
774   /* create coarse level and the interpolation between the levels */
775   for (level=0; level<fine_level; level++){
776     level1 = level + 1;
777     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
778     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
779     if (level > 0){
780       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
781     }
782     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
783   }
784   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
785   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
786 
787   /* setupcalled is set to 0 so that MG is setup from scratch */
788   pc->setupcalled = 0;
789   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
790   PetscFunctionReturn(0);
791 }
792 
793 /* -------------------------------------------------------------------------- */
794 /*
795    PCDestroy_ML - Destroys the private context for the ML preconditioner
796    that was created with PCCreate_ML().
797 
798    Input Parameter:
799 .  pc - the preconditioner context
800 
801    Application Interface Routine: PCDestroy()
802 */
803 #undef __FUNCT__
804 #define __FUNCT__ "PCDestroy_ML"
805 PetscErrorCode PCDestroy_ML(PC pc)
806 {
807   PetscErrorCode  ierr;
808   PC_MG           *mg = (PC_MG*)pc->data;
809   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
810 
811   PetscFunctionBegin;
812   ierr = PCReset_ML(pc);CHKERRQ(ierr);
813   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
814   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
815   PetscFunctionReturn(0);
816 }
817 
#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
/*
   PCSetFromOptions_ML - Processes the -pc_ml_* options database entries, filling in
   the PC_ML context stored inside the PCMG data.  Also validates option combinations
   that ML itself would only diagnose by calling exit(1).

   Input Parameter:
.  pc - the preconditioner context
*/
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscInt        indx,PrintLevel;
  /* names accepted by -pc_ml_CoarsenScheme; the chosen index is stored in pc_ml->CoarsenScheme */
  const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt     size;
  MPI_Comm        comm = ((PetscObject)pc)->comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel    = 0;
  indx          = 0;
  /* ML's print level is a global library setting, so it is pushed to ML immediately */
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  /* the drop tolerance is only meaningful when some energy minimization norm is selected */
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
    functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
   */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
882 
883 /* -------------------------------------------------------------------------- */
884 /*
885    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
886    and sets this as the private data within the generic preconditioning
887    context, PC, that was created within PCCreate().
888 
889    Input Parameter:
890 .  pc - the preconditioner context
891 
892    Application Interface Routine: PCCreate()
893 */
894 
895 /*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires you to provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.
900 
901    Options Database Key:
902    Multigrid options(inherited)
903 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
904 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
905 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
907    ML options:
908 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
909 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
910 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
911 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
912 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
913 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
914 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
915 
916    Level: intermediate
917 
918   Concepts: multigrid
919 
920 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
922            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
923            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
924            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
925 M*/
926 
927 EXTERN_C_BEGIN
928 #undef __FUNCT__
929 #define __FUNCT__ "PCCreate_ML"
930 PetscErrorCode  PCCreate_ML(PC pc)
931 {
932   PetscErrorCode  ierr;
933   PC_ML           *pc_ml;
934   PC_MG           *mg;
935 
936   PetscFunctionBegin;
937   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
938   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
939   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
940   /* Since PCMG tries to use DM assocated with PC must delete it */
941   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
942   mg = (PC_MG*)pc->data;
943   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
944 
945   /* create a supporting struct and attach it to pc */
946   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
947   mg->innerctx = pc_ml;
948 
949   pc_ml->ml_object     = 0;
950   pc_ml->agg_object    = 0;
951   pc_ml->gridctx       = 0;
952   pc_ml->PetscMLdata   = 0;
953   pc_ml->Nlevels       = -1;
954   pc_ml->MaxNlevels    = 10;
955   pc_ml->MaxCoarseSize = 1;
956   pc_ml->CoarsenScheme = 1;
957   pc_ml->Threshold     = 0.0;
958   pc_ml->DampingFactor = 4.0/3.0;
959   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
960   pc_ml->size          = 0;
961 
962   /* overwrite the pointers of PCMG by the functions of PCML */
963   pc->ops->setfromoptions = PCSetFromOptions_ML;
964   pc->ops->setup          = PCSetUp_ML;
965   pc->ops->reset          = PCReset_ML;
966   pc->ops->destroy        = PCDestroy_ML;
967   PetscFunctionReturn(0);
968 }
969 EXTERN_C_END
970