1 
2 /*
3    Provides an interface to the ML smoothed aggregation algebraic multigrid preconditioner
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 EXTERN_C_END
20 
21 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
22 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
23 
24 /* The context (data structure) at each grid level */
25 typedef struct {
26   Vec        x,b,r;           /* global vectors */
27   Mat        A,P,R;
28   KSP        ksp;
29 } GridCtx;
30 
31 /* The context used to input PETSc matrix into ML at fine grid */
32 typedef struct {
33   Mat          A;      /* PETSc matrix in AIJ format */
34   Mat          Aloc;   /* local portion of A to be used by ML */
35   Vec          x,y;
36   ML_Operator  *mlmat;
37   PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
38 } FineGridCtx;
39 
40 /* The context that associates an ML matrix with a PETSc shell matrix */
41 typedef struct {
42   Mat          A;       /* PETSc shell matrix associated with mlmat */
43   ML_Operator  *mlmat;  /* ML matrix associated with A */
44   Vec          y, work;
45 } Mat_MLShell;
46 
47 /* Private context for the ML preconditioner */
48 typedef struct {
49   ML                *ml_object;
50   ML_Aggregate      *agg_object;
51   GridCtx           *gridctx;
52   FineGridCtx       *PetscMLdata;
53   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
54   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol;
55   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
56   PetscBool         reuse_interpolation;
57   PCMLNullSpaceType nulltype;
58   PetscMPIInt       size; /* size of communicator for pc->pmat */
59 } PC_ML;
60 
61 #undef __FUNCT__
62 #define __FUNCT__ "PetscML_getrow"
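/*
   Getrow callback given to ML for the fine-grid matrix: copies the requested rows of the
   sequential matrix Aloc (Mat_SeqAIJ storage) into the column/value buffers provided by ML.
   Returns 0 if the supplied buffer is too small and 1 on success, as ML expects.
*/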
63 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
64 {
65   PetscErrorCode ierr;
66   PetscInt       m,i,j,k=0,row,*aj;
67   PetscScalar    *aa;
68   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
69   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
70 
71   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
72   for (i = 0; i<N_requested_rows; i++) {
73     row   = requested_rows[i];
74     row_lengths[i] = a->ilen[row];
75     if (allocated_space < k+row_lengths[i]) return(0);
76     if ( (row >= 0) && (row <= (m-1)) ) {
77       aj = a->j + a->i[row];
78       aa = a->a + a->i[row];
79       for (j=0; j<row_lengths[i]; j++){
80         columns[k]  = aj[j];
81         values[k++] = aa[j];
82       }
83     }
84   }
85   return(1);
86 }
87 
88 #undef __FUNCT__
89 #define __FUNCT__ "PetscML_comm"
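/*
   Communication callback given to ML: in parallel, scatters the locally owned part of the
   vector p into its ghost region (the trailing entries beyond the local length) using the
   matrix's existing VecScatter, so subsequent local products see current off-process values.
*/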
90 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
91 {
92   PetscErrorCode ierr;
93   FineGridCtx    *ml=(FineGridCtx*)ML_data;
94   Mat            A=ml->A;
95   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
96   PetscMPIInt    size;
97   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
98   PetscScalar    *array;
99 
100   PetscFunctionBegin;
101   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
102   if (size == 1) PetscFunctionReturn(0);
103 
104   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
105   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
106   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
107   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
108   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
109   for (i=in_length; i<out_length; i++){
110     p[i] = array[i-in_length];
111   }
112   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
113   PetscFunctionReturn(0);
114 }
115 
116 #undef __FUNCT__
117 #define __FUNCT__ "PetscML_matvec"
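/*
   Matrix-vector product callback given to ML for the fine-grid operator: updates ghost values
   via PetscML_comm() when running in parallel, then applies the local matrix Aloc and writes
   the result into ap.
*/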
118 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
119 {
120   PetscErrorCode ierr;
121   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
122   Mat            A=ml->A, Aloc=ml->Aloc;
123   PetscMPIInt    size;
124   PetscScalar    *pwork=ml->pwork;
125   PetscInt       i;
126 
127   PetscFunctionBegin;
128   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
129   if (size == 1){
130     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
131   } else {
132     for (i=0; i<in_length; i++) pwork[i] = p[i];
133     PetscML_comm(pwork,ml);
134     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
135   }
136   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
137   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
138   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
139   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
140   PetscFunctionReturn(0);
141 }
142 
143 #undef __FUNCT__
144 #define __FUNCT__ "MatMult_ML"
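/* MatMult for the shell matrix that wraps an ML_Operator: applies the ML operator to the array of x and stores the result in y */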
145 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
146 {
147   PetscErrorCode   ierr;
148   Mat_MLShell      *shell;
149   PetscScalar      *xarray,*yarray;
150   PetscInt         x_length,y_length;
151 
152   PetscFunctionBegin;
153   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
154   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
155   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
156   x_length = shell->mlmat->invec_leng;
157   y_length = shell->mlmat->outvec_leng;
158   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
159   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
160   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
161   PetscFunctionReturn(0);
162 }
163 
164 #undef __FUNCT__
165 #define __FUNCT__ "MatMultAdd_ML"
166 /* Computes y = w + A * x
167    It is possible that w == y, but not x == y
168 */
169 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
170 {
171   Mat_MLShell   *shell;
172   PetscScalar   *xarray,*yarray;
173   PetscInt       x_length,y_length;
174   PetscErrorCode ierr;
175 
176   PetscFunctionBegin;
177   ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
178   if (y == w) {
179     if (!shell->work) {
180       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
181     }
182     ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
183     ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
184     x_length = shell->mlmat->invec_leng;
185     y_length = shell->mlmat->outvec_leng;
186     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
187     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
188     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
189     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
190   } else {
191     ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
192     ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
193     x_length = shell->mlmat->invec_leng;
194     y_length = shell->mlmat->outvec_leng;
195     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
196     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
197     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
198     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
199   }
200   PetscFunctionReturn(0);
201 }
202 
203 /* newtype is ignored because this routine handles only one conversion */
204 #undef __FUNCT__
205 #define __FUNCT__ "MatConvert_MPIAIJ_ML"
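/*
   Merges the diagonal (A) and off-diagonal (B) blocks of a parallel MPIAIJ matrix into a single
   sequential AIJ matrix Aloc, with off-process columns numbered after the local columns. This
   local matrix is what the ML callbacks above operate on.
*/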
206 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
207 {
208   PetscErrorCode  ierr;
209   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
210   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
211   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
212   PetscScalar     *aa=a->a,*ba=b->a,*ca;
213   PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
214   PetscInt        *ci,*cj,ncols;
215 
216   PetscFunctionBegin;
217   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
218 
219   if (scall == MAT_INITIAL_MATRIX){
220     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
221     ci[0] = 0;
222     for (i=0; i<am; i++){
223       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
224     }
225     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
226     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
227 
228     k = 0;
229     for (i=0; i<am; i++){
230       /* diagonal portion of A */
231       ncols = ai[i+1] - ai[i];
232       for (j=0; j<ncols; j++) {
233         cj[k]   = *aj++;
234         ca[k++] = *aa++;
235       }
236       /* off-diagonal portion of A */
237       ncols = bi[i+1] - bi[i];
238       for (j=0; j<ncols; j++) {
239         cj[k]   = an + (*bj); bj++;
240         ca[k++] = *ba++;
241       }
242     }
243     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
244 
245     /* put together the new matrix */
246     an = mpimat->A->cmap->n+mpimat->B->cmap->n;
247     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
248 
249     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
250     /* Since these are PETSc arrays, change flags to free them as necessary. */
251     mat = (Mat_SeqAIJ*)(*Aloc)->data;
252     mat->free_a       = PETSC_TRUE;
253     mat->free_ij      = PETSC_TRUE;
254 
255     mat->nonew    = 0;
256   } else if (scall == MAT_REUSE_MATRIX){
257     mat=(Mat_SeqAIJ*)(*Aloc)->data;
258     ci = mat->i; cj = mat->j; ca = mat->a;
259     for (i=0; i<am; i++) {
260       /* diagonal portion of A */
261       ncols = ai[i+1] - ai[i];
262       for (j=0; j<ncols; j++) *ca++ = *aa++;
263       /* off-diagonal portion of A */
264       ncols = bi[i+1] - bi[i];
265       for (j=0; j<ncols; j++) *ca++ = *ba++;
266     }
267   } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
268   PetscFunctionReturn(0);
269 }
270 
271 extern PetscErrorCode MatDestroy_Shell(Mat);
272 #undef __FUNCT__
273 #define __FUNCT__ "MatDestroy_ML"
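/* Destroys the shell matrix created by MatWrapML_SHELL(): frees the wrapper's vectors and context, then calls the standard shell destroy */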
274 static PetscErrorCode MatDestroy_ML(Mat A)
275 {
276   PetscErrorCode ierr;
277   Mat_MLShell    *shell;
278 
279   PetscFunctionBegin;
280   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
281   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
282   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
283   ierr = PetscFree(shell);CHKERRQ(ierr);
284   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
285   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
286   PetscFunctionReturn(0);
287 }
288 
289 #undef __FUNCT__
290 #define __FUNCT__ "MatWrapML_SeqAIJ"
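/*
   Wraps an ML operator as a PETSc SeqAIJ matrix. Rectangular operators (P, R) are stored by ML
   in CSR format and their arrays are handed directly to the SeqAIJ matrix; square operators (A)
   are stored in MSR format and their entries are copied row by row.
*/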
291 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
292 {
293   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
294   PetscErrorCode        ierr;
295   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
296   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
297   PetscScalar           *ml_vals=matdata->values,*aa;
298 
299   PetscFunctionBegin;
300   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
301   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
302     if (reuse){
303       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
304       aij->i = ml_rowptr;
305       aij->j = ml_cols;
306       aij->a = ml_vals;
307     } else {
308       /* sort ml_cols and ml_vals */
309       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
310       for (i=0; i<m; i++) {
311         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
312       }
313       aj = ml_cols; aa = ml_vals;
314       for (i=0; i<m; i++){
315         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
316         aj += nnz[i]; aa += nnz[i];
317       }
318       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
319       ierr = PetscFree(nnz);CHKERRQ(ierr);
320     }
321     PetscFunctionReturn(0);
322   }
323 
324   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
325   if (reuse) {
326     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
327   } else {
328     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
329     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
330     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
331 
332     ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
333     nz_max = 1;
334     for (i=0; i<m; i++) {
335       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
336       if (nnz[i] > nz_max) nz_max = nnz[i];
337     }
338     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
339   }
340   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
341   for (i=0; i<m; i++) {
342     PetscInt ncols;
343     k = 0;
344     /* diagonal entry */
345     aj[k] = i; aa[k++] = ml_vals[i];
346     /* off diagonal entries */
347     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
348       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
349     }
350     ncols = ml_cols[i+1] - ml_cols[i] + 1;
351     /* sort aj and aa */
352     ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr);
353     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
354   }
355   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
356   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
357 
358   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
359   ierr = PetscFree(nnz);CHKERRQ(ierr);
360   PetscFunctionReturn(0);
361 }
362 
363 #undef __FUNCT__
364 #define __FUNCT__ "MatWrapML_SHELL"
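/*
   Wraps an ML operator as a PETSc shell matrix whose MatMult/MatMultAdd call ML_Operator_Apply();
   used for the P and R operators in parallel runs.
*/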
365 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
366 {
367   PetscErrorCode ierr;
368   PetscInt       m,n;
369   ML_Comm        *MLcomm;
370   Mat_MLShell    *shellctx;
371 
372   PetscFunctionBegin;
373   m = mlmat->outvec_leng;
374   n = mlmat->invec_leng;
375 
376   if (reuse){
377     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
378     shellctx->mlmat = mlmat;
379     PetscFunctionReturn(0);
380   }
381 
382   MLcomm = mlmat->comm;
383   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
384   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
385   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
386   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
387   shellctx->A         = *newmat;
388   shellctx->mlmat     = mlmat;
389   shellctx->work      = PETSC_NULL;
390   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
391   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
392   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
393   (*newmat)->ops->destroy = MatDestroy_ML;
394   PetscFunctionReturn(0);
395 }
396 
397 #undef __FUNCT__
398 #define __FUNCT__ "MatWrapML_MPIAIJ"
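/*
   Copies a square ML operator (MSR format) into a PETSc MPIAIJ matrix, remapping the local row
   and column indices to ML's global numbering obtained from ML_build_global_numbering().
*/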
399 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
400 {
401   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
402   PetscInt              *ml_cols=matdata->columns,*aj;
403   PetscScalar           *ml_vals=matdata->values,*aa;
404   PetscErrorCode        ierr;
405   PetscInt              i,j,k,*gordering;
406   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
407   Mat                   A;
408 
409   PetscFunctionBegin;
410   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
411   n = mlmat->invec_leng;
412   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
413 
414   if (reuse) {
415     A = *newmat;
416     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
417   } else {
418     PetscInt *nnzA,*nnzB,*nnz;
419     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
420     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
421     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
422     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
423 
424     nz_max = 0;
425     for (i=0; i<m; i++){
426       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
427       if (nz_max < nnz[i]) nz_max = nnz[i];
428       nnzA[i] = 1; /* diag */
429       for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
430         if (ml_cols[j] < m) nnzA[i]++;
431       }
432       nnzB[i] = nnz[i] - nnzA[i];
433     }
434     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
435     ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
436   }
437 
438   /* insert mat values -- remap row and column indices */
439   nz_max++;
440   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
441   /* create global row numbering for a ML_Operator */
442   ML_build_global_numbering(mlmat,&gordering,"rows");
443   for (i=0; i<m; i++) {
444     PetscInt ncols;
445     row = gordering[i];
446     k = 0;
447     /* diagonal entry */
448     aj[k] = row; aa[k++] = ml_vals[i];
449     /* off diagonal entries */
450     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
451       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
452     }
453     ncols = ml_cols[i+1] - ml_cols[i] + 1;
454     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
455   }
456   ML_free(gordering);
457   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
458   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
459   *newmat = A;
460 
461   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
462   PetscFunctionReturn(0);
463 }
464 
465 /* -----------------------------------------------------------------------------*/
466 #undef __FUNCT__
467 #define __FUNCT__ "PCReset_ML"
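/* Frees the ML hierarchy, the fine-grid callback context, and all per-level matrices and vectors */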
468 PetscErrorCode PCReset_ML(PC pc)
469 {
470   PetscErrorCode  ierr;
471   PC_MG           *mg = (PC_MG*)pc->data;
472   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
473   PetscInt        level,fine_level=pc_ml->Nlevels-1;
474 
475   PetscFunctionBegin;
476   ML_Aggregate_Destroy(&pc_ml->agg_object);
477   ML_Destroy(&pc_ml->ml_object);
478 
479   if (pc_ml->PetscMLdata) {
480     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
481     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
482     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
483     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
484   }
485   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
486 
487   if (pc_ml->gridctx) {
488     for (level=0; level<fine_level; level++){
489       if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
490       if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
491       if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
492       if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
493       if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
494       if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
495     }
496   }
497   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
498   PetscFunctionReturn(0);
499 }
500 /* -------------------------------------------------------------------------- */
501 /*
502    PCSetUp_ML - Prepares for the use of the ML preconditioner
503                     by setting data structures and options.
504 
505    Input Parameter:
506 .  pc - the preconditioner context
507 
508    Application Interface Routine: PCSetUp()
509 
510    Notes:
511    The interface routine PCSetUp() is not usually called directly by
512    the user, but instead is called by PCApply() if necessary.
513 */
514 extern PetscErrorCode PCSetFromOptions_MG(PC);
515 extern PetscErrorCode PCReset_MG(PC);
516 
517 #undef __FUNCT__
518 #define __FUNCT__ "PCSetUp_ML"
519 PetscErrorCode PCSetUp_ML(PC pc)
520 {
521   PetscErrorCode  ierr;
522   PetscMPIInt     size;
523   FineGridCtx     *PetscMLdata;
524   ML              *ml_object;
525   ML_Aggregate    *agg_object;
526   ML_Operator     *mlmat;
527   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
528   Mat             A,Aloc;
529   GridCtx         *gridctx;
530   PC_MG           *mg = (PC_MG*)pc->data;
531   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
532   PetscBool       isSeq, isMPI;
533   KSP             smoother;
534   PC              subpc;
535   PetscInt        mesh_level, old_mesh_level;
536 
537   PetscFunctionBegin;
538   A = pc->pmat;
539   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
540 
541   if (pc->setupcalled) {
542     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
543       /*
544        Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is cheaper for
545        repeated solves in which the matrix does not change too quickly.
546        */
547       ml_object = pc_ml->ml_object;
548       gridctx = pc_ml->gridctx;
549       Nlevels = pc_ml->Nlevels;
550       fine_level = Nlevels - 1;
551       gridctx[fine_level].A = A;
552 
553       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
554       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
555       if (isMPI){
556         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
557       } else if (isSeq) {
558         Aloc = A;
559         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
560       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
561 
562       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
563       PetscMLdata = pc_ml->PetscMLdata;
564       ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
565       PetscMLdata->A    = A;
566       PetscMLdata->Aloc = Aloc;
567       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
568       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
569 
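      /* Walk down the existing hierarchy and regenerate each coarse-level operator by a Galerkin
         product with the reused restriction/prolongation (ML_Gen_AmatrixRAP) */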
570       mesh_level = ml_object->ML_finest_level;
571       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
572         old_mesh_level = mesh_level;
573         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
574 
575         /* clean and regenerate A */
576         mlmat = &(ml_object->Amat[mesh_level]);
577         ML_Operator_Clean(mlmat);
578         ML_Operator_Init(mlmat,ml_object->comm);
579         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
580       }
581 
582       level = fine_level - 1;
583       if (size == 1) { /* re-wrap the ML coarse operators into SeqAIJ format (P and R are reused) */
584         for (mllevel=1; mllevel<Nlevels; mllevel++){
585           mlmat = &(ml_object->Amat[mllevel]);
586           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
587           level--;
588         }
589       } else { /* re-wrap the ML coarse operators into MPIAIJ format (P and R are reused) */
590         for (mllevel=1; mllevel<Nlevels; mllevel++){
591           mlmat  = &(ml_object->Amat[mllevel]);
592           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
593           level--;
594         }
595       }
596 
597       for (level=0; level<fine_level; level++) {
598         if (level > 0){
599           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
600         }
601         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
602       }
603       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
604       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
605 
606       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
607       PetscFunctionReturn(0);
608     } else {
609       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
610       ierr = PCReset_ML(pc);CHKERRQ(ierr);
611       ierr = PCReset_MG(pc);CHKERRQ(ierr);
612     }
613   }
614 
615   /* setup special features of PCML */
616   /*--------------------------------*/
617   /* convert A to Aloc to be used by ML at the fine grid */
618   pc_ml->size = size;
619   ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
620   ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
621   if (isMPI){
622     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
623   } else if (isSeq) {
624     Aloc = A;
625     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
626   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
627 
628   /* create and initialize struct 'PetscMLdata' */
629   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
630   pc_ml->PetscMLdata = PetscMLdata;
631   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
632 
633   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
634   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
635   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
636 
637   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
638   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
639   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
640   PetscMLdata->A    = A;
641   PetscMLdata->Aloc = Aloc;
642 
643   /* create ML discretization matrix at fine grid */
644   /* ML requires input of fine-grid matrix. It determines nlevels. */
645   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
646   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
647   ML_Create(&ml_object,pc_ml->MaxNlevels);
648   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
649   pc_ml->ml_object = ml_object;
650   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
651   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
652   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
653 
654   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
655 
656   /* aggregation */
657   ML_Aggregate_Create(&agg_object);
658   pc_ml->agg_object = agg_object;
659 
660   {
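  /* Provide near null space information to ML's aggregation: either user-supplied vectors from
     MatSetNearNullSpace(), ML's default block null space of size bs, or the default scalar null space. */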
661     MatNullSpace mnull;
662     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
663     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
664       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
665       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
666       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
667     }
668     switch (pc_ml->nulltype) {
669     case PCML_NULLSPACE_USER: {
670       PetscScalar *nullvec;
671       const PetscScalar *v;
672       PetscBool has_const;
673       PetscInt i,j,mlocal,nvec,M;
674       const Vec *vecs;
675       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
676       ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
677       ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
678       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
679       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
680       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
681       for (i=0; i<nvec; i++) {
682         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
683         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
684         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
685       }
686       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
687       ierr = PetscFree(nullvec);CHKERRQ(ierr);
688     } break;
689     case PCML_NULLSPACE_BLOCK:
690       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
691       break;
692     case PCML_NULLSPACE_SCALAR:
693       break;
694     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
695     }
696   }
697   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
698   /* set options */
699   switch (pc_ml->CoarsenScheme) {
700   case 1:
701     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
702   case 2:
703     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
704   case 3:
705     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
706   }
707   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
708   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
709   if (pc_ml->SpectralNormScheme_Anorm){
710     ML_Set_SpectralNormScheme_Anorm(ml_object);
711   }
712   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
713   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
714   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
715   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
716   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
717   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
718 
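  /* Generate the multigrid hierarchy; ML returns the number of levels it actually created */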
719   if (pc_ml->OldHierarchy) {
720     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
721   } else {
722     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
723   }
724   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
725   pc_ml->Nlevels = Nlevels;
726   fine_level = Nlevels - 1;
727 
728   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
729   /* set default smoothers */
730   for (level=1; level<=fine_level; level++){
731     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
732     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
733     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
734     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
735   }
736   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
737   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
738   ierr = PetscOptionsEnd();CHKERRQ(ierr);
739 
740   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
741   pc_ml->gridctx = gridctx;
742 
743   /* wrap the ML coarse-grid matrices as PETSc matrices (SeqAIJ in serial; shell and MPIAIJ in parallel).
744      Level 0 is the finest grid for ML, but the coarsest for PETSc! */
745   gridctx[fine_level].A = A;
746 
747   level = fine_level - 1;
748   if (size == 1){ /* convert ML P, R and A into seqaij format */
749     for (mllevel=1; mllevel<Nlevels; mllevel++){
750       mlmat = &(ml_object->Pmat[mllevel]);
751       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
752       mlmat = &(ml_object->Rmat[mllevel-1]);
753       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
754 
755       mlmat = &(ml_object->Amat[mllevel]);
756       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
757       level--;
758     }
759   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
760     for (mllevel=1; mllevel<Nlevels; mllevel++){
761       mlmat  = &(ml_object->Pmat[mllevel]);
762       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
763       mlmat  = &(ml_object->Rmat[mllevel-1]);
764       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
765 
766       mlmat  = &(ml_object->Amat[mllevel]);
767       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
768       level--;
769     }
770   }
771 
772   /* create vectors and ksp at all levels */
773   for (level=0; level<fine_level; level++){
774     level1 = level + 1;
775     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
776     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
777     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
778     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
779 
780     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
781     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
782     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
783     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
784 
785     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
786     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
787     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
788     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
789 
790     if (level == 0){
791       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
792     } else {
793       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
794     }
795   }
796   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
797 
798   /* create coarse level and the interpolation between the levels */
799   for (level=0; level<fine_level; level++){
800     level1 = level + 1;
801     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
802     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
803     if (level > 0){
804       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
805     }
806     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
807   }
808   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
809   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
810 
811   /* setupcalled is set to 0 so that MG is set up from scratch */
812   pc->setupcalled = 0;
813   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
814   PetscFunctionReturn(0);
815 }
816 
817 /* -------------------------------------------------------------------------- */
818 /*
819    PCDestroy_ML - Destroys the private context for the ML preconditioner
820    that was created with PCCreate_ML().
821 
822    Input Parameter:
823 .  pc - the preconditioner context
824 
825    Application Interface Routine: PCDestroy()
826 */
827 #undef __FUNCT__
828 #define __FUNCT__ "PCDestroy_ML"
829 PetscErrorCode PCDestroy_ML(PC pc)
830 {
831   PetscErrorCode  ierr;
832   PC_MG           *mg = (PC_MG*)pc->data;
833   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
834 
835   PetscFunctionBegin;
836   ierr = PCReset_ML(pc);CHKERRQ(ierr);
837   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
838   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
839   PetscFunctionReturn(0);
840 }
841 
842 #undef __FUNCT__
843 #define __FUNCT__ "PCSetFromOptions_ML"
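/* Processes the -pc_ml_* options database entries and records the choices in the PC_ML context */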
844 PetscErrorCode PCSetFromOptions_ML(PC pc)
845 {
846   PetscErrorCode  ierr;
847   PetscInt        indx,PrintLevel;
848   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
849   PC_MG           *mg = (PC_MG*)pc->data;
850   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
851   PetscMPIInt     size;
852   MPI_Comm        comm = ((PetscObject)pc)->comm;
853 
854   PetscFunctionBegin;
855   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
856   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
857   PrintLevel    = 0;
858   indx          = 0;
859   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
860   ML_Set_PrintLevel(PrintLevel);
861   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
862   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
863   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
864   pc_ml->CoarsenScheme = indx;
865   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
866   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
867   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
868   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
869   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
870   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr);
871   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
872   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
873   /*
874     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
875     This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions here.
876 
877     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
878     combination of options and ML's exit(1) explanations don't help matters.
879   */
880   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
881   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
882   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
883   if (pc_ml->EnergyMinimization) {
884     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
885   }
886   if (pc_ml->EnergyMinimization == 2) {
887     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
888     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
889   }
890   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
891   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
892   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
893   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
894   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
895   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
896   /*
897     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
898     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
899     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
900     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
901     this context, but ML doesn't provide a way to find out which ones.
902    */
903   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
904   ierr = PetscOptionsTail();CHKERRQ(ierr);
905   PetscFunctionReturn(0);
906 }
907 
908 /* -------------------------------------------------------------------------- */
909 /*
910    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
911    and sets this as the private data within the generic preconditioning
912    context, PC, that was created within PCCreate().
913 
914    Input Parameter:
915 .  pc - the preconditioner context
916 
917    Application Interface Routine: PCCreate()
918 */
919 
920 /*MC
921      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
922        a fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
923        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
924        and the restriction/interpolation operators wrapped as PETSc shell matrices.
925 
926    Options Database Keys:
927    Multigrid options (inherited):
928 +  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
929 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
930 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
931 .  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
932    ML options:
933 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
934 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
935 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
936 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
937 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
938 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
939 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
940 
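   Notes:
   A typical command line invocation (the executable name here is only illustrative) is
.vb
     ./app -ksp_type cg -pc_type ml -pc_ml_maxNlevels 10 -pc_mg_smoothdown 1
.ve
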
941    Level: intermediate
942 
943   Concepts: multigrid
944 
945 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
946            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
947            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
948            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
949            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
950 M*/
951 
952 EXTERN_C_BEGIN
953 #undef __FUNCT__
954 #define __FUNCT__ "PCCreate_ML"
955 PetscErrorCode  PCCreate_ML(PC pc)
956 {
957   PetscErrorCode  ierr;
958   PC_ML           *pc_ml;
959   PC_MG           *mg;
960 
961   PetscFunctionBegin;
962   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
963   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
964   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
965   /* Since PCMG tries to use the DM associated with the PC, we must delete it here */
966   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
967   mg = (PC_MG*)pc->data;
968   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
969 
970   /* create a supporting struct and attach it to pc */
971   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
972   mg->innerctx = pc_ml;
973 
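  /* default ML parameter values; these can be changed with the -pc_ml_* options handled in PCSetFromOptions_ML() */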
974   pc_ml->ml_object     = 0;
975   pc_ml->agg_object    = 0;
976   pc_ml->gridctx       = 0;
977   pc_ml->PetscMLdata   = 0;
978   pc_ml->Nlevels       = -1;
979   pc_ml->MaxNlevels    = 10;
980   pc_ml->MaxCoarseSize = 1;
981   pc_ml->CoarsenScheme = 1;
982   pc_ml->Threshold     = 0.0;
983   pc_ml->DampingFactor = 4.0/3.0;
984   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
985   pc_ml->size          = 0;
986 
987   /* overwrite the pointers of PCMG by the functions of PCML */
988   pc->ops->setfromoptions = PCSetFromOptions_ML;
989   pc->ops->setup          = PCSetUp_ML;
990   pc->ops->reset          = PCReset_ML;
991   pc->ops->destroy        = PCDestroy_ML;
992   PetscFunctionReturn(0);
993 }
994 EXTERN_C_END
995