
/*
   Provides an interface to the ML smoothed aggregation algebraic multigrid preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                    Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
#include <petsc/private/pcmgimpl.h>                    /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscdm.h>            /* for DMDestroy(&pc->dm) hack */

EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END

typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors */
  Mat A,P,R;
  KSP ksp;
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;       /* PETSc matrix in AIJ format */
  Mat         Aloc;    /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;        /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;   /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;
  ML_Aggregate      *agg_object;
  GridCtx           *gridctx;
  FineGridCtx       *PetscMLdata;
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation;
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc;
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++) {
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}
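
/*
   A hedged sketch (comment only, not compiled) of how ML drives the getrow
   callback above: ML passes a list of row indices plus a caller-allocated
   columns/values buffer, and a return of 0 signals that allocated_space was
   too small so the caller can retry with a larger buffer. The operator
   handle ml_op and the buffer sizes below are hypothetical:

     int    rows[2] = {0,1},cols[32],lens[2];
     double vals[32];
     if (!PetscML_getrow(ml_op,2,rows,32,cols,vals,lens)) {
       .. enlarge cols/vals and call again ..
     }
*/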

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode    ierr;
  FineGridCtx       *ml = (FineGridCtx*)ML_data;
  Mat               A   = ml->A;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
  PetscMPIInt       size;
  PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  const PetscScalar *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
  ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
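
/*
   Layout assumed by PetscML_comm() above, sketched for a hypothetical rank
   owning in_length = 2 rows with out_length - in_length = 1 ghost column:

     p on entry:  [ x0 x1 ?  ]   indices 0..in_length-1 hold owned values
     p on return: [ x0 x1 g0 ]   indices in_length..out_length-1 hold ghosts

   i.e. the array is as wide as Aloc's column space, owned values first,
   scattered ghost values appended.
*/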

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A   = ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode    ierr;
  Mat_MLShell       *shell;
  PetscScalar       *yarray;
  const PetscScalar *xarray;
  PetscInt          x_length,y_length;

  PetscFunctionBegin;
  ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
  ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
  ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell       *shell;
  PetscScalar       *yarray;
  const PetscScalar *xarray;
  PetscInt          x_length,y_length;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr     = VecGetArrayRead(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar*)xarray, y_length, yarray));
    ierr = VecRestoreArrayRead(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    ierr     = VecGetArrayRead(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar *)xarray, y_length, yarray));
    ierr = VecRestoreArrayRead(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* newtype is ignored because this routine handles only one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
    ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
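
/*
   A small worked example (hypothetical sizes) of the column numbering built
   above: with an = 3 local diagonal columns and a 2-column off-diagonal block,
   a row with entries at A-columns {0,2} and B-columns {0,1} lands in the
   merged local matrix at columns {0,2,3,4}; B's columns are shifted by an so
   ML sees a single contiguous local column space of width an + B->cmap->n.
*/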

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse) {
      Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++) {
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj  += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (!reuse) {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(*newmat,mlmat->num_PDEs);CHKERRQ(ierr);

    ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
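
/*
   A minimal sketch, with made-up CSR arrays, of the zero-copy path used above
   for the rectangular P/R case: the ML arrays are handed straight to PETSc,
   which is why they must be column-sorted within each row (hence the
   PetscSortIntWithScalarArray() pass) and must outlive the Mat.

     PetscInt    i[] = {0,2,3},j[] = {0,2,1};
     PetscScalar a[] = {1.0,2.0,3.0};
     Mat         P;
     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,2,3,i,j,a,&P);CHKERRQ(ierr);
*/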

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);CHKERRQ(ierr);

  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = NULL;

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;
  PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  /* create a global row numbering for the ML_Operator */
  PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));

  nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
  ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (reuse) {
    A = *newmat;
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    PetscInt rstart;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(A,mlmat->num_PDEs);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
    ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
    rstart -= m;

    for (i=0; i<m; i++) {
      row = gordering[i] - rstart;
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
      nnzA[row] = 0;
      for (j=0; j<nnz[i]; j++) {
        if (aj[j] < m) nnzA[row]++;
      }
      nnzB[row] = nnz[i] - nnzA[row];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
    ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  PetscStackCall("ML_free",ML_free(gordering));
  ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
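
/*
   The ownership offset above comes from an exclusive prefix sum over the
   local row counts; a sketch with hypothetical sizes m = {3,2,4} on 3 ranks:

     MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,comm);   -> rstart = {3,5,9}
     rstart -= m;                                    -> rstart = {0,3,5}

   so rank r owns global rows [rstart, rstart+m).
*/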

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_ML

   Input Parameter:
   .  pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;

  if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Number of coordinate locations %d must equal number of local blocks %d.",a_nloc,nloc);
  if ((Iend-my0)%bs!=0) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Local size %d not divisible by block size %d.",Iend-my0,bs);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* reallocate the coordinate array if its size has changed */
  if (!pc_ml->coords || (oldarrsz != arrsz)) {
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
  }
  for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
  /* copy the data in, column oriented */
  for (kk = 0; kk < nloc; kk++) {
    for (ii = 0; ii < ndm; ii++) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }
  PetscFunctionReturn(0);
}
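
/*
   A hedged caller-side sketch: coordinates are passed interleaved by node,
   one point per block row of pc->pmat. For a hypothetical 2d problem with
   nloc local nodes,

     PetscReal coords[] = {x0,y0, x1,y1, ...};      2*nloc entries
     ierr = PCSetCoordinates(pc,2,nloc,coords);CHKERRQ(ierr);

   PCSetCoordinates_ML() then repacks them into the column-oriented layout
   {x0,x1,...,y0,y1,...} that ML expects.
*/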

/* -----------------------------------------------------------------------------*/
extern PetscErrorCode PCReset_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free the coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
  }
  PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
  PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);

  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  ierr = PCReset_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size;
  FineGridCtx      *PetscMLdata;
  ML               *ml_object;
  ML_Aggregate     *agg_object;
  ML_Operator      *mlmat;
  PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat              A,Aloc;
  GridCtx          *gridctx;
  PC_MG            *mg    = (PC_MG*)pc->data;
  PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool        isSeq, isMPI;
  KSP              smoother;
  PC               subpc;
  PetscInt         mesh_level, old_mesh_level;
  MatInfo          info;
  static PetscBool cite = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML} 3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution =  {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
  A    = pc->pmat;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse the interpolation instead of recomputing the aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
       */
      ml_object             = pc_ml->ml_object;
      gridctx               = pc_ml->gridctx;
      Nlevels               = pc_ml->Nlevels;
      fine_level            = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI) {
        ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata       = pc_ml->PetscMLdata;
      ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
      PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
        PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
        PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
      }

      level = fine_level - 1;
      if (size == 1) { /* convert ML P, R and A into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert ML P and R into shell format, ML A into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0) {
          ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI) {
    ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
  if (pc_ml->dim) { /* create vecs around the coordinate data given */
    PetscInt  i,j,dim=pc_ml->dim;
    PetscInt  nloc = pc_ml->nloc,nlocghost;
    PetscReal *ghostedcoords;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;
    ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
    for (i = 0; i < dim; i++) {
      /* copy the coordinate values into the first component of pwork */
      for (j = 0; j < nloc; j++) {
        PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
      }
      /* get the ghost values */
      ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
      /* write into the vector */
      for (j = 0; j < nlocghost; j++) {
        ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
      }
    }
    /* replace the original coords with the ghosted coords, because these are
     * what ML needs */
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    pc_ml->coords = ghostedcoords;
  }

  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
  PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
  pc_ml->ml_object = ml_object;
  PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
  PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
  PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

  PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));

  /* aggregation */
  PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar       *nullvec;
      const PetscScalar *v;
      PetscBool         has_const;
      PetscInt          i,j,mlocal,nvec,M;
      const Vec         *vecs;

      if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
    }
  }
  PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
  case 2:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
  case 3:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
  }
  PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
  PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
  if (pc_ml->SpectralNormScheme_Anorm) {
    PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->Aux) {
    if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
    ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
    ml_object->Amat[0].aux_data->enable    = 1;
    ml_object->Amat[0].aux_data->max_level = 10;
    ml_object->Amat[0].num_PDEs            = bs;
  }

  ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
  ml_object->Amat[0].N_nonzeros = (int) info.nz_used;

  if (pc_ml->dim) {
    PetscInt               i,dim = pc_ml->dim;
    ML_Aggregate_Viz_Stats *grid_info;
    PetscInt               nlocghost;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;

    PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
    grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
    for (i = 0; i < dim; i++) {
      /* set the finest level coordinates to point to the column-order array
       * in pc_ml */
      /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
      switch (i) {
      case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
      case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
      case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
      default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
      }
    }
    grid_info->Ndim = dim;
  }

  /* repartitioning */
  if (pc_ml->Repartition) {
    PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
    PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
    PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
    PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
#if 0                           /* Function not yet defined in ml-6.2 */
    /* I'm not sure what compatibility issues might crop up if we partitioned
     * on the finest level, so to be safe repartitioning starts on the next
     * finest level (reflecting the default behavior in
     * ml_MultiLevelPreconditioner) */
    PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
#endif

    if (!pc_ml->RepartitionType) {
      PetscInt i;

      if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
      PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));

      for (i = 0; i < ml_object->ML_num_levels; i++) {
        ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
        grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
        /* defaults from ml_agg_info.c */
        grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
        grid_info->zoltan_timers        = 0;
        grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
      }
    } else {
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
    }
  }

  if (pc_ml->OldHierarchy) {
    PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  } else {
    PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  }
  if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
  /* set default smoothers */
  for (level=1; level<=fine_level; level++) {
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);

  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0) {
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

  /* put coordinate info in levels */
  if (pc_ml->dim) {
    PetscInt  i,j,dim = pc_ml->dim;
    PetscInt  bs, nloc;
    PC        subpc;
    PetscReal *array;

    level = fine_level;
    for (mllevel = 0; mllevel < Nlevels; mllevel++) {
      ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
      MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;

      ierr  = MatGetBlockSize(gridctx[level].A, &bs);CHKERRQ(ierr);
      ierr  = MatGetLocalSize(gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
      nloc /= bs; /* number of local nodes */

      ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
      ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      for (j = 0; j < nloc; j++) {
        for (i = 0; i < dim; i++) {
          switch (i) {
          case 0: array[dim * j + i] = grid_info->x[j]; break;
          case 1: array[dim * j + i] = grid_info->y[j]; break;
          case 2: array[dim * j + i] = grid_info->z[j]; break;
          default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
          }
        }
      }

      /* pass the coordinates to the smoothers/coarse solver, should they need them */
      ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
      ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
      ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      level--;
    }
  }

  /* setupcalled is set to 0 so that MG is setup from scratch */
  pc->setupcalled = 0;
  ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
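
/*
   Level-numbering sketch for the hierarchy assembled above (assuming
   Nlevels = 3): PETSc level = Nlevels - 1 - ML level,

     ML level:     0 (finest)    1    2 (coarsest)
     PETSc level:  2 (finest)    1    0 (coarsest)

   which is why the loops above walk `level` down while `mllevel` walks up.
*/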

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg   = (PC_MG*)pc->data;
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel,partindx;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  const char     *part[]   = {"Zoltan","ParMETIS"};
#if defined(HAVE_ML_ZOLTAN)
  const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
#endif
  PC_MG       *mg    = (PC_MG*)pc->data;
  PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt size;
  MPI_Comm    comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);

  PrintLevel = 0;
  indx       = 0;
  partindx   = 0;

  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
  PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);

  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
    functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
   */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
  if (pc_ml->Repartition) {
    ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
#if defined(HAVE_ML_ZOLTAN)
    partindx = 0;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);

    pc_ml->RepartitionType = partindx;
    if (!partindx) {
      PetscInt zindx = 0;

      ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);

      pc_ml->ZoltanScheme = zindx;
    }
#else
    partindx = 1;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
    pc_ml->RepartitionType = partindx;
    if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
#endif
  }
  /* the auxiliary Laplacian options apply whether or not repartitioning is enabled */
  ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using an auxiliary coordinate-based Laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
.  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
.  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
.  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
.  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
.  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
.  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
.  -pc_ml_Aux <false>: Aggregate using an auxiliary coordinate-based Laplacian (None)
-  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
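
/*
   Example command line (the executable name and sizes below are placeholders;
   the smoother settings are ordinary PCMG options):

     ./ex1 -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_maxCoarseSize 64 \
           -pc_ml_CoarsenScheme Uncoupled \
           -mg_levels_ksp_type richardson -mg_levels_pc_type sor
*/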

#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
  ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
  mg   = (PC_MG*)pc->data;

  /* create a supporting struct and attach it to pc */
  ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;
  pc_ml->dim                      = 0;
  pc_ml->nloc                     = 0;
  pc_ml->coords                   = 0;
  pc_ml->Repartition              = PETSC_FALSE;
  pc_ml->MaxMinRatio              = 1.3;
  pc_ml->MinPerProc               = 512;
  pc_ml->PutOnSingleProc          = 5000;
  pc_ml->RepartitionType          = 0;
  pc_ml->ZoltanScheme             = 0;
  pc_ml->Aux                      = PETSC_FALSE;
  pc_ml->AuxThreshold             = 0.0;

  /* allow for coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the pointers of PCMG with the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
1243