xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision fb8e56e08d4d0bfe9fc63603ca1f7fddd68abbdb)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscksp.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 #include <petscdm.h>            /* for DMDestroy(&pc->mg) hack */
12 
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
/* How the near-null space handed to ML's aggregation is chosen:
   AUTO picks USER/BLOCK/SCALAR at setup time, USER takes it from
   MatSetNearNullSpace(), BLOCK uses the matrix block size, SCALAR uses
   ML's default (constant) null space. */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
/* String table in the layout PetscOptionsEnum() expects: the enum value
   names, then the type name, then the option prefix, then a NULL sentinel */
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors: solution, right-hand side, residual */
  Mat A,P,R;                  /* level operator, prolongation, restriction */
  KSP ksp;                    /* smoother (or coarse solver) for this level */
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;
32 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;       /* Petsc matrix in aij format */
  Mat         Aloc;    /* local portion of A to be used by ML */
  Vec         x,y;     /* work vectors whose arrays are temporarily replaced (VecPlaceArray) by ML-supplied buffers */
  ML_Operator *mlmat;  /* ML's handle for this matrix */
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;
41 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;        /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;   /* ML matrix associated with A */
  Vec         y, work;  /* work is lazily created by MatMultAdd_ML() when output aliases the addend */
} Mat_MLShell;
48 
/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;          /* ML's multigrid hierarchy */
  ML_Aggregate      *agg_object;         /* ML's aggregation context */
  GridCtx           *gridctx;            /* per-level PETSc objects, one entry per level */
  FineGridCtx       *PetscMLdata;        /* wrapper feeding the fine-grid PETSc matrix to ML */
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation; /* reuse the hierarchy when the nonzero pattern is unchanged */
  PCMLNullSpaceType nulltype;            /* how the near-null space is supplied to aggregation */
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc; /* number of local coordinate blocks (nodes) */
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
65 
66 #undef __FUNCT__
67 #define __FUNCT__ "PetscML_getrow"
68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
69 {
70   PetscErrorCode ierr;
71   PetscInt       m,i,j,k=0,row,*aj;
72   PetscScalar    *aa;
73   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
74   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
75 
76   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
77   for (i = 0; i<N_requested_rows; i++) {
78     row            = requested_rows[i];
79     row_lengths[i] = a->ilen[row];
80     if (allocated_space < k+row_lengths[i]) return(0);
81     if ((row >= 0) || (row <= (m-1))) {
82       aj = a->j + a->i[row];
83       aa = a->a + a->i[row];
84       for (j=0; j<row_lengths[i]; j++) {
85         columns[k]  = aj[j];
86         values[k++] = aa[j];
87       }
88     }
89   }
90   return(1);
91 }
92 
93 #undef __FUNCT__
94 #define __FUNCT__ "PetscML_comm"
95 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
96 {
97   PetscErrorCode ierr;
98   FineGridCtx    *ml = (FineGridCtx*)ML_data;
99   Mat            A   = ml->A;
100   Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
101   PetscMPIInt    size;
102   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
103   PetscScalar    *array;
104 
105   PetscFunctionBegin;
106   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
107   if (size == 1) return 0;
108 
109   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
110   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
111   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
112   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
113   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
114   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
115   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
116   PetscFunctionReturn(0);
117 }
118 
#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
/*
   Matvec callback handed to ML for the fine-grid matrix: computes
   ap = Aloc * p.  In parallel, p is first copied into ml->pwork and the ghost
   values are gathered with PetscML_comm() because Aloc's column space
   includes the ghost columns.  The arrays p/pwork and ap are wrapped in the
   work vectors ml->x and ml->y via VecPlaceArray (no copies).
*/
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A   = ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    /* copy the owned entries then fill the ghost tail of pwork */
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
145 
146 #undef __FUNCT__
147 #define __FUNCT__ "MatMult_ML"
148 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
149 {
150   PetscErrorCode ierr;
151   Mat_MLShell    *shell;
152   PetscScalar    *xarray,*yarray;
153   PetscInt       x_length,y_length;
154 
155   PetscFunctionBegin;
156   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
157   ierr     = VecGetArray(x,&xarray);CHKERRQ(ierr);
158   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
159   x_length = shell->mlmat->invec_leng;
160   y_length = shell->mlmat->outvec_leng;
161   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray));
162   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
163   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
164   PetscFunctionReturn(0);
165 }
166 
167 #undef __FUNCT__
168 #define __FUNCT__ "MatMultAdd_ML"
169 /* Computes y = w + A * x
170    It is possible that w == y, but not x == y
171 */
172 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
173 {
174   Mat_MLShell    *shell;
175   PetscScalar    *xarray,*yarray;
176   PetscInt       x_length,y_length;
177   PetscErrorCode ierr;
178 
179   PetscFunctionBegin;
180   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
181   if (y == w) {
182     if (!shell->work) {
183       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
184     }
185     ierr     = VecGetArray(x,           &xarray);CHKERRQ(ierr);
186     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
187     x_length = shell->mlmat->invec_leng;
188     y_length = shell->mlmat->outvec_leng;
189     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
190     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
191     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
192     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
193   } else {
194     ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
195     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
196     x_length = shell->mlmat->invec_leng;
197     y_length = shell->mlmat->outvec_leng;
198     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
199     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
200     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
201     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
202   }
203   PetscFunctionReturn(0);
204 }
205 
/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Builds the sequential matrix Aloc that ML consumes: the local rows of the
   MPIAIJ matrix A with the diagonal (A) and off-diagonal (B) parts merged
   row-by-row into one SeqAIJ matrix.  Off-diagonal columns are shifted by the
   local column count so ghost columns follow the owned ones (the ordering
   PetscML_comm() produces).

   scall - MAT_INITIAL_MATRIX allocates and creates *Aloc;
           MAT_REUSE_MATRIX overwrites only the numerical values in place
           (same nonzero pattern assumed).
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row pointers of the merged matrix: diagonal plus off-diagonal counts */
    ierr  = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: shift columns past the owned block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* pattern unchanged: stream the new values straight into the old arrays */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
271 
272 extern PetscErrorCode MatDestroy_Shell(Mat);
273 #undef __FUNCT__
274 #define __FUNCT__ "MatDestroy_ML"
275 static PetscErrorCode MatDestroy_ML(Mat A)
276 {
277   PetscErrorCode ierr;
278   Mat_MLShell    *shell;
279 
280   PetscFunctionBegin;
281   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
282   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
283   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
284   ierr = PetscFree(shell);CHKERRQ(ierr);
285   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
286   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNCT__
291 #define __FUNCT__ "MatWrapML_SeqAIJ"
292 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
293 {
294   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
295   PetscErrorCode        ierr;
296   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
297   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
298   PetscScalar           *ml_vals=matdata->values,*aa;
299 
300   PetscFunctionBegin;
301   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
302   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
303     if (reuse) {
304       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
305       aij->i = ml_rowptr;
306       aij->j = ml_cols;
307       aij->a = ml_vals;
308     } else {
309       /* sort ml_cols and ml_vals */
310       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
311       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
312       aj = ml_cols; aa = ml_vals;
313       for (i=0; i<m; i++) {
314         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
315         aj  += nnz[i]; aa += nnz[i];
316       }
317       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
318       ierr = PetscFree(nnz);CHKERRQ(ierr);
319     }
320     PetscFunctionReturn(0);
321   }
322 
323   nz_max = PetscMax(1,mlmat->max_nz_per_row);
324   ierr   = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
325   if (!reuse) {
326     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
327     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
328     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
329     /* keep track of block size for A matrices */
330     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
331 
332     ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
333     for (i=0; i<m; i++) {
334       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
335     }
336     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
337   }
338   for (i=0; i<m; i++) {
339     PetscInt ncols;
340 
341     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
342     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
343   }
344   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
345   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
346 
347   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
348   ierr = PetscFree(nnz);CHKERRQ(ierr);
349   PetscFunctionReturn(0);
350 }
351 
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
/*
   Wraps an ML_Operator as a PETSc MATSHELL matrix whose mult/multadd call
   ML_Operator_Apply() (see MatMult_ML/MatMultAdd_ML).  With MAT_REUSE_MATRIX
   only the stored ML operator pointer is swapped; otherwise a new shell
   matrix, its context, and a work vector of the output size are created.
*/
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    /* keep the existing shell; just point its context at the new ML operator */
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);

  shellctx->A         = *newmat;
  shellctx->mlmat     = mlmat;
  shellctx->work      = NULL; /* created lazily by MatMultAdd_ML when needed */

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);

  /* custom destroy so the shell context and vectors are freed with the matrix */
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
389 
390 #undef __FUNCT__
391 #define __FUNCT__ "MatWrapML_MPIAIJ"
392 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
393 {
394   PetscInt       *aj;
395   PetscScalar    *aa;
396   PetscErrorCode ierr;
397   PetscInt       i,j,*gordering;
398   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
399   Mat            A;
400 
401   PetscFunctionBegin;
402   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
403   n = mlmat->invec_leng;
404   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
405 
406   nz_max = PetscMax(1,mlmat->max_nz_per_row);
407   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
408   if (reuse) A = *newmat;
409   else {
410     PetscInt *nnzA,*nnzB,*nnz;
411     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
412     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
413     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
414     /* keep track of block size for A matrices */
415     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
416     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
417 
418     for (i=0; i<m; i++) {
419       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
420       nnzA[i] = 0;
421       for (j=0; j<nnz[i]; j++) {
422         if (aj[j] < m) nnzA[i]++;
423       }
424       nnzB[i] = nnz[i] - nnzA[i];
425     }
426     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
427     ierr = PetscFree3(nnzA,nnzB,nnz);
428   }
429   /* create global row numbering for a ML_Operator */
430   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
431   for (i=0; i<m; i++) {
432     PetscInt ncols;
433     row = gordering[i];
434 
435     PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
436     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
437     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
438   }
439   PetscStackCall("ML_Operator_Getrow",ML_free(gordering));
440   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
441   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
442   *newmat = A;
443 
444   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
445   PetscFunctionReturn(0);
446 }
447 
448 /* -------------------------------------------------------------------------- */
449 /*
450    PCSetCoordinates_ML
451 
452    Input Parameter:
453    .  pc - the preconditioner context
454 */
455 #undef __FUNCT__
456 #define __FUNCT__ "PCSetCoordinates_ML"
457 static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
458 {
459   PC_MG          *mg    = (PC_MG*)pc->data;
460   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
461   PetscErrorCode ierr;
462   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
463   Mat            Amat = pc->pmat;
464 
465   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
466   PetscFunctionBegin;
467   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
468   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
469 
470   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
471   nloc = (Iend-my0)/bs;
472 
473   if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
474   if ((Iend-my0)%bs!=0) SETERRQ1(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);
475 
476   oldarrsz    = pc_ml->dim * pc_ml->nloc;
477   pc_ml->dim  = ndm;
478   pc_ml->nloc = a_nloc;
479   arrsz       = ndm * a_nloc;
480 
481   /* create data - syntactic sugar that should be refactored at some point */
482   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
483     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
484     ierr = PetscMalloc((arrsz)*sizeof(PetscReal), &pc_ml->coords);CHKERRQ(ierr);
485   }
486   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
487   /* copy data in - column oriented */
488   for (kk = 0; kk < nloc; kk++) {
489     for (ii = 0; ii < ndm; ii++) {
490       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
491     }
492   }
493   PetscFunctionReturn(0);
494 }
495 
496 /* -----------------------------------------------------------------------------*/
497 #undef __FUNCT__
498 #define __FUNCT__ "PCReset_ML"
499 PetscErrorCode PCReset_ML(PC pc)
500 {
501   PetscErrorCode ierr;
502   PC_MG          *mg    = (PC_MG*)pc->data;
503   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
504   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
505 
506   PetscFunctionBegin;
507   if (dim) {
508     ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
509 
510     for (level=0; level<=fine_level; level++) {
511       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
512     }
513 
514     grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
515     grid_info->y = 0;
516     grid_info->z = 0;
517 
518     PetscStackCall("ML_Operator_Getrow",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
519   }
520   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
521   PetscStackCall("ML_Aggregate_Destroy",ML_Destroy(&pc_ml->ml_object));
522 
523   if (pc_ml->PetscMLdata) {
524     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
525     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
526     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
527     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
528   }
529   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
530 
531   if (pc_ml->gridctx) {
532     for (level=0; level<fine_level; level++) {
533       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
534       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
535       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
536       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
537       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
538       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
539     }
540   }
541   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
542   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
543 
544   pc_ml->dim  = 0;
545   pc_ml->nloc = 0;
546   PetscFunctionReturn(0);
547 }
548 /* -------------------------------------------------------------------------- */
549 /*
550    PCSetUp_ML - Prepares for the use of the ML preconditioner
551                     by setting data structures and options.
552 
553    Input Parameter:
554 .  pc - the preconditioner context
555 
556    Application Interface Routine: PCSetUp()
557 
558    Notes:
559    The interface routine PCSetUp() is not usually called directly by
560    the user, but instead is called by PCApply() if necessary.
561 */
562 extern PetscErrorCode PCSetFromOptions_MG(PC);
563 extern PetscErrorCode PCReset_MG(PC);
564 
565 #undef __FUNCT__
566 #define __FUNCT__ "PCSetUp_ML"
567 PetscErrorCode PCSetUp_ML(PC pc)
568 {
569   PetscErrorCode ierr;
570   PetscMPIInt    size;
571   FineGridCtx    *PetscMLdata;
572   ML             *ml_object;
573   ML_Aggregate   *agg_object;
574   ML_Operator    *mlmat;
575   PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
576   Mat            A,Aloc;
577   GridCtx        *gridctx;
578   PC_MG          *mg    = (PC_MG*)pc->data;
579   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
580   PetscBool      isSeq, isMPI;
581   KSP            smoother;
582   PC             subpc;
583   PetscInt       mesh_level, old_mesh_level;
584   MatInfo        info;
585 
586   PetscFunctionBegin;
587   A    = pc->pmat;
588   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
589 
590   if (pc->setupcalled) {
591     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
592       /*
593        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
594        multiple solves in which the matrix is not changing too quickly.
595        */
596       ml_object             = pc_ml->ml_object;
597       gridctx               = pc_ml->gridctx;
598       Nlevels               = pc_ml->Nlevels;
599       fine_level            = Nlevels - 1;
600       gridctx[fine_level].A = A;
601 
602       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
603       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
604       if (isMPI) {
605         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
606       } else if (isSeq) {
607         Aloc = A;
608         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
609       } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
610 
611       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
612       PetscMLdata       = pc_ml->PetscMLdata;
613       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
614       PetscMLdata->A    = A;
615       PetscMLdata->Aloc = Aloc;
616       PetscStackCall("ML_Aggregate_Destroy",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
617       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
618 
619       mesh_level = ml_object->ML_finest_level;
620       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
621         old_mesh_level = mesh_level;
622         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
623 
624         /* clean and regenerate A */
625         mlmat = &(ml_object->Amat[mesh_level]);
626         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
627         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
628         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
629       }
630 
631       level = fine_level - 1;
632       if (size == 1) { /* convert ML P, R and A into seqaij format */
633         for (mllevel=1; mllevel<Nlevels; mllevel++) {
634           mlmat = &(ml_object->Amat[mllevel]);
635           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
636           level--;
637         }
638       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
639         for (mllevel=1; mllevel<Nlevels; mllevel++) {
640           mlmat  = &(ml_object->Amat[mllevel]);
641           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
642           level--;
643         }
644       }
645 
646       for (level=0; level<fine_level; level++) {
647         if (level > 0) {
648           ierr = PCMGSetResidual(pc,level,PCMGResidual_Default,gridctx[level].A);CHKERRQ(ierr);
649         }
650         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
651       }
652       ierr = PCMGSetResidual(pc,fine_level,PCMGResidual_Default,gridctx[fine_level].A);CHKERRQ(ierr);
653       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
654 
655       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
656       PetscFunctionReturn(0);
657     } else {
658       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
659       ierr = PCReset_ML(pc);CHKERRQ(ierr);
660       ierr = PCReset_MG(pc);CHKERRQ(ierr);
661     }
662   }
663 
664   /* setup special features of PCML */
665   /*--------------------------------*/
666   /* covert A to Aloc to be used by ML at fine grid */
667   pc_ml->size = size;
668   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
669   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
670   if (isMPI) {
671     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
672   } else if (isSeq) {
673     Aloc = A;
674     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
675   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
676 
677   /* create and initialize struct 'PetscMLdata' */
678   ierr               = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
679   pc_ml->PetscMLdata = PetscMLdata;
680   ierr               = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
681 
682   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
683   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
684   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
685 
686   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
687   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
688   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
689   PetscMLdata->A    = A;
690   PetscMLdata->Aloc = Aloc;
691   if (pc_ml->dim) { /* create vecs around the coordinate data given */
692     PetscInt  i,j,dim=pc_ml->dim;
693     PetscInt  nloc = pc_ml->nloc,nlocghost;
694     PetscReal *ghostedcoords;
695 
696     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
697     nlocghost = Aloc->cmap->n / bs;
698     ierr      = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr);
699     for (i = 0; i < dim; i++) {
700       /* copy coordinate values into first component of pwork */
701       for (j = 0; j < nloc; j++) {
702         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
703       }
704       /* get the ghost values */
705       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
706       /* write into the vector */
707       for (j = 0; j < nlocghost; j++) {
708         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
709       }
710     }
711     /* replace the original coords with the ghosted coords, because these are
712      * what ML needs */
713     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
714     pc_ml->coords = ghostedcoords;
715   }
716 
717   /* create ML discretization matrix at fine grid */
718   /* ML requires input of fine-grid matrix. It determines nlevels. */
719   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
720   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
721   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
722   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
723   pc_ml->ml_object = ml_object;
724   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
725   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
726   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
727 
728   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
729 
730   /* aggregation */
731   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
732   pc_ml->agg_object = agg_object;
733 
734   {
735     MatNullSpace mnull;
736     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
737     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
738       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
739       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
740       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
741     }
742     switch (pc_ml->nulltype) {
743     case PCML_NULLSPACE_USER: {
744       PetscScalar       *nullvec;
745       const PetscScalar *v;
746       PetscBool         has_const;
747       PetscInt          i,j,mlocal,nvec,M;
748       const Vec         *vecs;
749 
750       if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
751       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
752       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
753       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
754       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
755       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
756       for (i=0; i<nvec; i++) {
757         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
758         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
759         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
760       }
761       PetscStackCall("ML_Aggregate_Create",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
762       ierr = PetscFree(nullvec);CHKERRQ(ierr);
763     } break;
764     case PCML_NULLSPACE_BLOCK:
765       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
766       break;
767     case PCML_NULLSPACE_SCALAR:
768       break;
769     default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
770     }
771   }
772   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
773   /* set options */
774   switch (pc_ml->CoarsenScheme) {
775   case 1:
776     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
777   case 2:
778     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
779   case 3:
780     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
781   }
782   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
783   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
784   if (pc_ml->SpectralNormScheme_Anorm) {
785     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
786   }
787   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
788   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
789   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
790   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
791   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
792   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
793 
794   if (pc_ml->Aux) {
795     if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
796     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
797     ml_object->Amat[0].aux_data->enable    = 1;
798     ml_object->Amat[0].aux_data->max_level = 10;
799     ml_object->Amat[0].num_PDEs            = bs;
800   }
801 
802   ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
803   ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
804 
805   if (pc_ml->dim) {
806     PetscInt               i,dim = pc_ml->dim;
807     ML_Aggregate_Viz_Stats *grid_info;
808     PetscInt               nlocghost;
809 
810     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
811     nlocghost = Aloc->cmap->n / bs;
812 
813     PetscStackCall("ML_Aggregate_VizAndStats_Setup(",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
814     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
815     for (i = 0; i < dim; i++) {
816       /* set the finest level coordinates to point to the column-order array
817        * in pc_ml */
818       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
819       switch (i) {
820       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
821       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
822       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
823       default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
824       }
825     }
826     grid_info->Ndim = dim;
827   }
828 
829   /* repartitioning */
830   if (pc_ml->Repartition) {
831     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
832     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
833     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
834     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
835 #if 0                           /* Function not yet defined in ml-6.2 */
836     /* I'm not sure what compatibility issues might crop up if we partitioned
837      * on the finest level, so to be safe repartition starts on the next
838      * finest level (reflection default behavior in
839      * ml_MultiLevelPreconditioner) */
840     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
841 #endif
842 
843     if (!pc_ml->RepartitionType) {
844       PetscInt i;
845 
846       if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
847       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
848       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
849 
850       for (i = 0; i < ml_object->ML_num_levels; i++) {
851         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
852         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
853         /* defaults from ml_agg_info.c */
854         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
855         grid_info->zoltan_timers        = 0;
856         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
857       }
858     } else {
859       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
860     }
861   }
862 
863   if (pc_ml->OldHierarchy) {
864     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
865   } else {
866     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
867   }
868   if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
869   pc_ml->Nlevels = Nlevels;
870   fine_level     = Nlevels - 1;
871 
872   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
873   /* set default smoothers */
874   for (level=1; level<=fine_level; level++) {
875     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
876     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
877     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
878     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
879   }
880   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
881   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
882   ierr = PetscOptionsEnd();CHKERRQ(ierr);
883 
884   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
885 
886   pc_ml->gridctx = gridctx;
887 
888   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
889      Level 0 is the finest grid for ML, but coarsest for PETSc! */
890   gridctx[fine_level].A = A;
891 
892   level = fine_level - 1;
893   if (size == 1) { /* convert ML P, R and A into seqaij format */
894     for (mllevel=1; mllevel<Nlevels; mllevel++) {
895       mlmat = &(ml_object->Pmat[mllevel]);
896       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
897       mlmat = &(ml_object->Rmat[mllevel-1]);
898       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
899 
900       mlmat = &(ml_object->Amat[mllevel]);
901       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
902       level--;
903     }
904   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
905     for (mllevel=1; mllevel<Nlevels; mllevel++) {
906       mlmat  = &(ml_object->Pmat[mllevel]);
907       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
908       mlmat  = &(ml_object->Rmat[mllevel-1]);
909       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
910 
911       mlmat  = &(ml_object->Amat[mllevel]);
912       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
913       level--;
914     }
915   }
916 
917   /* create vectors and ksp at all levels */
918   for (level=0; level<fine_level; level++) {
919     level1 = level + 1;
920     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
921     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
922     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
923     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
924 
925     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
926     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
927     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
928     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
929 
930     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
931     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
932     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
933     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
934 
935     if (level == 0) {
936       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
937     } else {
938       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
939     }
940   }
941   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
942 
943   /* create coarse level and the interpolation between the levels */
944   for (level=0; level<fine_level; level++) {
945     level1 = level + 1;
946     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
947     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
948     if (level > 0) {
949       ierr = PCMGSetResidual(pc,level,PCMGResidual_Default,gridctx[level].A);CHKERRQ(ierr);
950     }
951     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
952   }
953   ierr = PCMGSetResidual(pc,fine_level,PCMGResidual_Default,gridctx[fine_level].A);CHKERRQ(ierr);
954   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
955 
956   /* put coordinate info in levels */
957   if (pc_ml->dim) {
958     PetscInt  i,j,dim = pc_ml->dim;
959     PetscInt  bs, nloc;
960     PC        subpc;
961     PetscReal *array;
962 
963     level = fine_level;
964     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
965       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
966       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
967 
968       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
969       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
970       nloc /= bs; /* number of local nodes */
971 
972       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
973       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
974       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
975       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
976       for (j = 0; j < nloc; j++) {
977         for (i = 0; i < dim; i++) {
978           switch (i) {
979           case 0: array[dim * j + i] = grid_info->x[j]; break;
980           case 1: array[dim * j + i] = grid_info->y[j]; break;
981           case 2: array[dim * j + i] = grid_info->z[j]; break;
982           default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
983           }
984         }
985       }
986 
987       /* passing coordinates to smoothers/coarse solver, should they need them */
988       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
989       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
990       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
991       level--;
992     }
993   }
994 
995   /* setupcalled is set to 0 so that MG is setup from scratch */
996   pc->setupcalled = 0;
997   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
998   PetscFunctionReturn(0);
999 }
1000 
1001 /* -------------------------------------------------------------------------- */
1002 /*
1003    PCDestroy_ML - Destroys the private context for the ML preconditioner
1004    that was created with PCCreate_ML().
1005 
1006    Input Parameter:
1007 .  pc - the preconditioner context
1008 
1009    Application Interface Routine: PCDestroy()
1010 */
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg   = (PC_MG*)pc->data;           /* base-class (PCMG) data */
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx;       /* ML-specific context hung off the MG data */

  PetscFunctionBegin;
  /* Tear down in strict order:
     1) PCReset_ML() releases the ML hierarchy, wrapped matrices, and work vectors;
     2) free our private context before the base class reclaims pc->data;
     3) PCDestroy_MG() destroys the underlying multigrid infrastructure;
     4) remove the composed PCSetCoordinates_C method so no dangling
        function pointer remains on the (now typeless) PC object. */
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1026 
1027 #undef __FUNCT__
1028 #define __FUNCT__ "PCSetFromOptions_ML"
1029 PetscErrorCode PCSetFromOptions_ML(PC pc)
1030 {
1031   PetscErrorCode ierr;
1032   PetscInt       indx,PrintLevel,partindx;
1033   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1034   const char     *part[]   = {"Zoltan","ParMETIS"};
1035 #if defined(HAVE_ML_ZOLTAN)
1036   PetscInt   zidx;
1037   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1038 #endif
1039   PC_MG       *mg    = (PC_MG*)pc->data;
1040   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1041   PetscMPIInt size;
1042   MPI_Comm    comm;
1043 
1044   PetscFunctionBegin;
1045   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
1046   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1047   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
1048 
1049   PrintLevel = 0;
1050   indx       = 0;
1051   partindx   = 0;
1052 
1053   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
1054   PetscStackCall("ML_Set_PrintLeve",ML_Set_PrintLevel(PrintLevel));
1055   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
1056   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
1057   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
1058 
1059   pc_ml->CoarsenScheme = indx;
1060 
1061   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
1062   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
1063   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
1064   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
1065   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
1066   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
1067   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
1068   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
1069   /*
1070     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1071     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1072 
1073     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1074     combination of options and ML's exit(1) explanations don't help matters.
1075   */
1076   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1077   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1078   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
1079   if (pc_ml->EnergyMinimization) {
1080     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1081   }
1082   if (pc_ml->EnergyMinimization == 2) {
1083     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1084     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1085   }
1086   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1087   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1088   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1089   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1090   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1091   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1092   /*
1093     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1094     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1095     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1096     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1097     this context, but ML doesn't provide a way to find out which ones.
1098    */
1099   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1100   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1101   if (pc_ml->Repartition) {
1102     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1103     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1104     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1105 #if defined(HAVE_ML_ZOLTAN)
1106     partindx = 0;
1107     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1108 
1109     pc_ml->RepartitionType = partindx;
1110     if (!partindx) {
1111       PetscInt zindx = 0;
1112 
1113       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1114 
1115       pc_ml->ZoltanScheme = zindx;
1116     }
1117 #else
1118     partindx = 1;
1119     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1120     if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1121 #endif
1122     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1123     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1124   }
1125   ierr = PetscOptionsTail();CHKERRQ(ierr);
1126   PetscFunctionReturn(0);
1127 }
1128 
1129 /* -------------------------------------------------------------------------- */
1130 /*
1131    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1132    and sets this as the private data within the generic preconditioning
1133    context, PC, that was created within PCCreate().
1134 
1135    Input Parameter:
1136 .  pc - the preconditioner context
1137 
1138    Application Interface Routine: PCCreate()
1139 */
1140 
1141 /*MC
1142      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
1143        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
1144       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
1145        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1146 
1147    Options Database Key:
1148    Multigrid options(inherited)
1149 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
1150 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
1151 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
1152 .  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1153    ML options:
1154 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1155 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1156 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1157 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1158 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1159 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1160 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1161 .  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1162 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1163 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1164 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1165 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1166 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1167 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
1168 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1169 
1170    Level: intermediate
1171 
1172   Concepts: multigrid
1173 
1174 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1175            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
1176            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1177            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1178            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1179 M*/
1180 
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use DM associated with PC must delete it */
  ierr         = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->galerkin = 2;             /* Use Galerkin, but it is computed externally (by ML, not by PCMG) */

  /* create a supporting struct and attach it to pc */
  ierr         = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  /* Default option values; these mirror the option names/defaults advertised in the
     PCML manpage above.  NOTE(review): fields not assigned here (e.g. Symmetrize,
     nulltype, EnergyMinimization, KeepAggInfo, Reusable, OldHierarchy) presumably
     rely on PetscNewLog() zero-filling the struct -- confirm; PCML_NULLSPACE_AUTO
     is enum value 0, so zero-init does yield the intended AUTO default. */
  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;    /* number of levels not known until ML builds the hierarchy */
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;
  pc_ml->dim                      = 0;     /* no coordinates until PCSetCoordinates() is called */
  pc_ml->nloc                     = 0;
  pc_ml->coords                   = 0;
  pc_ml->Repartition              = PETSC_FALSE;
  pc_ml->MaxMinRatio              = 1.3;
  pc_ml->MinPerProc               = 512;
  pc_ml->PutOnSingleProc          = 5000;
  pc_ml->RepartitionType          = 0;
  pc_ml->ZoltanScheme             = 0;
  pc_ml->Aux                      = PETSC_FALSE;
  pc_ml->AuxThreshold             = 0.0;

  /* allow for coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
1236