/*
   Provides an interface to the ML smoothed aggregation algebraic multigrid preconditioner
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                    Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
#include <petsc/private/pcmgimpl.h> /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscdm.h>                /* for the DMDestroy(&pc->dm) hack below */

EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors */
  Mat A,P,R;
  KSP ksp;
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input the PETSc matrix into ML at the finest grid */
typedef struct {
  Mat         A;       /* PETSc matrix in AIJ format */
  Mat         Aloc;    /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;        /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;   /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;
  ML_Aggregate      *agg_object;
  GridCtx           *gridctx;
  FineGridCtx       *PetscMLdata;
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation;
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc;
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) { /* bounds check: && rather than ||, otherwise the test is vacuous */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++) {
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}
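
/*
   Illustration (a hedged sketch; not how PETSc itself calls this): ML pulls matrix rows
   through the getrow interface above roughly as

     int    rows[1] = {i}, lens[1], cols[32];
     double vals[32];
     if (!PetscML_getrow(ml_operator,1,rows,32,cols,vals,lens)) { enlarge cols/vals and retry }

   where a return value of 0 tells ML that the supplied workspace was too small. The
   identifiers rows/cols/vals/lens are illustrative only.
*/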

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode    ierr;
  FineGridCtx       *ml = (FineGridCtx*)ML_data;
  Mat               A   = ml->A;
  Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
  PetscMPIInt       size;
  PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  const PetscScalar *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
  ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
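
/*
   Worked example of the array layout PetscML_comm() assumes: with in_length = A->rmap->n = 4
   owned entries and out_length = ml->Aloc->cmap->n = 6, the caller supplies p with p[0..3]
   already holding the owned values, and the scatter above fills p[4..5] with the ghost values
   referenced by the off-process columns of Aloc.
*/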

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A   = ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode    ierr;
  Mat_MLShell       *shell;
  PetscScalar       *yarray;
  const PetscScalar *xarray;
  PetscInt          x_length,y_length;

  PetscFunctionBegin;
  ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
  ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
  ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell       *shell;
  PetscScalar       *yarray;
  const PetscScalar *xarray;
  PetscInt          x_length,y_length;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y,&shell->work);CHKERRQ(ierr);
    }
    ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(shell->work,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
    ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,shell->work);CHKERRQ(ierr);
  } else {
    ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
    ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* newtype is ignored because this routine handles only one conversion */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
    ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
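
/*
   Worked example of the layout produced by MatConvert_MPIAIJ_ML(): suppose the local diagonal
   block has an = 3 columns and the off-diagonal block B has 2 (compressed) columns. A row with
   diagonal entries in columns {0,2} and B entries in columns {0,1} is stored in Aloc with
   column indices {0,2,3,4}: diagonal indices are kept and B indices are shifted by an, so Aloc
   is a sequential matrix whose first an columns coincide with the diagonal block.
*/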

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
    if (reuse) {
      Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++) {
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj  += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (!reuse) {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(*newmat,mlmat->num_PDEs);CHKERRQ(ierr);

    ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);

  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = NULL;

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);

  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;
  PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  /* create a global row numbering for the ML_Operator */
  PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));

  nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (reuse) {
    A = *newmat;
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    PetscInt rstart;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(A,mlmat->num_PDEs);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
    ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
    rstart -= m;

    for (i=0; i<m; i++) {
      row = gordering[i] - rstart;
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
      nnzA[row] = 0;
      for (j=0; j<nnz[i]; j++) {
        if (aj[j] < m) nnzA[row]++;
      }
      nnzB[row] = nnz[i] - nnzA[row];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
    ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  PetscStackCall("ML_free",ML_free(gordering));
  ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_ML

   Input Parameter:
   .  pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;

  if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG,"Number of coordinate locations %d must equal number of local blocks %d",a_nloc,nloc);
  if ((Iend-my0)%bs!=0) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG,"Local size %d not divisible by block size %d",Iend-my0,bs);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* reallocate the coordinate array if its size has changed */
  if (!pc_ml->coords || (oldarrsz != arrsz)) {
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
  }
  for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
  /* copy the data in, converting from node-interlaced to coordinate-major (column) ordering */
  for (kk = 0; kk < nloc; kk++) {
    for (ii = 0; ii < ndm; ii++) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }
  PetscFunctionReturn(0);
}
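
/*
   Usage sketch (illustrative; the standard PCSetCoordinates() calling sequence): for a 2D
   problem with nloc local nodes the application passes node-interlaced coordinates,

     PetscReal *coords;   // coords[2*k] = x_k, coords[2*k+1] = y_k
     ierr = PCSetCoordinates(pc,2,nloc,coords);CHKERRQ(ierr);

   and the routine above repacks them coordinate-major (all x, then all y), the ordering ML expects.
*/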

/* -----------------------------------------------------------------------------*/
extern PetscErrorCode PCReset_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free our coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
  }
  PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
  PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);

  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  ierr = PCReset_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size;
  FineGridCtx      *PetscMLdata;
  ML               *ml_object;
  ML_Aggregate     *agg_object;
  ML_Operator      *mlmat;
  PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat              A,Aloc;
  GridCtx          *gridctx;
  PC_MG            *mg    = (PC_MG*)pc->data;
  PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool        isSeq, isMPI;
  KSP              smoother;
  PC               subpc;
  PetscInt         mesh_level, old_mesh_level;
  MatInfo          info;
  static PetscBool cite = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML} 3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution = {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
  A    = pc->pmat;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse the interpolation instead of recomputing the aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
       */
      ml_object             = pc_ml->ml_object;
      gridctx               = pc_ml->gridctx;
      Nlevels               = pc_ml->Nlevels;
      fine_level            = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI) {
        ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata       = pc_ml->PetscMLdata;
      ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
      PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
        PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
        PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
      }

      level = fine_level - 1;
      if (size == 1) { /* convert the regenerated ML A matrices into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert the regenerated ML A matrices into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0) {
          ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI) {
    ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
  if (pc_ml->dim) { /* create vecs around the coordinate data given */
    PetscInt  i,j,dim=pc_ml->dim;
    PetscInt  nloc = pc_ml->nloc,nlocghost;
    PetscReal *ghostedcoords;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;
    ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
    for (i = 0; i < dim; i++) {
      /* copy the coordinate values into the first component of each block of pwork */
      for (j = 0; j < nloc; j++) {
        PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
      }
      /* get the ghost values */
      ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
      /* write into the vector */
      for (j = 0; j < nlocghost; j++) {
        ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
      }
    }
    /* replace the original coords with the ghosted coords, because these are
     * what ML needs */
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    pc_ml->coords = ghostedcoords;
  }

  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
  PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
  pc_ml->ml_object = ml_object;
  PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
  PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
  PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

  PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));

  /* aggregation */
  PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar       *nullvec;
      const PetscScalar *v;
      PetscBool         has_const;
      PetscInt          i,j,mlocal,nvec,M;
      const Vec         *vecs;

      if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
    }
  }
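
  /*
     Example (a hedged sketch of the standard PETSc calling sequence): to reach the
     PCML_NULLSPACE_USER branch above, the application attaches a near null space to the
     operator before PCSetUp(), e.g.

       MatNullSpace nsp;
       ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,nvec,vecs,&nsp);CHKERRQ(ierr);
       ierr = MatSetNearNullSpace(A,nsp);CHKERRQ(ierr);
       ierr = MatNullSpaceDestroy(&nsp);CHKERRQ(ierr);

     and runs with -pc_ml_nullspace user; with no near null space attached the AUTO logic
     above falls back to the BLOCK or SCALAR choices.
  */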
  PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
  case 2:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
  case 3:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
  }
  PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
  PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
  if (pc_ml->SpectralNormScheme_Anorm) {
    PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->Aux) {
    if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
    ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
    ml_object->Amat[0].aux_data->enable    = 1;
    ml_object->Amat[0].aux_data->max_level = 10;
    ml_object->Amat[0].num_PDEs            = bs;
  }

  ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
  ml_object->Amat[0].N_nonzeros = (int) info.nz_used;

  if (pc_ml->dim) {
    PetscInt               i,dim = pc_ml->dim;
    ML_Aggregate_Viz_Stats *grid_info;
    PetscInt               nlocghost;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;

    PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
    grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
    for (i = 0; i < dim; i++) {
      /* set the finest level coordinates to point to the column-order array
       * in pc_ml */
      /* NOTE: PCReset_ML() clears these pointers before ML_Aggregate_VizAndStats_Clean() so ML does not free our array */
      switch (i) {
      case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
      case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
      case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
      default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
      }
    }
    grid_info->Ndim = dim;
  }

  /* repartitioning */
  if (pc_ml->Repartition) {
    PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
    PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
    PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
    PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
#if 0                           /* Function not yet defined in ml-6.2 */
    /* I'm not sure what compatibility issues might crop up if we partitioned
     * on the finest level, so to be safe repartitioning starts on the next
     * finest level (reflecting the default behavior in
     * ml_MultiLevelPreconditioner) */
    PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
#endif

    if (!pc_ml->RepartitionType) {
      PetscInt i;

      if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
      PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));

      for (i = 0; i < ml_object->ML_num_levels; i++) {
        ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
        grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
        /* defaults from ml_agg_info.c */
        grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
        grid_info->zoltan_timers        = 0;
        grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
      }
    } else {
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
    }
  }

  if (pc_ml->OldHierarchy) {
    PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  } else {
    PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  }
  if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
  /* set default smoothers */
  for (level=1; level<=fine_level; level++) {
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);

  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
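  /* Worked example of the mapping: with Nlevels = 3, ML level 0 (finest) is PETSc level 2,
     ML level 1 is PETSc level 1, and ML level 2 (coarsest) is PETSc level 0; hence the
     countdown of 'level' in the loops below. */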
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0) {
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

  /* put coordinate info in levels */
  if (pc_ml->dim) {
    PetscInt  i,j,dim = pc_ml->dim;
    PetscInt  bs, nloc;
    PC        subpc;
    PetscReal *array;

    level = fine_level;
    for (mllevel = 0; mllevel < Nlevels; mllevel++) {
      ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
      MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;

      ierr  = MatGetBlockSize(gridctx[level].A,&bs);CHKERRQ(ierr);
      ierr  = MatGetLocalSize(gridctx[level].A,NULL,&nloc);CHKERRQ(ierr);
      nloc /= bs; /* number of local nodes */

      ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
      ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      for (j = 0; j < nloc; j++) {
        for (i = 0; i < dim; i++) {
          switch (i) {
          case 0: array[dim * j + i] = grid_info->x[j]; break;
          case 1: array[dim * j + i] = grid_info->y[j]; break;
          case 2: array[dim * j + i] = grid_info->z[j]; break;
          default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
          }
        }
      }

      /* pass the coordinates to the smoothers/coarse solver, should they need them */
      ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
      ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
      ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      level--;
    }
  }

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel,partindx;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  const char     *part[]   = {"Zoltan","ParMETIS"};
#if defined(HAVE_ML_ZOLTAN)
  const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
#endif
  PC_MG       *mg    = (PC_MG*)pc->data;
  PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt size;
  MPI_Comm    comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);

  PrintLevel = 0;
  indx       = 0;
  partindx   = 0;

  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
  PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate coarsening scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);

  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
    functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
   */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
  if (pc_ml->Repartition) {
    ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
#if defined(HAVE_ML_ZOLTAN)
    partindx = 0;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);

    pc_ml->RepartitionType = partindx;
    if (!partindx) {
      PetscInt zindx = 0;

      ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);

      pc_ml->ZoltanScheme = zindx;
    }
#else
    partindx = 1;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
    pc_ml->RepartitionType = partindx;
    if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
#endif
    ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based Laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
.  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
.  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
.  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
.  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
.  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
.  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
.  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based Laplacian (None)
-  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)

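   Usage (a minimal sketch of the standard KSP/PC calling sequence; error checking omitted):
.vb
   KSPCreate(comm,&ksp);
   KSPSetOperators(ksp,A,A);
   KSPGetPC(ksp,&pc);
   PCSetType(pc,PCML);
   KSPSetFromOptions(ksp);   /* picks up the -pc_ml_* options above */
   KSPSolve(ksp,b,x);
.ve
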
   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/

#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr         = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->galerkin = 2;             /* Use Galerkin, but it is computed externally by ML */

  /* create a supporting struct and attach it to pc */
  ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;
  pc_ml->dim                      = 0;
  pc_ml->nloc                     = 0;
  pc_ml->coords                   = 0;
  pc_ml->Repartition              = PETSC_FALSE;
  pc_ml->MaxMinRatio              = 1.3;
  pc_ml->MinPerProc               = 512;
  pc_ml->PutOnSingleProc          = 5000;
  pc_ml->RepartitionType          = 0;
  pc_ml->ZoltanScheme             = 0;
  pc_ml->Aux                      = PETSC_FALSE;
  pc_ml->AuxThreshold             = 0.0;

  /* allow coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
1248