xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision bb74729cafbfcb6265ea097979a2a2cac4d11507)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 EXTERN_C_BEGIN
13 /* HAVE_CONFIG_H flag is required by ML include files */
14 #if !defined(HAVE_CONFIG_H)
15 #define HAVE_CONFIG_H
16 #endif
17 #include <ml_include.h>
18 #include <ml_viz_stats.h>
19 EXTERN_C_END
20 
/* How the near-null space handed to ML's aggregation is obtained:
   AUTO   - pick USER if MatSetNearNullSpace() was called, else BLOCK if block size > 1, else SCALAR
   USER   - copy the vectors attached with MatSetNearNullSpace()
   BLOCK  - bs constant vectors, one per component of the block
   SCALAR - ML's default single constant vector */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
/* String table for PetscOptionsEnum(); the trailing entries are the enum type name, the option prefix, and the terminator */
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
23 
/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors: solution, right-hand side, and residual at this level */
  Mat A,P,R;                  /* level operator, prolongation, and restriction */
  KSP ksp;                    /* linear solver used at this level */
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;
31 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;       /* PETSc matrix in aij format */
  Mat         Aloc;    /* local portion of A (owned columns followed by ghost columns) to be used by ML */
  Vec         x,y;     /* sequential work vectors the matvec/comm callbacks wrap around ML's raw arrays */
  ML_Operator *mlmat;
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;
40 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;        /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;   /* ML matrix associated with A */
  Vec         y, work;  /* y: output work vector; work: created lazily by MatMultAdd_ML() when y == w */
} Mat_MLShell;
47 
/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;          /* handle to the ML multigrid hierarchy */
  ML_Aggregate      *agg_object;         /* ML aggregation object */
  GridCtx           *gridctx;            /* per-level PETSc objects, indexed coarse (0) to fine (Nlevels-1) */
  FineGridCtx       *PetscMLdata;        /* fine-grid matrix wrapper handed to the ML callbacks */
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation; /* if true and the nonzero pattern is unchanged, keep the interpolation and only recompute coarse operators */
  PCMLNullSpaceType nulltype;            /* how the near-null space given to ML is obtained (see PCMLNullSpaceType) */
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc;
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
64 
65 #undef __FUNCT__
66 #define __FUNCT__ "PetscML_getrow"
67 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
68 {
69   PetscErrorCode ierr;
70   PetscInt       m,i,j,k=0,row,*aj;
71   PetscScalar    *aa;
72   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
73   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
74 
75   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
76   for (i = 0; i<N_requested_rows; i++) {
77     row            = requested_rows[i];
78     row_lengths[i] = a->ilen[row];
79     if (allocated_space < k+row_lengths[i]) return(0);
80     if ((row >= 0) || (row <= (m-1))) {
81       aj = a->j + a->i[row];
82       aa = a->a + a->i[row];
83       for (j=0; j<row_lengths[i]; j++) {
84         columns[k]  = aj[j];
85         values[k++] = aa[j];
86       }
87     }
88   }
89   return(1);
90 }
91 
92 #undef __FUNCT__
93 #define __FUNCT__ "PetscML_comm"
94 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
95 {
96   PetscErrorCode ierr;
97   FineGridCtx    *ml = (FineGridCtx*)ML_data;
98   Mat            A   = ml->A;
99   Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
100   PetscMPIInt    size;
101   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
102   PetscScalar    *array;
103 
104   PetscFunctionBegin;
105   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
106   if (size == 1) return 0;
107 
108   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
109   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
110   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
111   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
112   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
113   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
114   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
115   PetscFunctionReturn(0);
116 }
117 
118 #undef __FUNCT__
119 #define __FUNCT__ "PetscML_matvec"
120 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
121 {
122   PetscErrorCode ierr;
123   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
124   Mat            A   = ml->A, Aloc=ml->Aloc;
125   PetscMPIInt    size;
126   PetscScalar    *pwork=ml->pwork;
127   PetscInt       i;
128 
129   PetscFunctionBegin;
130   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
131   if (size == 1) {
132     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
133   } else {
134     for (i=0; i<in_length; i++) pwork[i] = p[i];
135     ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
136     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
137   }
138   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
139   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
140   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
141   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
142   PetscFunctionReturn(0);
143 }
144 
145 #undef __FUNCT__
146 #define __FUNCT__ "MatMult_ML"
147 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
148 {
149   PetscErrorCode ierr;
150   Mat_MLShell    *shell;
151   PetscScalar    *xarray,*yarray;
152   PetscInt       x_length,y_length;
153 
154   PetscFunctionBegin;
155   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
156   ierr     = VecGetArray(x,&xarray);CHKERRQ(ierr);
157   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
158   x_length = shell->mlmat->invec_leng;
159   y_length = shell->mlmat->outvec_leng;
160   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray));
161   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
162   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
163   PetscFunctionReturn(0);
164 }
165 
166 #undef __FUNCT__
167 #define __FUNCT__ "MatMultAdd_ML"
168 /* Computes y = w + A * x
169    It is possible that w == y, but not x == y
170 */
171 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
172 {
173   Mat_MLShell    *shell;
174   PetscScalar    *xarray,*yarray;
175   PetscInt       x_length,y_length;
176   PetscErrorCode ierr;
177 
178   PetscFunctionBegin;
179   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
180   if (y == w) {
181     if (!shell->work) {
182       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
183     }
184     ierr     = VecGetArray(x,           &xarray);CHKERRQ(ierr);
185     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
186     x_length = shell->mlmat->invec_leng;
187     y_length = shell->mlmat->outvec_leng;
188     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
189     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
190     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
191     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
192   } else {
193     ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
194     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
195     x_length = shell->mlmat->invec_leng;
196     y_length = shell->mlmat->outvec_leng;
197     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
198     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
199     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
200     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
201   }
202   PetscFunctionReturn(0);
203 }
204 
/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Merge the diagonal (A) and off-diagonal (B) SeqAIJ pieces of an MPIAIJ matrix into one
   local SeqAIJ matrix *Aloc laid out the way ML expects: diagonal-block entries keep their
   local column indices, off-diagonal entries are shifted to start at column an.

   MAT_INITIAL_MATRIX builds fresh (i,j,a) arrays and hands ownership to *Aloc;
   MAT_REUSE_MATRIX assumes the nonzero pattern is unchanged and only overwrites values.
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row pointers: each merged row holds its diagonal-part plus off-diagonal-part entries */
    ierr  = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;   /* shift ghost columns past the local columns */
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* same sparsity pattern: overwrite the numerical values only */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
270 
271 extern PetscErrorCode MatDestroy_Shell(Mat);
272 #undef __FUNCT__
273 #define __FUNCT__ "MatDestroy_ML"
274 static PetscErrorCode MatDestroy_ML(Mat A)
275 {
276   PetscErrorCode ierr;
277   Mat_MLShell    *shell;
278 
279   PetscFunctionBegin;
280   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
281   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
282   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
283   ierr = PetscFree(shell);CHKERRQ(ierr);
284   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
285   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
286   PetscFunctionReturn(0);
287 }
288 
289 #undef __FUNCT__
290 #define __FUNCT__ "MatWrapML_SeqAIJ"
291 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
292 {
293   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
294   PetscErrorCode        ierr;
295   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
296   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
297   PetscScalar           *ml_vals=matdata->values,*aa;
298 
299   PetscFunctionBegin;
300   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
301   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
302     if (reuse) {
303       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
304       aij->i = ml_rowptr;
305       aij->j = ml_cols;
306       aij->a = ml_vals;
307     } else {
308       /* sort ml_cols and ml_vals */
309       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
310       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
311       aj = ml_cols; aa = ml_vals;
312       for (i=0; i<m; i++) {
313         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
314         aj  += nnz[i]; aa += nnz[i];
315       }
316       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
317       ierr = PetscFree(nnz);CHKERRQ(ierr);
318     }
319     PetscFunctionReturn(0);
320   }
321 
322   nz_max = PetscMax(1,mlmat->max_nz_per_row);
323   ierr   = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
324   if (!reuse) {
325     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
326     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
327     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
328     /* keep track of block size for A matrices */
329     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
330 
331     ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
332     for (i=0; i<m; i++) {
333       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
334     }
335     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
336   }
337   for (i=0; i<m; i++) {
338     PetscInt ncols;
339 
340     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
341     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
342   }
343   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
344   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
345 
346   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
347   ierr = PetscFree(nnz);CHKERRQ(ierr);
348   PetscFunctionReturn(0);
349 }
350 
351 #undef __FUNCT__
352 #define __FUNCT__ "MatWrapML_SHELL"
353 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
354 {
355   PetscErrorCode ierr;
356   PetscInt       m,n;
357   ML_Comm        *MLcomm;
358   Mat_MLShell    *shellctx;
359 
360   PetscFunctionBegin;
361   m = mlmat->outvec_leng;
362   n = mlmat->invec_leng;
363 
364   if (reuse) {
365     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
366     shellctx->mlmat = mlmat;
367     PetscFunctionReturn(0);
368   }
369 
370   MLcomm = mlmat->comm;
371 
372   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
373   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
374   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
375   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
376 
377   shellctx->A         = *newmat;
378   shellctx->mlmat     = mlmat;
379   shellctx->work      = NULL;
380 
381   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
382   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
383   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
384 
385   (*newmat)->ops->destroy = MatDestroy_ML;
386   PetscFunctionReturn(0);
387 }
388 
389 #undef __FUNCT__
390 #define __FUNCT__ "MatWrapML_MPIAIJ"
391 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
392 {
393   PetscInt       *aj;
394   PetscScalar    *aa;
395   PetscErrorCode ierr;
396   PetscInt       i,j,*gordering;
397   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
398   Mat            A;
399 
400   PetscFunctionBegin;
401   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
402   n = mlmat->invec_leng;
403   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
404 
405   nz_max = PetscMax(1,mlmat->max_nz_per_row);
406   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
407   if (reuse) A = *newmat;
408   else {
409     PetscInt *nnzA,*nnzB,*nnz;
410     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
411     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
412     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
413     /* keep track of block size for A matrices */
414     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
415     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
416 
417     for (i=0; i<m; i++) {
418       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
419       nnzA[i] = 0;
420       for (j=0; j<nnz[i]; j++) {
421         if (aj[j] < m) nnzA[i]++;
422       }
423       nnzB[i] = nnz[i] - nnzA[i];
424     }
425     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
426     ierr = PetscFree3(nnzA,nnzB,nnz);
427   }
428   /* create global row numbering for a ML_Operator */
429   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
430   for (i=0; i<m; i++) {
431     PetscInt ncols;
432     row = gordering[i];
433 
434     PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
435     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
436     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
437   }
438   PetscStackCall("ML_Operator_Getrow",ML_free(gordering));
439   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
440   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
441   *newmat = A;
442 
443   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
444   PetscFunctionReturn(0);
445 }
446 
447 /* -------------------------------------------------------------------------- */
448 /*
449    PCSetCoordinates_ML
450 
451    Input Parameter:
452    .  pc - the preconditioner context
453 */
454 #undef __FUNCT__
455 #define __FUNCT__ "PCSetCoordinates_ML"
456 PETSC_EXTERN_C PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
457 {
458   PC_MG          *mg    = (PC_MG*)pc->data;
459   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
460   PetscErrorCode ierr;
461   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
462   Mat            Amat = pc->pmat;
463 
464   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
465   PetscFunctionBegin;
466   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
467   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
468 
469   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
470   nloc = (Iend-my0)/bs;
471 
472   if (nloc!=a_nloc) SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
473   if ((Iend-my0)%bs!=0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);
474 
475   oldarrsz    = pc_ml->dim * pc_ml->nloc;
476   pc_ml->dim  = ndm;
477   pc_ml->nloc = a_nloc;
478   arrsz       = ndm * a_nloc;
479 
480   /* create data - syntactic sugar that should be refactored at some point */
481   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
482     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
483     ierr = PetscMalloc((arrsz)*sizeof(PetscReal), &pc_ml->coords);CHKERRQ(ierr);
484   }
485   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
486   /* copy data in - column oriented */
487   for (kk = 0; kk < nloc; kk++) {
488     for (ii = 0; ii < ndm; ii++) {
489       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
490     }
491   }
492   PetscFunctionReturn(0);
493 }
494 
495 /* -----------------------------------------------------------------------------*/
496 #undef __FUNCT__
497 #define __FUNCT__ "PCReset_ML"
498 PetscErrorCode PCReset_ML(PC pc)
499 {
500   PetscErrorCode ierr;
501   PC_MG          *mg    = (PC_MG*)pc->data;
502   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
503   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
504 
505   PetscFunctionBegin;
506   if (dim) {
507     ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
508 
509     for (level=0; level<=fine_level; level++) {
510       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
511     }
512 
513     grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
514     grid_info->y = 0;
515     grid_info->z = 0;
516 
517     PetscStackCall("ML_Operator_Getrow",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
518   }
519   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
520   PetscStackCall("ML_Aggregate_Destroy",ML_Destroy(&pc_ml->ml_object));
521 
522   if (pc_ml->PetscMLdata) {
523     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
524     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
525     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
526     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
527   }
528   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
529 
530   if (pc_ml->gridctx) {
531     for (level=0; level<fine_level; level++) {
532       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
533       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
534       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
535       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
536       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
537       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
538     }
539   }
540   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
541   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
542 
543   pc_ml->dim  = 0;
544   pc_ml->nloc = 0;
545   PetscFunctionReturn(0);
546 }
547 /* -------------------------------------------------------------------------- */
548 /*
549    PCSetUp_ML - Prepares for the use of the ML preconditioner
550                     by setting data structures and options.
551 
552    Input Parameter:
553 .  pc - the preconditioner context
554 
555    Application Interface Routine: PCSetUp()
556 
557    Notes:
558    The interface routine PCSetUp() is not usually called directly by
559    the user, but instead is called by PCApply() if necessary.
560 */
561 extern PetscErrorCode PCSetFromOptions_MG(PC);
562 extern PetscErrorCode PCReset_MG(PC);
563 
564 #undef __FUNCT__
565 #define __FUNCT__ "PCSetUp_ML"
566 PetscErrorCode PCSetUp_ML(PC pc)
567 {
568   PetscErrorCode ierr;
569   PetscMPIInt    size;
570   FineGridCtx    *PetscMLdata;
571   ML             *ml_object;
572   ML_Aggregate   *agg_object;
573   ML_Operator    *mlmat;
574   PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
575   Mat            A,Aloc;
576   GridCtx        *gridctx;
577   PC_MG          *mg    = (PC_MG*)pc->data;
578   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
579   PetscBool      isSeq, isMPI;
580   KSP            smoother;
581   PC             subpc;
582   PetscInt       mesh_level, old_mesh_level;
583 
584   PetscFunctionBegin;
585   A    = pc->pmat;
586   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
587 
588   if (pc->setupcalled) {
589     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
590       /*
591        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
592        multiple solves in which the matrix is not changing too quickly.
593        */
594       ml_object             = pc_ml->ml_object;
595       gridctx               = pc_ml->gridctx;
596       Nlevels               = pc_ml->Nlevels;
597       fine_level            = Nlevels - 1;
598       gridctx[fine_level].A = A;
599 
600       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
601       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
602       if (isMPI) {
603         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
604       } else if (isSeq) {
605         Aloc = A;
606         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
607       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
608 
609       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
610       PetscMLdata       = pc_ml->PetscMLdata;
611       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
612       PetscMLdata->A    = A;
613       PetscMLdata->Aloc = Aloc;
614       PetscStackCall("ML_Aggregate_Destroy",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
615       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
616 
617       mesh_level = ml_object->ML_finest_level;
618       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
619         old_mesh_level = mesh_level;
620         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
621 
622         /* clean and regenerate A */
623         mlmat = &(ml_object->Amat[mesh_level]);
624         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
625         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
626         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
627       }
628 
629       level = fine_level - 1;
630       if (size == 1) { /* convert ML P, R and A into seqaij format */
631         for (mllevel=1; mllevel<Nlevels; mllevel++) {
632           mlmat = &(ml_object->Amat[mllevel]);
633           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
634           level--;
635         }
636       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
637         for (mllevel=1; mllevel<Nlevels; mllevel++) {
638           mlmat  = &(ml_object->Amat[mllevel]);
639           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
640           level--;
641         }
642       }
643 
644       for (level=0; level<fine_level; level++) {
645         if (level > 0) {
646           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
647         }
648         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
649       }
650       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
651       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
652 
653       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
654       PetscFunctionReturn(0);
655     } else {
656       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
657       ierr = PCReset_ML(pc);CHKERRQ(ierr);
658       ierr = PCReset_MG(pc);CHKERRQ(ierr);
659     }
660   }
661 
662   /* setup special features of PCML */
663   /*--------------------------------*/
664   /* covert A to Aloc to be used by ML at fine grid */
665   pc_ml->size = size;
666   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
667   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
668   if (isMPI) {
669     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
670   } else if (isSeq) {
671     Aloc = A;
672     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
673   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
674 
675   /* create and initialize struct 'PetscMLdata' */
676   ierr               = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
677   pc_ml->PetscMLdata = PetscMLdata;
678   ierr               = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
679 
680   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
681   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
682   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
683 
684   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
685   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
686   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
687   PetscMLdata->A    = A;
688   PetscMLdata->Aloc = Aloc;
689   if (pc_ml->dim) { /* create vecs around the coordinate data given */
690     PetscInt  i,j,dim=pc_ml->dim;
691     PetscInt  nloc = pc_ml->nloc,nlocghost;
692     PetscReal *ghostedcoords;
693 
694     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
695     nlocghost = Aloc->cmap->n / bs;
696     ierr      = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr);
697     for (i = 0; i < dim; i++) {
698       /* copy coordinate values into first component of pwork */
699       for (j = 0; j < nloc; j++) {
700         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
701       }
702       /* get the ghost values */
703       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
704       /* write into the vector */
705       for (j = 0; j < nlocghost; j++) {
706         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
707       }
708     }
709     /* replace the original coords with the ghosted coords, because these are
710      * what ML needs */
711     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
712     pc_ml->coords = ghostedcoords;
713   }
714 
715   /* create ML discretization matrix at fine grid */
716   /* ML requires input of fine-grid matrix. It determines nlevels. */
717   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
718   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
719   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
720   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm));
721   pc_ml->ml_object = ml_object;
722   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
723   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
724   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
725 
726   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
727 
728   /* aggregation */
729   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
730   pc_ml->agg_object = agg_object;
731 
732   {
733     MatNullSpace mnull;
734     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
735     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
736       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
737       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
738       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
739     }
740     switch (pc_ml->nulltype) {
741     case PCML_NULLSPACE_USER: {
742       PetscScalar       *nullvec;
743       const PetscScalar *v;
744       PetscBool         has_const;
745       PetscInt          i,j,mlocal,nvec,M;
746       const Vec         *vecs;
747 
748       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
749       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
750       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
751       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
752       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
753       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
754       for (i=0; i<nvec; i++) {
755         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
756         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
757         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
758       }
759       PetscStackCall("ML_Aggregate_Create",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
760       ierr = PetscFree(nullvec);CHKERRQ(ierr);
761     } break;
762     case PCML_NULLSPACE_BLOCK:
763       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
764       break;
765     case PCML_NULLSPACE_SCALAR:
766       break;
767     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
768     }
769   }
770   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
771   /* set options */
772   switch (pc_ml->CoarsenScheme) {
773   case 1:
774     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
775   case 2:
776     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
777   case 3:
778     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
779   }
780   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
781   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
782   if (pc_ml->SpectralNormScheme_Anorm) {
783     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
784   }
785   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
786   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
787   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
788   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
789   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
790   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
791 
792   if (pc_ml->Aux) {
793     if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
794     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
795     ml_object->Amat[0].aux_data->enable    = 1;
796     ml_object->Amat[0].aux_data->max_level = 10;
797     ml_object->Amat[0].num_PDEs            = bs;
798   }
799 
800   if (pc_ml->dim) {
801     PetscInt               i,dim = pc_ml->dim;
802     ML_Aggregate_Viz_Stats *grid_info;
803     PetscInt               nlocghost;
804 
805     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
806     nlocghost = Aloc->cmap->n / bs;
807 
808     PetscStackCall("ML_Aggregate_VizAndStats_Setup(",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
809     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
810     for (i = 0; i < dim; i++) {
811       /* set the finest level coordinates to point to the column-order array
812        * in pc_ml */
813       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
814       switch (i) {
815       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
816       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
817       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
818       default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
819       }
820     }
821     grid_info->Ndim = dim;
822   }
823 
824   /* repartitioning */
825   if (pc_ml->Repartition) {
826     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
827     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
828     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
829     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
830 #if 0                           /* Function not yet defined in ml-6.2 */
831     /* I'm not sure what compatibility issues might crop up if we partitioned
832      * on the finest level, so to be safe repartition starts on the next
833      * finest level (reflection default behavior in
834      * ml_MultiLevelPreconditioner) */
835     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
836 #endif
837 
838     if (!pc_ml->RepartitionType) {
839       PetscInt i;
840 
841       if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
842       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
843       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
844 
845       for (i = 0; i < ml_object->ML_num_levels; i++) {
846         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
847         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
848         /* defaults from ml_agg_info.c */
849         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
850         grid_info->zoltan_timers        = 0;
851         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
852       }
853     } else {
854       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
855     }
856   }
857 
858   if (pc_ml->OldHierarchy) {
859     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
860   } else {
861     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
862   }
863   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
864   pc_ml->Nlevels = Nlevels;
865   fine_level     = Nlevels - 1;
866 
867   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
868   /* set default smoothers */
869   for (level=1; level<=fine_level; level++) {
870     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
871     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
872     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
873     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
874   }
875   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
876   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
877   ierr = PetscOptionsEnd();CHKERRQ(ierr);
878 
879   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
880 
881   pc_ml->gridctx = gridctx;
882 
883   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
884      Level 0 is the finest grid for ML, but coarsest for PETSc! */
885   gridctx[fine_level].A = A;
886 
887   level = fine_level - 1;
888   if (size == 1) { /* convert ML P, R and A into seqaij format */
889     for (mllevel=1; mllevel<Nlevels; mllevel++) {
890       mlmat = &(ml_object->Pmat[mllevel]);
891       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
892       mlmat = &(ml_object->Rmat[mllevel-1]);
893       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
894 
895       mlmat = &(ml_object->Amat[mllevel]);
896       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
897       level--;
898     }
899   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
900     for (mllevel=1; mllevel<Nlevels; mllevel++) {
901       mlmat  = &(ml_object->Pmat[mllevel]);
902       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
903       mlmat  = &(ml_object->Rmat[mllevel-1]);
904       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
905 
906       mlmat  = &(ml_object->Amat[mllevel]);
907       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
908       level--;
909     }
910   }
911 
912   /* create vectors and ksp at all levels */
913   for (level=0; level<fine_level; level++) {
914     level1 = level + 1;
915     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
916     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
917     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
918     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
919 
920     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
921     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
922     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
923     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
924 
925     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
926     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
927     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
928     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
929 
930     if (level == 0) {
931       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
932     } else {
933       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
934     }
935   }
936   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
937 
938   /* create coarse level and the interpolation between the levels */
939   for (level=0; level<fine_level; level++) {
940     level1 = level + 1;
941     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
942     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
943     if (level > 0) {
944       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
945     }
946     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
947   }
948   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
949   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
950 
951   /* put coordinate info in levels */
952   if (pc_ml->dim) {
953     PetscInt  i,j,dim = pc_ml->dim;
954     PetscInt  bs, nloc;
955     PC        subpc;
956     PetscReal *array;
957 
958     level = fine_level;
959     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
960       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
961       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
962 
963       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
964       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
965       nloc /= bs; /* number of local nodes */
966 
967       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
968       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
969       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
970       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
971       for (j = 0; j < nloc; j++) {
972         for (i = 0; i < dim; i++) {
973           switch (i) {
974           case 0: array[dim * j + i] = grid_info->x[j]; break;
975           case 1: array[dim * j + i] = grid_info->y[j]; break;
976           case 2: array[dim * j + i] = grid_info->z[j]; break;
977           default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
978           }
979         }
980       }
981 
982       /* passing coordinates to smoothers/coarse solver, should they need them */
983       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
984       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
985       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
986       level--;
987     }
988   }
989 
990   /* setupcalled is set to 0 so that MG is setup from scratch */
991   pc->setupcalled = 0;
992   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
993   PetscFunctionReturn(0);
994 }
995 
996 /* -------------------------------------------------------------------------- */
997 /*
998    PCDestroy_ML - Destroys the private context for the ML preconditioner
999    that was created with PCCreate_ML().
1000 
1001    Input Parameter:
1002 .  pc - the preconditioner context
1003 
1004    Application Interface Routine: PCDestroy()
1005 */
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg   = (PC_MG*)pc->data;
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  /* Release all per-level/ML data first, while the PC_ML context is still valid */
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  /* Free the ML-specific context before PCDestroy_MG() releases the enclosing
     PC_MG structure that referenced it through innerctx */
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  /* Unregister the PCSetCoordinates() implementation composed in PCCreate_ML() */
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1021 
1022 #undef __FUNCT__
1023 #define __FUNCT__ "PCSetFromOptions_ML"
1024 PetscErrorCode PCSetFromOptions_ML(PC pc)
1025 {
1026   PetscErrorCode ierr;
1027   PetscInt       indx,PrintLevel,partindx;
1028   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1029   const char     *part[]   = {"Zoltan","ParMETIS"};
1030 #if defined(HAVE_ML_ZOLTAN)
1031   PetscInt   zidx;
1032   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1033 #endif
1034   PC_MG       *mg    = (PC_MG*)pc->data;
1035   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1036   PetscMPIInt size;
1037   MPI_Comm    comm = ((PetscObject)pc)->comm;
1038 
1039   PetscFunctionBegin;
1040   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1041   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
1042 
1043   PrintLevel = 0;
1044   indx       = 0;
1045   partindx   = 0;
1046 
1047   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
1048   PetscStackCall("ML_Set_PrintLeve",ML_Set_PrintLevel(PrintLevel));
1049   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
1050   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
1051   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
1052 
1053   pc_ml->CoarsenScheme = indx;
1054 
1055   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
1056   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
1057   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
1058   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
1059   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
1060   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
1061   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
1062   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
1063   /*
1064     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1065     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1066 
1067     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1068     combination of options and ML's exit(1) explanations don't help matters.
1069   */
1070   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1071   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1072   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
1073   if (pc_ml->EnergyMinimization) {
1074     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1075   }
1076   if (pc_ml->EnergyMinimization == 2) {
1077     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1078     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1079   }
1080   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1081   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1082   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1083   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1084   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1085   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1086   /*
1087     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1088     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1089     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1090     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1091     this context, but ML doesn't provide a way to find out which ones.
1092    */
1093   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1094   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1095   if (pc_ml->Repartition) {
1096     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1097     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1098     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1099 #if defined(HAVE_ML_ZOLTAN)
1100     partindx = 0;
1101     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1102 
1103     pc_ml->RepartitionType = partindx;
1104     if (!partindx) {
1105       PetscInt zindx = 0;
1106 
1107       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1108 
1109       pc_ml->ZoltanScheme = zindx;
1110     }
1111 #else
1112     partindx = 1;
1113     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1114     if (!partindx) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1115 #endif
1116     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1117     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1118   }
1119   ierr = PetscOptionsTail();CHKERRQ(ierr);
1120   PetscFunctionReturn(0);
1121 }
1122 
1123 /* -------------------------------------------------------------------------- */
1124 /*
1125    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1126    and sets this as the private data within the generic preconditioning
1127    context, PC, that was created within PCCreate().
1128 
1129    Input Parameter:
1130 .  pc - the preconditioner context
1131 
1132    Application Interface Routine: PCCreate()
1133 */
1134 
1135 /*MC
1136      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
1137        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
1139        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1140 
1141    Options Database Key:
1142    Multigrid options(inherited)
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
1146    -pc_mg_type <multiplicative>: (one of) additive multiplicative full kascade
1147    ML options:
1148 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1149 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1150 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1151 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1152 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1153 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1154 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1156 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1157 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1158 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1159 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1160 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1161 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
1162 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1163 
1164    Level: intermediate
1165 
1166   Concepts: multigrid
1167 
1168 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
1170            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1171            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1172            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1173 M*/
1174 
1175 EXTERN_C_BEGIN
1176 #undef __FUNCT__
1177 #define __FUNCT__ "PCCreate_ML"
1178 PetscErrorCode  PCCreate_ML(PC pc)
1179 {
1180   PetscErrorCode ierr;
1181   PC_ML          *pc_ml;
1182   PC_MG          *mg;
1183 
1184   PetscFunctionBegin;
1185   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1186   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1187   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1188   /* Since PCMG tries to use DM assocated with PC must delete it */
1189   ierr         = DMDestroy(&pc->dm);CHKERRQ(ierr);
1190   mg           = (PC_MG*)pc->data;
1191   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
1192 
1193   /* create a supporting struct and attach it to pc */
1194   ierr         = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
1195   mg->innerctx = pc_ml;
1196 
1197   pc_ml->ml_object                = 0;
1198   pc_ml->agg_object               = 0;
1199   pc_ml->gridctx                  = 0;
1200   pc_ml->PetscMLdata              = 0;
1201   pc_ml->Nlevels                  = -1;
1202   pc_ml->MaxNlevels               = 10;
1203   pc_ml->MaxCoarseSize            = 1;
1204   pc_ml->CoarsenScheme            = 1;
1205   pc_ml->Threshold                = 0.0;
1206   pc_ml->DampingFactor            = 4.0/3.0;
1207   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1208   pc_ml->size                     = 0;
1209   pc_ml->dim                      = 0;
1210   pc_ml->nloc                     = 0;
1211   pc_ml->coords                   = 0;
1212   pc_ml->Repartition              = PETSC_FALSE;
1213   pc_ml->MaxMinRatio              = 1.3;
1214   pc_ml->MinPerProc               = 512;
1215   pc_ml->PutOnSingleProc          = 5000;
1216   pc_ml->RepartitionType          = 0;
1217   pc_ml->ZoltanScheme             = 0;
1218   pc_ml->Aux                      = PETSC_FALSE;
1219   pc_ml->AuxThreshold             = 0.0;
1220 
1221   /* allow for coordinates to be passed */
1222   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","PCSetCoordinates_ML",PCSetCoordinates_ML);CHKERRQ(ierr);
1223 
1224   /* overwrite the pointers of PCMG by the functions of PCML */
1225   pc->ops->setfromoptions = PCSetFromOptions_ML;
1226   pc->ops->setup          = PCSetUp_ML;
1227   pc->ops->reset          = PCReset_ML;
1228   pc->ops->destroy        = PCDestroy_ML;
1229   PetscFunctionReturn(0);
1230 }
1231 EXTERN_C_END
1232