xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision a685ae26bbf9fb229cf02e15e73d16b8a9cd69e6)
1 
2 /*
3    Provides an interface to the ML smoothed aggregation preconditioner
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <petsc/private/pcmgimpl.h>                    /*I "petscksp.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 #include <petscdm.h>            /* for DMDestroy(&pc->dm) hack */
12 
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
22 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
23 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
25 /* The context (data structure) at each grid level */
26 typedef struct {
27   Vec x,b,r;                  /* global vectors */
28   Mat A,P,R;
29   KSP ksp;
30   Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
31 } GridCtx;
32 
33 /* The context used to input the PETSc matrix into ML at the fine grid */
34 typedef struct {
35   Mat         A;       /* PETSc matrix in AIJ format */
36   Mat         Aloc;    /* local portion of A to be used by ML */
37   Vec         x,y;
38   ML_Operator *mlmat;
39   PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
40 } FineGridCtx;
41 
42 /* The context that associates an ML matrix with a PETSc shell matrix */
43 typedef struct {
44   Mat         A;        /* PETSc shell matrix associated with mlmat */
45   ML_Operator *mlmat;   /* ML matrix associated with A */
46   Vec         y, work;
47 } Mat_MLShell;
48 
49 /* Private context for the ML preconditioner */
50 typedef struct {
51   ML                *ml_object;
52   ML_Aggregate      *agg_object;
53   GridCtx           *gridctx;
54   FineGridCtx       *PetscMLdata;
55   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
56   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
57   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
58   PetscBool         reuse_interpolation;
59   PCMLNullSpaceType nulltype;
60   PetscMPIInt       size; /* size of communicator for pc->pmat */
61   PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
62   PetscInt          nloc;
63   PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
64 } PC_ML;
65 
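/* ML getrow callback: copies the requested rows of the merged local matrix
   Aloc into ML's (columns,values) buffers. Following ML's getrow convention,
   it returns 0 when allocated_space is too small for the requested rows and
   1 on success. */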
66 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
67 {
68   PetscErrorCode ierr;
69   PetscInt       m,i,j,k=0,row,*aj;
70   PetscScalar    *aa;
71   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
72   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
73 
74   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
75   for (i = 0; i<N_requested_rows; i++) {
76     row            = requested_rows[i];
77     row_lengths[i] = a->ilen[row];
78     if (allocated_space < k+row_lengths[i]) return(0);
79     if ((row >= 0) && (row <= (m-1))) {
80       aj = a->j + a->i[row];
81       aa = a->a + a->i[row];
82       for (j=0; j<row_lengths[i]; j++) {
83         columns[k]  = aj[j];
84         values[k++] = aa[j];
85       }
86     }
87   }
88   return(1);
89 }
90 
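/* ML communication callback: p holds the locally owned entries (length
   A->rmap->n); this fills the trailing ghost slots of p (up to Aloc->cmap->n)
   with off-process values using the MPIAIJ scatter Mvctx, matching the column
   layout produced by MatConvert_MPIAIJ_ML(). */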
91 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
92 {
93   PetscErrorCode    ierr;
94   FineGridCtx       *ml = (FineGridCtx*)ML_data;
95   Mat               A   = ml->A;
96   Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
97   PetscMPIInt       size;
98   PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
99   const PetscScalar *array;
100 
101   PetscFunctionBegin;
102   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
103   if (size == 1) PetscFunctionReturn(0);
104 
105   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
106   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
107   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
108   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
109   ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
110   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
111   ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
112   PetscFunctionReturn(0);
113 }
114 
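/* ML matvec callback: applies the merged local matrix Aloc. In parallel, p is
   first copied into the pwork buffer and its ghost entries are gathered with
   PetscML_comm() before the local MatMult(). */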
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A   = ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
126   if (size == 1) {
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
141 {
142   PetscErrorCode    ierr;
143   Mat_MLShell       *shell;
144   PetscScalar       *yarray;
145   const PetscScalar *xarray;
146   PetscInt          x_length,y_length;
147 
148   PetscFunctionBegin;
149   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
150   ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
151   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
152   x_length = shell->mlmat->invec_leng;
153   y_length = shell->mlmat->outvec_leng;
154   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
155   ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
156   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
157   PetscFunctionReturn(0);
158 }
159 
160 /* Computes y = w + A * x
161    It is possible that w == y, but not x == y
162 */
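/* When w == y the product cannot be applied directly into y, since
   ML_Operator_Apply() would overwrite w before it is added; instead the
   product goes into a lazily created scratch vector (shell->work) and is
   accumulated with VecAXPY(). */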
163 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
164 {
165   Mat_MLShell       *shell;
166   PetscScalar       *yarray;
167   const PetscScalar *xarray;
168   PetscInt          x_length,y_length;
169   PetscErrorCode    ierr;
170 
171   PetscFunctionBegin;
172   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
173   if (y == w) {
174     if (!shell->work) {
175       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
176     }
177     ierr     = VecGetArrayRead(x,           &xarray);CHKERRQ(ierr);
178     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
179     x_length = shell->mlmat->invec_leng;
180     y_length = shell->mlmat->outvec_leng;
181     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar*)xarray, y_length, yarray));
182     ierr = VecRestoreArrayRead(x,           &xarray);CHKERRQ(ierr);
183     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
184     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
185   } else {
186     ierr     = VecGetArrayRead(x, &xarray);CHKERRQ(ierr);
187     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
188     x_length = shell->mlmat->invec_leng;
189     y_length = shell->mlmat->outvec_leng;
190     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar *)xarray, y_length, yarray));
191     ierr = VecRestoreArrayRead(x, &xarray);CHKERRQ(ierr);
192     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
193     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
194   }
195   PetscFunctionReturn(0);
196 }
197 
198 /* newtype is ignored since this routine handles only one case */
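/* The merged local matrix keeps diagonal-block column indices 0..an-1
   unchanged and maps each off-diagonal (ghost) column j to an+j, matching the
   order in which PetscML_comm() appends ghost values. For example
   (illustrative): with an = 4 local columns, a row with local columns {1,3}
   and ghost columns {0,1} is stored with columns {1,3,4,5}. */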
199 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
200 {
201   PetscErrorCode ierr;
202   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
203   Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
204   PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
205   PetscScalar    *aa=a->a,*ba=b->a,*ca;
206   PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
207   PetscInt       *ci,*cj,ncols;
208 
209   PetscFunctionBegin;
210   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
211 
212   if (scall == MAT_INITIAL_MATRIX) {
213     ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
214     ci[0] = 0;
215     for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
216     ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
217     ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
218 
219     k = 0;
220     for (i=0; i<am; i++) {
221       /* diagonal portion of A */
222       ncols = ai[i+1] - ai[i];
223       for (j=0; j<ncols; j++) {
224         cj[k]   = *aj++;
225         ca[k++] = *aa++;
226       }
227       /* off-diagonal portion of A */
228       ncols = bi[i+1] - bi[i];
229       for (j=0; j<ncols; j++) {
230         cj[k]   = an + (*bj); bj++;
231         ca[k++] = *ba++;
232       }
233     }
234     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
235 
236     /* put together the new matrix */
237     an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
238     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
239 
240     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
241     /* Since these are PETSc arrays, change flags to free them as necessary. */
242     mat          = (Mat_SeqAIJ*)(*Aloc)->data;
243     mat->free_a  = PETSC_TRUE;
244     mat->free_ij = PETSC_TRUE;
245 
246     mat->nonew = 0;
247   } else if (scall == MAT_REUSE_MATRIX) {
248     mat=(Mat_SeqAIJ*)(*Aloc)->data;
249     ci = mat->i; cj = mat->j; ca = mat->a;
250     for (i=0; i<am; i++) {
251       /* diagonal portion of A */
252       ncols = ai[i+1] - ai[i];
253       for (j=0; j<ncols; j++) *ca++ = *aa++;
254       /* off-diagonal portion of A */
255       ncols = bi[i+1] - bi[i];
256       for (j=0; j<ncols; j++) *ca++ = *ba++;
257     }
258   } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
259   PetscFunctionReturn(0);
260 }
261 
262 static PetscErrorCode MatDestroy_ML(Mat A)
263 {
264   PetscErrorCode ierr;
265   Mat_MLShell    *shell;
266 
267   PetscFunctionBegin;
268   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
269   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
270   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
271   ierr = PetscFree(shell);CHKERRQ(ierr);
272   PetscFunctionReturn(0);
273 }
274 
275 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
276 {
277   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
278   PetscErrorCode        ierr;
279   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
280   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
281   PetscScalar           *ml_vals=matdata->values,*aa;
282 
283   PetscFunctionBegin;
284   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
285   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
286     if (reuse) {
287       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
288       aij->i = ml_rowptr;
289       aij->j = ml_cols;
290       aij->a = ml_vals;
291     } else {
292       /* sort ml_cols and ml_vals */
293       ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
294       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
295       aj = ml_cols; aa = ml_vals;
296       for (i=0; i<m; i++) {
297         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
298         aj  += nnz[i]; aa += nnz[i];
299       }
300       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
301       ierr = PetscFree(nnz);CHKERRQ(ierr);
302     }
303     PetscFunctionReturn(0);
304   }
305 
306   nz_max = PetscMax(1,mlmat->max_nz_per_row);
307   ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
308   if (!reuse) {
309     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
310     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
311     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
312     /* keep track of block size for A matrices */
313     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
314 
315     ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
316     for (i=0; i<m; i++) {
317       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
318     }
319     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
320   }
321   for (i=0; i<m; i++) {
322     PetscInt ncols;
323 
324     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
325     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
326   }
327   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
328   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
329 
330   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
331   ierr = PetscFree(nnz);CHKERRQ(ierr);
332   PetscFunctionReturn(0);
333 }
334 
335 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
336 {
337   PetscErrorCode ierr;
338   PetscInt       m,n;
339   ML_Comm        *MLcomm;
340   Mat_MLShell    *shellctx;
341 
342   PetscFunctionBegin;
343   m = mlmat->outvec_leng;
344   n = mlmat->invec_leng;
345 
346   if (reuse) {
347     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
348     shellctx->mlmat = mlmat;
349     PetscFunctionReturn(0);
350   }
351 
352   MLcomm = mlmat->comm;
353 
354   ierr = PetscNew(&shellctx);CHKERRQ(ierr);
355   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
356   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
357   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
358   ierr = MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);CHKERRQ(ierr);
359 
360   shellctx->A         = *newmat;
361   shellctx->mlmat     = mlmat;
362   shellctx->work      = NULL;
363 
364   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
365   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
366   ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);
367   PetscFunctionReturn(0);
368 }
369 
370 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
371 {
372   PetscInt       *aj;
373   PetscScalar    *aa;
374   PetscErrorCode ierr;
375   PetscInt       i,j,*gordering;
376   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
377   Mat            A;
378 
379   PetscFunctionBegin;
380   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
381   n = mlmat->invec_leng;
382   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
383 
384   /* create global row numbering for an ML_Operator */
385   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
386 
387   nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
388   ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
389   if (reuse) {
390     A = *newmat;
391   } else {
392     PetscInt *nnzA,*nnzB,*nnz;
393     PetscInt rstart;
394     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
395     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
396     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
397     /* keep track of block size for A matrices */
398     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
399     ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
400     ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
401     rstart -= m;
402 
403     for (i=0; i<m; i++) {
404       row = gordering[i] - rstart;
405       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
406       nnzA[row] = 0;
407       for (j=0; j<nnz[i]; j++) {
408         if (aj[j] < m) nnzA[row]++;
409       }
410       nnzB[row] = nnz[i] - nnzA[row];
411     }
412     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
413     ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
414   }
415   for (i=0; i<m; i++) {
416     PetscInt ncols;
417     row = gordering[i];
418 
419     PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
420     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
421     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
422   }
423   PetscStackCall("ML_free",ML_free(gordering));
424   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
425   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
426   *newmat = A;
427 
428   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
429   PetscFunctionReturn(0);
430 }
431 
432 /* -------------------------------------------------------------------------- */
433 /*
434    PCSetCoordinates_ML
435 
436    Input Parameter:
437    .  pc - the preconditioner context
438 */
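/* A minimal usage sketch (illustrative only; assumes pc uses PCML and coords
   holds node-interlaced values, e.g. {x0,y0,x1,y1} for two nodes in 2D):

     PetscReal coords[] = {0.0,0.0, 1.0,0.0};
     ierr = PCSetCoordinates(pc,2,2,coords);CHKERRQ(ierr);

   The a_nloc argument may be either the number of local nodes (blocks) or the
   number of local rows; both are accepted by the check below. */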
439 static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
440 {
441   PC_MG          *mg    = (PC_MG*)pc->data;
442   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
443   PetscErrorCode ierr;
444   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend,aloc;
445   Mat            Amat = pc->pmat;
446 
447   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
448   PetscFunctionBegin;
449   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
450   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
451 
452   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
453   aloc = (Iend-my0);
454   nloc = (Iend-my0)/bs;
455 
456   if (nloc!=a_nloc && aloc != a_nloc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of local blocks %D must be %D or %D.",a_nloc,nloc,aloc);
457 
458   oldarrsz    = pc_ml->dim * pc_ml->nloc;
459   pc_ml->dim  = ndm;
460   pc_ml->nloc = nloc;
461   arrsz       = ndm * nloc;
462 
463   /* create data - syntactic sugar that should be refactored at some point */
464   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
465     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
466     ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
467   }
468   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
469   /* copy data in - column oriented */
470   if (nloc == a_nloc) {
471     for (kk = 0; kk < nloc; kk++) {
472       for (ii = 0; ii < ndm; ii++) {
473         pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
474       }
475     }
476   } else { /* assumes the coordinates are blocked */
477     for (kk = 0; kk < nloc; kk++) {
478       for (ii = 0; ii < ndm; ii++) {
479         pc_ml->coords[ii*nloc + kk] =  coords[bs*kk*ndm + ii];
480       }
481     }
482   }
483   PetscFunctionReturn(0);
484 }
485 
486 /* -----------------------------------------------------------------------------*/
487 extern PetscErrorCode PCReset_MG(PC);
488 PetscErrorCode PCReset_ML(PC pc)
489 {
490   PetscErrorCode ierr;
491   PC_MG          *mg    = (PC_MG*)pc->data;
492   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
493   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
494 
495   PetscFunctionBegin;
496   if (dim) {
497     for (level=0; level<=fine_level; level++) {
498       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
499     }
500     if (pc_ml->ml_object && pc_ml->ml_object->Grid) {
501       ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
502       grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
503       grid_info->y = 0;
504       grid_info->z = 0;
505       PetscStackCall("ML_Operator_Getrow",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
506     }
507   }
508   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
509   PetscStackCall("ML_Aggregate_Destroy",ML_Destroy(&pc_ml->ml_object));
510 
511   if (pc_ml->PetscMLdata) {
512     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
513     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
514     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
515     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
516   }
517   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
518 
519   if (pc_ml->gridctx) {
520     for (level=0; level<fine_level; level++) {
521       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
522       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
523       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
524       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
525       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
526       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
527     }
528   }
529   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
530   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
531 
532   pc_ml->dim  = 0;
533   pc_ml->nloc = 0;
534   ierr = PCReset_MG(pc);CHKERRQ(ierr);
535   PetscFunctionReturn(0);
536 }
537 /* -------------------------------------------------------------------------- */
538 /*
539    PCSetUp_ML - Prepares for the use of the ML preconditioner
540                     by setting data structures and options.
541 
542    Input Parameter:
543 .  pc - the preconditioner context
544 
545    Application Interface Routine: PCSetUp()
546 
547    Notes:
548    The interface routine PCSetUp() is not usually called directly by
549    the user, but instead is called by PCApply() if necessary.
550 */
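/* A typical command line might look like (illustrative):

     ./app -ksp_type cg -pc_type ml -pc_ml_maxNlevels 4 \
           -mg_levels_ksp_type richardson -mg_levels_pc_type sor

   The Richardson/SOR smoother defaults set below can be overridden with the
   usual -mg_levels_* and -mg_coarse_* options inherited from PCMG. */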
551 extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);
552 extern PetscErrorCode PCReset_MG(PC);
553 
554 PetscErrorCode PCSetUp_ML(PC pc)
555 {
556   PetscErrorCode   ierr;
557   PetscMPIInt      size;
558   FineGridCtx      *PetscMLdata;
559   ML               *ml_object;
560   ML_Aggregate     *agg_object;
561   ML_Operator      *mlmat;
562   PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
563   Mat              A,Aloc;
564   GridCtx          *gridctx;
565   PC_MG            *mg    = (PC_MG*)pc->data;
566   PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
567   PetscBool        isSeq, isMPI;
568   KSP              smoother;
569   PC               subpc;
570   PetscInt         mesh_level, old_mesh_level;
571   MatInfo          info;
572   static PetscBool cite = PETSC_FALSE;
573 
574   PetscFunctionBegin;
575   ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML} 3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution = {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
576   A    = pc->pmat;
577   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
578 
579   if (pc->setupcalled) {
580     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
581       /*
582        Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
583        multiple solves in which the matrix is not changing too quickly.
584        */
585       ml_object             = pc_ml->ml_object;
586       gridctx               = pc_ml->gridctx;
587       Nlevels               = pc_ml->Nlevels;
588       fine_level            = Nlevels - 1;
589       gridctx[fine_level].A = A;
590 
591       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
592       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
593       if (isMPI) {
594         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
595       } else if (isSeq) {
596         Aloc = A;
597         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
598       } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
599 
600       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
601       PetscMLdata       = pc_ml->PetscMLdata;
602       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
603       PetscMLdata->A    = A;
604       PetscMLdata->Aloc = Aloc;
605       PetscStackCall("ML_Aggregate_Destroy",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
606       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
607 
608       mesh_level = ml_object->ML_finest_level;
609       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
610         old_mesh_level = mesh_level;
611         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
612 
613         /* clean and regenerate A */
614         mlmat = &(ml_object->Amat[mesh_level]);
615         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
616         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
617         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
618       }
619 
620       level = fine_level - 1;
621       if (size == 1) { /* re-wrap the ML coarse A matrices in seqaij format (P and R are reused) */
622         for (mllevel=1; mllevel<Nlevels; mllevel++) {
623           mlmat = &(ml_object->Amat[mllevel]);
624           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
625           level--;
626         }
627       } else { /* re-wrap the ML coarse A matrices in mpiaij format (P and R are reused) */
628         for (mllevel=1; mllevel<Nlevels; mllevel++) {
629           mlmat  = &(ml_object->Amat[mllevel]);
630           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
631           level--;
632         }
633       }
634 
635       for (level=0; level<fine_level; level++) {
636         if (level > 0) {
637           ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
638         }
639         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
640       }
641       ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
642       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
643 
644       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
645       PetscFunctionReturn(0);
646     } else {
647       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
648       ierr = PCReset_ML(pc);CHKERRQ(ierr);
649     }
650   }
651 
652   /* setup special features of PCML */
653   /*--------------------------------*/
654   /* convert A to Aloc to be used by ML at the fine grid */
655   pc_ml->size = size;
656   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
657   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
658   if (isMPI) {
659     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
660   } else if (isSeq) {
661     Aloc = A;
662     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
663   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
664 
665   /* create and initialize struct 'PetscMLdata' */
666   ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
667   pc_ml->PetscMLdata = PetscMLdata;
668   ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);
669 
670   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
671   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
672   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
673 
674   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
675   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
676   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
677   PetscMLdata->A    = A;
678   PetscMLdata->Aloc = Aloc;
679   if (pc_ml->dim) { /* create vecs around the coordinate data given */
680     PetscInt  i,j,dim=pc_ml->dim;
681     PetscInt  nloc = pc_ml->nloc,nlocghost;
682     PetscReal *ghostedcoords;
683 
684     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
685     nlocghost = Aloc->cmap->n / bs;
686     ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
687     for (i = 0; i < dim; i++) {
688       /* copy coordinate values into first component of pwork */
689       for (j = 0; j < nloc; j++) {
690         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
691       }
692       /* get the ghost values */
693       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
694       /* write into the vector */
695       for (j = 0; j < nlocghost; j++) {
696         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
697       }
698     }
699     /* replace the original coords with the ghosted coords, because these are
700      * what ML needs */
701     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
702     pc_ml->coords = ghostedcoords;
703   }
704 
705   /* create ML discretization matrix at fine grid */
706   /* ML requires input of fine-grid matrix. It determines nlevels. */
707   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
708   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
709   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
710   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
711   pc_ml->ml_object = ml_object;
712   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
713   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
714   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
715 
716   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
717 
718   /* aggregation */
719   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
720   pc_ml->agg_object = agg_object;
721 
722   {
723     MatNullSpace mnull;
724     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
725     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
726       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
727       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
728       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
729     }
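    /* For PCML_NULLSPACE_USER the near null space must already be attached to
       the matrix, e.g. (illustrative) with MatNullSpaceCreateRigidBody() and
       MatSetNearNullSpace(), before PCSetUp() is reached. */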
730     switch (pc_ml->nulltype) {
731     case PCML_NULLSPACE_USER: {
732       PetscScalar       *nullvec;
733       const PetscScalar *v;
734       PetscBool         has_const;
735       PetscInt          i,j,mlocal,nvec,M;
736       const Vec         *vecs;
737 
738       if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
739       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
740       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
741       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
742       ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
743       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
744       for (i=0; i<nvec; i++) {
745         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
746         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
747         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
748       }
749       PetscStackCall("ML_Aggregate_Create",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
750       ierr = PetscFree(nullvec);CHKERRQ(ierr);
751     } break;
752     case PCML_NULLSPACE_BLOCK:
753       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
754       break;
755     case PCML_NULLSPACE_SCALAR:
756       break;
757     default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
758     }
759   }
760   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
761   /* set options */
762   switch (pc_ml->CoarsenScheme) {
763   case 1:
764     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
765   case 2:
766     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
767   case 3:
768     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
769   }
770   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
771   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
772   if (pc_ml->SpectralNormScheme_Anorm) {
773     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
774   }
775   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
776   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
777   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
778   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
779   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
780   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
781 
782   if (pc_ml->Aux) {
783     if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
784     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
785     ml_object->Amat[0].aux_data->enable    = 1;
786     ml_object->Amat[0].aux_data->max_level = 10;
787     ml_object->Amat[0].num_PDEs            = bs;
788   }
789 
790   ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
791   ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
792 
793   if (pc_ml->dim) {
794     PetscInt               i,dim = pc_ml->dim;
795     ML_Aggregate_Viz_Stats *grid_info;
796     PetscInt               nlocghost;
797 
798     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
799     nlocghost = Aloc->cmap->n / bs;
800 
801     PetscStackCall("ML_Aggregate_VizAndStats_Setup(",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
802     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
803     for (i = 0; i < dim; i++) {
804       /* set the finest level coordinates to point to the column-order array
805        * in pc_ml */
806       /* NOTE: these pointers are zeroed in PCReset_ML() before ML_Aggregate_VizAndStats_Clean() so that ML does not free the coordinate arrays */
807       switch (i) {
808       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
809       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
810       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
811       default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
812       }
813     }
814     grid_info->Ndim = dim;
815   }
816 
817   /* repartitioning */
818   if (pc_ml->Repartition) {
819     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
820     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
821     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
822     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
823 #if 0                           /* Function not yet defined in ml-6.2 */
824     /* I'm not sure what compatibility issues might crop up if we partitioned
825      * on the finest level, so to be safe repartition starts on the next
826      * finest level (reflecting the default behavior in
827      * ml_MultiLevelPreconditioner) */
828     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
829 #endif
830 
831     if (!pc_ml->RepartitionType) {
832       PetscInt i;
833 
834       if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
835       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
836       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
837 
838       for (i = 0; i < ml_object->ML_num_levels; i++) {
839         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
840         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
841         /* defaults from ml_agg_info.c */
842         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
843         grid_info->zoltan_timers        = 0;
844         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
845       }
846     } else {
847       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
848     }
849   }
850 
851   if (pc_ml->OldHierarchy) {
852     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
853   } else {
854     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
855   }
856   if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
857   pc_ml->Nlevels = Nlevels;
858   fine_level     = Nlevels - 1;
859 
860   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
861   /* set default smoothers */
862   for (level=1; level<=fine_level; level++) {
863     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
864     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
865     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
866     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
867   }
868   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
869   ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
870   ierr = PetscOptionsEnd();CHKERRQ(ierr);
871 
872   ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);
873 
874   pc_ml->gridctx = gridctx;
875 
876   /* wrap the ML coarse-grid matrices as PETSc matrices (seqaij in serial; shell P,R and mpiaij A in parallel).
877      Level 0 is the finest grid for ML, but the coarsest for PETSc! */
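  /* The mapping used below is petsc_level = (Nlevels-1) - mllevel: ML level 1
     (the first coarsened grid) lands at PETSc level fine_level-1, and the
     coarsest ML level lands at PETSc level 0. */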
878   gridctx[fine_level].A = A;
879 
880   level = fine_level - 1;
881   if (size == 1) { /* convert ML P, R and A into seqaij format */
882     for (mllevel=1; mllevel<Nlevels; mllevel++) {
883       mlmat = &(ml_object->Pmat[mllevel]);
884       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
885       mlmat = &(ml_object->Rmat[mllevel-1]);
886       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
887 
888       mlmat = &(ml_object->Amat[mllevel]);
889       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
890       level--;
891     }
892   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
893     for (mllevel=1; mllevel<Nlevels; mllevel++) {
894       mlmat  = &(ml_object->Pmat[mllevel]);
895       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
896       mlmat  = &(ml_object->Rmat[mllevel-1]);
897       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
898 
899       mlmat  = &(ml_object->Amat[mllevel]);
900       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
901       level--;
902     }
903   }
904 
905   /* create vectors and ksp at all levels */
906   for (level=0; level<fine_level; level++) {
907     level1 = level + 1;
908     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
909     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
910     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
911     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
912 
913     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
914     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
915     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
916     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
917 
918     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
919     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
920     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
921     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
922 
923     if (level == 0) {
924       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
925     } else {
926       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
927     }
928   }
929   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
930 
931   /* create coarse level and the interpolation between the levels */
932   for (level=0; level<fine_level; level++) {
933     level1 = level + 1;
934     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
935     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
936     if (level > 0) {
937       ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
938     }
939     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
940   }
941   ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
942   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
943 
944   /* put coordinate info in levels */
945   if (pc_ml->dim) {
946     PetscInt  i,j,dim = pc_ml->dim;
947     PetscInt  bs, nloc;
948     PC        subpc;
949     PetscReal *array;
950 
951     level = fine_level;
952     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
953       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
954       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
955 
956       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
957       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
958       nloc /= bs; /* number of local nodes */
959 
960       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
961       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
962       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
963       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
964       for (j = 0; j < nloc; j++) {
965         for (i = 0; i < dim; i++) {
966           switch (i) {
967           case 0: array[dim * j + i] = grid_info->x[j]; break;
968           case 1: array[dim * j + i] = grid_info->y[j]; break;
969           case 2: array[dim * j + i] = grid_info->z[j]; break;
970           default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
971           }
972         }
973       }
974 
975       /* passing coordinates to smoothers/coarse solver, should they need them */
976       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
977       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
978       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
979       level--;
980     }
981   }
982 
983   /* setupcalled is set to 0 so that MG is set up from scratch */
984   pc->setupcalled = 0;
985   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
986   PetscFunctionReturn(0);
987 }
988 
989 /* -------------------------------------------------------------------------- */
990 /*
991    PCDestroy_ML - Destroys the private context for the ML preconditioner
992    that was created with PCCreate_ML().
993 
994    Input Parameter:
995 .  pc - the preconditioner context
996 
997    Application Interface Routine: PCDestroy()
998 */
999 PetscErrorCode PCDestroy_ML(PC pc)
1000 {
1001   PetscErrorCode ierr;
1002   PC_MG          *mg   = (PC_MG*)pc->data;
1003   PC_ML          *pc_ml= (PC_ML*)mg->innerctx;
1004 
1005   PetscFunctionBegin;
1006   ierr = PCReset_ML(pc);CHKERRQ(ierr);
1007   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
1008   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
1009   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
1010   PetscFunctionReturn(0);
1011 }
1012 
1013 PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
1014 {
1015   PetscErrorCode ierr;
1016   PetscInt       indx,PrintLevel,partindx;
1017   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1018   const char     *part[]   = {"Zoltan","ParMETIS"};
1019 #if defined(HAVE_ML_ZOLTAN)
1020   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1021 #endif
1022   PC_MG       *mg    = (PC_MG*)pc->data;
1023   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1024   PetscMPIInt size;
1025   MPI_Comm    comm;
1026 
1027   PetscFunctionBegin;
1028   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
1029   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1030   ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);
1031 
1032   PrintLevel = 0;
1033   indx       = 0;
1034   partindx   = 0;
1035 
1036   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
1037   PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
1038   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
1039   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
1040   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
1041 
1042   pc_ml->CoarsenScheme = indx;
1043 
1044   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
1045   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
1046   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
1047   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
1048   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
1049   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
1050   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
1051   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
1052   /*
1053     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1054     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1055 
1056     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1057     combination of options and ML's exit(1) explanations don't help matters.
1058   */
1059   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1060   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1061   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
1062   if (pc_ml->EnergyMinimization) {
1063     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1064   }
1065   if (pc_ml->EnergyMinimization == 2) {
1066     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1067     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1068   }
1069   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1070   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1071   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1072   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1073   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1074   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1075   /*
1076     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1077     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1078     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1079     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1080     this context, but ML doesn't provide a way to find out which ones.
1081    */
1082   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1083   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1084   if (pc_ml->Repartition) {
1085     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1086     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1087     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1088 #if defined(HAVE_ML_ZOLTAN)
1089     partindx = 0;
1090     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1091 
1092     pc_ml->RepartitionType = partindx;
1093     if (!partindx) {
1094       PetscInt zindx = 0;
1095 
1096       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1097 
1098       pc_ml->ZoltanScheme = zindx;
1099     }
1100 #else
1101     partindx = 1;
1102     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1103     pc_ml->RepartitionType = partindx;
1104     if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1105 #endif
1106     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1107     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1108   }
1109   ierr = PetscOptionsTail();CHKERRQ(ierr);
1110   PetscFunctionReturn(0);
1111 }
1112 
1113 /* -------------------------------------------------------------------------- */
1114 /*
1115    PCCreate_ML - Creates an ML preconditioner context, PC_ML,
1116    and sets this as the private data within the generic preconditioning
1117    context, PC, that was created within PCCreate().
1118 
1119    Input Parameter:
1120 .  pc - the preconditioner context
1121 
1122    Application Interface Routine: PCCreate()
1123 */
1124 
1125 /*MC
1126      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you to provide
1127        the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
1128        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
1129        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1130 
1131    Options Database Keys:
1132    Multigrid options(inherited):
1133 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycleType)
1134 .  -pc_mg_distinct_smoothup: Should one configure the up and down smoothers separately (PCMGSetDistinctSmoothUp)
1135 -  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1136    ML options:
1137 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1138 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1139 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1140 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1141 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1142 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1143 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1144 .  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1145 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1146 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1147 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1148 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1149 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1150 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based Laplacian (None)
1151 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1152 
1153    Level: intermediate
1154 
1155   Concepts: multigrid
1156 
1157 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1158            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetDistinctSmoothUp(),
1159            PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1160            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1161            PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1162 M*/
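/* A minimal usage sketch (illustrative, assuming an assembled AIJ matrix A and
   vectors b and x):

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr); /* picks up -pc_ml_* options */
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/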
1163 
1164 PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
1165 {
1166   PetscErrorCode ierr;
1167   PC_ML          *pc_ml;
1168   PC_MG          *mg;
1169 
1170   PetscFunctionBegin;
1171   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1172   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1173   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1174   /* Since PCMG tries to use the DM associated with the PC, we must delete it */
1175   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1176   ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1177   mg   = (PC_MG*)pc->data;
1178 
1179   /* create a supporting struct and attach it to pc */
1180   ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
1181   mg->innerctx = pc_ml;
1182 
1183   pc_ml->ml_object                = 0;
1184   pc_ml->agg_object               = 0;
1185   pc_ml->gridctx                  = 0;
1186   pc_ml->PetscMLdata              = 0;
1187   pc_ml->Nlevels                  = -1;
1188   pc_ml->MaxNlevels               = 10;
1189   pc_ml->MaxCoarseSize            = 1;
1190   pc_ml->CoarsenScheme            = 1;
1191   pc_ml->Threshold                = 0.0;
1192   pc_ml->DampingFactor            = 4.0/3.0;
1193   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1194   pc_ml->size                     = 0;
1195   pc_ml->dim                      = 0;
1196   pc_ml->nloc                     = 0;
1197   pc_ml->coords                   = 0;
1198   pc_ml->Repartition              = PETSC_FALSE;
1199   pc_ml->MaxMinRatio              = 1.3;
1200   pc_ml->MinPerProc               = 512;
1201   pc_ml->PutOnSingleProc          = 5000;
1202   pc_ml->RepartitionType          = 0;
1203   pc_ml->ZoltanScheme             = 0;
1204   pc_ml->Aux                      = PETSC_FALSE;
1205   pc_ml->AuxThreshold             = 0.0;
1206 
1207   /* allow for coordinates to be passed */
1208   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);
1209 
1210   /* overwrite the pointers of PCMG by the functions of PCML */
1211   pc->ops->setfromoptions = PCSetFromOptions_ML;
1212   pc->ops->setup          = PCSetUp_ML;
1213   pc->ops->reset          = PCReset_ML;
1214   pc->ops->destroy        = PCDestroy_ML;
1215   PetscFunctionReturn(0);
1216 }
1217