xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 55e7fe800d976e85ed2b5cd8bfdef564daa37bd9)
1 
2 /*
3    Provides an interface to the ML smoothed aggregation algebraic multigrid solver from the Trilinos package
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
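/*
   A minimal usage sketch (assumes PETSc was configured with ML support, e.g. --download-ml):

     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);

   or, at run time, -pc_type ml together with the -pc_ml_* options registered in
   PCSetFromOptions_ML() below.
*/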
7 #include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <petsc/private/pcmgimpl.h>                    /*I "petscksp.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 #include <petscdm.h>            /* for the DMDestroy(&pc->dm) hack in PCCreate_ML() */
12 
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
22 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
23 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
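/* Note: the trailing entries of this list (the enum type name, the option prefix, and a
   terminating 0) follow the convention PetscOptionsEnum() requires for enum string arrays. */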
24 
25 /* The context (data structure) at each grid level */
26 typedef struct {
27   Vec x,b,r;                  /* global vectors */
28   Mat A,P,R;
29   KSP ksp;
30   Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
31 } GridCtx;
32 
33 /* The context used to pass the PETSc fine-grid matrix into ML */
34 typedef struct {
35   Mat         A;       /* PETSc matrix in AIJ format */
36   Mat         Aloc;    /* local portion of A to be used by ML */
37   Vec         x,y;
38   ML_Operator *mlmat;
39   PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
40 } FineGridCtx;
41 
42 /* The context that associates an ML matrix with a PETSc shell matrix */
43 typedef struct {
44   Mat         A;        /* PETSc shell matrix associated with mlmat */
45   ML_Operator *mlmat;   /* ML matrix associated with A */
46   Vec         y, work;
47 } Mat_MLShell;
48 
49 /* Private context for the ML preconditioner */
50 typedef struct {
51   ML                *ml_object;
52   ML_Aggregate      *agg_object;
53   GridCtx           *gridctx;
54   FineGridCtx       *PetscMLdata;
55   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
56   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
57   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
58   PetscBool         reuse_interpolation;
59   PCMLNullSpaceType nulltype;
60   PetscMPIInt       size; /* size of communicator for pc->pmat */
61   PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
62   PetscInt          nloc;
63   PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
64 } PC_ML;
65 
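/* Getrow callback that ML uses to extract rows of the fine-grid matrix. Per ML's getrow
   contract, it returns 0 when allocated_space is too small (ML then retries with a larger
   buffer) and 1 on success. */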
66 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
67 {
68   PetscErrorCode ierr;
69   PetscInt       m,i,j,k=0,row,*aj;
70   PetscScalar    *aa;
71   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
72   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
73 
74   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
75   for (i = 0; i<N_requested_rows; i++) {
76     row            = requested_rows[i];
77     row_lengths[i] = a->ilen[row];
78     if (allocated_space < k+row_lengths[i]) return(0);
79     if ((row >= 0) && (row <= (m-1))) {
80       aj = a->j + a->i[row];
81       aa = a->a + a->i[row];
82       for (j=0; j<row_lengths[i]; j++) {
83         columns[k]  = aj[j];
84         values[k++] = aa[j];
85       }
86     }
87   }
88   return(1);
89 }
90 
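/* Communication callback for ML: fills the ghost (off-process) slots of p, located after
   the in_length locally owned entries, using the scatter context of the MPIAIJ matrix.
   With a single process there is nothing to do. */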
91 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
92 {
93   PetscErrorCode    ierr;
94   FineGridCtx       *ml = (FineGridCtx*)ML_data;
95   Mat               A   = ml->A;
96   Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
97   PetscMPIInt       size;
98   PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
99   const PetscScalar *array;
100 
101   PetscFunctionBegin;
102   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
103   if (size == 1) PetscFunctionReturn(0);
104 
105   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
106   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
107   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
108   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
109   ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
110   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
111   ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
112   PetscFunctionReturn(0);
113 }
114 
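/* Matvec callback for ML: computes ap = A*p using the local matrix Aloc. In parallel,
   p is first copied into pwork and its ghost values are gathered with PetscML_comm()
   so that Aloc sees the full local column space. */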
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A   = ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
126   if (size == 1) {
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
140 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
141 {
142   PetscErrorCode    ierr;
143   Mat_MLShell       *shell;
144   PetscScalar       *yarray;
145   const PetscScalar *xarray;
146   PetscInt          x_length,y_length;
147 
148   PetscFunctionBegin;
149   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
150   ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
151   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
152   x_length = shell->mlmat->invec_leng;
153   y_length = shell->mlmat->outvec_leng;
154   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
155   ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
156   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
157   PetscFunctionReturn(0);
158 }
159 
160 /* Computes y = w + A * x
161    It is possible that w == y, but not x == y
162 */
163 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
164 {
165   Mat_MLShell       *shell;
166   PetscScalar       *yarray;
167   const PetscScalar *xarray;
168   PetscInt          x_length,y_length;
169   PetscErrorCode    ierr;
170 
171   PetscFunctionBegin;
172   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
173   if (y == w) {
174     if (!shell->work) {
175       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
176     }
177     ierr     = VecGetArrayRead(x,           &xarray);CHKERRQ(ierr);
178     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
179     x_length = shell->mlmat->invec_leng;
180     y_length = shell->mlmat->outvec_leng;
181     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar*)xarray, y_length, yarray));
182     ierr = VecRestoreArrayRead(x,           &xarray);CHKERRQ(ierr);
183     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
184     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
185   } else {
186     ierr     = VecGetArrayRead(x, &xarray);CHKERRQ(ierr);
187     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
188     x_length = shell->mlmat->invec_leng;
189     y_length = shell->mlmat->outvec_leng;
190     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar *)xarray, y_length, yarray));
191     ierr = VecRestoreArrayRead(x, &xarray);CHKERRQ(ierr);
192     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
193     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
194   }
195   PetscFunctionReturn(0);
196 }
197 
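/* Concatenates the diagonal block (A) and off-diagonal block (B) of an MPIAIJ matrix into a
   single sequential AIJ matrix, numbering the owned columns first [0,an) and the ghost
   columns after them [an,..), which is the local numbering ML expects. */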
198 /* newtype is ignored because this routine handles only one case */
199 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
200 {
201   PetscErrorCode ierr;
202   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
203   Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
204   PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
205   PetscScalar    *aa=a->a,*ba=b->a,*ca;
206   PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
207   PetscInt       *ci,*cj,ncols;
208 
209   PetscFunctionBegin;
210   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
211 
212   if (scall == MAT_INITIAL_MATRIX) {
213     ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
214     ci[0] = 0;
215     for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
216     ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
217     ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
218 
219     k = 0;
220     for (i=0; i<am; i++) {
221       /* diagonal portion of A */
222       ncols = ai[i+1] - ai[i];
223       for (j=0; j<ncols; j++) {
224         cj[k]   = *aj++;
225         ca[k++] = *aa++;
226       }
227       /* off-diagonal portion of A */
228       ncols = bi[i+1] - bi[i];
229       for (j=0; j<ncols; j++) {
230         cj[k]   = an + (*bj); bj++;
231         ca[k++] = *ba++;
232       }
233     }
234     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
235 
236     /* put together the new matrix */
237     an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
238     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
239 
240     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
241     /* Since these are PETSc arrays, change flags to free them as necessary. */
242     mat          = (Mat_SeqAIJ*)(*Aloc)->data;
243     mat->free_a  = PETSC_TRUE;
244     mat->free_ij = PETSC_TRUE;
245 
246     mat->nonew = 0;
247   } else if (scall == MAT_REUSE_MATRIX) {
248     mat=(Mat_SeqAIJ*)(*Aloc)->data;
249     ci = mat->i; cj = mat->j; ca = mat->a;
250     for (i=0; i<am; i++) {
251       /* diagonal portion of A */
252       ncols = ai[i+1] - ai[i];
253       for (j=0; j<ncols; j++) *ca++ = *aa++;
254       /* off-diagonal portion of A */
255       ncols = bi[i+1] - bi[i];
256       for (j=0; j<ncols; j++) *ca++ = *ba++;
257     }
258   } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
259   PetscFunctionReturn(0);
260 }
261 
262 static PetscErrorCode MatDestroy_ML(Mat A)
263 {
264   PetscErrorCode ierr;
265   Mat_MLShell    *shell;
266 
267   PetscFunctionBegin;
268   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
269   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
270   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
271   ierr = PetscFree(shell);CHKERRQ(ierr);
272   PetscFunctionReturn(0);
273 }
274 
275 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
276 {
277   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
278   PetscErrorCode        ierr;
279   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
280   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
281   PetscScalar           *ml_vals=matdata->values,*aa;
282 
283   PetscFunctionBegin;
284   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
285   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
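    /* CSR layout example (hypothetical 2x3 matrix {{a,0,b},{0,c,0}}):
       rowptr = {0,2,3}, columns = {0,2,1}, values = {a,b,c};
       row i occupies entries rowptr[i]..rowptr[i+1]-1 of columns/values. */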
286     if (reuse) {
287       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
288       aij->i = ml_rowptr;
289       aij->j = ml_cols;
290       aij->a = ml_vals;
291     } else {
292       /* sort ml_cols and ml_vals */
293       ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
294       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
295       aj = ml_cols; aa = ml_vals;
296       for (i=0; i<m; i++) {
297         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
298         aj  += nnz[i]; aa += nnz[i];
299       }
300       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
301       ierr = PetscFree(nnz);CHKERRQ(ierr);
302     }
303     PetscFunctionReturn(0);
304   }
305 
306   nz_max = PetscMax(1,mlmat->max_nz_per_row);
307   ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
308   if (!reuse) {
309     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
310     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
311     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
312     /* keep track of block size for A matrices */
313     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
314 
315     ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
316     for (i=0; i<m; i++) {
317       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
318     }
319     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
320   }
321   for (i=0; i<m; i++) {
322     PetscInt ncols;
323 
324     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
325     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
326   }
327   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
328   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
329 
330   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
331   ierr = PetscFree(nnz);CHKERRQ(ierr);
332   PetscFunctionReturn(0);
333 }
334 
335 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
336 {
337   PetscErrorCode ierr;
338   PetscInt       m,n;
339   ML_Comm        *MLcomm;
340   Mat_MLShell    *shellctx;
341 
342   PetscFunctionBegin;
343   m = mlmat->outvec_leng;
344   n = mlmat->invec_leng;
345 
346   if (reuse) {
347     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
348     shellctx->mlmat = mlmat;
349     PetscFunctionReturn(0);
350   }
351 
352   MLcomm = mlmat->comm;
353 
354   ierr = PetscNew(&shellctx);CHKERRQ(ierr);
355   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
356   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
357   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
358   ierr = MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);CHKERRQ(ierr);
359 
360   shellctx->A         = *newmat;
361   shellctx->mlmat     = mlmat;
362   shellctx->work      = NULL;
363 
364   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
365   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
366   ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);
367   PetscFunctionReturn(0);
368 }
369 
370 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
371 {
372   PetscInt       *aj;
373   PetscScalar    *aa;
374   PetscErrorCode ierr;
375   PetscInt       i,j,*gordering;
376   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
377   Mat            A;
378 
379   PetscFunctionBegin;
380   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
381   n = mlmat->invec_leng;
382   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
383 
384   /* create global row numbering for a ML_Operator */
385   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
386 
387   nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
388   ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
389   if (reuse) {
390     A = *newmat;
391   } else {
392     PetscInt *nnzA,*nnzB,*nnz;
393     PetscInt rstart;
394     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
395     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
396     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
397     /* keep track of block size for A matrices */
398     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
399     ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
400     ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
401     rstart -= m;
402 
403     for (i=0; i<m; i++) {
404       row = gordering[i] - rstart;
405       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
406       nnzA[row] = 0;
407       for (j=0; j<nnz[i]; j++) {
408         if (aj[j] < m) nnzA[row]++;
409       }
410       nnzB[row] = nnz[i] - nnzA[row];
411     }
412     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
413     ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
414   }
415   for (i=0; i<m; i++) {
416     PetscInt ncols;
417     row = gordering[i];
418 
419     PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
420     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
421     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
422   }
423   PetscStackCall("ML_free",ML_free(gordering));
424   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
425   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
426   *newmat = A;
427 
428   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
429   PetscFunctionReturn(0);
430 }
431 
432 /* -------------------------------------------------------------------------- */
433 /*
434    PCSetCoordinates_ML
435 
436    Input Parameters:
437 .  pc - the preconditioner context;  ndm - coordinate dimension;  a_nloc - number of local nodes;  coords - coordinates packed by node (x0,y0,z0,x1,...)
438 */
439 static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
440 {
441   PC_MG          *mg    = (PC_MG*)pc->data;
442   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
443   PetscErrorCode ierr;
444   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
445   Mat            Amat = pc->pmat;
446 
447   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
448   PetscFunctionBegin;
449   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
450   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
451 
452   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
453   nloc = (Iend-my0)/bs;
454 
455   if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Number of local blocks %d must equal number of coordinate locations %d.",nloc,a_nloc);
456   if ((Iend-my0)%bs!=0) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Local size %d not divisible by block size %d.",Iend-my0,bs);
457 
458   oldarrsz    = pc_ml->dim * pc_ml->nloc;
459   pc_ml->dim  = ndm;
460   pc_ml->nloc = a_nloc;
461   arrsz       = ndm * a_nloc;
462 
463   /* allocate (or reallocate) the coordinate storage whenever its size changes */
464   if (!pc_ml->coords || (oldarrsz != arrsz)) {
465     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
466     ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
467   }
468   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
469   /* copy data in - column oriented */
470   for (kk = 0; kk < nloc; kk++) {
471     for (ii = 0; ii < ndm; ii++) {
472       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
473     }
474   }
475   PetscFunctionReturn(0);
476 }
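/* A minimal calling sketch (nloc and coords are the caller's; for a 3D problem the input is
   interleaved by node, coords = {x0,y0,z0, x1,y1,z1, ...}):

     ierr = PCSetCoordinates(pc,3,nloc,coords);CHKERRQ(ierr);

   which dispatches here through the composed "PCSetCoordinates_C" function. */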
477 
478 /* -----------------------------------------------------------------------------*/
479 extern PetscErrorCode PCReset_MG(PC);
480 PetscErrorCode PCReset_ML(PC pc)
481 {
482   PetscErrorCode ierr;
483   PC_MG          *mg    = (PC_MG*)pc->data;
484   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
485   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
486 
487   PetscFunctionBegin;
488   if (dim) {
489     ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
490 
491     for (level=0; level<=fine_level; level++) {
492       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
493     }
494 
495     grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
496     grid_info->y = 0;
497     grid_info->z = 0;
498 
499     PetscStackCall("ML_Operator_Getrow",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
500   }
501   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
502   PetscStackCall("ML_Aggregate_Destroy",ML_Destroy(&pc_ml->ml_object));
503 
504   if (pc_ml->PetscMLdata) {
505     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
506     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
507     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
508     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
509   }
510   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
511 
512   if (pc_ml->gridctx) {
513     for (level=0; level<fine_level; level++) {
514       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
515       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
516       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
517       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
518       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
519       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
520     }
521   }
522   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
523   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
524 
525   pc_ml->dim  = 0;
526   pc_ml->nloc = 0;
527   ierr = PCReset_MG(pc);CHKERRQ(ierr);
528   PetscFunctionReturn(0);
529 }
530 /* -------------------------------------------------------------------------- */
531 /*
532    PCSetUp_ML - Prepares for the use of the ML preconditioner
533                     by setting data structures and options.
534 
535    Input Parameter:
536 .  pc - the preconditioner context
537 
538    Application Interface Routine: PCSetUp()
539 
540    Notes:
541    The interface routine PCSetUp() is not usually called directly by
542    the user, but instead is called by PCApply() if necessary.
543 */
544 extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);
546 
547 PetscErrorCode PCSetUp_ML(PC pc)
548 {
549   PetscErrorCode   ierr;
550   PetscMPIInt      size;
551   FineGridCtx      *PetscMLdata;
552   ML               *ml_object;
553   ML_Aggregate     *agg_object;
554   ML_Operator      *mlmat;
555   PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
556   Mat              A,Aloc;
557   GridCtx          *gridctx;
558   PC_MG            *mg    = (PC_MG*)pc->data;
559   PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
560   PetscBool        isSeq, isMPI;
561   KSP              smoother;
562   PC               subpc;
563   PetscInt         mesh_level, old_mesh_level;
564   MatInfo          info;
565   static PetscBool cite = PETSC_FALSE;
566 
567   PetscFunctionBegin;
568   ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML} 3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution = {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
569   A    = pc->pmat;
570   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
571 
572   if (pc->setupcalled) {
573     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
574       /*
575        Reuse the interpolation (enabled with -pc_ml_reuse_interpolation) instead of recomputing aggregates and updating the
576        whole hierarchy. This is less expensive for multiple solves in which the matrix does not change too quickly.
577        */
578       ml_object             = pc_ml->ml_object;
579       gridctx               = pc_ml->gridctx;
580       Nlevels               = pc_ml->Nlevels;
581       fine_level            = Nlevels - 1;
582       gridctx[fine_level].A = A;
583 
584       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
585       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
586       if (isMPI) {
587         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
588       } else if (isSeq) {
589         Aloc = A;
590         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
591       } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
592 
593       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
594       PetscMLdata       = pc_ml->PetscMLdata;
595       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
596       PetscMLdata->A    = A;
597       PetscMLdata->Aloc = Aloc;
598       PetscStackCall("ML_Aggregate_Destroy",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
599       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
600 
601       mesh_level = ml_object->ML_finest_level;
602       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
603         old_mesh_level = mesh_level;
604         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
605 
606         /* clean and regenerate A */
607         mlmat = &(ml_object->Amat[mesh_level]);
608         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
609         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
610         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
611       }
612 
613       level = fine_level - 1;
614       if (size == 1) { /* convert ML A into seqaij format; the wrapped P and R are reused */
615         for (mllevel=1; mllevel<Nlevels; mllevel++) {
616           mlmat = &(ml_object->Amat[mllevel]);
617           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
618           level--;
619         }
620       } else { /* convert ML A into mpiaij format; the shell-wrapped P and R are reused */
621         for (mllevel=1; mllevel<Nlevels; mllevel++) {
622           mlmat  = &(ml_object->Amat[mllevel]);
623           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
624           level--;
625         }
626       }
627 
628       for (level=0; level<fine_level; level++) {
629         if (level > 0) {
630           ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
631         }
632         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
633       }
634       ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
635       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
636 
637       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
638       PetscFunctionReturn(0);
639     } else {
640       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
641       ierr = PCReset_ML(pc);CHKERRQ(ierr);
642     }
643   }
644 
645   /* setup special features of PCML */
646   /*--------------------------------*/
647   /* convert A to Aloc to be used by ML at the fine grid */
648   pc_ml->size = size;
649   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
650   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
651   if (isMPI) {
652     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
653   } else if (isSeq) {
654     Aloc = A;
655     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
656   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
657 
658   /* create and initialize struct 'PetscMLdata' */
659   ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
660   pc_ml->PetscMLdata = PetscMLdata;
661   ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);
662 
663   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
664   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
665   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
666 
667   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
668   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
669   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
670   PetscMLdata->A    = A;
671   PetscMLdata->Aloc = Aloc;
672   if (pc_ml->dim) { /* create vecs around the coordinate data given */
673     PetscInt  i,j,dim=pc_ml->dim;
674     PetscInt  nloc = pc_ml->nloc,nlocghost;
675     PetscReal *ghostedcoords;
676 
677     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
678     nlocghost = Aloc->cmap->n / bs;
679     ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
680     for (i = 0; i < dim; i++) {
681       /* copy coordinate values into first component of pwork */
682       for (j = 0; j < nloc; j++) {
683         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
684       }
685       /* get the ghost values */
686       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
687       /* write into the vector */
688       for (j = 0; j < nlocghost; j++) {
689         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
690       }
691     }
692     /* replace the original coords with the ghosted coords, because these are
693      * what ML needs */
694     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
695     pc_ml->coords = ghostedcoords;
696   }
697 
698   /* create ML discretization matrix at fine grid */
699   /* ML requires input of fine-grid matrix. It determines nlevels. */
700   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
701   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
702   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
703   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
704   pc_ml->ml_object = ml_object;
705   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
706   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
707   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
708 
709   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
710 
711   /* aggregation */
712   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
713   pc_ml->agg_object = agg_object;
714 
715   {
716     MatNullSpace mnull;
717     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
718     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
719       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
720       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
721       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
722     }
723     switch (pc_ml->nulltype) {
724     case PCML_NULLSPACE_USER: {
725       PetscScalar       *nullvec;
726       const PetscScalar *v;
727       PetscBool         has_const;
728       PetscInt          i,j,mlocal,nvec,M;
729       const Vec         *vecs;
730 
731       if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
732       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
733       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
734       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
735       ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
736       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
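      /* Pack the null space column by column: the optional constant vector first, then the
         user's vectors, each of local length mlocal, as ML_Aggregate_Set_NullSpace() expects. */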
737       for (i=0; i<nvec; i++) {
738         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
739         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
740         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
741       }
742       PetscStackCall("ML_Aggregate_Create",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
743       ierr = PetscFree(nullvec);CHKERRQ(ierr);
744     } break;
745     case PCML_NULLSPACE_BLOCK:
746       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
747       break;
748     case PCML_NULLSPACE_SCALAR:
749       break;
750     default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
751     }
752   }
753   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
754   /* set options */
755   switch (pc_ml->CoarsenScheme) {
756   case 1:
757     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
758   case 2:
759     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
760   case 3:
761     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
762   }
763   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
764   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
765   if (pc_ml->SpectralNormScheme_Anorm) {
766     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
767   }
768   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
769   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
770   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
771   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
772   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
773   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
774 
775   if (pc_ml->Aux) {
776     if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
777     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
778     ml_object->Amat[0].aux_data->enable    = 1;
779     ml_object->Amat[0].aux_data->max_level = 10;
780     ml_object->Amat[0].num_PDEs            = bs;
781   }
782 
783   ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
784   ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
785 
786   if (pc_ml->dim) {
787     PetscInt               i,dim = pc_ml->dim;
788     ML_Aggregate_Viz_Stats *grid_info;
789     PetscInt               nlocghost;
790 
791     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
792     nlocghost = Aloc->cmap->n / bs;
793 
794     PetscStackCall("ML_Aggregate_VizAndStats_Setup(",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
795     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
796     for (i = 0; i < dim; i++) {
797       /* set the finest level coordinates to point to the column-order array
798        * in pc_ml */
799       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
800       switch (i) {
801       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
802       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
803       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
804       default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
805       }
806     }
807     grid_info->Ndim = dim;
808   }
809 
810   /* repartitioning */
811   if (pc_ml->Repartition) {
812     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
813     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
814     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
815     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
816 #if 0                           /* Function not yet defined in ml-6.2 */
817     /* I'm not sure what compatibility issues might crop up if we partitioned
818      * on the finest level, so to be safe repartitioning starts on the next
819      * finest level (reflecting the default behavior in
820      * ml_MultiLevelPreconditioner) */
821     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
822 #endif
823 
824     if (!pc_ml->RepartitionType) {
825       PetscInt i;
826 
827       if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
828       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
829       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
830 
831       for (i = 0; i < ml_object->ML_num_levels; i++) {
832         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
833         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
834         /* defaults from ml_agg_info.c */
835         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
836         grid_info->zoltan_timers        = 0;
837         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
838       }
839     } else {
840       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
841     }
842   }
843 
844   if (pc_ml->OldHierarchy) {
845     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
846   } else {
847     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
848   }
849   if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
850   pc_ml->Nlevels = Nlevels;
851   fine_level     = Nlevels - 1;
852 
853   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
854   /* set default smoothers */
855   for (level=1; level<=fine_level; level++) {
856     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
857     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
858     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
859     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
860   }
861   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
862   ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
863   ierr = PetscOptionsEnd();CHKERRQ(ierr);
864 
865   ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);
866 
867   pc_ml->gridctx = gridctx;
868 
869   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
870      Level 0 is the finest grid for ML, but coarsest for PETSc! */
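  /* For example, with Nlevels = 3: ML level 0 is PETSc level 2 (finest), ML level 1 is
     PETSc level 1, and ML level 2 is PETSc level 0 (coarsest); in general the PETSc
     level is Nlevels - 1 - mllevel. */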
871   gridctx[fine_level].A = A;
872 
873   level = fine_level - 1;
874   if (size == 1) { /* convert ML P, R and A into seqaij format */
875     for (mllevel=1; mllevel<Nlevels; mllevel++) {
876       mlmat = &(ml_object->Pmat[mllevel]);
877       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
878       mlmat = &(ml_object->Rmat[mllevel-1]);
879       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
880 
881       mlmat = &(ml_object->Amat[mllevel]);
882       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
883       level--;
884     }
885   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
886     for (mllevel=1; mllevel<Nlevels; mllevel++) {
887       mlmat  = &(ml_object->Pmat[mllevel]);
888       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
889       mlmat  = &(ml_object->Rmat[mllevel-1]);
890       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
891 
892       mlmat  = &(ml_object->Amat[mllevel]);
893       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
894       level--;
895     }
896   }
897 
898   /* create vectors and ksp at all levels */
899   for (level=0; level<fine_level; level++) {
900     level1 = level + 1;
901     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
902     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
903     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
904     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
905 
906     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
907     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
908     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
909     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
910 
911     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
912     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
913     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
914     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
915 
916     if (level == 0) {
917       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
918     } else {
919       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
920     }
921   }
922   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
923 
924   /* set the interpolation/restriction operators between levels, and the operator and smoother on each level */
925   for (level=0; level<fine_level; level++) {
926     level1 = level + 1;
927     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
928     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
929     if (level > 0) {
930       ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
931     }
932     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
933   }
934   ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
935   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
936 
937   /* put coordinate info in levels */
938   if (pc_ml->dim) {
939     PetscInt  i,j,dim = pc_ml->dim;
940     PetscInt  bs, nloc;
941     PC        subpc;
942     PetscReal *array;
943 
944     level = fine_level;
945     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
946       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
947       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
948 
949       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
950       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
951       nloc /= bs; /* number of local nodes */
952 
953       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
954       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
955       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
956       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
957       for (j = 0; j < nloc; j++) {
958         for (i = 0; i < dim; i++) {
959           switch (i) {
960           case 0: array[dim * j + i] = grid_info->x[j]; break;
961           case 1: array[dim * j + i] = grid_info->y[j]; break;
962           case 2: array[dim * j + i] = grid_info->z[j]; break;
963           default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
964           }
965         }
966       }
967 
968       /* passing coordinates to smoothers/coarse solver, should they need them */
969       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
970       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
971       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
972       level--;
973     }
974   }
975 
976   /* setupcalled is set to 0 so that MG is setup from scratch */
977   pc->setupcalled = 0;
978   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
979   PetscFunctionReturn(0);
980 }
981 
982 /* -------------------------------------------------------------------------- */
983 /*
984    PCDestroy_ML - Destroys the private context for the ML preconditioner
985    that was created with PCCreate_ML().
986 
987    Input Parameter:
988 .  pc - the preconditioner context
989 
990    Application Interface Routine: PCDestroy()
991 */
992 PetscErrorCode PCDestroy_ML(PC pc)
993 {
994   PetscErrorCode ierr;
995   PC_MG          *mg   = (PC_MG*)pc->data;
996   PC_ML          *pc_ml= (PC_ML*)mg->innerctx;
997 
998   PetscFunctionBegin;
999   ierr = PCReset_ML(pc);CHKERRQ(ierr);
1000   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
1001   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
1002   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
1003   PetscFunctionReturn(0);
1004 }
1005 
1006 PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
1007 {
1008   PetscErrorCode ierr;
1009   PetscInt       indx,PrintLevel,partindx;
1010   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1011   const char     *part[]   = {"Zoltan","ParMETIS"};
1012 #if defined(HAVE_ML_ZOLTAN)
1013   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1014 #endif
1015   PC_MG       *mg    = (PC_MG*)pc->data;
1016   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1017   PetscMPIInt size;
1018   MPI_Comm    comm;
1019 
1020   PetscFunctionBegin;
1021   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
1022   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1023   ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);
1024 
1025   PrintLevel = 0;
1026   indx       = 0;
1027   partindx   = 0;
1028 
1029   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
1030   PetscStackCall("ML_Set_PrintLeve",ML_Set_PrintLevel(PrintLevel));
1031   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
1032   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
1033   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
1034 
1035   pc_ml->CoarsenScheme = indx;
1036 
1037   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
1038   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
1039   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
1040   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
1041   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
1042   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
1043   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
1044   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
1045   /*
1046     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1047     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1048 
1049     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1050     combination of options and ML's exit(1) explanations don't help matters.
1051   */
1052   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1053   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1054   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
1055   if (pc_ml->EnergyMinimization) {
1056     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1057   }
1058   if (pc_ml->EnergyMinimization == 2) {
1059     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1060     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1061   }
1062   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1063   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1064   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1065   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1066   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1067   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1068   /*
1069     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1070     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1071     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1072     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1073     this context, but ML doesn't provide a way to find out which ones.
1074    */
1075   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1076   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1077   if (pc_ml->Repartition) {
1078     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1079     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1080     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1081 #if defined(HAVE_ML_ZOLTAN)
1082     partindx = 0;
1083     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1084 
1085     pc_ml->RepartitionType = partindx;
1086     if (!partindx) {
1087       PetscInt zindx = 0;
1088 
1089       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1090 
1091       pc_ml->ZoltanScheme = zindx;
1092     }
1093 #else
1094     partindx = 1;
1095     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1096     pc_ml->RepartitionType = partindx;
1097     if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1098 #endif
1099     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1100     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1101   }
1102   ierr = PetscOptionsTail();CHKERRQ(ierr);
1103   PetscFunctionReturn(0);
1104 }
1105 
1106 /* -------------------------------------------------------------------------- */
1107 /*
1108    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1109    and sets this as the private data within the generic preconditioning
1110    context, PC, that was created within PCCreate().
1111 
1112    Input Parameter:
1113 .  pc - the preconditioner context
1114 
1115    Application Interface Routine: PCCreate()
1116 */
1117 
1118 /*MC
1119      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
1120        the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
1121        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
1122        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1123 
1124    Options Database Key:
1125    Multigrid options(inherited):
1126 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycleType)
1127 .  -pc_mg_distinct_smoothup: Should one configure the up and down smoothers separately (PCMGSetDistinctSmoothUp)
1128 -  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1129    ML options:
1130 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1131 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1132 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1133 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1134 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1135 .  -pc_ml_Threshold <0>: Aggregate drop tolerance (ML_Aggregate_Set_Threshold)
1136 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1137 .  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1138 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1139 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1140 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1141 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1142 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1143 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based Laplacian (None)
1144 -  -pc_ml_AuxThreshold <0.0>: Auxiliary matrix drop tolerance (None)
1145 
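   Example:
   A minimal command-line sketch (ex2 stands in for any PETSc program that solves a linear system):
.vb
   ./ex2 -ksp_type cg -pc_type ml -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS
.ve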
1146    Level: intermediate
1147 
1148   Concepts: multigrid
1149 
1150 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1151            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetDistinctSmoothUp(),
1152            PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1153            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1154            PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1155 M*/
1156 
1157 PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
1158 {
1159   PetscErrorCode ierr;
1160   PC_ML          *pc_ml;
1161   PC_MG          *mg;
1162 
1163   PetscFunctionBegin;
1164   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1165   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1166   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1167   /* Since PCMG tries to use the DM associated with the PC, we must delete it here */
1168   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1169   ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1170   mg   = (PC_MG*)pc->data;
1171 
1172   /* create a supporting struct and attach it to pc */
1173   ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
1174   mg->innerctx = pc_ml;
1175 
1176   pc_ml->ml_object                = 0;
1177   pc_ml->agg_object               = 0;
1178   pc_ml->gridctx                  = 0;
1179   pc_ml->PetscMLdata              = 0;
1180   pc_ml->Nlevels                  = -1;
1181   pc_ml->MaxNlevels               = 10;
1182   pc_ml->MaxCoarseSize            = 1;
1183   pc_ml->CoarsenScheme            = 1;
1184   pc_ml->Threshold                = 0.0;
1185   pc_ml->DampingFactor            = 4.0/3.0;
1186   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1187   pc_ml->size                     = 0;
1188   pc_ml->dim                      = 0;
1189   pc_ml->nloc                     = 0;
1190   pc_ml->coords                   = 0;
1191   pc_ml->Repartition              = PETSC_FALSE;
1192   pc_ml->MaxMinRatio              = 1.3;
1193   pc_ml->MinPerProc               = 512;
1194   pc_ml->PutOnSingleProc          = 5000;
1195   pc_ml->RepartitionType          = 0;
1196   pc_ml->ZoltanScheme             = 0;
1197   pc_ml->Aux                      = PETSC_FALSE;
1198   pc_ml->AuxThreshold             = 0.0;
1199 
1200   /* allow for coordinates to be passed */
1201   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);
1202 
1203   /* overwrite the pointers of PCMG by the functions of PCML */
1204   pc->ops->setfromoptions = PCSetFromOptions_ML;
1205   pc->ops->setup          = PCSetUp_ML;
1206   pc->ops->reset          = PCReset_ML;
1207   pc->ops->destroy        = PCDestroy_ML;
1208   PetscFunctionReturn(0);
1209 }
1210