xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 009bbdc485cd9ad46be9940d3549e2dde9cdc322)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
/* How the near-null space handed to ML's aggregation is chosen.
   AUTO is resolved in PCSetUp_ML: USER if a near-null space was attached with
   MatSetNearNullSpace(), else BLOCK when the block size > 1, else SCALAR. */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution, right-hand side, residual for this level */
  Mat        A,P,R;           /* level operator and (presumably) interpolation/restriction -- set in PCSetUp_ML, destroyed per level in PCReset_ML */
  KSP        ksp;             /* solver/smoother attached to this level */
  Vec        coords;          /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;
32 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;    /* sequential work vectors; ML's raw arrays are swapped in/out with VecPlaceArray()/VecResetArray() */
  ML_Operator  *mlmat; /* ML's handle for the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm(); sized to hold local plus ghost entries */
} FineGridCtx;
41 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* y created in MatWrapML_SHELL; work lazily created by MatMultAdd_ML when output aliases the add-vector */
} Mat_MLShell;
48 
/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;          /* ML hierarchy handle */
  ML_Aggregate      *agg_object;         /* ML aggregation context */
  GridCtx           *gridctx;            /* per-level data; gridctx[Nlevels-1] is the finest level */
  FineGridCtx       *PetscMLdata;        /* fine-grid glue between the PETSc matrix and ML's callbacks */
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation; /* if true and the nonzero pattern is unchanged, PCSetUp_ML regenerates only the coarse operators */
  PCMLNullSpaceType nulltype;            /* how the near-null space for aggregation is chosen */
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc; /* number of local coordinate locations (matrix blocks) */
  PetscReal        *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
65 
66 #undef __FUNCT__
67 #define __FUNCT__ "PetscML_getrow"
68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
69 {
70   PetscErrorCode ierr;
71   PetscInt       m,i,j,k=0,row,*aj;
72   PetscScalar    *aa;
73   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
74   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
75 
76   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
77   for (i = 0; i<N_requested_rows; i++) {
78     row   = requested_rows[i];
79     row_lengths[i] = a->ilen[row];
80     if (allocated_space < k+row_lengths[i]) return(0);
81     if ((row >= 0) || (row <= (m-1))) {
82       aj = a->j + a->i[row];
83       aa = a->a + a->i[row];
84       for (j=0; j<row_lengths[i]; j++) {
85         columns[k]  = aj[j];
86         values[k++] = aa[j];
87       }
88     }
89   }
90   return(1);
91 }
92 
93 #undef __FUNCT__
94 #define __FUNCT__ "PetscML_comm"
95 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
96 {
97   PetscErrorCode ierr;
98   FineGridCtx    *ml=(FineGridCtx*)ML_data;
99   Mat            A=ml->A;
100   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
101   PetscMPIInt    size;
102   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
103   PetscScalar    *array;
104 
105   PetscFunctionBegin;
106   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
107   if (size == 1) return 0;
108 
109   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
110   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
111   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
112   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
113   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
114   for (i=in_length; i<out_length; i++) {
115     p[i] = array[i-in_length];
116   }
117   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
118   PetscFunctionReturn(0);
119 }
120 
121 #undef __FUNCT__
122 #define __FUNCT__ "PetscML_matvec"
123 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
124 {
125   PetscErrorCode ierr;
126   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
127   Mat            A=ml->A, Aloc=ml->Aloc;
128   PetscMPIInt    size;
129   PetscScalar    *pwork=ml->pwork;
130   PetscInt       i;
131 
132   PetscFunctionBegin;
133   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
134   if (size == 1) {
135     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
136   } else {
137     for (i=0; i<in_length; i++) pwork[i] = p[i];
138     PetscML_comm(pwork,ml);
139     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
140   }
141   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
142   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
143   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
144   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
145   PetscFunctionReturn(0);
146 }
147 
148 #undef __FUNCT__
149 #define __FUNCT__ "MatMult_ML"
150 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
151 {
152   PetscErrorCode   ierr;
153   Mat_MLShell      *shell;
154   PetscScalar      *xarray,*yarray;
155   PetscInt         x_length,y_length;
156 
157   PetscFunctionBegin;
158   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
159   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
160   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
161   x_length = shell->mlmat->invec_leng;
162   y_length = shell->mlmat->outvec_leng;
163   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
164   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
165   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
166   PetscFunctionReturn(0);
167 }
168 
169 #undef __FUNCT__
170 #define __FUNCT__ "MatMultAdd_ML"
171 /* Computes y = w + A * x
172    It is possible that w == y, but not x == y
173 */
174 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
175 {
176   Mat_MLShell   *shell;
177   PetscScalar   *xarray,*yarray;
178   PetscInt       x_length,y_length;
179   PetscErrorCode ierr;
180 
181   PetscFunctionBegin;
182   ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
183   if (y == w) {
184     if (!shell->work) {
185       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
186     }
187     ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
188     ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
189     x_length = shell->mlmat->invec_leng;
190     y_length = shell->mlmat->outvec_leng;
191     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
192     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
193     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
194     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
195   } else {
196     ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
197     ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
198     x_length = shell->mlmat->invec_leng;
199     y_length = shell->mlmat->outvec_leng;
200     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
201     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
202     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
203     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
204   }
205   PetscFunctionReturn(0);
206 }
207 
/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/* Concatenate the diagonal (mpimat->A) and off-diagonal (mpimat->B) SeqAIJ
   pieces of a parallel AIJ matrix into one local SeqAIJ matrix for ML.
   Columns 0..an-1 are the locally owned columns; B's compressed ghost
   columns are shifted to start at an.  With MAT_REUSE_MATRIX only the
   numerical values are refreshed (nonzero pattern assumed unchanged). */
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row i of the merged matrix holds row i of A followed by row i of B */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) {
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: shift ghost columns past the local block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* same sparsity pattern: overwrite the stored values in place */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
275 
276 extern PetscErrorCode MatDestroy_Shell(Mat);
277 #undef __FUNCT__
278 #define __FUNCT__ "MatDestroy_ML"
279 static PetscErrorCode MatDestroy_ML(Mat A)
280 {
281   PetscErrorCode ierr;
282   Mat_MLShell    *shell;
283 
284   PetscFunctionBegin;
285   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
286   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
287   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
288   ierr = PetscFree(shell);CHKERRQ(ierr);
289   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
290   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
291   PetscFunctionReturn(0);
292 }
293 
294 #undef __FUNCT__
295 #define __FUNCT__ "MatWrapML_SeqAIJ"
296 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
297 {
298   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
299   PetscErrorCode        ierr;
300   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
301   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
302   PetscScalar           *ml_vals=matdata->values,*aa;
303 
304   PetscFunctionBegin;
305   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
306   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
307     if (reuse) {
308       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
309       aij->i = ml_rowptr;
310       aij->j = ml_cols;
311       aij->a = ml_vals;
312     } else {
313       /* sort ml_cols and ml_vals */
314       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
315       for (i=0; i<m; i++) {
316         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
317       }
318       aj = ml_cols; aa = ml_vals;
319       for (i=0; i<m; i++) {
320         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
321         aj += nnz[i]; aa += nnz[i];
322       }
323       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
324       ierr = PetscFree(nnz);CHKERRQ(ierr);
325     }
326     PetscFunctionReturn(0);
327   }
328 
329   nz_max = PetscMax(1,mlmat->max_nz_per_row);
330   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
331   if (!reuse) {
332     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
333     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
334     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
335     /* keep track of block size for A matrices */
336     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
337 
338     ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
339     for (i=0; i<m; i++) {
340       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
341     }
342     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
343   }
344   for (i=0; i<m; i++) {
345     PetscInt ncols;
346 
347     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
348     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
349   }
350   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
351   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
352 
353   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
354   ierr = PetscFree(nnz);CHKERRQ(ierr);
355   PetscFunctionReturn(0);
356 }
357 
358 #undef __FUNCT__
359 #define __FUNCT__ "MatWrapML_SHELL"
360 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
361 {
362   PetscErrorCode ierr;
363   PetscInt       m,n;
364   ML_Comm        *MLcomm;
365   Mat_MLShell    *shellctx;
366 
367   PetscFunctionBegin;
368   m = mlmat->outvec_leng;
369   n = mlmat->invec_leng;
370 
371   if (reuse) {
372     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
373     shellctx->mlmat = mlmat;
374     PetscFunctionReturn(0);
375   }
376 
377   MLcomm = mlmat->comm;
378   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
379   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
380   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
381   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
382   shellctx->A         = *newmat;
383   shellctx->mlmat     = mlmat;
384   shellctx->work      = PETSC_NULL;
385   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
386   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
387   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
388   (*newmat)->ops->destroy = MatDestroy_ML;
389   PetscFunctionReturn(0);
390 }
391 
392 #undef __FUNCT__
393 #define __FUNCT__ "MatWrapML_MPIAIJ"
394 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
395 {
396   PetscInt              *aj;
397   PetscScalar           *aa;
398   PetscErrorCode        ierr;
399   PetscInt              i,j,*gordering;
400   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
401   Mat                   A;
402 
403   PetscFunctionBegin;
404   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
405   n = mlmat->invec_leng;
406   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
407 
408   nz_max = PetscMax(1,mlmat->max_nz_per_row);
409   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
410   if (reuse) {
411     A = *newmat;
412   } else {
413     PetscInt *nnzA,*nnzB,*nnz;
414     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
415     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
416     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
417     /* keep track of block size for A matrices */
418     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
419     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
420 
421     for (i=0; i<m; i++) {
422       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
423       nnzA[i] = 0;
424       for (j=0; j<nnz[i]; j++) {
425         if (aj[j] < m) nnzA[i]++;
426       }
427       nnzB[i] = nnz[i] - nnzA[i];
428     }
429     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
430     ierr = PetscFree3(nnzA,nnzB,nnz);
431   }
432   /* create global row numbering for a ML_Operator */
433   ML_build_global_numbering(mlmat,&gordering,"rows");
434   for (i=0; i<m; i++) {
435     PetscInt ncols;
436     row = gordering[i];
437 
438     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
439     for (j = 0; j < ncols; j++) {
440       aj[j] = gordering[aj[j]];
441     }
442     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
443   }
444   ML_free(gordering);
445   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
446   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
447   *newmat = A;
448 
449   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
450   PetscFunctionReturn(0);
451 }
452 
453 /* -------------------------------------------------------------------------- */
454 /*
455    PCSetCoordinates_ML
456 
457    Input Parameter:
458    .  pc - the preconditioner context
459 */
460 #undef __FUNCT__
461 #define __FUNCT__ "PCSetCoordinates_ML"
462 PETSC_EXTERN_C PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
463 {
464   PC_MG          *mg = (PC_MG*)pc->data;
465   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
466   PetscErrorCode ierr;
467   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
468   Mat            Amat = pc->pmat;
469 
470   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
471   PetscFunctionBegin;
472   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
473   ierr  = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
474 
475   ierr  = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
476   nloc = (Iend-my0)/bs;
477 
478   if (nloc!=a_nloc)SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
479   if ((Iend-my0)%bs!=0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);
480 
481   oldarrsz = pc_ml->dim * pc_ml->nloc;
482   pc_ml->dim = ndm;
483   pc_ml->nloc = a_nloc;
484   arrsz = ndm * a_nloc;
485 
486   /* create data - syntactic sugar that should be refactored at some point */
487   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
488     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
489     ierr = PetscMalloc((arrsz)*sizeof(PetscReal), &pc_ml->coords);CHKERRQ(ierr);
490   }
491   for (kk=0;kk<arrsz;kk++)pc_ml->coords[kk] = -999.;
492   /* copy data in - column oriented */
493   for (kk = 0; kk < nloc; kk++) {
494     for (ii = 0; ii < ndm; ii++) {
495       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
496     }
497   }
498   PetscFunctionReturn(0);
499 }
500 
501 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/* Tear down everything PCSetUp_ML built: coordinate vectors and ML's
   viz/stats data, the ML hierarchy and aggregation objects, the fine-grid
   interface context, the per-level matrices/vectors, and stored coordinates.
   Order matters: ML-owned data is detached/cleaned before ML_Destroy(). */
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) { /* coordinates were attached via PCSetCoordinates_ML */
    ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats *) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object);
  }
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    /* fine-grid interface: scratch array, local matrix, work vectors */
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* note: loop stops below fine_level; the fine-level A belongs to the user
       and r lives one level up (gridctx[level+1].r) */
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
  pc_ml->dim = 0;
  pc_ml->nloc = 0;
  PetscFunctionReturn(0);
}
552 /* -------------------------------------------------------------------------- */
553 /*
554    PCSetUp_ML - Prepares for the use of the ML preconditioner
555                     by setting data structures and options.
556 
557    Input Parameter:
558 .  pc - the preconditioner context
559 
560    Application Interface Routine: PCSetUp()
561 
562    Notes:
563    The interface routine PCSetUp() is not usually called directly by
564    the user, but instead is called by PCApply() if necessary.
565 */
566 extern PetscErrorCode PCSetFromOptions_MG(PC);
567 extern PetscErrorCode PCReset_MG(PC);
568 
569 #undef __FUNCT__
570 #define __FUNCT__ "PCSetUp_ML"
571 PetscErrorCode PCSetUp_ML(PC pc)
572 {
573   PetscErrorCode  ierr;
574   PetscMPIInt     size;
575   FineGridCtx     *PetscMLdata;
576   ML              *ml_object;
577   ML_Aggregate    *agg_object;
578   ML_Operator     *mlmat;
579   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
580   Mat             A,Aloc;
581   GridCtx         *gridctx;
582   PC_MG           *mg = (PC_MG*)pc->data;
583   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
584   PetscBool       isSeq, isMPI;
585   KSP             smoother;
586   PC              subpc;
587   PetscInt        mesh_level, old_mesh_level;
588 
589   PetscFunctionBegin;
590   A = pc->pmat;
591   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
592 
593   if (pc->setupcalled) {
594     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
595       /*
596        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
597        multiple solves in which the matrix is not changing too quickly.
598        */
599       ml_object = pc_ml->ml_object;
600       gridctx = pc_ml->gridctx;
601       Nlevels = pc_ml->Nlevels;
602       fine_level = Nlevels - 1;
603       gridctx[fine_level].A = A;
604 
605       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
606       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
607       if (isMPI) {
608         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
609       } else if (isSeq) {
610         Aloc = A;
611         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
612       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
613 
614       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
615       PetscMLdata = pc_ml->PetscMLdata;
616       ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
617       PetscMLdata->A    = A;
618       PetscMLdata->Aloc = Aloc;
619       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
620       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
621 
622       mesh_level = ml_object->ML_finest_level;
623       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
624         old_mesh_level = mesh_level;
625         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
626 
627         /* clean and regenerate A */
628         mlmat = &(ml_object->Amat[mesh_level]);
629         ML_Operator_Clean(mlmat);
630         ML_Operator_Init(mlmat,ml_object->comm);
631         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
632       }
633 
634       level = fine_level - 1;
635       if (size == 1) { /* convert ML P, R and A into seqaij format */
636         for (mllevel=1; mllevel<Nlevels; mllevel++) {
637           mlmat = &(ml_object->Amat[mllevel]);
638           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
639           level--;
640         }
641       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
642         for (mllevel=1; mllevel<Nlevels; mllevel++) {
643           mlmat  = &(ml_object->Amat[mllevel]);
644           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
645           level--;
646         }
647       }
648 
649       for (level=0; level<fine_level; level++) {
650         if (level > 0) {
651           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
652         }
653         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
654       }
655       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
656       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
657 
658       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
659       PetscFunctionReturn(0);
660     } else {
661       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
662       ierr = PCReset_ML(pc);CHKERRQ(ierr);
663       ierr = PCReset_MG(pc);CHKERRQ(ierr);
664     }
665   }
666 
667   /* setup special features of PCML */
668   /*--------------------------------*/
669   /* covert A to Aloc to be used by ML at fine grid */
670   pc_ml->size = size;
671   ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
672   ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
673   if (isMPI) {
674     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
675   } else if (isSeq) {
676     Aloc = A;
677     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
678   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
679 
680   /* create and initialize struct 'PetscMLdata' */
681   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
682   pc_ml->PetscMLdata = PetscMLdata;
683   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
684 
685   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
686   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
687   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
688 
689   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
690   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
691   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
692   PetscMLdata->A    = A;
693   PetscMLdata->Aloc = Aloc;
694   if (pc_ml->dim) { /* create vecs around the coordinate data given */
695     PetscInt i,j,dim=pc_ml->dim;
696     PetscInt nloc = pc_ml->nloc,nlocghost;
697     PetscReal *ghostedcoords;
698 
699     ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
700     nlocghost = Aloc->cmap->n / bs;
701     ierr = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr);
702     for (i = 0; i < dim; i++) {
703       /* copy coordinate values into first component of pwork */
704       for (j = 0; j < nloc; j++) {
705         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
706       }
707       /* get the ghost values */
708       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
709       /* write into the vector */
710       for (j = 0; j < nlocghost; j++) {
711         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
712       }
713     }
714     /* replace the original coords with the ghosted coords, because these are
715      * what ML needs */
716     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
717     pc_ml->coords = ghostedcoords;
718   }
719 
720   /* create ML discretization matrix at fine grid */
721   /* ML requires input of fine-grid matrix. It determines nlevels. */
722   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
723   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
724   ML_Create(&ml_object,pc_ml->MaxNlevels);
725   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
726   pc_ml->ml_object = ml_object;
727   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
728   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
729   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
730 
731   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
732 
733   /* aggregation */
734   ML_Aggregate_Create(&agg_object);
735   pc_ml->agg_object = agg_object;
736 
737   {
738     MatNullSpace mnull;
739     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
740     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
741       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
742       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
743       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
744     }
745     switch (pc_ml->nulltype) {
746     case PCML_NULLSPACE_USER: {
747       PetscScalar *nullvec;
748       const PetscScalar *v;
749       PetscBool has_const;
750       PetscInt i,j,mlocal,nvec,M;
751       const Vec *vecs;
752       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
753       ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
754       ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
755       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
756       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
757       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
758       for (i=0; i<nvec; i++) {
759         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
760         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
761         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
762       }
763       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
764       ierr = PetscFree(nullvec);CHKERRQ(ierr);
765     } break;
766     case PCML_NULLSPACE_BLOCK:
767       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
768       break;
769     case PCML_NULLSPACE_SCALAR:
770       break;
771     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
772     }
773   }
774   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
775   /* set options */
776   switch (pc_ml->CoarsenScheme) {
777   case 1:
778     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
779   case 2:
780     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
781   case 3:
782     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
783   }
784   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
785   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
786   if (pc_ml->SpectralNormScheme_Anorm) {
787     ML_Set_SpectralNormScheme_Anorm(ml_object);
788   }
789   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
790   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
791   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
792   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
793   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
794   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
795 
796   if (pc_ml->Aux) {
797     if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
798     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
799     ml_object->Amat[0].aux_data->enable = 1;
800     ml_object->Amat[0].aux_data->max_level = 10;
801     ml_object->Amat[0].num_PDEs = bs;
802   }
803 
804   if (pc_ml->dim) {
805     PetscInt i,dim = pc_ml->dim;
806     ML_Aggregate_Viz_Stats *grid_info;
807     PetscInt nlocghost;
808 
809     ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
810     nlocghost = Aloc->cmap->n / bs;
811 
812     ML_Aggregate_VizAndStats_Setup(ml_object); /* create ml info for coords */
813     grid_info = (ML_Aggregate_Viz_Stats *) ml_object->Grid[0].Grid;
814     for (i = 0; i < dim; i++) {
815       /* set the finest level coordinates to point to the column-order array
816        * in pc_ml */
817       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
818       switch (i) {
819       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
820       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
821       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
822       default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
823       }
824     }
825     grid_info->Ndim = dim;
826   }
827 
828   /* repartitioning */
829   if (pc_ml->Repartition) {
830     ML_Repartition_Activate (ml_object);
831     ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio);
832     ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc);
833     ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc);
834 #if 0                           /* Function not yet defined in ml-6.2 */
835     /* I'm not sure what compatibility issues might crop up if we partitioned
836      * on the finest level, so to be safe repartition starts on the next
837      * finest level (reflection default behavior in
838      * ml_MultiLevelPreconditioner) */
839     ML_Repartition_Set_StartLevel(ml_object,1);
840 #endif
841 
842     if (!pc_ml->RepartitionType) {
843       PetscInt i;
844 
845       if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
846       ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN);
847       ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim);
848 
849       for (i = 0;i < ml_object->ML_num_levels;i++) {
850         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Grid[i].Grid;
851         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
852         /* defaults from ml_agg_info.c */
853         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
854         grid_info->zoltan_timers = 0;
855         grid_info->smoothing_steps = 4;       /* only relevant to hypergraph / fast hypergraph */
856       }
857     }
858     else {
859       ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS);
860     }
861   }
862 
863   if (pc_ml->OldHierarchy) {
864     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
865   } else {
866     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
867   }
868   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
869   pc_ml->Nlevels = Nlevels;
870   fine_level = Nlevels - 1;
871 
872   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
873   /* set default smoothers */
874   for (level=1; level<=fine_level; level++) {
875     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
876     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
877     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
878     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
879   }
880   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
881   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
882   ierr = PetscOptionsEnd();CHKERRQ(ierr);
883 
884   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
885   pc_ml->gridctx = gridctx;
886 
887   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
888      Level 0 is the finest grid for ML, but coarsest for PETSc! */
889   gridctx[fine_level].A = A;
890 
891   level = fine_level - 1;
892   if (size == 1) { /* convert ML P, R and A into seqaij format */
893     for (mllevel=1; mllevel<Nlevels; mllevel++) {
894       mlmat = &(ml_object->Pmat[mllevel]);
895       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
896       mlmat = &(ml_object->Rmat[mllevel-1]);
897       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
898 
899       mlmat = &(ml_object->Amat[mllevel]);
900       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
901       level--;
902     }
903   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
904     for (mllevel=1; mllevel<Nlevels; mllevel++) {
905       mlmat  = &(ml_object->Pmat[mllevel]);
906       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
907       mlmat  = &(ml_object->Rmat[mllevel-1]);
908       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
909 
910       mlmat  = &(ml_object->Amat[mllevel]);
911       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
912       level--;
913     }
914   }
915 
916   /* create vectors and ksp at all levels */
917   for (level=0; level<fine_level; level++) {
918     level1 = level + 1;
919     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
920     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
921     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
922     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
923 
924     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
925     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
926     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
927     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
928 
929     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
930     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
931     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
932     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
933 
934     if (level == 0) {
935       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
936     } else {
937       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
938     }
939   }
940   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
941 
942   /* create coarse level and the interpolation between the levels */
943   for (level=0; level<fine_level; level++) {
944     level1 = level + 1;
945     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
946     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
947     if (level > 0) {
948       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
949     }
950     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
951   }
952   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
953   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
954 
955   /* put coordinate info in levels */
956   if (pc_ml->dim) {
957     PetscInt i,j,dim = pc_ml->dim;
958     PetscInt bs, nloc;
959     PC subpc;
960     PetscReal *array;
961 
962     level = fine_level;
963     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
964       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Amat[mllevel].to->Grid->Grid;
965       MPI_Comm comm = ((PetscObject)gridctx[level].A)->comm;
966 
967       ierr = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
968       ierr = MatGetLocalSize (gridctx[level].A, PETSC_NULL, &nloc);CHKERRQ(ierr);
969       nloc /= bs; /* number of local nodes */
970 
971       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
972       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
973       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
974       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
975       for (j = 0; j < nloc; j++) {
976         for (i = 0; i < dim; i++) {
977           switch (i) {
978           case 0: array[dim * j + i] = grid_info->x[j]; break;
979           case 1: array[dim * j + i] = grid_info->y[j]; break;
980           case 2: array[dim * j + i] = grid_info->z[j]; break;
981           default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
982           }
983         }
984       }
985 
986       /* passing coordinates to smoothers/coarse solver, should they need them */
987       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
988       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
989       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
990       level--;
991     }
992   }
993 
994   /* setupcalled is set to 0 so that MG is setup from scratch */
995   pc->setupcalled = 0;
996   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
997   PetscFunctionReturn(0);
998 }
999 
1000 /* -------------------------------------------------------------------------- */
1001 /*
1002    PCDestroy_ML - Destroys the private context for the ML preconditioner
1003    that was created with PCCreate_ML().
1004 
1005    Input Parameter:
1006 .  pc - the preconditioner context
1007 
1008    Application Interface Routine: PCDestroy()
1009 */
1010 #undef __FUNCT__
1011 #define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml= (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  /* Release the ML hierarchy and all wrapped matrices/vectors first */
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  /* Free the ML-specific context; NOTE(review): mg->innerctx is left dangling
     here — assumes PCDestroy_MG() below never dereferences innerctx (confirm) */
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  /* Remove the PCSetCoordinates_C hook that PCCreate_ML() composed on the PC */
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","",PETSC_NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1025 
1026 #undef __FUNCT__
1027 #define __FUNCT__ "PCSetFromOptions_ML"
1028 PetscErrorCode PCSetFromOptions_ML(PC pc)
1029 {
1030   PetscErrorCode  ierr;
1031   PetscInt        indx,PrintLevel,partindx;
1032   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1033   const char      *part[] = {"Zoltan","ParMETIS"};
1034 #if defined(HAVE_ML_ZOLTAN)
1035   PetscInt zidx;
1036   const char      *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1037 #endif
1038   PC_MG           *mg = (PC_MG*)pc->data;
1039   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
1040   PetscMPIInt     size;
1041   MPI_Comm        comm = ((PetscObject)pc)->comm;
1042 
1043   PetscFunctionBegin;
1044   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1045   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
1046   PrintLevel    = 0;
1047   indx          = 0;
1048   partindx      = 0;
1049   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
1050   ML_Set_PrintLevel(PrintLevel);
1051   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
1052   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
1053   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
1054   pc_ml->CoarsenScheme = indx;
1055   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
1056   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
1057   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
1058   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
1059   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
1060   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr);
1061   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
1062   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
1063   /*
1064     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1065     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1066 
1067     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1068     combination of options and ML's exit(1) explanations don't help matters.
1069   */
1070   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1071   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1072   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
1073   if (pc_ml->EnergyMinimization) {
1074     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
1075   }
1076   if (pc_ml->EnergyMinimization == 2) {
1077     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1078     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
1079   }
1080   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1081   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1082   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
1083   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1084   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1085   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
1086   /*
1087     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1088     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1089     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1090     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1091     this context, but ML doesn't provide a way to find out which ones.
1092    */
1093   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
1094   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,PETSC_NULL);CHKERRQ(ierr);
1095   if (pc_ml->Repartition) {
1096     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,PETSC_NULL);CHKERRQ(ierr);
1097     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,PETSC_NULL);CHKERRQ(ierr);
1098     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,PETSC_NULL);CHKERRQ(ierr);
1099 #if defined(HAVE_ML_ZOLTAN)
1100     partindx = 0;
1101     ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,PETSC_NULL);CHKERRQ(ierr);
1102     pc_ml->RepartitionType = partindx;
1103     if (!partindx) {
1104       PetscInt zindx = 0;
1105       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,PETSC_NULL);CHKERRQ(ierr);
1106       pc_ml->ZoltanScheme = zindx;
1107     }
1108 #else
1109     partindx = 1;
1110     ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,PETSC_NULL);CHKERRQ(ierr);
1111     if (!partindx) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1112 #endif
1113     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,PETSC_NULL);CHKERRQ(ierr);
1114     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,PETSC_NULL);CHKERRQ(ierr);
1115   }
1116   ierr = PetscOptionsTail();CHKERRQ(ierr);
1117   PetscFunctionReturn(0);
1118 }
1119 
1120 /* -------------------------------------------------------------------------- */
1121 /*
1122    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1123    and sets this as the private data within the generic preconditioning
1124    context, PC, that was created within PCCreate().
1125 
1126    Input Parameter:
1127 .  pc - the preconditioner context
1128 
1129    Application Interface Routine: PCCreate()
1130 */
1131 
1132 /*MC
1133      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
1134        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
1136        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1137 
1138    Options Database Key:
1139    Multigrid options(inherited)
1140 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
1141 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
1142 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1144    ML options:
1145 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1146 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1147 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1148 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1149 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1150 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1151 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1153 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1154 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1155 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1156 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1157 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1158 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
1159 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1160 
1161    Level: intermediate
1162 
1163   Concepts: multigrid
1164 
1165 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
1167            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1168            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1169            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1170 M*/
1171 
1172 EXTERN_C_BEGIN
1173 #undef __FUNCT__
1174 #define __FUNCT__ "PCCreate_ML"
1175 PetscErrorCode  PCCreate_ML(PC pc)
1176 {
1177   PetscErrorCode  ierr;
1178   PC_ML           *pc_ml;
1179   PC_MG           *mg;
1180 
1181   PetscFunctionBegin;
1182   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1183   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1184   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1185   /* Since PCMG tries to use DM assocated with PC must delete it */
1186   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1187   mg = (PC_MG*)pc->data;
1188   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
1189 
1190   /* create a supporting struct and attach it to pc */
1191   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
1192   mg->innerctx = pc_ml;
1193 
1194   pc_ml->ml_object     = 0;
1195   pc_ml->agg_object    = 0;
1196   pc_ml->gridctx       = 0;
1197   pc_ml->PetscMLdata   = 0;
1198   pc_ml->Nlevels       = -1;
1199   pc_ml->MaxNlevels    = 10;
1200   pc_ml->MaxCoarseSize = 1;
1201   pc_ml->CoarsenScheme = 1;
1202   pc_ml->Threshold     = 0.0;
1203   pc_ml->DampingFactor = 4.0/3.0;
1204   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1205   pc_ml->size          = 0;
1206   pc_ml->dim           = 0;
1207   pc_ml->nloc          = 0;
1208   pc_ml->coords        = 0;
1209   pc_ml->Repartition   = PETSC_FALSE;
1210   pc_ml->MaxMinRatio   = 1.3;
1211   pc_ml->MinPerProc    = 512;
1212   pc_ml->PutOnSingleProc = 5000;
1213   pc_ml->RepartitionType = 0;
1214   pc_ml->ZoltanScheme  = 0;
1215   pc_ml->Aux           = PETSC_FALSE;
1216   pc_ml->AuxThreshold  = 0.0;
1217 
1218   /* allow for coordinates to be passed */
1219   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","PCSetCoordinates_ML",PCSetCoordinates_ML);CHKERRQ(ierr);
1220 
1221   /* overwrite the pointers of PCMG by the functions of PCML */
1222   pc->ops->setfromoptions = PCSetFromOptions_ML;
1223   pc->ops->setup          = PCSetUp_ML;
1224   pc->ops->reset          = PCReset_ML;
1225   pc->ops->destroy        = PCDestroy_ML;
1226   PetscFunctionReturn(0);
1227 }
1228 EXTERN_C_END
1229