xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 5c8f6a953e7ed1c81f507d64aebddb11080b60e9)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
/* How the near-null space handed to ML's smoothed aggregation is chosen:
   AUTO picks USER/BLOCK/SCALAR based on available data, USER takes vectors from
   MatSetNearNullSpace(), BLOCK uses bs constant vectors, SCALAR uses ML's default. */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
/* Option strings; the trailing type name, prefix, and NULL are required by PetscOptionsEnum() */
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution, right-hand side, residual */
  Mat        A,P,R;           /* level operator, prolongation, restriction */
  KSP        ksp;             /* smoother (or coarse solver) on this level */
  Vec        coords;          /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;
32 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;    /* work vectors that wrap ML's raw arrays via VecPlaceArray() */
  ML_Operator  *mlmat; /* ML's view of the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;
41 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* y sized to the range of mlmat; work lazily created by MatMultAdd_ML */
} Mat_MLShell;
48 
/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;   /* ML hierarchy handle */
  ML_Aggregate      *agg_object;  /* ML aggregation options object */
  GridCtx           *gridctx;     /* per-level PETSc objects, index 0 = coarsest */
  FineGridCtx       *PetscMLdata; /* fine-grid wrapper handed to ML callbacks */
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation; /* reuse interpolation when matrix has SAME_NONZERO_PATTERN */
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc; /* number of local coordinate nodes (blocks) */
  PetscReal        *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
65 
66 #undef __FUNCT__
67 #define __FUNCT__ "PetscML_getrow"
68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
69 {
70   PetscErrorCode ierr;
71   PetscInt       m,i,j,k=0,row,*aj;
72   PetscScalar    *aa;
73   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
74   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
75 
76   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
77   for (i = 0; i<N_requested_rows; i++) {
78     row   = requested_rows[i];
79     row_lengths[i] = a->ilen[row];
80     if (allocated_space < k+row_lengths[i]) return(0);
81     if ((row >= 0) || (row <= (m-1))) {
82       aj = a->j + a->i[row];
83       aa = a->a + a->i[row];
84       for (j=0; j<row_lengths[i]; j++) {
85         columns[k]  = aj[j];
86         values[k++] = aa[j];
87       }
88     }
89   }
90   return(1);
91 }
92 
93 #undef __FUNCT__
94 #define __FUNCT__ "PetscML_comm"
95 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
96 {
97   PetscErrorCode ierr;
98   FineGridCtx    *ml=(FineGridCtx*)ML_data;
99   Mat            A=ml->A;
100   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
101   PetscMPIInt    size;
102   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
103   PetscScalar    *array;
104 
105   PetscFunctionBegin;
106   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
107   if (size == 1) return 0;
108 
109   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
110   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
111   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
112   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
113   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
114   for (i=in_length; i<out_length; i++) {
115     p[i] = array[i-in_length];
116   }
117   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
118   PetscFunctionReturn(0);
119 }
120 
121 #undef __FUNCT__
122 #define __FUNCT__ "PetscML_matvec"
123 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
124 {
125   PetscErrorCode ierr;
126   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
127   Mat            A=ml->A, Aloc=ml->Aloc;
128   PetscMPIInt    size;
129   PetscScalar    *pwork=ml->pwork;
130   PetscInt       i;
131 
132   PetscFunctionBegin;
133   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
134   if (size == 1) {
135     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
136   } else {
137     for (i=0; i<in_length; i++) pwork[i] = p[i];
138     PetscML_comm(pwork,ml);
139     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
140   }
141   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
142   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
143   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
144   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
145   PetscFunctionReturn(0);
146 }
147 
148 #undef __FUNCT__
149 #define __FUNCT__ "MatMult_ML"
150 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
151 {
152   PetscErrorCode   ierr;
153   Mat_MLShell      *shell;
154   PetscScalar      *xarray,*yarray;
155   PetscInt         x_length,y_length;
156 
157   PetscFunctionBegin;
158   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
159   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
160   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
161   x_length = shell->mlmat->invec_leng;
162   y_length = shell->mlmat->outvec_leng;
163   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
164   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
165   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
166   PetscFunctionReturn(0);
167 }
168 
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell   *shell;
  PetscScalar   *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
  if (y == w) {
    /* y aliases w: apply ML into the (lazily created) work vector, then
       accumulate, so w's contents are not destroyed before they are used */
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    /* distinct output: apply ML directly into y, then add w */
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
207 
/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Builds a sequential AIJ matrix Aloc containing this process's rows of the
   MPIAIJ matrix A: the diagonal block keeps its local column numbering and
   the off-diagonal block's columns are shifted by an (the local column
   count), so Aloc has Aloc->cmap->n = an + (number of ghost columns).
   With MAT_REUSE_MATRIX only the numerical values are refreshed (the
   nonzero pattern is assumed unchanged).
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row pointer: each local row holds its diagonal plus off-diagonal entries */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) {
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++; /* ghost columns are numbered after the an local columns */
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* pattern unchanged: copy only the numerical values in the same order */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
275 
276 extern PetscErrorCode MatDestroy_Shell(Mat);
277 #undef __FUNCT__
278 #define __FUNCT__ "MatDestroy_ML"
279 static PetscErrorCode MatDestroy_ML(Mat A)
280 {
281   PetscErrorCode ierr;
282   Mat_MLShell    *shell;
283 
284   PetscFunctionBegin;
285   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
286   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
287   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
288   ierr = PetscFree(shell);CHKERRQ(ierr);
289   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
290   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
291   PetscFunctionReturn(0);
292 }
293 
294 #undef __FUNCT__
295 #define __FUNCT__ "MatWrapML_SeqAIJ"
296 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
297 {
298   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
299   PetscErrorCode        ierr;
300   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
301   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
302   PetscScalar           *ml_vals=matdata->values,*aa;
303 
304   PetscFunctionBegin;
305   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
306   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
307     if (reuse) {
308       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
309       aij->i = ml_rowptr;
310       aij->j = ml_cols;
311       aij->a = ml_vals;
312     } else {
313       /* sort ml_cols and ml_vals */
314       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
315       for (i=0; i<m; i++) {
316         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
317       }
318       aj = ml_cols; aa = ml_vals;
319       for (i=0; i<m; i++) {
320         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
321         aj += nnz[i]; aa += nnz[i];
322       }
323       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
324       ierr = PetscFree(nnz);CHKERRQ(ierr);
325     }
326     PetscFunctionReturn(0);
327   }
328 
329   nz_max = PetscMax(1,mlmat->max_nz_per_row);
330   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
331   if (!reuse) {
332     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
333     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
334     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
335     /* keep track of block size for A matrices */
336     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
337 
338     ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
339     for (i=0; i<m; i++) {
340       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
341     }
342     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
343   }
344   for (i=0; i<m; i++) {
345     PetscInt ncols;
346 
347     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
348     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
349   }
350   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
351   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
352 
353   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
354   ierr = PetscFree(nnz);CHKERRQ(ierr);
355   PetscFunctionReturn(0);
356 }
357 
358 #undef __FUNCT__
359 #define __FUNCT__ "MatWrapML_SHELL"
360 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
361 {
362   PetscErrorCode ierr;
363   PetscInt       m,n;
364   ML_Comm        *MLcomm;
365   Mat_MLShell    *shellctx;
366 
367   PetscFunctionBegin;
368   m = mlmat->outvec_leng;
369   n = mlmat->invec_leng;
370 
371   if (reuse) {
372     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
373     shellctx->mlmat = mlmat;
374     PetscFunctionReturn(0);
375   }
376 
377   MLcomm = mlmat->comm;
378   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
379   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
380   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
381   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
382   shellctx->A         = *newmat;
383   shellctx->mlmat     = mlmat;
384   shellctx->work      = PETSC_NULL;
385   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
386   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
387   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
388   (*newmat)->ops->destroy = MatDestroy_ML;
389   PetscFunctionReturn(0);
390 }
391 
392 #undef __FUNCT__
393 #define __FUNCT__ "MatWrapML_MPIAIJ"
394 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
395 {
396   PetscInt              *aj;
397   PetscScalar           *aa;
398   PetscErrorCode        ierr;
399   PetscInt              i,j,*gordering;
400   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
401   Mat                   A;
402 
403   PetscFunctionBegin;
404   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
405   n = mlmat->invec_leng;
406   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
407 
408   nz_max = PetscMax(1,mlmat->max_nz_per_row);
409   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
410   if (reuse) {
411     A = *newmat;
412   } else {
413     PetscInt *nnzA,*nnzB,*nnz;
414     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
415     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
416     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
417     /* keep track of block size for A matrices */
418     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
419     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
420 
421     for (i=0; i<m; i++) {
422       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
423       nnzA[i] = 0;
424       for (j=0; j<nnz[i]; j++) {
425         if (aj[j] < m) nnzA[i]++;
426       }
427       nnzB[i] = nnz[i] - nnzA[i];
428     }
429     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
430     ierr = PetscFree3(nnzA,nnzB,nnz);
431   }
432   /* create global row numbering for a ML_Operator */
433   ML_build_global_numbering(mlmat,&gordering,"rows");
434   for (i=0; i<m; i++) {
435     PetscInt ncols;
436     row = gordering[i];
437 
438     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
439     for (j = 0; j < ncols; j++) {
440       aj[j] = gordering[aj[j]];
441     }
442     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
443   }
444   ML_free(gordering);
445   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
446   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
447   *newmat = A;
448 
449   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
450   PetscFunctionReturn(0);
451 }
452 
453 /* -------------------------------------------------------------------------- */
454 /*
455    PCSetCoordinates_ML
456 
457    Input Parameter:
458    .  pc - the preconditioner context
459 */
460 #undef __FUNCT__
461 #define __FUNCT__ "PCSetCoordinates_ML"
462 PETSC_EXTERN_C PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
463 {
464   PC_MG          *mg = (PC_MG*)pc->data;
465   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
466   PetscErrorCode ierr;
467   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
468   Mat            Amat = pc->pmat;
469 
470   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
471   PetscFunctionBegin;
472   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
473   ierr  = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
474 
475   ierr  = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
476   nloc = (Iend-my0)/bs;
477 
478   if (nloc!=a_nloc)SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
479   if ((Iend-my0)%bs!=0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);
480 
481   oldarrsz = pc_ml->dim * pc_ml->nloc;
482   pc_ml->dim = ndm;
483   pc_ml->nloc = a_nloc;
484   arrsz = ndm * a_nloc;
485 
486   /* create data - syntactic sugar that should be refactored at some point */
487   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
488     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
489     ierr = PetscMalloc((arrsz)*sizeof(PetscReal), &pc_ml->coords);CHKERRQ(ierr);
490   }
491   for (kk=0;kk<arrsz;kk++)pc_ml->coords[kk] = -999.;
492   /* copy data in - column oriented */
493   for (kk = 0; kk < nloc; kk++) {
494     for (ii = 0; ii < ndm; ii++) {
495       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
496     }
497   }
498 
499   PetscFunctionReturn(0);
500 }
501 
502 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/*
   PCReset_ML - Destroys everything built by PCSetUp_ML (ML hierarchy,
   per-level matrices/vectors, coordinate storage) so the PC can be set
   up again, possibly with differently sized operators.
*/
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    /* coordinates were attached; release per-level coordinate vectors and
       detach the raw arrays from ML's grid info before ML cleanup */
    ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats *) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object);
  }
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* note: only levels below the fine level own A (the fine A is pc->pmat);
       r lives at level+1, matching how PCSetUp_ML assigned them */
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
  pc_ml->dim = 0;
  pc_ml->nloc = 0;
  PetscFunctionReturn(0);
}
553 /* -------------------------------------------------------------------------- */
554 /*
555    PCSetUp_ML - Prepares for the use of the ML preconditioner
556                     by setting data structures and options.
557 
558    Input Parameter:
559 .  pc - the preconditioner context
560 
561    Application Interface Routine: PCSetUp()
562 
563    Notes:
564    The interface routine PCSetUp() is not usually called directly by
565    the user, but instead is called by PCApply() if necessary.
566 */
567 extern PetscErrorCode PCSetFromOptions_MG(PC);
568 extern PetscErrorCode PCReset_MG(PC);
569 
570 #undef __FUNCT__
571 #define __FUNCT__ "PCSetUp_ML"
572 PetscErrorCode PCSetUp_ML(PC pc)
573 {
574   PetscErrorCode  ierr;
575   PetscMPIInt     size;
576   FineGridCtx     *PetscMLdata;
577   ML              *ml_object;
578   ML_Aggregate    *agg_object;
579   ML_Operator     *mlmat;
580   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
581   Mat             A,Aloc;
582   GridCtx         *gridctx;
583   PC_MG           *mg = (PC_MG*)pc->data;
584   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
585   PetscBool       isSeq, isMPI;
586   KSP             smoother;
587   PC              subpc;
588   PetscInt        mesh_level, old_mesh_level;
589 
590   PetscFunctionBegin;
591   A = pc->pmat;
592   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
593 
594   if (pc->setupcalled) {
595     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
596       /*
597        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
598        multiple solves in which the matrix is not changing too quickly.
599        */
600       ml_object = pc_ml->ml_object;
601       gridctx = pc_ml->gridctx;
602       Nlevels = pc_ml->Nlevels;
603       fine_level = Nlevels - 1;
604       gridctx[fine_level].A = A;
605 
606       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
607       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
608       if (isMPI) {
609         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
610       } else if (isSeq) {
611         Aloc = A;
612         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
613       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
614 
615       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
616       PetscMLdata = pc_ml->PetscMLdata;
617       ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
618       PetscMLdata->A    = A;
619       PetscMLdata->Aloc = Aloc;
620       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
621       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
622 
623       mesh_level = ml_object->ML_finest_level;
624       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
625         old_mesh_level = mesh_level;
626         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
627 
628         /* clean and regenerate A */
629         mlmat = &(ml_object->Amat[mesh_level]);
630         ML_Operator_Clean(mlmat);
631         ML_Operator_Init(mlmat,ml_object->comm);
632         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
633       }
634 
635       level = fine_level - 1;
636       if (size == 1) { /* convert ML P, R and A into seqaij format */
637         for (mllevel=1; mllevel<Nlevels; mllevel++) {
638           mlmat = &(ml_object->Amat[mllevel]);
639           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
640           level--;
641         }
642       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
643         for (mllevel=1; mllevel<Nlevels; mllevel++) {
644           mlmat  = &(ml_object->Amat[mllevel]);
645           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
646           level--;
647         }
648       }
649 
650       for (level=0; level<fine_level; level++) {
651         if (level > 0) {
652           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
653         }
654         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
655       }
656       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
657       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
658 
659       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
660       PetscFunctionReturn(0);
661     } else {
662       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
663       ierr = PCReset_ML(pc);CHKERRQ(ierr);
664       ierr = PCReset_MG(pc);CHKERRQ(ierr);
665     }
666   }
667 
668   /* setup special features of PCML */
669   /*--------------------------------*/
670   /* covert A to Aloc to be used by ML at fine grid */
671   pc_ml->size = size;
672   ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
673   ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
674   if (isMPI) {
675     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
676   } else if (isSeq) {
677     Aloc = A;
678     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
679   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
680 
681   /* create and initialize struct 'PetscMLdata' */
682   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
683   pc_ml->PetscMLdata = PetscMLdata;
684   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
685 
686   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
687   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
688   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
689 
690   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
691   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
692   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
693   PetscMLdata->A    = A;
694   PetscMLdata->Aloc = Aloc;
695   if (pc_ml->dim) { /* create vecs around the coordinate data given */
696     PetscInt i,j,dim=pc_ml->dim;
697     PetscInt nloc = pc_ml->nloc,nlocghost;
698     PetscReal *ghostedcoords;
699 
700     ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
701     nlocghost = Aloc->cmap->n / bs;
702     ierr = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr);
703     for (i = 0; i < dim; i++) {
704       /* copy coordinate values into first component of pwork */
705       for (j = 0; j < nloc; j++) {
706         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
707       }
708       /* get the ghost values */
709       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
710       /* write into the vector */
711       for (j = 0; j < nlocghost; j++) {
712         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
713       }
714     }
715     /* replace the original coords with the ghosted coords, because these are
716      * what ML needs */
717     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
718     pc_ml->coords = ghostedcoords;
719   }
720 
721   /* create ML discretization matrix at fine grid */
722   /* ML requires input of fine-grid matrix. It determines nlevels. */
723   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
724   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
725   ML_Create(&ml_object,pc_ml->MaxNlevels);
726   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
727   pc_ml->ml_object = ml_object;
728   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
729   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
730   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
731 
732   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
733 
734   /* aggregation */
735   ML_Aggregate_Create(&agg_object);
736   pc_ml->agg_object = agg_object;
737 
738   {
739     MatNullSpace mnull;
740     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
741     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
742       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
743       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
744       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
745     }
746     switch (pc_ml->nulltype) {
747     case PCML_NULLSPACE_USER: {
748       PetscScalar *nullvec;
749       const PetscScalar *v;
750       PetscBool has_const;
751       PetscInt i,j,mlocal,nvec,M;
752       const Vec *vecs;
753       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
754       ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
755       ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
756       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
757       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
758       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
759       for (i=0; i<nvec; i++) {
760         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
761         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
762         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
763       }
764       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
765       ierr = PetscFree(nullvec);CHKERRQ(ierr);
766     } break;
767     case PCML_NULLSPACE_BLOCK:
768       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
769       break;
770     case PCML_NULLSPACE_SCALAR:
771       break;
772     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
773     }
774   }
775   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
776   /* set options */
777   switch (pc_ml->CoarsenScheme) {
778   case 1:
779     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
780   case 2:
781     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
782   case 3:
783     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
784   }
785   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
786   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
787   if (pc_ml->SpectralNormScheme_Anorm) {
788     ML_Set_SpectralNormScheme_Anorm(ml_object);
789   }
790   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
791   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
792   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
793   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
794   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
795   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
796 
797   if (pc_ml->Aux) {
798     if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
799     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
800     ml_object->Amat[0].aux_data->enable = 1;
801     ml_object->Amat[0].aux_data->max_level = 10;
802     ml_object->Amat[0].num_PDEs = bs;
803   }
804 
805   if (pc_ml->dim) {
806     PetscInt i,dim = pc_ml->dim;
807     ML_Aggregate_Viz_Stats *grid_info;
808     PetscInt nlocghost;
809 
810     ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
811     nlocghost = Aloc->cmap->n / bs;
812 
813     ML_Aggregate_VizAndStats_Setup(ml_object); /* create ml info for coords */
814     grid_info = (ML_Aggregate_Viz_Stats *) ml_object->Grid[0].Grid;
815     for (i = 0; i < dim; i++) {
816       /* set the finest level coordinates to point to the column-order array
817        * in pc_ml */
818       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
819       switch (i) {
820       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
821       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
822       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
823       default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
824       }
825     }
826     grid_info->Ndim = dim;
827   }
828 
829   /* repartitioning */
830   if (pc_ml->Repartition) {
831     ML_Repartition_Activate (ml_object);
832     ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio);
833     ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc);
834     ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc);
835 #if 0                           /* Function not yet defined in ml-6.2 */
836     /* I'm not sure what compatibility issues might crop up if we partitioned
837      * on the finest level, so to be safe repartition starts on the next
838      * finest level (reflection default behavior in
839      * ml_MultiLevelPreconditioner) */
840     ML_Repartition_Set_StartLevel(ml_object,1);
841 #endif
842 
843     if (!pc_ml->RepartitionType) {
844       PetscInt i;
845 
846       if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
847       ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN);
848       ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim);
849 
850       for (i = 0;i < ml_object->ML_num_levels;i++) {
851         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Grid[i].Grid;
852         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
853         /* defaults from ml_agg_info.c */
854         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
855         grid_info->zoltan_timers = 0;
856         grid_info->smoothing_steps = 4;       /* only relevant to hypergraph / fast hypergraph */
857       }
858     }
859     else {
860       ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS);
861     }
862   }
863 
864   if (pc_ml->OldHierarchy) {
865     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
866   } else {
867     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
868   }
869   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
870   pc_ml->Nlevels = Nlevels;
871   fine_level = Nlevels - 1;
872 
873   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
874   /* set default smoothers */
875   for (level=1; level<=fine_level; level++) {
876     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
877     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
878     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
879     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
880   }
881   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
882   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
883   ierr = PetscOptionsEnd();CHKERRQ(ierr);
884 
885   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
886   pc_ml->gridctx = gridctx;
887 
888   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
889      Level 0 is the finest grid for ML, but coarsest for PETSc! */
890   gridctx[fine_level].A = A;
891 
892   level = fine_level - 1;
893   if (size == 1) { /* convert ML P, R and A into seqaij format */
894     for (mllevel=1; mllevel<Nlevels; mllevel++) {
895       mlmat = &(ml_object->Pmat[mllevel]);
896       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
897       mlmat = &(ml_object->Rmat[mllevel-1]);
898       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
899 
900       mlmat = &(ml_object->Amat[mllevel]);
901       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
902       level--;
903     }
904   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
905     for (mllevel=1; mllevel<Nlevels; mllevel++) {
906       mlmat  = &(ml_object->Pmat[mllevel]);
907       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
908       mlmat  = &(ml_object->Rmat[mllevel-1]);
909       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
910 
911       mlmat  = &(ml_object->Amat[mllevel]);
912       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
913       level--;
914     }
915   }
916 
917   /* create vectors and ksp at all levels */
918   for (level=0; level<fine_level; level++) {
919     level1 = level + 1;
920     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
921     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
922     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
923     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
924 
925     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
926     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
927     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
928     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
929 
930     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
931     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
932     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
933     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
934 
935     if (level == 0) {
936       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
937     } else {
938       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
939     }
940   }
941   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
942 
943   /* create coarse level and the interpolation between the levels */
944   for (level=0; level<fine_level; level++) {
945     level1 = level + 1;
946     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
947     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
948     if (level > 0) {
949       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
950     }
951     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
952   }
953   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
954   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
955 
956   /* put coordinate info in levels */
957   if (pc_ml->dim) {
958     PetscInt i,j,dim = pc_ml->dim;
959     PetscInt bs, nloc;
960     PC subpc;
961     PetscReal *array;
962 
963     level = fine_level;
964     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
965       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Amat[mllevel].to->Grid->Grid;
966       MPI_Comm comm = ((PetscObject)gridctx[level].A)->comm;
967 
968       ierr = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
969       ierr = MatGetLocalSize (gridctx[level].A, PETSC_NULL, &nloc);CHKERRQ(ierr);
970       nloc /= bs; /* number of local nodes */
971 
972       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
973       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
974       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
975       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
976       for (j = 0; j < nloc; j++) {
977         for (i = 0; i < dim; i++) {
978           switch (i) {
979           case 0: array[dim * j + i] = grid_info->x[j]; break;
980           case 1: array[dim * j + i] = grid_info->y[j]; break;
981           case 2: array[dim * j + i] = grid_info->z[j]; break;
982           default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
983           }
984         }
985       }
986 
987       /* passing coordinates to smoothers/coarse solver, should they need them */
988       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
989       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
990       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
991       level--;
992     }
993   }
994 
995   /* setupcalled is set to 0 so that MG is setup from scratch */
996   pc->setupcalled = 0;
997   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
998   PetscFunctionReturn(0);
999 }
1000 
1001 /* -------------------------------------------------------------------------- */
1002 /*
1003    PCDestroy_ML - Destroys the private context for the ML preconditioner
1004    that was created with PCCreate_ML().
1005 
1006    Input Parameter:
1007 .  pc - the preconditioner context
1008 
1009    Application Interface Routine: PCDestroy()
1010 */
1011 #undef __FUNCT__
1012 #define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml= (PC_ML*)mg->innerctx;   /* ML-specific state is stored inside the generic MG context */

  PetscFunctionBegin;
  /* Tear down everything PCSetUp_ML built (ML hierarchy, level matrices/vectors) before freeing the contexts */
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  /* Free the ML context first: PCDestroy_MG() below frees the enclosing PC_MG, which owns the innerctx pointer */
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  /* Remove the PCSetCoordinates() implementation composed onto pc in PCCreate_ML() */
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","",PETSC_NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1026 
1027 #undef __FUNCT__
1028 #define __FUNCT__ "PCSetFromOptions_ML"
1029 PetscErrorCode PCSetFromOptions_ML(PC pc)
1030 {
1031   PetscErrorCode  ierr;
1032   PetscInt        indx,PrintLevel,partindx;
1033   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1034   const char      *part[] = {"Zoltan","ParMETIS"};
1035 #if defined(HAVE_ML_ZOLTAN)
1036   PetscInt zidx;
1037   const char      *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1038 #endif
1039   PC_MG           *mg = (PC_MG*)pc->data;
1040   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
1041   PetscMPIInt     size;
1042   MPI_Comm        comm = ((PetscObject)pc)->comm;
1043 
1044   PetscFunctionBegin;
1045   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1046   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
1047   PrintLevel    = 0;
1048   indx          = 0;
1049   partindx      = 0;
1050   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
1051   ML_Set_PrintLevel(PrintLevel);
1052   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
1053   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
1054   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
1055   pc_ml->CoarsenScheme = indx;
1056   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
1057   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
1058   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
1059   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
1060   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
1061   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr);
1062   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
1063   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
1064   /*
1065     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1066     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1067 
1068     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1069     combination of options and ML's exit(1) explanations don't help matters.
1070   */
1071   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1072   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1073   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
1074   if (pc_ml->EnergyMinimization) {
1075     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
1076   }
1077   if (pc_ml->EnergyMinimization == 2) {
1078     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1079     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
1080   }
1081   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1082   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1083   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
1084   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1085   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1086   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
1087   /*
1088     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1089     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1090     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1091     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1092     this context, but ML doesn't provide a way to find out which ones.
1093    */
1094   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
1095   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,PETSC_NULL);CHKERRQ(ierr);
1096   if (pc_ml->Repartition) {
1097     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,PETSC_NULL);CHKERRQ(ierr);
1098     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,PETSC_NULL);CHKERRQ(ierr);
1099     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,PETSC_NULL);CHKERRQ(ierr);
1100 #if defined(HAVE_ML_ZOLTAN)
1101     partindx = 0;
1102     ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,PETSC_NULL);CHKERRQ(ierr);
1103     pc_ml->RepartitionType = partindx;
1104     if (!partindx) {
1105       PetscInt zindx = 0;
1106       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,PETSC_NULL);CHKERRQ(ierr);
1107       pc_ml->ZoltanScheme = zindx;
1108     }
1109 #else
1110     partindx = 1;
1111     ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,PETSC_NULL);CHKERRQ(ierr);
1112     if (!partindx) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1113 #endif
1114     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,PETSC_NULL);CHKERRQ(ierr);
1115     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,PETSC_NULL);CHKERRQ(ierr);
1116   }
1117   ierr = PetscOptionsTail();CHKERRQ(ierr);
1118   PetscFunctionReturn(0);
1119 }
1120 
1121 /* -------------------------------------------------------------------------- */
1122 /*
1123    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1124    and sets this as the private data within the generic preconditioning
1125    context, PC, that was created within PCCreate().
1126 
1127    Input Parameter:
1128 .  pc - the preconditioner context
1129 
1130    Application Interface Routine: PCCreate()
1131 */
1132 
1133 /*MC
1134      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
1135        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
1137        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1138 
1139    Options Database Key:
1140    Multigrid options(inherited)
1141 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
1142 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
1143 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
1144    -pc_mg_type <multiplicative>: (one of) additive multiplicative full kascade
1145    ML options:
1146 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1147 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1148 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1149 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1150 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1151 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1152 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1154 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1155 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1156 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1157 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1158 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1159 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
1160 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1161 
1162    Level: intermediate
1163 
1164   Concepts: multigrid
1165 
1166 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
1168            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1169            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1170            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1171 M*/
1172 
1173 EXTERN_C_BEGIN
1174 #undef __FUNCT__
1175 #define __FUNCT__ "PCCreate_ML"
1176 PetscErrorCode  PCCreate_ML(PC pc)
1177 {
1178   PetscErrorCode  ierr;
1179   PC_ML           *pc_ml;
1180   PC_MG           *mg;
1181 
1182   PetscFunctionBegin;
1183   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1184   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1185   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1186   /* Since PCMG tries to use DM assocated with PC must delete it */
1187   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1188   mg = (PC_MG*)pc->data;
1189   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
1190 
1191   /* create a supporting struct and attach it to pc */
1192   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
1193   mg->innerctx = pc_ml;
1194 
1195   pc_ml->ml_object     = 0;
1196   pc_ml->agg_object    = 0;
1197   pc_ml->gridctx       = 0;
1198   pc_ml->PetscMLdata   = 0;
1199   pc_ml->Nlevels       = -1;
1200   pc_ml->MaxNlevels    = 10;
1201   pc_ml->MaxCoarseSize = 1;
1202   pc_ml->CoarsenScheme = 1;
1203   pc_ml->Threshold     = 0.0;
1204   pc_ml->DampingFactor = 4.0/3.0;
1205   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1206   pc_ml->size          = 0;
1207   pc_ml->dim           = 0;
1208   pc_ml->nloc          = 0;
1209   pc_ml->coords        = 0;
1210   pc_ml->Repartition   = PETSC_FALSE;
1211   pc_ml->MaxMinRatio   = 1.3;
1212   pc_ml->MinPerProc    = 512;
1213   pc_ml->PutOnSingleProc = 5000;
1214   pc_ml->RepartitionType = 0;
1215   pc_ml->ZoltanScheme  = 0;
1216   pc_ml->Aux           = PETSC_FALSE;
1217   pc_ml->AuxThreshold  = 0.0;
1218 
1219   /* allow for coordinates to be passed */
1220   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","PCSetCoordinates_ML",PCSetCoordinates_ML);CHKERRQ(ierr);
1221 
1222   /* overwrite the pointers of PCMG by the functions of PCML */
1223   pc->ops->setfromoptions = PCSetFromOptions_ML;
1224   pc->ops->setup          = PCSetUp_ML;
1225   pc->ops->reset          = PCReset_ML;
1226   pc->ops->destroy        = PCDestroy_ML;
1227   PetscFunctionReturn(0);
1228 }
1229 EXTERN_C_END
1230