xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 047240e14af00aad1ef65e96f6fface8924f7f7e)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
/* How the near-null space passed to ML's aggregation is chosen:
   AUTO resolves (in PCSetUp_ML) to USER when a near-null space is attached to
   the matrix, BLOCK when the block size > 1, and SCALAR otherwise. */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors */
  Mat A,P,R;                  /* level operator, prolongation and restriction */
  KSP ksp;                    /* smoother (coarse-grid solver on the coarsest level) */
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;       /* Petsc matrix in aij format */
  Mat         Aloc;    /* local portion of A to be used by ML */
  Vec         x,y;     /* work vectors wrapped around ML's raw arrays via VecPlaceArray() */
  ML_Operator *mlmat;
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;        /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;   /* ML matrix associated with A */
  Vec         y, work;  /* y: result vector; work: lazily created by MatMultAdd_ML when y == w */
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;          /* ML hierarchy built in PCSetUp_ML */
  ML_Aggregate      *agg_object;         /* ML aggregation context */
  GridCtx           *gridctx;            /* one entry per level, finest last */
  FineGridCtx       *PetscMLdata;        /* fine-grid matrix wrapper handed to ML */
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation; /* reuse aggregates/interpolation on SAME_NONZERO_PATTERN resetup */
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc; /* number of local coordinate nodes (matrix blocks) */
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
65 
66 #undef __FUNCT__
67 #define __FUNCT__ "PetscML_getrow"
68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
69 {
70   PetscErrorCode ierr;
71   PetscInt       m,i,j,k=0,row,*aj;
72   PetscScalar    *aa;
73   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
74   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
75 
76   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
77   for (i = 0; i<N_requested_rows; i++) {
78     row            = requested_rows[i];
79     row_lengths[i] = a->ilen[row];
80     if (allocated_space < k+row_lengths[i]) return(0);
81     if ((row >= 0) || (row <= (m-1))) {
82       aj = a->j + a->i[row];
83       aa = a->a + a->i[row];
84       for (j=0; j<row_lengths[i]; j++) {
85         columns[k]  = aj[j];
86         values[k++] = aa[j];
87       }
88     }
89   }
90   return(1);
91 }
92 
93 #undef __FUNCT__
94 #define __FUNCT__ "PetscML_comm"
95 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
96 {
97   PetscErrorCode ierr;
98   FineGridCtx    *ml = (FineGridCtx*)ML_data;
99   Mat            A   = ml->A;
100   Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
101   PetscMPIInt    size;
102   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
103   PetscScalar    *array;
104 
105   PetscFunctionBegin;
106   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
107   if (size == 1) return 0;
108 
109   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
110   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
111   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
112   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
113   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
114   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
115   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
116   PetscFunctionReturn(0);
117 }
118 
119 #undef __FUNCT__
120 #define __FUNCT__ "PetscML_matvec"
121 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
122 {
123   PetscErrorCode ierr;
124   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
125   Mat            A   = ml->A, Aloc=ml->Aloc;
126   PetscMPIInt    size;
127   PetscScalar    *pwork=ml->pwork;
128   PetscInt       i;
129 
130   PetscFunctionBegin;
131   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
132   if (size == 1) {
133     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
134   } else {
135     for (i=0; i<in_length; i++) pwork[i] = p[i];
136     PetscML_comm(pwork,ml);
137     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
138   }
139   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
140   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
141   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
142   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
143   PetscFunctionReturn(0);
144 }
145 
146 #undef __FUNCT__
147 #define __FUNCT__ "MatMult_ML"
148 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
149 {
150   PetscErrorCode ierr;
151   Mat_MLShell    *shell;
152   PetscScalar    *xarray,*yarray;
153   PetscInt       x_length,y_length;
154 
155   PetscFunctionBegin;
156   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
157   ierr     = VecGetArray(x,&xarray);CHKERRQ(ierr);
158   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
159   x_length = shell->mlmat->invec_leng;
160   y_length = shell->mlmat->outvec_leng;
161   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
162   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
163   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
164   PetscFunctionReturn(0);
165 }
166 
167 #undef __FUNCT__
168 #define __FUNCT__ "MatMultAdd_ML"
169 /* Computes y = w + A * x
170    It is possible that w == y, but not x == y
171 */
172 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
173 {
174   Mat_MLShell    *shell;
175   PetscScalar    *xarray,*yarray;
176   PetscInt       x_length,y_length;
177   PetscErrorCode ierr;
178 
179   PetscFunctionBegin;
180   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
181   if (y == w) {
182     if (!shell->work) {
183       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
184     }
185     ierr     = VecGetArray(x,           &xarray);CHKERRQ(ierr);
186     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
187     x_length = shell->mlmat->invec_leng;
188     y_length = shell->mlmat->outvec_leng;
189     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
190     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
191     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
192     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
193   } else {
194     ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
195     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
196     x_length = shell->mlmat->invec_leng;
197     y_length = shell->mlmat->outvec_leng;
198     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
199     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
200     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
201     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
202   }
203   PetscFunctionReturn(0);
204 }
205 
/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
  Merge the diagonal (A) and off-diagonal (B) parts of a square MPIAIJ matrix
  into a single sequential AIJ matrix Aloc of size am x (an + B->cmap->n),
  the form ML expects for the local fine-grid matrix.  Off-diagonal columns
  are shifted by an so they follow the local columns.  With MAT_REUSE_MATRIX
  only the numerical values are refreshed (same nonzero pattern assumed).
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row pointers of the merged matrix: row i holds all of A's and B's entries */
    ierr  = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;   /* shift ghost columns past the local block */
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* same pattern: stream the new values of A then B into each row */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
271 
272 extern PetscErrorCode MatDestroy_Shell(Mat);
273 #undef __FUNCT__
274 #define __FUNCT__ "MatDestroy_ML"
275 static PetscErrorCode MatDestroy_ML(Mat A)
276 {
277   PetscErrorCode ierr;
278   Mat_MLShell    *shell;
279 
280   PetscFunctionBegin;
281   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
282   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
283   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
284   ierr = PetscFree(shell);CHKERRQ(ierr);
285   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
286   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNCT__
291 #define __FUNCT__ "MatWrapML_SeqAIJ"
292 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
293 {
294   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
295   PetscErrorCode        ierr;
296   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
297   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
298   PetscScalar           *ml_vals=matdata->values,*aa;
299 
300   PetscFunctionBegin;
301   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
302   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
303     if (reuse) {
304       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
305       aij->i = ml_rowptr;
306       aij->j = ml_cols;
307       aij->a = ml_vals;
308     } else {
309       /* sort ml_cols and ml_vals */
310       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
311       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
312       aj = ml_cols; aa = ml_vals;
313       for (i=0; i<m; i++) {
314         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
315         aj  += nnz[i]; aa += nnz[i];
316       }
317       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
318       ierr = PetscFree(nnz);CHKERRQ(ierr);
319     }
320     PetscFunctionReturn(0);
321   }
322 
323   nz_max = PetscMax(1,mlmat->max_nz_per_row);
324   ierr   = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
325   if (!reuse) {
326     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
327     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
328     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
329     /* keep track of block size for A matrices */
330     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
331 
332     ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
333     for (i=0; i<m; i++) {
334       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
335     }
336     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
337   }
338   for (i=0; i<m; i++) {
339     PetscInt ncols;
340 
341     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
342     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
343   }
344   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
345   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
346 
347   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
348   ierr = PetscFree(nnz);CHKERRQ(ierr);
349   PetscFunctionReturn(0);
350 }
351 
352 #undef __FUNCT__
353 #define __FUNCT__ "MatWrapML_SHELL"
354 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
355 {
356   PetscErrorCode ierr;
357   PetscInt       m,n;
358   ML_Comm        *MLcomm;
359   Mat_MLShell    *shellctx;
360 
361   PetscFunctionBegin;
362   m = mlmat->outvec_leng;
363   n = mlmat->invec_leng;
364 
365   if (reuse) {
366     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
367     shellctx->mlmat = mlmat;
368     PetscFunctionReturn(0);
369   }
370 
371   MLcomm = mlmat->comm;
372 
373   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
374   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
375   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
376   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
377 
378   shellctx->A         = *newmat;
379   shellctx->mlmat     = mlmat;
380   shellctx->work      = PETSC_NULL;
381 
382   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
383   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
384   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
385 
386   (*newmat)->ops->destroy = MatDestroy_ML;
387   PetscFunctionReturn(0);
388 }
389 
390 #undef __FUNCT__
391 #define __FUNCT__ "MatWrapML_MPIAIJ"
392 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
393 {
394   PetscInt       *aj;
395   PetscScalar    *aa;
396   PetscErrorCode ierr;
397   PetscInt       i,j,*gordering;
398   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
399   Mat            A;
400 
401   PetscFunctionBegin;
402   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
403   n = mlmat->invec_leng;
404   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
405 
406   nz_max = PetscMax(1,mlmat->max_nz_per_row);
407   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
408   if (reuse) A = *newmat;
409   else {
410     PetscInt *nnzA,*nnzB,*nnz;
411     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
412     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
413     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
414     /* keep track of block size for A matrices */
415     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
416     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
417 
418     for (i=0; i<m; i++) {
419       ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]);
420       nnzA[i] = 0;
421       for (j=0; j<nnz[i]; j++) {
422         if (aj[j] < m) nnzA[i]++;
423       }
424       nnzB[i] = nnz[i] - nnzA[i];
425     }
426     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
427     ierr = PetscFree3(nnzA,nnzB,nnz);
428   }
429   /* create global row numbering for a ML_Operator */
430   ML_build_global_numbering(mlmat,&gordering,"rows");
431   for (i=0; i<m; i++) {
432     PetscInt ncols;
433     row = gordering[i];
434 
435     ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols);
436     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
437     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
438   }
439   ML_free(gordering);
440   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
441   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
442   *newmat = A;
443 
444   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
445   PetscFunctionReturn(0);
446 }
447 
448 /* -------------------------------------------------------------------------- */
449 /*
450    PCSetCoordinates_ML
451 
452    Input Parameter:
453    .  pc - the preconditioner context
454 */
455 #undef __FUNCT__
456 #define __FUNCT__ "PCSetCoordinates_ML"
457 PETSC_EXTERN_C PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
458 {
459   PC_MG          *mg    = (PC_MG*)pc->data;
460   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
461   PetscErrorCode ierr;
462   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
463   Mat            Amat = pc->pmat;
464 
465   /* this function copied and modified from PCSetCoordinates_GEO -TGI */
466   PetscFunctionBegin;
467   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
468   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
469 
470   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
471   nloc = (Iend-my0)/bs;
472 
473   if (nloc!=a_nloc) SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
474   if ((Iend-my0)%bs!=0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);
475 
476   oldarrsz    = pc_ml->dim * pc_ml->nloc;
477   pc_ml->dim  = ndm;
478   pc_ml->nloc = a_nloc;
479   arrsz       = ndm * a_nloc;
480 
481   /* create data - syntactic sugar that should be refactored at some point */
482   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
483     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
484     ierr = PetscMalloc((arrsz)*sizeof(PetscReal), &pc_ml->coords);CHKERRQ(ierr);
485   }
486   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
487   /* copy data in - column oriented */
488   for (kk = 0; kk < nloc; kk++) {
489     for (ii = 0; ii < ndm; ii++) {
490       pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
491     }
492   }
493   PetscFunctionReturn(0);
494 }
495 
/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/*
  PCReset_ML - Tears down everything PCSetUp_ML built: ML's hierarchy and
  aggregation objects, the fine-grid wrapper context, per-level matrices and
  work vectors, and the stored coordinate array.
*/
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    /* NOTE(review): assumes ml_object is non-NULL whenever dim is set, i.e.
       coordinates were recorded together with a built hierarchy — confirm
       PCSetCoordinates_ML followed by destroy-without-setup cannot reach here */
    ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object);
  }
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  /* fine-grid wrapper context created in PCSetUp_ML */
  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  /* per-level objects; note level < fine_level — the fine-level A belongs to
     the user (pc->pmat), and r lives at level+1 */
  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);

  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  PetscFunctionReturn(0);
}
548 /* -------------------------------------------------------------------------- */
549 /*
550    PCSetUp_ML - Prepares for the use of the ML preconditioner
551                     by setting data structures and options.
552 
553    Input Parameter:
554 .  pc - the preconditioner context
555 
556    Application Interface Routine: PCSetUp()
557 
558    Notes:
559    The interface routine PCSetUp() is not usually called directly by
560    the user, but instead is called by PCApply() if necessary.
561 */
562 extern PetscErrorCode PCSetFromOptions_MG(PC);
563 extern PetscErrorCode PCReset_MG(PC);
564 
565 #undef __FUNCT__
566 #define __FUNCT__ "PCSetUp_ML"
567 PetscErrorCode PCSetUp_ML(PC pc)
568 {
569   PetscErrorCode ierr;
570   PetscMPIInt    size;
571   FineGridCtx    *PetscMLdata;
572   ML             *ml_object;
573   ML_Aggregate   *agg_object;
574   ML_Operator    *mlmat;
575   PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
576   Mat            A,Aloc;
577   GridCtx        *gridctx;
578   PC_MG          *mg    = (PC_MG*)pc->data;
579   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
580   PetscBool      isSeq, isMPI;
581   KSP            smoother;
582   PC             subpc;
583   PetscInt       mesh_level, old_mesh_level;
584 
585   PetscFunctionBegin;
586   A    = pc->pmat;
587   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
588 
589   if (pc->setupcalled) {
590     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
591       /*
592        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
593        multiple solves in which the matrix is not changing too quickly.
594        */
595       ml_object             = pc_ml->ml_object;
596       gridctx               = pc_ml->gridctx;
597       Nlevels               = pc_ml->Nlevels;
598       fine_level            = Nlevels - 1;
599       gridctx[fine_level].A = A;
600 
601       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
602       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
603       if (isMPI) {
604         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
605       } else if (isSeq) {
606         Aloc = A;
607         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
608       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
609 
610       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
611       PetscMLdata       = pc_ml->PetscMLdata;
612       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
613       PetscMLdata->A    = A;
614       PetscMLdata->Aloc = Aloc;
615       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
616       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
617 
618       mesh_level = ml_object->ML_finest_level;
619       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
620         old_mesh_level = mesh_level;
621         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
622 
623         /* clean and regenerate A */
624         mlmat = &(ml_object->Amat[mesh_level]);
625         ML_Operator_Clean(mlmat);
626         ML_Operator_Init(mlmat,ml_object->comm);
627         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
628       }
629 
630       level = fine_level - 1;
631       if (size == 1) { /* convert ML P, R and A into seqaij format */
632         for (mllevel=1; mllevel<Nlevels; mllevel++) {
633           mlmat = &(ml_object->Amat[mllevel]);
634           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
635           level--;
636         }
637       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
638         for (mllevel=1; mllevel<Nlevels; mllevel++) {
639           mlmat  = &(ml_object->Amat[mllevel]);
640           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
641           level--;
642         }
643       }
644 
645       for (level=0; level<fine_level; level++) {
646         if (level > 0) {
647           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
648         }
649         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
650       }
651       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
652       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
653 
654       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
655       PetscFunctionReturn(0);
656     } else {
657       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
658       ierr = PCReset_ML(pc);CHKERRQ(ierr);
659       ierr = PCReset_MG(pc);CHKERRQ(ierr);
660     }
661   }
662 
663   /* setup special features of PCML */
664   /*--------------------------------*/
665   /* covert A to Aloc to be used by ML at fine grid */
666   pc_ml->size = size;
667   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
668   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
669   if (isMPI) {
670     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
671   } else if (isSeq) {
672     Aloc = A;
673     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
674   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
675 
676   /* create and initialize struct 'PetscMLdata' */
677   ierr               = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
678   pc_ml->PetscMLdata = PetscMLdata;
679   ierr               = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
680 
681   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
682   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
683   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
684 
685   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
686   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
687   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
688   PetscMLdata->A    = A;
689   PetscMLdata->Aloc = Aloc;
690   if (pc_ml->dim) { /* create vecs around the coordinate data given */
691     PetscInt  i,j,dim=pc_ml->dim;
692     PetscInt  nloc = pc_ml->nloc,nlocghost;
693     PetscReal *ghostedcoords;
694 
695     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
696     nlocghost = Aloc->cmap->n / bs;
697     ierr      = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr);
698     for (i = 0; i < dim; i++) {
699       /* copy coordinate values into first component of pwork */
700       for (j = 0; j < nloc; j++) {
701         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
702       }
703       /* get the ghost values */
704       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
705       /* write into the vector */
706       for (j = 0; j < nlocghost; j++) {
707         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
708       }
709     }
710     /* replace the original coords with the ghosted coords, because these are
711      * what ML needs */
712     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
713     pc_ml->coords = ghostedcoords;
714   }
715 
716   /* create ML discretization matrix at fine grid */
717   /* ML requires input of fine-grid matrix. It determines nlevels. */
718   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
719   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
720   ML_Create(&ml_object,pc_ml->MaxNlevels);
721   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
722   pc_ml->ml_object = ml_object;
723   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
724   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
725   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
726 
727   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
728 
729   /* aggregation */
730   ML_Aggregate_Create(&agg_object);
731   pc_ml->agg_object = agg_object;
732 
733   {
734     MatNullSpace mnull;
735     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
736     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
737       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
738       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
739       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
740     }
741     switch (pc_ml->nulltype) {
742     case PCML_NULLSPACE_USER: {
743       PetscScalar       *nullvec;
744       const PetscScalar *v;
745       PetscBool         has_const;
746       PetscInt          i,j,mlocal,nvec,M;
747       const Vec         *vecs;
748 
749       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
750       ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
751       ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
752       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
753       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr);
754       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
755       for (i=0; i<nvec; i++) {
756         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
757         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
758         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
759       }
760       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
761       ierr = PetscFree(nullvec);CHKERRQ(ierr);
762     } break;
763     case PCML_NULLSPACE_BLOCK:
764       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
765       break;
766     case PCML_NULLSPACE_SCALAR:
767       break;
768     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
769     }
770   }
771   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
772   /* set options */
773   switch (pc_ml->CoarsenScheme) {
774   case 1:
775     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
776   case 2:
777     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
778   case 3:
779     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
780   }
781   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
782   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
783   if (pc_ml->SpectralNormScheme_Anorm) {
784     ML_Set_SpectralNormScheme_Anorm(ml_object);
785   }
786   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
787   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
788   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
789   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
790   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
791   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
792 
793   if (pc_ml->Aux) {
794     if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
795     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
796     ml_object->Amat[0].aux_data->enable    = 1;
797     ml_object->Amat[0].aux_data->max_level = 10;
798     ml_object->Amat[0].num_PDEs            = bs;
799   }
800 
801   if (pc_ml->dim) {
802     PetscInt               i,dim = pc_ml->dim;
803     ML_Aggregate_Viz_Stats *grid_info;
804     PetscInt               nlocghost;
805 
806     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
807     nlocghost = Aloc->cmap->n / bs;
808 
809     ML_Aggregate_VizAndStats_Setup(ml_object); /* create ml info for coords */
810     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
811     for (i = 0; i < dim; i++) {
812       /* set the finest level coordinates to point to the column-order array
813        * in pc_ml */
814       /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
815       switch (i) {
816       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
817       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
818       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
819       default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
820       }
821     }
822     grid_info->Ndim = dim;
823   }
824 
825   /* repartitioning */
826   if (pc_ml->Repartition) {
827     ML_Repartition_Activate (ml_object);
828     ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio);
829     ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc);
830     ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc);
831 #if 0                           /* Function not yet defined in ml-6.2 */
832     /* I'm not sure what compatibility issues might crop up if we partitioned
833      * on the finest level, so to be safe repartition starts on the next
834      * finest level (reflection default behavior in
835      * ml_MultiLevelPreconditioner) */
836     ML_Repartition_Set_StartLevel(ml_object,1);
837 #endif
838 
839     if (!pc_ml->RepartitionType) {
840       PetscInt i;
841 
842       if (!pc_ml->dim) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
843       ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN);
844       ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim);
845 
846       for (i = 0; i < ml_object->ML_num_levels; i++) {
847         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
848         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
849         /* defaults from ml_agg_info.c */
850         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
851         grid_info->zoltan_timers        = 0;
852         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
853       }
854     } else {
855       ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS);
856     }
857   }
858 
859   if (pc_ml->OldHierarchy) {
860     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
861   } else {
862     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
863   }
864   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
865   pc_ml->Nlevels = Nlevels;
866   fine_level     = Nlevels - 1;
867 
868   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
869   /* set default smoothers */
870   for (level=1; level<=fine_level; level++) {
871     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
872     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
873     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
874     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
875   }
876   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
877   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
878   ierr = PetscOptionsEnd();CHKERRQ(ierr);
879 
880   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
881 
882   pc_ml->gridctx = gridctx;
883 
884   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
885      Level 0 is the finest grid for ML, but coarsest for PETSc! */
886   gridctx[fine_level].A = A;
887 
888   level = fine_level - 1;
889   if (size == 1) { /* convert ML P, R and A into seqaij format */
890     for (mllevel=1; mllevel<Nlevels; mllevel++) {
891       mlmat = &(ml_object->Pmat[mllevel]);
892       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
893       mlmat = &(ml_object->Rmat[mllevel-1]);
894       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
895 
896       mlmat = &(ml_object->Amat[mllevel]);
897       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
898       level--;
899     }
900   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
901     for (mllevel=1; mllevel<Nlevels; mllevel++) {
902       mlmat  = &(ml_object->Pmat[mllevel]);
903       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
904       mlmat  = &(ml_object->Rmat[mllevel-1]);
905       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
906 
907       mlmat  = &(ml_object->Amat[mllevel]);
908       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
909       level--;
910     }
911   }
912 
913   /* create vectors and ksp at all levels */
914   for (level=0; level<fine_level; level++) {
915     level1 = level + 1;
916     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
917     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
918     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
919     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
920 
921     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
922     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
923     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
924     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
925 
926     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
927     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
928     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
929     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
930 
931     if (level == 0) {
932       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
933     } else {
934       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
935     }
936   }
937   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
938 
939   /* create coarse level and the interpolation between the levels */
940   for (level=0; level<fine_level; level++) {
941     level1 = level + 1;
942     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
943     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
944     if (level > 0) {
945       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
946     }
947     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
948   }
949   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
950   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
951 
952   /* put coordinate info in levels */
953   if (pc_ml->dim) {
954     PetscInt  i,j,dim = pc_ml->dim;
955     PetscInt  bs, nloc;
956     PC        subpc;
957     PetscReal *array;
958 
959     level = fine_level;
960     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
961       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
962       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
963 
964       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
965       ierr  = MatGetLocalSize (gridctx[level].A, PETSC_NULL, &nloc);CHKERRQ(ierr);
966       nloc /= bs; /* number of local nodes */
967 
968       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
969       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
970       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
971       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
972       for (j = 0; j < nloc; j++) {
973         for (i = 0; i < dim; i++) {
974           switch (i) {
975           case 0: array[dim * j + i] = grid_info->x[j]; break;
976           case 1: array[dim * j + i] = grid_info->y[j]; break;
977           case 2: array[dim * j + i] = grid_info->z[j]; break;
978           default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
979           }
980         }
981       }
982 
983       /* passing coordinates to smoothers/coarse solver, should they need them */
984       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
985       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
986       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
987       level--;
988     }
989   }
990 
991   /* setupcalled is set to 0 so that MG is setup from scratch */
992   pc->setupcalled = 0;
993   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
994   PetscFunctionReturn(0);
995 }
996 
997 /* -------------------------------------------------------------------------- */
998 /*
999    PCDestroy_ML - Destroys the private context for the ML preconditioner
1000    that was created with PCCreate_ML().
1001 
1002    Input Parameter:
1003 .  pc - the preconditioner context
1004 
1005    Application Interface Routine: PCDestroy()
1006 */
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg   = (PC_MG*)pc->data;     /* PCML is derived from PCMG; pc->data is the MG context */
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx; /* ML-specific context hangs off the MG context */

  PetscFunctionBegin;
  /* Order matters: release the ML hierarchy first, free the ML context, and only
     then let the base class destroy the MG infrastructure -- PCDestroy_MG() frees
     pc->data, so pc_ml must be fetched (above) and freed before that call. */
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  /* Remove the PCSetCoordinates_C method that PCCreate_ML() composed on the object */
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","",PETSC_NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1022 
1023 #undef __FUNCT__
1024 #define __FUNCT__ "PCSetFromOptions_ML"
1025 PetscErrorCode PCSetFromOptions_ML(PC pc)
1026 {
1027   PetscErrorCode ierr;
1028   PetscInt       indx,PrintLevel,partindx;
1029   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1030   const char     *part[]   = {"Zoltan","ParMETIS"};
1031 #if defined(HAVE_ML_ZOLTAN)
1032   PetscInt   zidx;
1033   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1034 #endif
1035   PC_MG       *mg    = (PC_MG*)pc->data;
1036   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1037   PetscMPIInt size;
1038   MPI_Comm    comm = ((PetscObject)pc)->comm;
1039 
1040   PetscFunctionBegin;
1041   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1042   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
1043 
1044   PrintLevel = 0;
1045   indx       = 0;
1046   partindx   = 0;
1047 
1048   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
1049   ML_Set_PrintLevel(PrintLevel);
1050   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
1051   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
1052   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
1053 
1054   pc_ml->CoarsenScheme = indx;
1055 
1056   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
1057   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
1058   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
1059   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
1060   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
1061   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr);
1062   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
1063   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
1064   /*
1065     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1066     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1067 
1068     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1069     combination of options and ML's exit(1) explanations don't help matters.
1070   */
1071   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1072   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1073   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
1074   if (pc_ml->EnergyMinimization) {
1075     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
1076   }
1077   if (pc_ml->EnergyMinimization == 2) {
1078     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1079     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
1080   }
1081   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
1082   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1083   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
1084   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1085   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1086   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
1087   /*
1088     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1089     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1090     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1091     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1092     this context, but ML doesn't provide a way to find out which ones.
1093    */
1094   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
1095   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,PETSC_NULL);CHKERRQ(ierr);
1096   if (pc_ml->Repartition) {
1097     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,PETSC_NULL);CHKERRQ(ierr);
1098     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,PETSC_NULL);CHKERRQ(ierr);
1099     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,PETSC_NULL);CHKERRQ(ierr);
1100 #if defined(HAVE_ML_ZOLTAN)
1101     partindx = 0;
1102     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,PETSC_NULL);CHKERRQ(ierr);
1103 
1104     pc_ml->RepartitionType = partindx;
1105     if (!partindx) {
1106       PetscInt zindx = 0;
1107 
1108       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,PETSC_NULL);CHKERRQ(ierr);
1109 
1110       pc_ml->ZoltanScheme = zindx;
1111     }
1112 #else
1113     partindx = 1;
1114     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,PETSC_NULL);CHKERRQ(ierr);
1115     if (!partindx) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1116 #endif
1117     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,PETSC_NULL);CHKERRQ(ierr);
1118     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,PETSC_NULL);CHKERRQ(ierr);
1119   }
1120   ierr = PetscOptionsTail();CHKERRQ(ierr);
1121   PetscFunctionReturn(0);
1122 }
1123 
1124 /* -------------------------------------------------------------------------- */
1125 /*
1126    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1127    and sets this as the private data within the generic preconditioning
1128    context, PC, that was created within PCCreate().
1129 
1130    Input Parameter:
1131 .  pc - the preconditioner context
1132 
1133    Application Interface Routine: PCCreate()
1134 */
1135 
1136 /*MC
1137      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
1138        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
1140        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1141 
1142    Options Database Key:
1143    Multigrid options(inherited)
1144 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
1145 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
1146 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
   -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1148    ML options:
1149 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1150 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1151 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1152 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1153 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1154 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1155 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1157 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1158 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1159 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1160 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1161 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1162 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
1163 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1164 
1165    Level: intermediate
1166 
1167   Concepts: multigrid
1168 
1169 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
1171            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1172            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1173            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1174 M*/
1175 
1176 EXTERN_C_BEGIN
1177 #undef __FUNCT__
1178 #define __FUNCT__ "PCCreate_ML"
1179 PetscErrorCode  PCCreate_ML(PC pc)
1180 {
1181   PetscErrorCode ierr;
1182   PC_ML          *pc_ml;
1183   PC_MG          *mg;
1184 
1185   PetscFunctionBegin;
1186   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1187   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1188   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1189   /* Since PCMG tries to use DM assocated with PC must delete it */
1190   ierr         = DMDestroy(&pc->dm);CHKERRQ(ierr);
1191   mg           = (PC_MG*)pc->data;
1192   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
1193 
1194   /* create a supporting struct and attach it to pc */
1195   ierr         = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
1196   mg->innerctx = pc_ml;
1197 
1198   pc_ml->ml_object                = 0;
1199   pc_ml->agg_object               = 0;
1200   pc_ml->gridctx                  = 0;
1201   pc_ml->PetscMLdata              = 0;
1202   pc_ml->Nlevels                  = -1;
1203   pc_ml->MaxNlevels               = 10;
1204   pc_ml->MaxCoarseSize            = 1;
1205   pc_ml->CoarsenScheme            = 1;
1206   pc_ml->Threshold                = 0.0;
1207   pc_ml->DampingFactor            = 4.0/3.0;
1208   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1209   pc_ml->size                     = 0;
1210   pc_ml->dim                      = 0;
1211   pc_ml->nloc                     = 0;
1212   pc_ml->coords                   = 0;
1213   pc_ml->Repartition              = PETSC_FALSE;
1214   pc_ml->MaxMinRatio              = 1.3;
1215   pc_ml->MinPerProc               = 512;
1216   pc_ml->PutOnSingleProc          = 5000;
1217   pc_ml->RepartitionType          = 0;
1218   pc_ml->ZoltanScheme             = 0;
1219   pc_ml->Aux                      = PETSC_FALSE;
1220   pc_ml->AuxThreshold             = 0.0;
1221 
1222   /* allow for coordinates to be passed */
1223   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","PCSetCoordinates_ML",PCSetCoordinates_ML);CHKERRQ(ierr);
1224 
1225   /* overwrite the pointers of PCMG by the functions of PCML */
1226   pc->ops->setfromoptions = PCSetFromOptions_ML;
1227   pc->ops->setup          = PCSetUp_ML;
1228   pc->ops->reset          = PCReset_ML;
1229   pc->ops->destroy        = PCDestroy_ML;
1230   PetscFunctionReturn(0);
1231 }
1232 EXTERN_C_END
1233