xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 089b283744364aef00a310a92368c00bc3aa30b8)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 EXTERN_C_END
20 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution (PCMGSetX), rhs (PCMGSetRhs), residual (PCMGSetR) */
  Mat        A,P,R;           /* level operator, prolongation to the next finer level, restriction from it */
  KSP        ksp;             /* smoother for this level (coarse solve on level 0) */
} GridCtx;
27 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format (pc->pmat; parallel MPIAIJ or serial SeqAIJ) */
  Mat          Aloc;   /* local portion of A to be used by ML (in serial Aloc aliases A) */
  Vec          x,y;    /* sequential work vectors placed over ML's raw double arrays in the callbacks */
  ML_Operator  *mlmat; /* ML operator built from A -- NOTE(review): not assigned in this file; confirm use */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;
36 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* y: created in MatWrapML_SHELL(); work: lazily created scratch for MatMultAdd_ML() when y aliases w */
} Mat_MLShell;
43 
/* Private context for the ML preconditioner; most scalar fields mirror the
   -pc_ml_* runtime options set in PCSetFromOptions_ML() and are fed to the
   ML aggregation object in PCSetUp_ML() */
typedef struct {
  ML             *ml_object;      /* ML hierarchy, created by ML_Create() in PCSetUp_ML() */
  ML_Aggregate   *agg_object;     /* ML aggregation context */
  GridCtx        *gridctx;        /* per-level data, Nlevels entries, allocated in PCSetUp_ML() */
  FineGridCtx    *PetscMLdata;    /* fine-grid interface handed to ML's getrow/comm/matvec callbacks */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool      SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
55 
56 #undef __FUNCT__
57 #define __FUNCT__ "PetscML_getrow"
58 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
59 {
60   PetscErrorCode ierr;
61   PetscInt       m,i,j,k=0,row,*aj;
62   PetscScalar    *aa;
63   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
64   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
65 
66 
67   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
68   for (i = 0; i<N_requested_rows; i++) {
69     row   = requested_rows[i];
70     row_lengths[i] = a->ilen[row];
71     if (allocated_space < k+row_lengths[i]) return(0);
72     if ( (row >= 0) || (row <= (m-1)) ) {
73       aj = a->j + a->i[row];
74       aa = a->a + a->i[row];
75       for (j=0; j<row_lengths[i]; j++){
76         columns[k]  = aj[j];
77         values[k++] = aa[j];
78       }
79     }
80   }
81   return(1);
82 }
83 
84 #undef __FUNCT__
85 #define __FUNCT__ "PetscML_comm"
86 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
87 {
88   PetscErrorCode ierr;
89   FineGridCtx    *ml=(FineGridCtx*)ML_data;
90   Mat            A=ml->A;
91   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
92   PetscMPIInt    size;
93   PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
94   PetscScalar    *array;
95 
96   PetscFunctionBegin;
97   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
98   if (size == 1) return 0;
99 
100   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
101   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
102   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
103   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
104   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
105   for (i=in_length; i<out_length; i++){
106     p[i] = array[i-in_length];
107   }
108   ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
109   PetscFunctionReturn(0);
110 }
111 
112 #undef __FUNCT__
113 #define __FUNCT__ "PetscML_matvec"
114 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
115 {
116   PetscErrorCode ierr;
117   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
118   Mat            A=ml->A, Aloc=ml->Aloc;
119   PetscMPIInt    size;
120   PetscScalar    *pwork=ml->pwork;
121   PetscInt       i;
122 
123   PetscFunctionBegin;
124   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
125   if (size == 1){
126     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
127   } else {
128     for (i=0; i<in_length; i++) pwork[i] = p[i];
129     PetscML_comm(pwork,ml);
130     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
131   }
132   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
133   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
134   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
136   PetscFunctionReturn(0);
137 }
138 
139 #undef __FUNCT__
140 #define __FUNCT__ "MatMult_ML"
141 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
142 {
143   PetscErrorCode   ierr;
144   Mat_MLShell      *shell;
145   PetscScalar      *xarray,*yarray;
146   PetscInt         x_length,y_length;
147 
148   PetscFunctionBegin;
149   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
150   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
151   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
152   x_length = shell->mlmat->invec_leng;
153   y_length = shell->mlmat->outvec_leng;
154   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
155   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
156   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
157   PetscFunctionReturn(0);
158 }
159 
160 #undef __FUNCT__
161 #define __FUNCT__ "MatMultAdd_ML"
162 /* Computes y = w + A * x
163    It is possible that w == y, but not x == y
164 */
165 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
166 {
167   Mat_MLShell   *shell;
168   PetscScalar   *xarray,*yarray;
169   PetscInt       x_length,y_length;
170   PetscErrorCode ierr;
171 
172   PetscFunctionBegin;
173   ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
174   if (y == w) {
175     if (!shell->work) {
176       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
177     }
178     ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
179     ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
180     x_length = shell->mlmat->invec_leng;
181     y_length = shell->mlmat->outvec_leng;
182     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
183     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
184     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
185     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
186   } else {
187     ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
188     ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
189     x_length = shell->mlmat->invec_leng;
190     y_length = shell->mlmat->outvec_leng;
191     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
192     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
193     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
194     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
195   }
196   PetscFunctionReturn(0);
197 }
198 
/* newtype is ignored because "ml" is not listed under Petsc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Merges the diagonal (A) and off-diagonal (B) blocks of a parallel MPIAIJ
   matrix into one sequential AIJ matrix *Aloc that ML can use locally.
   Off-diagonal column indices are shifted by an (the local column count) so
   they address the ghost columns that follow the local block; within each
   row the diagonal part comes first, then the off-diagonal part.

   With MAT_REUSE_MATRIX only the numerical values are recopied; the
   sparsity pattern from the MAT_INITIAL_MATRIX call is assumed unchanged.
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* row pointers of the merged matrix: row i holds both parts of row i */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: shift column indices past the local block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n; /* local columns + ghost columns */
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* same sparsity pattern as before: overwrite the values in place */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
268 
269 extern PetscErrorCode MatDestroy_Shell(Mat);
270 #undef __FUNCT__
271 #define __FUNCT__ "MatDestroy_ML"
272 static PetscErrorCode MatDestroy_ML(Mat A)
273 {
274   PetscErrorCode ierr;
275   Mat_MLShell    *shell;
276 
277   PetscFunctionBegin;
278   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
279   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
280   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
281   ierr = PetscFree(shell);CHKERRQ(ierr);
282   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
283   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
284   PetscFunctionReturn(0);
285 }
286 
287 #undef __FUNCT__
288 #define __FUNCT__ "MatWrapML_SeqAIJ"
289 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
290 {
291   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
292   PetscErrorCode        ierr;
293   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
294   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
295   PetscScalar           *ml_vals=matdata->values,*aa;
296 
297   PetscFunctionBegin;
298   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
299   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
300     if (reuse){
301       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
302       aij->i = ml_rowptr;
303       aij->j = ml_cols;
304       aij->a = ml_vals;
305     } else {
306       /* sort ml_cols and ml_vals */
307       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
308       for (i=0; i<m; i++) {
309         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
310       }
311       aj = ml_cols; aa = ml_vals;
312       for (i=0; i<m; i++){
313         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
314         aj += nnz[i]; aa += nnz[i];
315       }
316       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
317       ierr = PetscFree(nnz);CHKERRQ(ierr);
318     }
319     PetscFunctionReturn(0);
320   }
321 
322   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
323   ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
324   ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
325   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
326 
327   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
328   nz_max = 1;
329   for (i=0; i<m; i++) {
330     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
331     if (nnz[i] > nz_max) nz_max += nnz[i];
332   }
333 
334   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
335   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
336   for (i=0; i<m; i++){
337     k = 0;
338     /* diagonal entry */
339     aj[k] = i; aa[k++] = ml_vals[i];
340     /* off diagonal entries */
341     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
342       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
343     }
344     /* sort aj and aa */
345     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
346     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
347   }
348   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
349   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
350 
351   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
352   ierr = PetscFree(nnz);CHKERRQ(ierr);
353   PetscFunctionReturn(0);
354 }
355 
356 #undef __FUNCT__
357 #define __FUNCT__ "MatWrapML_SHELL"
358 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
359 {
360   PetscErrorCode ierr;
361   PetscInt       m,n;
362   ML_Comm        *MLcomm;
363   Mat_MLShell    *shellctx;
364 
365   PetscFunctionBegin;
366   m = mlmat->outvec_leng;
367   n = mlmat->invec_leng;
368   if (!m || !n){
369     newmat = PETSC_NULL;
370     PetscFunctionReturn(0);
371   }
372 
373   if (reuse){
374     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
375     shellctx->mlmat = mlmat;
376     PetscFunctionReturn(0);
377   }
378 
379   MLcomm = mlmat->comm;
380   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
381   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
382   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
383   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
384   shellctx->A         = *newmat;
385   shellctx->mlmat     = mlmat;
386   shellctx->work      = PETSC_NULL;
387   ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
388   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
389   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
390   (*newmat)->ops->destroy = MatDestroy_ML;
391   PetscFunctionReturn(0);
392 }
393 
394 #undef __FUNCT__
395 #define __FUNCT__ "MatWrapML_MPIAIJ"
396 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
397 {
398   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
399   PetscInt              *ml_cols=matdata->columns,*aj;
400   PetscScalar           *ml_vals=matdata->values,*aa;
401   PetscErrorCode        ierr;
402   PetscInt              i,j,k,*gordering;
403   PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
404   Mat                   A;
405 
406   PetscFunctionBegin;
407   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
408   n = mlmat->invec_leng;
409   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
410 
411   ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
412   ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
413   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
414   ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
415 
416   nz_max = 0;
417   for (i=0; i<m; i++){
418     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
419     if (nz_max < nnz[i]) nz_max = nnz[i];
420     nnzA[i] = 1; /* diag */
421     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
422       if (ml_cols[j] < m) nnzA[i]++;
423     }
424     nnzB[i] = nnz[i] - nnzA[i];
425   }
426   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
427 
428   /* insert mat values -- remap row and column indices */
429   nz_max++;
430   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
431   /* create global row numbering for a ML_Operator */
432   ML_build_global_numbering(mlmat,&gordering,"rows");
433   for (i=0; i<m; i++){
434     row = gordering[i];
435     k = 0;
436     /* diagonal entry */
437     aj[k] = row; aa[k++] = ml_vals[i];
438     /* off diagonal entries */
439     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
440       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
441     }
442     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
443   }
444   ML_free(gordering);
445   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
446   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
447   *newmat = A;
448 
449   ierr = PetscFree3(nnzA,nnzB,nnz);
450   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
451   PetscFunctionReturn(0);
452 }
453 
454 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/*
   PCReset_ML - Destroys all ML and PETSc objects built by PCSetUp_ML() so
   the preconditioner can be set up again from scratch (ML may change the
   size of vectors/matrices at any level, so nothing can be reused).

   Input Parameter:
.  pc - the preconditioner context
*/
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  /* destroy the ML-side objects before the PETSc wrappers that reference them */
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    /* Aloc is a separate matrix only in parallel; in serial it aliases pc->pmat */
    if (pc_ml->size > 1)      {ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* PCSetUp_ML created x,b on levels 0..fine_level-1 and r on levels
       1..fine_level, which is exactly what is destroyed here; the fine-level
       A is pc->pmat and is not destroyed */
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
489 /* -------------------------------------------------------------------------- */
490 /*
491    PCSetUp_ML - Prepares for the use of the ML preconditioner
492                     by setting data structures and options.
493 
494    Input Parameter:
495 .  pc - the preconditioner context
496 
497    Application Interface Routine: PCSetUp()
498 
499    Notes:
500    The interface routine PCSetUp() is not usually called directly by
501    the user, but instead is called by PCApply() if necessary.
502 */
503 extern PetscErrorCode PCSetFromOptions_MG(PC);
504 extern PetscErrorCode PCReset_MG(PC);
505 
506 #undef __FUNCT__
507 #define __FUNCT__ "PCSetUp_ML"
508 PetscErrorCode PCSetUp_ML(PC pc)
509 {
510   PetscErrorCode  ierr;
511   PetscMPIInt     size;
512   FineGridCtx     *PetscMLdata;
513   ML              *ml_object;
514   ML_Aggregate    *agg_object;
515   ML_Operator     *mlmat;
516   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
517   Mat             A,Aloc;
518   GridCtx         *gridctx;
519   PC_MG           *mg = (PC_MG*)pc->data;
520   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
521   PetscBool       isSeq, isMPI;
522   KSP             smoother;
523   PC              subpc;
524 
525   PetscFunctionBegin;
526   /* Since PCMG tries to use DM assocated with PC must delete it */
527   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
528 
529   if (pc->setupcalled){
530     /* since ML can change the size of vectors/matrices at any level we must destroy everything */
531     ierr = PCReset_ML(pc);CHKERRQ(ierr);
532     ierr = PCReset_MG(pc);CHKERRQ(ierr);
533   }
534 
535   /* setup special features of PCML */
536   /*--------------------------------*/
537   /* covert A to Aloc to be used by ML at fine grid */
538   A = pc->pmat;
539   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
540   pc_ml->size = size;
541   ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
542   ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
543   if (isMPI){
544     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
545   } else if (isSeq) {
546     Aloc = A;
547   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
548 
549   /* create and initialize struct 'PetscMLdata' */
550   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
551   pc_ml->PetscMLdata = PetscMLdata;
552   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
553 
554   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
555   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
556   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
557 
558   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
559   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
560   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
561   PetscMLdata->A    = A;
562   PetscMLdata->Aloc = Aloc;
563 
564   /* create ML discretization matrix at fine grid */
565   /* ML requires input of fine-grid matrix. It determines nlevels. */
566   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
567   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
568   ML_Create(&ml_object,pc_ml->MaxNlevels);
569   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
570   pc_ml->ml_object = ml_object;
571   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
572   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
573   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
574 
575   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
576 
577   /* aggregation */
578   ML_Aggregate_Create(&agg_object);
579   pc_ml->agg_object = agg_object;
580 
581   ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
582   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
583   /* set options */
584   switch (pc_ml->CoarsenScheme) {
585   case 1:
586     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
587   case 2:
588     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
589   case 3:
590     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
591   }
592   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
593   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
594   if (pc_ml->SpectralNormScheme_Anorm){
595     ML_Set_SpectralNormScheme_Anorm(ml_object);
596   }
597   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
598   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
599   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
600   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
601   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
602   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
603 
604   if (pc_ml->OldHierarchy) {
605     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
606   } else {
607     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
608   }
609   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
610   pc_ml->Nlevels = Nlevels;
611   fine_level = Nlevels - 1;
612 
613   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
614   /* set default smoothers */
615   for (level=1; level<=fine_level; level++){
616     if (size == 1){
617       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
618       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
619       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
620       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
621     } else {
622       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
623       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
624       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
625       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
626     }
627   }
628   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
629 
630   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
631   pc_ml->gridctx = gridctx;
632 
633   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
634      Level 0 is the finest grid for ML, but coarsest for PETSc! */
635   gridctx[fine_level].A = A;
636 
637   level = fine_level - 1;
638   if (size == 1){ /* convert ML P, R and A into seqaij format */
639     for (mllevel=1; mllevel<Nlevels; mllevel++){
640       mlmat = &(ml_object->Pmat[mllevel]);
641       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
642       mlmat = &(ml_object->Rmat[mllevel-1]);
643       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
644 
645       mlmat = &(ml_object->Amat[mllevel]);
646       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
647       level--;
648     }
649   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
650     for (mllevel=1; mllevel<Nlevels; mllevel++){
651       mlmat  = &(ml_object->Pmat[mllevel]);
652       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
653       mlmat  = &(ml_object->Rmat[mllevel-1]);
654       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
655 
656       mlmat  = &(ml_object->Amat[mllevel]);
657       ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
658       level--;
659     }
660   }
661 
662   /* create vectors and ksp at all levels */
663   for (level=0; level<fine_level; level++){
664     level1 = level + 1;
665     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
666     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
667     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
668     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
669 
670     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
671     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
672     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
673     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
674 
675     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
676     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
677     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
678     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
679 
680     if (level == 0){
681       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
682     } else {
683       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
684     }
685   }
686   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
687 
688   /* create coarse level and the interpolation between the levels */
689   for (level=0; level<fine_level; level++){
690     level1 = level + 1;
691     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
692     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
693     if (level > 0){
694       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
695     }
696     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
697   }
698   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
699   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
700 
701   /* setupcalled is set to 0 so that MG is setup from scratch */
702   pc->setupcalled = 0;
703   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
704   PetscFunctionReturn(0);
705 }
706 
707 /* -------------------------------------------------------------------------- */
708 /*
709    PCDestroy_ML - Destroys the private context for the ML preconditioner
710    that was created with PCCreate_ML().
711 
712    Input Parameter:
713 .  pc - the preconditioner context
714 
715    Application Interface Routine: PCDestroy()
716 */
717 #undef __FUNCT__
718 #define __FUNCT__ "PCDestroy_ML"
719 PetscErrorCode PCDestroy_ML(PC pc)
720 {
721   PetscErrorCode  ierr;
722   PC_MG           *mg = (PC_MG*)pc->data;
723   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
724 
725   PetscFunctionBegin;
726   ierr = PCReset_ML(pc);CHKERRQ(ierr);
727   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
728   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
729   PetscFunctionReturn(0);
730 }
731 
732 #undef __FUNCT__
733 #define __FUNCT__ "PCSetFromOptions_ML"
734 PetscErrorCode PCSetFromOptions_ML(PC pc)
735 {
736   PetscErrorCode  ierr;
737   PetscInt        indx,PrintLevel;
738   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
739   PC_MG           *mg = (PC_MG*)pc->data;
740   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
741   PetscMPIInt     size;
742   MPI_Comm        comm = ((PetscObject)pc)->comm;
743 
744   PetscFunctionBegin;
745   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
746   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
747   PrintLevel    = 0;
748   indx          = 0;
749   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
750   ML_Set_PrintLevel(PrintLevel);
751   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
752   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
753   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
754   pc_ml->CoarsenScheme = indx;
755   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
756   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
757   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
758   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
759   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
760   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
761   /*
762     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
763     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
764 
765     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
766     combination of options and ML's exit(1) explanations don't help matters.
767   */
768   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
769   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
770   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
771   if (pc_ml->EnergyMinimization) {
772     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
773   }
774   if (pc_ml->EnergyMinimization == 2) {
775     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
776     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
777   }
778   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
779   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
780   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
781   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
782   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
783   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
784   /*
785     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
786     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
787     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
788     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
789     this context, but ML doesn't provide a way to find out which ones.
790    */
791   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
792   ierr = PetscOptionsTail();CHKERRQ(ierr);
793   PetscFunctionReturn(0);
794 }
795 
796 /* -------------------------------------------------------------------------- */
797 /*
798    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
799    and sets this as the private data within the generic preconditioning
800    context, PC, that was created within PCCreate().
801 
802    Input Parameter:
803 .  pc - the preconditioner context
804 
805    Application Interface Routine: PCCreate()
806 */
807 
808 /*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       a fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.
813 
814    Options Database Key:
815    Multigrid options(inherited)
816 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
817 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
818 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
820    ML options:
821 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
822 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
823 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
824 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
825 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
826 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
827 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
828 
829    Level: intermediate
830 
831   Concepts: multigrid
832 
833 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
835            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
836            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
837            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
838 M*/
839 
840 EXTERN_C_BEGIN
841 #undef __FUNCT__
842 #define __FUNCT__ "PCCreate_ML"
843 PetscErrorCode  PCCreate_ML(PC pc)
844 {
845   PetscErrorCode  ierr;
846   PC_ML           *pc_ml;
847   PC_MG           *mg;
848 
849   PetscFunctionBegin;
850   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
851   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
852   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
853   /* Since PCMG tries to use DM assocated with PC must delete it */
854   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
855   mg = (PC_MG*)pc->data;
856   mg->galerkin = PETSC_TRUE;
857 
858   /* create a supporting struct and attach it to pc */
859   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
860   mg->innerctx = pc_ml;
861 
862   pc_ml->ml_object     = 0;
863   pc_ml->agg_object    = 0;
864   pc_ml->gridctx       = 0;
865   pc_ml->PetscMLdata   = 0;
866   pc_ml->Nlevels       = -1;
867   pc_ml->MaxNlevels    = 10;
868   pc_ml->MaxCoarseSize = 1;
869   pc_ml->CoarsenScheme = 1;
870   pc_ml->Threshold     = 0.0;
871   pc_ml->DampingFactor = 4.0/3.0;
872   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
873   pc_ml->size          = 0;
874 
875   /* overwrite the pointers of PCMG by the functions of PCML */
876   pc->ops->setfromoptions = PCSetFromOptions_ML;
877   pc->ops->setup          = PCSetUp_ML;
878   pc->ops->reset          = PCReset_ML;
879   pc->ops->destroy        = PCDestroy_ML;
880   PetscFunctionReturn(0);
881 }
882 EXTERN_C_END
883