xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision d20b5d95fb5ed4c01fb0da19d63ae2a035fb24e8)
1 
2 /*
3    Provides an interface to the ML smoothed Aggregation
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscpcmg.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 
12 #include <math.h>
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 EXTERN_C_END
20 
/* How the near-null space passed to ML's aggregation is chosen (resolved in PCSetUp_ML) */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
23 
/* The context (data structure) at each grid level */
typedef struct {
  Vec        x,b,r;           /* global vectors: solution, right-hand side, residual */
  Mat        A,P,R;           /* level operator, prolongation and restriction */
  KSP        ksp;             /* smoother (coarse solver on the coarsest level) */
} GridCtx;
30 
/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat          A;      /* Petsc matrix in aij format */
  Mat          Aloc;   /* local portion of A to be used by ML */
  Vec          x,y;    /* work vectors whose arrays are swapped in/out via VecPlaceArray() */
  ML_Operator  *mlmat; /* ML's view of the fine-grid operator */
  PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;
39 
/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y, work; /* work is created lazily in MatMultAdd_ML when y == w */
} Mat_MLShell;
46 
/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;          /* ML multigrid hierarchy */
  ML_Aggregate   *agg_object;         /* ML aggregation options/state */
  GridCtx        *gridctx;            /* per-level PETSc objects, allocated in PCSetUp_ML */
  FineGridCtx    *PetscMLdata;        /* fine-grid matrix wrapper handed to ML callbacks */
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal      Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool      SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool      reuse_interpolation; /* reuse interpolation when the nonzero pattern is unchanged */
  PCMLNullSpaceType nulltype;         /* how the near-null space is supplied to ML */
  PetscMPIInt    size; /* size of communicator for pc->pmat */
} PC_ML;
60 
61 #undef __FUNCT__
62 #define __FUNCT__ "PetscML_getrow"
63 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
64 {
65   PetscErrorCode ierr;
66   PetscInt       m,i,j,k=0,row,*aj;
67   PetscScalar    *aa;
68   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
69   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
70 
71 
72   ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
73   for (i = 0; i<N_requested_rows; i++) {
74     row   = requested_rows[i];
75     row_lengths[i] = a->ilen[row];
76     if (allocated_space < k+row_lengths[i]) return(0);
77     if ( (row >= 0) || (row <= (m-1)) ) {
78       aj = a->j + a->i[row];
79       aa = a->a + a->i[row];
80       for (j=0; j<row_lengths[i]; j++){
81         columns[k]  = aj[j];
82         values[k++] = aa[j];
83       }
84     }
85   }
86   return(1);
87 }
88 
#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
/*
   Communication callback handed to ML together with PetscML_getrow (see
   ML_Set_Amatrix_Getrow() in PCSetUp_ML).  On entry p[] holds the locally
   owned entries (length A->rmap->n); on exit the tail of p[] (up to
   ml->Aloc->cmap->n entries total) is filled with the off-process ghost
   values, obtained by reusing the scatter context of the MPIAIJ matrix.
   In serial there is nothing to communicate.
*/
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  /* scatter ghost values into a->lvec, using p as the backing store of ml->y */
  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  /* append the received ghost values after the locally owned block of p */
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
116 
117 #undef __FUNCT__
118 #define __FUNCT__ "PetscML_matvec"
119 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
120 {
121   PetscErrorCode ierr;
122   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
123   Mat            A=ml->A, Aloc=ml->Aloc;
124   PetscMPIInt    size;
125   PetscScalar    *pwork=ml->pwork;
126   PetscInt       i;
127 
128   PetscFunctionBegin;
129   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
130   if (size == 1){
131     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
132   } else {
133     for (i=0; i<in_length; i++) pwork[i] = p[i];
134     PetscML_comm(pwork,ml);
135     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
136   }
137   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
138   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
139   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
140   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
141   PetscFunctionReturn(0);
142 }
143 
144 #undef __FUNCT__
145 #define __FUNCT__ "MatMult_ML"
146 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
147 {
148   PetscErrorCode   ierr;
149   Mat_MLShell      *shell;
150   PetscScalar      *xarray,*yarray;
151   PetscInt         x_length,y_length;
152 
153   PetscFunctionBegin;
154   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
155   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
156   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
157   x_length = shell->mlmat->invec_leng;
158   y_length = shell->mlmat->outvec_leng;
159   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
160   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
161   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
162   PetscFunctionReturn(0);
163 }
164 
165 #undef __FUNCT__
166 #define __FUNCT__ "MatMultAdd_ML"
167 /* Computes y = w + A * x
168    It is possible that w == y, but not x == y
169 */
170 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
171 {
172   Mat_MLShell   *shell;
173   PetscScalar   *xarray,*yarray;
174   PetscInt       x_length,y_length;
175   PetscErrorCode ierr;
176 
177   PetscFunctionBegin;
178   ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr);
179   if (y == w) {
180     if (!shell->work) {
181       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
182     }
183     ierr = VecGetArray(x,           &xarray);CHKERRQ(ierr);
184     ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
185     x_length = shell->mlmat->invec_leng;
186     y_length = shell->mlmat->outvec_leng;
187     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
188     ierr = VecRestoreArray(x,           &xarray);CHKERRQ(ierr);
189     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
190     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
191   } else {
192     ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
193     ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
194     x_length = shell->mlmat->invec_leng;
195     y_length = shell->mlmat->outvec_leng;
196     ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray);
197     ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
198     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
199     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
200   }
201   PetscFunctionReturn(0);
202 }
203 
/* newtype is ignored because "ml" is not listed under Petsc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/*
   Builds the sequential matrix Aloc that ML works on from an MPIAIJ matrix:
   each process concatenates, row by row, its diagonal block (columns 0..an-1)
   and its off-diagonal block (columns shifted by an, i.e. ghost columns come
   after the local ones, matching the layout PetscML_comm() fills).
   MAT_INITIAL_MATRIX allocates fresh CSR arrays; MAT_REUSE_MATRIX only
   refreshes the numerical values in place (same nonzero pattern assumed).
*/
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar     *aa=a->a,*ba=b->a,*ca;
  PetscInt        am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt        *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* row pointer: each row holds its diagonal-block plus off-diagonal-block entries */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: ghost columns are numbered after the local ones */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a       = PETSC_TRUE;
    mat->free_ij      = PETSC_TRUE;

    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* same pattern as before: walk the existing CSR arrays and refresh values only */
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
273 
274 extern PetscErrorCode MatDestroy_Shell(Mat);
275 #undef __FUNCT__
276 #define __FUNCT__ "MatDestroy_ML"
277 static PetscErrorCode MatDestroy_ML(Mat A)
278 {
279   PetscErrorCode ierr;
280   Mat_MLShell    *shell;
281 
282   PetscFunctionBegin;
283   ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
284   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
285   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
286   ierr = PetscFree(shell);CHKERRQ(ierr);
287   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
288   ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
289   PetscFunctionReturn(0);
290 }
291 
292 #undef __FUNCT__
293 #define __FUNCT__ "MatWrapML_SeqAIJ"
294 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
295 {
296   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
297   PetscErrorCode        ierr;
298   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
299   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
300   PetscScalar           *ml_vals=matdata->values,*aa;
301 
302   PetscFunctionBegin;
303   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
304   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
305     if (reuse){
306       Mat_SeqAIJ  *aij= (Mat_SeqAIJ*)(*newmat)->data;
307       aij->i = ml_rowptr;
308       aij->j = ml_cols;
309       aij->a = ml_vals;
310     } else {
311       /* sort ml_cols and ml_vals */
312       ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
313       for (i=0; i<m; i++) {
314         nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
315       }
316       aj = ml_cols; aa = ml_vals;
317       for (i=0; i<m; i++){
318         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
319         aj += nnz[i]; aa += nnz[i];
320       }
321       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
322       ierr = PetscFree(nnz);CHKERRQ(ierr);
323     }
324     PetscFunctionReturn(0);
325   }
326 
327   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
328   if (reuse) {
329     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
330   } else {
331     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
332     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
333     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
334 
335     ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
336     nz_max = 1;
337     for (i=0; i<m; i++) {
338       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
339       if (nnz[i] > nz_max) nz_max = nnz[i];
340     }
341     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
342   }
343   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
344   for (i=0; i<m; i++) {
345     PetscInt ncols;
346     k = 0;
347     /* diagonal entry */
348     aj[k] = i; aa[k++] = ml_vals[i];
349     /* off diagonal entries */
350     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
351       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
352     }
353     ncols = ml_cols[i+1] - ml_cols[i] + 1;
354     /* sort aj and aa */
355     ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr);
356     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
357   }
358   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
359   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
360 
361   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
362   ierr = PetscFree(nnz);CHKERRQ(ierr);
363   PetscFunctionReturn(0);
364 }
365 
366 #undef __FUNCT__
367 #define __FUNCT__ "MatWrapML_SHELL"
368 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
369 {
370   PetscErrorCode ierr;
371   PetscInt       m,n;
372   ML_Comm        *MLcomm;
373   Mat_MLShell    *shellctx;
374 
375   PetscFunctionBegin;
376   m = mlmat->outvec_leng;
377   n = mlmat->invec_leng;
378   if (!m || !n){
379     newmat = PETSC_NULL;
380     PetscFunctionReturn(0);
381   }
382 
383   if (reuse){
384     ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
385     shellctx->mlmat = mlmat;
386     PetscFunctionReturn(0);
387   }
388 
389   MLcomm = mlmat->comm;
390   ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
391   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
392   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
393   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
394   shellctx->A         = *newmat;
395   shellctx->mlmat     = mlmat;
396   shellctx->work      = PETSC_NULL;
397   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
398   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
399   ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
400   (*newmat)->ops->destroy = MatDestroy_ML;
401   PetscFunctionReturn(0);
402 }
403 
404 #undef __FUNCT__
405 #define __FUNCT__ "MatWrapML_MPIAIJ"
406 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
407 {
408   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
409   PetscInt              *ml_cols=matdata->columns,*aj;
410   PetscScalar           *ml_vals=matdata->values,*aa;
411   PetscErrorCode        ierr;
412   PetscInt              i,j,k,*gordering;
413   PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
414   Mat                   A;
415 
416   PetscFunctionBegin;
417   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
418   n = mlmat->invec_leng;
419   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
420 
421   if (reuse) {
422     A = *newmat;
423     for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
424   } else {
425     PetscInt *nnzA,*nnzB,*nnz;
426     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
427     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
428     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
429     ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
430 
431     nz_max = 0;
432     for (i=0; i<m; i++){
433       nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
434       if (nz_max < nnz[i]) nz_max = nnz[i];
435       nnzA[i] = 1; /* diag */
436       for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
437         if (ml_cols[j] < m) nnzA[i]++;
438       }
439       nnzB[i] = nnz[i] - nnzA[i];
440     }
441     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
442     ierr = PetscFree3(nnzA,nnzB,nnz);
443   }
444 
445   /* insert mat values -- remap row and column indices */
446   nz_max++;
447   ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
448   /* create global row numbering for a ML_Operator */
449   ML_build_global_numbering(mlmat,&gordering,"rows");
450   for (i=0; i<m; i++) {
451     PetscInt ncols;
452     row = gordering[i];
453     k = 0;
454     /* diagonal entry */
455     aj[k] = row; aa[k++] = ml_vals[i];
456     /* off diagonal entries */
457     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
458       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
459     }
460     ncols = ml_cols[i+1] - ml_cols[i] + 1;
461     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
462   }
463   ML_free(gordering);
464   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
465   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
466   *newmat = A;
467 
468   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
469   PetscFunctionReturn(0);
470 }
471 
472 /* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/*
   Releases everything PCSetUp_ML built: the ML hierarchy and aggregation
   object, the fine-grid wrapper (PetscMLdata), and the per-level PETSc
   matrices/vectors in gridctx.  Safe to call when setup never ran: the
   destroy routines tolerate NULL members, and gridctx/PetscMLdata are
   guarded.  Note gridctx[fine_level].A is pc->pmat and is not destroyed here.
*/
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode  ierr;
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt        level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* r lives on levels 1..fine_level, hence the level+1 below */
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
507 /* -------------------------------------------------------------------------- */
508 /*
509    PCSetUp_ML - Prepares for the use of the ML preconditioner
510                     by setting data structures and options.
511 
512    Input Parameter:
513 .  pc - the preconditioner context
514 
515    Application Interface Routine: PCSetUp()
516 
517    Notes:
518    The interface routine PCSetUp() is not usually called directly by
519    the user, but instead is called by PCApply() if necessary.
520 */
521 extern PetscErrorCode PCSetFromOptions_MG(PC);
522 extern PetscErrorCode PCReset_MG(PC);
523 
524 #undef __FUNCT__
525 #define __FUNCT__ "PCSetUp_ML"
526 PetscErrorCode PCSetUp_ML(PC pc)
527 {
528   PetscErrorCode  ierr;
529   PetscMPIInt     size;
530   FineGridCtx     *PetscMLdata;
531   ML              *ml_object;
532   ML_Aggregate    *agg_object;
533   ML_Operator     *mlmat;
534   PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
535   Mat             A,Aloc;
536   GridCtx         *gridctx;
537   PC_MG           *mg = (PC_MG*)pc->data;
538   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
539   PetscBool       isSeq, isMPI;
540   KSP             smoother;
541   PC              subpc;
542   PetscInt        mesh_level, old_mesh_level;
543 
544 
545   PetscFunctionBegin;
546   A = pc->pmat;
547   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
548 
549   if (pc->setupcalled) {
550     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
551       /*
552        Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
553        multiple solves in which the matrix is not changing too quickly.
554        */
555       ml_object = pc_ml->ml_object;
556       gridctx = pc_ml->gridctx;
557       Nlevels = pc_ml->Nlevels;
558       fine_level = Nlevels - 1;
559       gridctx[fine_level].A = A;
560 
561       ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
562       ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
563       if (isMPI){
564         ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
565       } else if (isSeq) {
566         Aloc = A;
567         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
568       } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
569 
570       ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
571       PetscMLdata = pc_ml->PetscMLdata;
572       ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
573       PetscMLdata->A    = A;
574       PetscMLdata->Aloc = Aloc;
575       ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
576       ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
577 
578       mesh_level = ml_object->ML_finest_level;
579       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
580         old_mesh_level = mesh_level;
581         mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
582 
583         /* clean and regenerate A */
584         mlmat = &(ml_object->Amat[mesh_level]);
585         ML_Operator_Clean(mlmat);
586         ML_Operator_Init(mlmat,ml_object->comm);
587         ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
588       }
589 
590       level = fine_level - 1;
591       if (size == 1) { /* convert ML P, R and A into seqaij format */
592         for (mllevel=1; mllevel<Nlevels; mllevel++){
593           mlmat = &(ml_object->Amat[mllevel]);
594           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
595           level--;
596         }
597       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
598         for (mllevel=1; mllevel<Nlevels; mllevel++){
599           mlmat  = &(ml_object->Amat[mllevel]);
600           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
601           level--;
602         }
603       }
604 
605       for (level=0; level<fine_level; level++) {
606         if (level > 0){
607           ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
608         }
609         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
610       }
611       ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
612       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
613 
614       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
615       PetscFunctionReturn(0);
616     } else {
617       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
618       ierr = PCReset_ML(pc);CHKERRQ(ierr);
619       ierr = PCReset_MG(pc);CHKERRQ(ierr);
620     }
621   }
622 
623   /* setup special features of PCML */
624   /*--------------------------------*/
625   /* covert A to Aloc to be used by ML at fine grid */
626   pc_ml->size = size;
627   ierr = PetscTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
628   ierr = PetscTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
629   if (isMPI){
630     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
631   } else if (isSeq) {
632     Aloc = A;
633     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
634   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
635 
636   /* create and initialize struct 'PetscMLdata' */
637   ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
638   pc_ml->PetscMLdata = PetscMLdata;
639   ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
640 
641   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
642   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
643   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
644 
645   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
646   ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
647   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
648   PetscMLdata->A    = A;
649   PetscMLdata->Aloc = Aloc;
650 
651   /* create ML discretization matrix at fine grid */
652   /* ML requires input of fine-grid matrix. It determines nlevels. */
653   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
654   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
655   ML_Create(&ml_object,pc_ml->MaxNlevels);
656   ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
657   pc_ml->ml_object = ml_object;
658   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
659   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
660   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
661 
662   ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);
663 
664   /* aggregation */
665   ML_Aggregate_Create(&agg_object);
666   pc_ml->agg_object = agg_object;
667 
668   {
669     MatNullSpace mnull;
670     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
671     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
672       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
673       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
674       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
675     }
676     switch (pc_ml->nulltype) {
677     case PCML_NULLSPACE_USER: {
678       PetscScalar *nullvec;
679       const PetscScalar *v;
680       PetscBool has_const;
681       PetscInt i,j,mlocal,nvec,M;
682       const Vec *vecs;
683       if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
684       ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
685       ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
686       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
687       ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr);
688       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
689       for (i=0; i<nvec; i++) {
690         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
691         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
692         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
693       }
694       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
695       ierr = PetscFree(nullvec);CHKERRQ(ierr);
696     } break;
697     case PCML_NULLSPACE_BLOCK:
698       ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
699       break;
700     case PCML_NULLSPACE_SCALAR:
701       break;
702     default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
703     }
704   }
705   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
706   /* set options */
707   switch (pc_ml->CoarsenScheme) {
708   case 1:
709     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
710   case 2:
711     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
712   case 3:
713     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
714   }
715   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
716   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
717   if (pc_ml->SpectralNormScheme_Anorm){
718     ML_Set_SpectralNormScheme_Anorm(ml_object);
719   }
720   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
721   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
722   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
723   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
724   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
725   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
726 
727   if (pc_ml->OldHierarchy) {
728     Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
729   } else {
730     Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
731   }
732   if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels);
733   pc_ml->Nlevels = Nlevels;
734   fine_level = Nlevels - 1;
735 
736   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
737   /* set default smoothers */
738   for (level=1; level<=fine_level; level++){
739     if (size == 1){
740       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
741       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
742       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
743       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
744     } else {
745       ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
746       ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
747       ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
748       ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
749     }
750   }
751   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
752 
753   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
754   pc_ml->gridctx = gridctx;
755 
756   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
757      Level 0 is the finest grid for ML, but coarsest for PETSc! */
758   gridctx[fine_level].A = A;
759 
760   level = fine_level - 1;
761   if (size == 1){ /* convert ML P, R and A into seqaij format */
762     for (mllevel=1; mllevel<Nlevels; mllevel++){
763       mlmat = &(ml_object->Pmat[mllevel]);
764       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
765       mlmat = &(ml_object->Rmat[mllevel-1]);
766       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
767 
768       mlmat = &(ml_object->Amat[mllevel]);
769       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
770       level--;
771     }
772   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
773     for (mllevel=1; mllevel<Nlevels; mllevel++){
774       mlmat  = &(ml_object->Pmat[mllevel]);
775       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
776       mlmat  = &(ml_object->Rmat[mllevel-1]);
777       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
778 
779       mlmat  = &(ml_object->Amat[mllevel]);
780       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
781       level--;
782     }
783   }
784 
785   /* create vectors and ksp at all levels */
786   for (level=0; level<fine_level; level++){
787     level1 = level + 1;
788     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
789     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
790     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
791     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
792 
793     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
794     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
795     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
796     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
797 
798     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
799     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
800     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
801     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
802 
803     if (level == 0){
804       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
805     } else {
806       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
807     }
808   }
809   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
810 
811   /* create coarse level and the interpolation between the levels */
812   for (level=0; level<fine_level; level++){
813     level1 = level + 1;
814     ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
815     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
816     if (level > 0){
817       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
818     }
819     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
820   }
821   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
822   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
823 
824   /* setupcalled is set to 0 so that MG is setup from scratch */
825   pc->setupcalled = 0;
826   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
827   PetscFunctionReturn(0);
828 }
829 
830 /* -------------------------------------------------------------------------- */
831 /*
832    PCDestroy_ML - Destroys the private context for the ML preconditioner
833    that was created with PCCreate_ML().
834 
835    Input Parameter:
836 .  pc - the preconditioner context
837 
838    Application Interface Routine: PCDestroy()
839 */
840 #undef __FUNCT__
841 #define __FUNCT__ "PCDestroy_ML"
842 PetscErrorCode PCDestroy_ML(PC pc)
843 {
844   PetscErrorCode  ierr;
845   PC_MG           *mg = (PC_MG*)pc->data;
846   PC_ML           *pc_ml= (PC_ML*)mg->innerctx;
847 
848   PetscFunctionBegin;
849   ierr = PCReset_ML(pc);CHKERRQ(ierr);
850   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
851   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
852   PetscFunctionReturn(0);
853 }
854 
855 #undef __FUNCT__
856 #define __FUNCT__ "PCSetFromOptions_ML"
857 PetscErrorCode PCSetFromOptions_ML(PC pc)
858 {
859   PetscErrorCode  ierr;
860   PetscInt        indx,PrintLevel;
861   const char      *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
862   PC_MG           *mg = (PC_MG*)pc->data;
863   PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
864   PetscMPIInt     size;
865   MPI_Comm        comm = ((PetscObject)pc)->comm;
866 
867   PetscFunctionBegin;
868   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
869   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
870   PrintLevel    = 0;
871   indx          = 0;
872   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
873   ML_Set_PrintLevel(PrintLevel);
874   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
875   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
876   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
877   pc_ml->CoarsenScheme = indx;
878   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
879   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
880   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
881   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
882   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
883   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr);
884   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
885   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
886   /*
887     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
888     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
889 
890     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
891     combination of options and ML's exit(1) explanations don't help matters.
892   */
893   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
894   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
895   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
896   if (pc_ml->EnergyMinimization) {
897     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
898   }
899   if (pc_ml->EnergyMinimization == 2) {
900     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
901     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
902   }
903   /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */
904   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
905   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
906   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
907   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
908   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
909   /*
910     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
911     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
912     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
913     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
914     this context, but ML doesn't provide a way to find out which ones.
915    */
916   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
917   ierr = PetscOptionsTail();CHKERRQ(ierr);
918   PetscFunctionReturn(0);
919 }
920 
921 /* -------------------------------------------------------------------------- */
922 /*
923    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
924    and sets this as the private data within the generic preconditioning
925    context, PC, that was created within PCCreate().
926 
927    Input Parameter:
928 .  pc - the preconditioner context
929 
930    Application Interface Routine: PCCreate()
931 */
932 
933 /*MC
934      PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide
935        fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
937        and the restriction/interpolation operators wrapped as PETSc shell matrices.
938 
939    Options Database Key:
940    Multigrid options(inherited)
941 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
942 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
943 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
945    ML options:
946 .  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
947 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
948 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
949 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
950 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
951 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
952 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
953 
954    Level: intermediate
955 
956   Concepts: multigrid
957 
958 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
960            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
961            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
962            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
963 M*/
964 
965 EXTERN_C_BEGIN
966 #undef __FUNCT__
967 #define __FUNCT__ "PCCreate_ML"
968 PetscErrorCode  PCCreate_ML(PC pc)
969 {
970   PetscErrorCode  ierr;
971   PC_ML           *pc_ml;
972   PC_MG           *mg;
973 
974   PetscFunctionBegin;
975   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
976   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
977   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
978   /* Since PCMG tries to use DM assocated with PC must delete it */
979   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
980   mg = (PC_MG*)pc->data;
981   mg->galerkin = 2;             /* Use Galerkin, but it is computed externally */
982 
983   /* create a supporting struct and attach it to pc */
984   ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
985   mg->innerctx = pc_ml;
986 
987   pc_ml->ml_object     = 0;
988   pc_ml->agg_object    = 0;
989   pc_ml->gridctx       = 0;
990   pc_ml->PetscMLdata   = 0;
991   pc_ml->Nlevels       = -1;
992   pc_ml->MaxNlevels    = 10;
993   pc_ml->MaxCoarseSize = 1;
994   pc_ml->CoarsenScheme = 1;
995   pc_ml->Threshold     = 0.0;
996   pc_ml->DampingFactor = 4.0/3.0;
997   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
998   pc_ml->size          = 0;
999 
1000   /* overwrite the pointers of PCMG by the functions of PCML */
1001   pc->ops->setfromoptions = PCSetFromOptions_ML;
1002   pc->ops->setup          = PCSetUp_ML;
1003   pc->ops->reset          = PCReset_ML;
1004   pc->ops->destroy        = PCDestroy_ML;
1005   PetscFunctionReturn(0);
1006 }
1007 EXTERN_C_END
1008