xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 7c307921e16164bcabd288ffaae1e907d3e04a89)
1 #define PETSCKSP_DLL
2 
3 /*
4    Provides an interface to the ML 3.0 smoothed aggregation algebraic multigrid preconditioner
5 */
6 #include "src/ksp/pc/pcimpl.h"   /*I "petscpc.h" I*/
7 #include "src/ksp/pc/impls/mg/mgimpl.h"                    /*I "petscmg.h" I*/
8 #include "src/mat/impls/aij/seq/aij.h"
9 #include "src/mat/impls/aij/mpi/mpiaij.h"
10 
11 EXTERN_C_BEGIN
12 #include <math.h>
13 #include "ml_include.h"
14 EXTERN_C_END
15 
16 /* The context (data structure) at each grid level */
17 typedef struct {
18   Vec        x,b,r;           /* global vectors */
19   Mat        A,P,R;
20   KSP        ksp;
21 } GridCtx;
22 
23 /* The context used to pass the PETSc fine-grid matrix into ML */
24 typedef struct {
25   Mat          A,Aloc;
26   Vec          x,y;
27   ML_Operator  *mlmat;
28   PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
29 } FineGridCtx;
30 
31 /* The context that associates an ML matrix with a PETSc shell matrix */
32 typedef struct {
33   Mat          A;       /* PETSc shell matrix associated with mlmat */
34   ML_Operator  *mlmat;  /* ML matrix associated with A */
35   Vec          y;
36 } Mat_MLShell;
37 
38 /* Private context for the ML preconditioner */
39 typedef struct {
40   ML           *ml_object;
41   ML_Aggregate *agg_object;
42   GridCtx      *gridctx;
43   FineGridCtx  *PetscMLdata;
44   PetscInt     fine_level,MaxNlevels,MaxCoarseSize,CoarsenScheme;
45   PetscReal    Threshold,DampingFactor;
46   PetscTruth   SpectralNormScheme_Anorm;
47   PetscMPIInt  size;
48   PetscErrorCode (*PCSetUp)(PC);
49   PetscErrorCode (*PCDestroy)(PC);
50 } PC_ML;
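/* Note: PCSetUp and PCDestroy above save the original PCMG routines; PCSetUp_ML() and
   PCDestroy_ML() do the ML-specific work and then chain to them (see PCCreate_ML()). */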
51 extern int PetscML_getrow(void *ML_data,int N_requested_rows,int requested_rows[],
52             int allocated_space,int columns[],double values[],int row_lengths[]);
53 extern int PetscML_matvec(void *ML_data, int in_length, double p[], int out_length,double ap[]);
54 extern int PetscML_comm(double x[], void *ML_data);
55 extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
56 extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
57 extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,const MatType,MatReuse,Mat*);
58 extern PetscErrorCode MatDestroy_ML(Mat);
59 extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,Mat*);
60 extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
61 extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,Mat*);
62 
63 /* -------------------------------------------------------------------------- */
64 /*
65    PCSetUp_ML - Prepares for the use of the ML preconditioner
66                     by setting data structures and options.
67 
68    Input Parameter:
69 .  pc - the preconditioner context
70 
71    Application Interface Routine: PCSetUp()
72 
73    Notes:
74    The interface routine PCSetUp() is not usually called directly by
75    the user, but instead is called by PCApply() if necessary.
76 */
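/*
   Rough outline of PCSetUp_ML() below (descriptive only):
     1) in parallel, concatenate the diagonal and off-diagonal blocks of the fine-grid
        matrix into one sequential matrix via MatConvert_MPIAIJ_ML();
     2) hand that matrix to ML through the PetscML_getrow()/PetscML_comm()/PetscML_matvec()
        callbacks;
     3) let ML build the smoothed-aggregation hierarchy;
     4) wrap or copy the resulting ML operators as PETSc matrices and install them in the
        PCMG levels;
     5) call the original PCSetUp_MG() saved in pc_ml->PCSetUp.
*/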
77 extern PetscErrorCode PCSetFromOptions_MG(PC);
78 #undef __FUNCT__
79 #define __FUNCT__ "PCSetUp_ML"
80 PetscErrorCode PCSetUp_ML(PC pc)
81 {
82   PetscErrorCode       ierr;
83   PetscMPIInt          size;
84   FineGridCtx          *PetscMLdata;
85   ML                   *ml_object;
86   ML_Aggregate         *agg_object;
87   ML_Operator          *mlmat;
88   PetscInt             nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
89   Mat                  A,Aloc;
90   GridCtx              *gridctx;
91   PC_ML                *pc_ml=PETSC_NULL;
92   PetscObjectContainer container;
93 
94   PetscFunctionBegin;
95   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
96   if (container) {
97     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
98   } else {
99     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
100   }
101 
102   /* set up the special features of PCML */
103   /*--------------------------------*/
104   /* convert A to Aloc to be used by ML at the fine grid */
105   A = pc->pmat;
106   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
107   pc_ml->size = size;
108   if (size > 1){
109     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
110   } else {
111     Aloc = A;
112   }
113 
114   /* create and initialize struct 'PetscMLdata' */
115   ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
116   PetscMLdata->A    = A;
117   PetscMLdata->Aloc = Aloc;
118   ierr = PetscMalloc((Aloc->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
119   pc_ml->PetscMLdata = PetscMLdata;
120 
121   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
122   if (size == 1){
123     ierr = VecSetSizes(PetscMLdata->x,A->n,A->n);CHKERRQ(ierr);
124   } else {
125     ierr = VecSetSizes(PetscMLdata->x,Aloc->n,Aloc->n);CHKERRQ(ierr);
126   }
127   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
128 
129   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
130   ierr = VecSetSizes(PetscMLdata->y,A->m,PETSC_DECIDE);CHKERRQ(ierr);
131   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
132 
133   /* create ML discretization matrix at fine grid */
134   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
135   ML_Create(&ml_object,pc_ml->MaxNlevels);
136   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
137   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
138   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
139 
140   /* aggregation */
141   ML_Aggregate_Create(&agg_object);
142   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
143   /* set options */
144   switch (pc_ml->CoarsenScheme) {
145   case 1:
146     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
147   case 2:
148     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
149   case 3:
150     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
151   }
152   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
153   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
154   if (pc_ml->SpectralNormScheme_Anorm){
155     ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
156   }
157 
158   Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
159   if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
160   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
161   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
162   pc_ml->ml_object  = ml_object;
163   pc_ml->agg_object = agg_object;
164 
165   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
166   fine_level = Nlevels - 1;
167   pc_ml->gridctx = gridctx;
168   pc_ml->fine_level = fine_level;
169 
170   /* convert the ML operators at the coarsened grids into PETSc matrices (SeqAIJ in serial, MPIAIJ/shell in parallel).
171      Level 0 is the finest grid for ML, but the coarsest for PETSc! */
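  /* For example, with Nlevels = 3: ML level 0 <-> PETSc level 2 (fine), ML level 1 <->
     PETSc level 1, ML level 2 <-> PETSc level 0 (coarse); i.e. PETSc level = fine_level - mllevel. */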
172   gridctx[fine_level].A = A;
173   level = fine_level - 1;
174   if (size == 1){ /* convert ML P, R and A into seqaij format */
175     for (mllevel=1; mllevel<Nlevels; mllevel++){
176       mlmat  = &(ml_object->Pmat[mllevel]);
177       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].P);CHKERRQ(ierr);
178       mlmat  = &(ml_object->Amat[mllevel]);
179       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
180       mlmat  = &(ml_object->Rmat[mllevel-1]);
181       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].R);CHKERRQ(ierr);
182       level--;
183     }
184   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
185     for (mllevel=1; mllevel<Nlevels; mllevel++){
186       mlmat  = &(ml_object->Pmat[mllevel]);
187       ierr = MatWrapML_SHELL(mlmat,&gridctx[level].P);CHKERRQ(ierr);
188       mlmat  = &(ml_object->Rmat[mllevel-1]);
189       ierr = MatWrapML_SHELL(mlmat,&gridctx[level].R);CHKERRQ(ierr);
190       mlmat  = &(ml_object->Amat[mllevel]);
191       ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
192       level--;
193     }
194   }
195 
196   /* set up the work vectors, restriction/interpolation, and solvers on each level */
197   for (level=0; level<fine_level; level++){
198     ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr);
199     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->n,PETSC_DECIDE);CHKERRQ(ierr);
200     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
201     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
202 
203     ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr);
204     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr);
205     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
206     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
207 
208     level1 = level + 1;
209     ierr = VecCreate(gridctx[level1].A->comm,&gridctx[level1].r);CHKERRQ(ierr);
210     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->m,PETSC_DECIDE);CHKERRQ(ierr);
211     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
212     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
213 
214     ierr = PCMGSetInterpolate(pc,level1,gridctx[level].P);CHKERRQ(ierr);
215     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
216 
217     if (level == 0){
218       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
219     } else {
220       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
221       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
222     }
223     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
224   }
225   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
226   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
227   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
228   ierr = KSPSetOptionsPrefix(gridctx[fine_level].ksp,"mg_fine_");CHKERRQ(ierr);
229 
230   /* now call PCSetUp_MG()         */
231   /*--------------------------------*/
232   ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
233   PetscFunctionReturn(0);
234 }
235 
236 #undef __FUNCT__
237 #define __FUNCT__ "PetscObjectContainerDestroy_PC_ML"
238 PetscErrorCode PetscObjectContainerDestroy_PC_ML(void *ptr)
239 {
240   PetscErrorCode       ierr;
241   PC_ML                *pc_ml = (PC_ML*)ptr;
242   PetscInt             level;
243 
244   PetscFunctionBegin;
245   if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
246   ML_Aggregate_Destroy(&pc_ml->agg_object);
247   ML_Destroy(&pc_ml->ml_object);
248 
249   ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
250   if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
251   if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
252   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
253 
254   for (level=0; level<pc_ml->fine_level; level++){
255     if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
256     if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
257     if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
258     if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
259     if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
260     if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
261   }
262   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
263   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
264   PetscFunctionReturn(0);
265 }
266 /* -------------------------------------------------------------------------- */
267 /*
268    PCDestroy_ML - Destroys the private context for the ML preconditioner
269    that was created with PCCreate_ML().
270 
271    Input Parameter:
272 .  pc - the preconditioner context
273 
274    Application Interface Routine: PCDestroy()
275 */
276 #undef __FUNCT__
277 #define __FUNCT__ "PCDestroy_ML"
278 PetscErrorCode PCDestroy_ML(PC pc)
279 {
280   PetscErrorCode       ierr;
281   PC_ML                *pc_ml=PETSC_NULL;
282   PetscObjectContainer container;
283 
284   PetscFunctionBegin;
285   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
286   if (container) {
287     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
288     pc->ops->destroy = pc_ml->PCDestroy;
289   } else {
290     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
291   }
292   /* detach pc and PC_ML and dereference container */
293   ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
294   ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);
295 
296   ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
297   PetscFunctionReturn(0);
298 }
299 
300 #undef __FUNCT__
301 #define __FUNCT__ "PCSetFromOptions_ML"
302 PetscErrorCode PCSetFromOptions_ML(PC pc)
303 {
304   PetscErrorCode ierr;
305   PetscInt       indx,m,PrintLevel,MaxNlevels,MaxCoarseSize;
306   PetscReal      Threshold,DampingFactor;
307   PetscTruth     flg;
308   const char     *type[] = {"additive","multiplicative","full","cascade","kascade"};
309   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
310   PC_ML          *pc_ml=PETSC_NULL;
311   PetscObjectContainer container;
312 
313   PetscFunctionBegin;
314   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
315   if (container) {
316     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
317   } else {
318     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
319   }
320 
321   /* inherited MG options */
322   ierr = PetscOptionsHead("Multigrid options (inherited)");CHKERRQ(ierr);
323   ierr = PetscOptionsInt("-pc_mg_cycles","1 for V-cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr);
324   ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
325   ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
326   ierr = PetscOptionsEList("-pc_mg_type","Multigrid type","MGSetType",type,5,type[1],&indx,&flg);CHKERRQ(ierr);
327   ierr = PetscOptionsTail();CHKERRQ(ierr);
328 
329   /* ML options */
330   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
331   /* set defaults */
332   PrintLevel    = 0;
333   MaxNlevels    = 10;
334   MaxCoarseSize = 1;
335   indx          = 0;
336   Threshold     = 0.0;
337   DampingFactor = 4.0/3.0;
338 
339   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
340   ML_Set_PrintLevel(PrintLevel);
341 
342   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",MaxNlevels,&MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
343   pc_ml->MaxNlevels = MaxNlevels;
344 
345   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",MaxCoarseSize,&MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
346   pc_ml->MaxCoarseSize = MaxCoarseSize;
347 
348   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
349   pc_ml->CoarsenScheme = indx;
350 
351   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",DampingFactor,&DampingFactor,PETSC_NULL);CHKERRQ(ierr);
352   pc_ml->DampingFactor = DampingFactor;
353 
354   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",Threshold,&Threshold,PETSC_NULL);CHKERRQ(ierr);
355   pc_ml->Threshold = Threshold;
356 
357   ierr = PetscOptionsLogical("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",PETSC_FALSE,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
358 
359   ierr = PetscOptionsTail();CHKERRQ(ierr);
360   PetscFunctionReturn(0);
361 }
362 
363 /* -------------------------------------------------------------------------- */
364 /*
365    PCCreate_ML - Creates an ML preconditioner context, PC_ML, and attaches it
366    (via a PetscObjectContainer) to the generic preconditioning
367    context, PC, that was created within PCCreate().
368 
369    Input Parameter:
370 .  pc - the preconditioner context
371 
372    Application Interface Routine: PCCreate()
373 */
374 
375 /*MC
376      PCML - Use algebraic multigrid preconditioning via the smoothed aggregation method of the ML package. This
377        preconditioner requires that you provide a fine grid discretization matrix. The coarser grid matrices and
378        restriction/interpolation operators are computed by ML, with the matrices converted to PETSc matrices in aij
379        format and the restriction/interpolation operators wrapped as PETSc shell matrices (in parallel).
380 
381    Options Database Keys:
382    Multigrid options (inherited)
383 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
384 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
385 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
386 -  -pc_mg_type <multiplicative> (one of) additive multiplicative full cascade kascade
387 
388    ML options
389 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
390 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
391 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
392 .  -pc_ml_CoarsenScheme <Uncoupled> (one of) Uncoupled Coupled MIS METIS
393 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
394 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
395 -  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)
396 
397    Level: intermediate
398 
399   Concepts: multigrid
400 
401 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
402            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
403            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
404            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
405            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
406 M*/
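/*
   A minimal usage sketch (assumptions: PETSc was configured with ML support; the runtime type
   name "ml" follows from the PCML manual page above; the executable name ./ex1 and the
   assembled matrix A and vectors b,x are hypothetical):

       mpirun -np 2 ./ex1 -ksp_type gmres -pc_type ml \
              -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS -pc_ml_PrintLevel 1

   or, from code (error checking omitted):

       KSP ksp;
       PC  pc;
       KSPCreate(PETSC_COMM_WORLD,&ksp);
       KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
       KSPGetPC(ksp,&pc);
       PCSetType(pc,"ml");
       KSPSetFromOptions(ksp);    picks up the -pc_ml_* options listed above
       KSPSolve(ksp,b,x);
*/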
407 
408 EXTERN_C_BEGIN
409 #undef __FUNCT__
410 #define __FUNCT__ "PCCreate_ML"
411 PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
412 {
413   PetscErrorCode       ierr;
414   PC_ML                *pc_ml;
415   PetscObjectContainer container;
416 
417   PetscFunctionBegin;
418   /* initialize pc as PCMG */
419   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
420 
421   /* create a supporting struct and attach it to pc */
422   ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr);
423   ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
424   ierr = PetscObjectContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
425   ierr = PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_PC_ML);CHKERRQ(ierr);
426   ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);
427 
428   pc_ml->PCSetUp   = pc->ops->setup;
429   pc_ml->PCDestroy = pc->ops->destroy;
430 
431   /* override the PCMG function pointers with the PCML versions */
432   pc->ops->setfromoptions = PCSetFromOptions_ML;
433   pc->ops->setup          = PCSetUp_ML;
434   pc->ops->destroy        = PCDestroy_ML;
435   PetscFunctionReturn(0);
436 }
437 EXTERN_C_END
438 
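/*
   PetscML_getrow - the getrow callback registered with ML in PCSetUp_ML() via
   ML_Set_Amatrix_Getrow().  ML asks for N_requested_rows rows of the sequential
   fine-grid matrix Aloc; the column indices and values of each row are copied into
   columns[]/values[] and its length into row_lengths[].  Returns 0 if allocated_space
   is too small (ML then calls again with a larger buffer) and 1 on success.
*/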
439 int PetscML_getrow(void *ML_data, int N_requested_rows, int requested_rows[],
440    int allocated_space, int columns[], double values[], int row_lengths[])
441 {
442   PetscErrorCode ierr;
443   Mat            Aloc;
444   Mat_SeqAIJ     *a;
445   PetscInt       m,i,j,k=0,row,*aj;
446   PetscScalar    *aa;
447   FineGridCtx    *ml=(FineGridCtx*)ML_data;
448 
449   Aloc = ml->Aloc;
450   a    = (Mat_SeqAIJ*)Aloc->data;
451   ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);
452 
453   for (i = 0; i<N_requested_rows; i++) {
454     row   = requested_rows[i];
455     row_lengths[i] = a->ilen[row];
456     if (allocated_space < k+row_lengths[i]) return(0);
457     if ( (row >= 0) && (row <= (m-1)) ) {
458       aj = a->j + a->i[row];
459       aa = a->a + a->i[row];
460       for (j=0; j<row_lengths[i]; j++){
461         columns[k]  = aj[j];
462         values[k++] = aa[j];
463       }
464     }
465   }
466   return(1);
467 }
468 
469 int PetscML_matvec(void *ML_data,int in_length,double p[],int out_length,double ap[])
470 {
471   PetscErrorCode ierr;
472   FineGridCtx    *ml=(FineGridCtx*)ML_data;
473   Mat            A=ml->A, Aloc=ml->Aloc;
474   PetscMPIInt    size;
475   PetscScalar    *pwork=ml->pwork;
476   PetscInt       i;
477 
478   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
479   if (size == 1){
480     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
481   } else {
482     for (i=0; i<in_length; i++) pwork[i] = p[i];
483     PetscML_comm(pwork,ml);
484     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
485   }
486   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
487   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
488   return 0;
489 }
490 
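/*
   PetscML_comm - the communication callback registered with ML in PCSetUp_ML().
   On entry p[] holds the A->m locally owned entries; the scatter below appends the
   required off-process (ghost) values so that p[] matches the column layout of Aloc
   built by MatConvert_MPIAIJ_ML().  It is a no-op on a single process.
*/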
491 int PetscML_comm(double p[],void *ML_data)
492 {
493   PetscErrorCode ierr;
494   FineGridCtx    *ml=(FineGridCtx*)ML_data;
495   Mat            A=ml->A;
496   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
497   PetscMPIInt    size;
498   PetscInt       i,in_length=A->m,out_length=ml->Aloc->n;
499   PetscScalar    *array;
500 
501   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
502   if (size == 1) return 0;
503 
504   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
505   ierr = VecScatterBegin(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
506   ierr = VecScatterEnd(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
507   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
508   for (i=in_length; i<out_length; i++){
509     p[i] = array[i-in_length];
510   }
511   return 0;
512 }
513 #undef __FUNCT__
514 #define __FUNCT__ "MatMult_ML"
515 PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
516 {
517   PetscErrorCode   ierr;
518   Mat_MLShell      *shell;
519   PetscScalar      *xarray,*yarray;
520   PetscInt         x_length,y_length;
521 
522   PetscFunctionBegin;
523   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
524   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
525   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
526   x_length = shell->mlmat->invec_leng;
527   y_length = shell->mlmat->outvec_leng;
528 
529   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
530 
531   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
532   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
533   PetscFunctionReturn(0);
534 }
535 /* MatMultAdd_ML -  Compute y = w + A*x */
536 #undef __FUNCT__
537 #define __FUNCT__ "MatMultAdd_ML"
538 PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
539 {
540   PetscErrorCode    ierr;
541   Mat_MLShell       *shell;
542   PetscScalar       *xarray,*yarray;
543   const PetscScalar one=1.0;
544   PetscInt          x_length,y_length;
545 
546   PetscFunctionBegin;
547   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
548   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
549   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
550 
551   x_length = shell->mlmat->invec_leng;
552   y_length = shell->mlmat->outvec_leng;
553 
554   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
555 
556   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
557   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
558   ierr = VecAXPY(&one,w,y);CHKERRQ(ierr);
559 
560   PetscFunctionReturn(0);
561 }
562 
563 /* newtype is ignored because "ml" is not a registered PETSc MatType yet */
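/* MatConvert_MPIAIJ_ML builds the sequential matrix Aloc used by the ML callbacks: for each
   local row, the entries of the diagonal block (local columns 0..an-1) are followed by the
   entries of the off-diagonal block, whose column indices are shifted by an so that they
   address the ghost values appended by PetscML_comm(). */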
564 #undef __FUNCT__
565 #define __FUNCT__ "MatConvert_MPIAIJ_ML"
566 PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,const MatType newtype,MatReuse scall,Mat *Aloc)
567 {
568   PetscErrorCode  ierr;
569   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
570   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
571   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
572   PetscScalar     *aa=a->a,*ba=b->a,*ca;
573   PetscInt        am=A->m,an=A->n,i,j,k;
574   PetscInt        *ci,*cj,ncols;
575 
576   PetscFunctionBegin;
577   if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
578 
579   if (scall == MAT_INITIAL_MATRIX){
580     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
581     ci[0] = 0;
582     for (i=0; i<am; i++){
583       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
584     }
585     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
586     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
587 
588     k = 0;
589     for (i=0; i<am; i++){
590       /* diagonal portion of A */
591       ncols = ai[i+1] - ai[i];
592       for (j=0; j<ncols; j++) {
593         cj[k]   = *aj++;
594         ca[k++] = *aa++;
595       }
596       /* off-diagonal portion of A */
597       ncols = bi[i+1] - bi[i];
598       for (j=0; j<ncols; j++) {
599         cj[k]   = an + (*bj); bj++;
600         ca[k++] = *ba++;
601       }
602     }
603     if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
604 
605     /* put together the new matrix */
606     an = mpimat->A->n+mpimat->B->n;
607     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
608 
609     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
610     /* Since these are PETSc arrays, change flags to free them as necessary. */
611     mat = (Mat_SeqAIJ*)(*Aloc)->data;
612     mat->freedata = PETSC_TRUE;
613     mat->nonew    = 0;
614   } else if (scall == MAT_REUSE_MATRIX){
615     mat=(Mat_SeqAIJ*)(*Aloc)->data;
616     ci = mat->i; cj = mat->j; ca = mat->a;
617     for (i=0; i<am; i++) {
618       /* diagonal portion of A */
619       ncols = ai[i+1] - ai[i];
620       for (j=0; j<ncols; j++) *ca++ = *aa++;
621       /* off-diagonal portion of A */
622       ncols = bi[i+1] - bi[i];
623       for (j=0; j<ncols; j++) *ca++ = *ba++;
624     }
625   } else {
626     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
627   }
628   PetscFunctionReturn(0);
629 }
630 extern PetscErrorCode MatDestroy_Shell(Mat);
631 #undef __FUNCT__
632 #define __FUNCT__ "MatDestroy_ML"
633 PetscErrorCode MatDestroy_ML(Mat A)
634 {
635   PetscErrorCode ierr;
636   Mat_MLShell    *shell;
637 
638   PetscFunctionBegin;
639   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
640   ierr = VecDestroy(shell->y);CHKERRQ(ierr);
641   ierr = PetscFree(shell);CHKERRQ(ierr);
642   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
643   PetscFunctionReturn(0);
644 }
645 
646 #undef __FUNCT__
647 #define __FUNCT__ "MatWrapML_SeqAIJ"
648 PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,Mat *newmat)
649 {
650   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
651   PetscErrorCode  ierr;
652   PetscInt        m=mlmat->outvec_leng,n=mlmat->invec_leng,nnz[m],nz_max;
653   PetscInt        *ml_cols=matdata->columns,*aj,i,j,k;
654   PetscScalar     *ml_vals=matdata->values,*aa;
655 
656   PetscFunctionBegin;
657   if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
658   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
659     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
660     PetscFunctionReturn(0);
661   }
662 
663   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
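  /* In MSR storage the first m entries of ml_vals hold the diagonal of each row, ml_cols[0..m]
     are pointers into the arrays, and ml_cols[j]/ml_vals[j] for ml_cols[i] <= j < ml_cols[i+1]
     give the off-diagonal column indices/values of row i; hence nnz[i] = ml_cols[i+1]-ml_cols[i]+1. */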
664   ierr = MatCreate(PETSC_COMM_SELF,m,n,PETSC_DECIDE,PETSC_DECIDE,newmat);CHKERRQ(ierr);
665   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
666 
667   nz_max = 0;
668   for (i=0; i<m; i++) {
669     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
670     if (nnz[i] > nz_max) nz_max = nnz[i];
671   }
672   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
673   ierr = MatSetOption(*newmat,MAT_COLUMNS_SORTED);CHKERRQ(ierr); /* check! */
674 
675   nz_max++;
676   ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
677   aa = (PetscScalar*)(aj + nz_max);
678 
679   for (i=0; i<m; i++){
680     k = 0;
681     /* diagonal entry */
682     aj[k] = i; aa[k++] = ml_vals[i];
683     /* off diagonal entries */
684     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
685       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
686     }
687     /* sort aj and aa */
688     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
689     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
690   }
691   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
692   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
693   ierr = PetscFree(aj);CHKERRQ(ierr);
694   PetscFunctionReturn(0);
695 }
696 
697 #undef __FUNCT__
698 #define __FUNCT__ "MatWrapML_SHELL"
699 PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,Mat *newmat)
700 {
701   PetscErrorCode ierr;
702   PetscInt       m,n;
703   ML_Comm        *MLcomm;
704   Mat_MLShell    *shellctx;
705 
706   PetscFunctionBegin;
707   m = mlmat->outvec_leng;
708   n = mlmat->invec_leng;
709   if (!m || !n){
710     *newmat = PETSC_NULL;
711   } else {
712     MLcomm = mlmat->comm;
713     ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
714     ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
715     ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void *)MatMult_ML);CHKERRQ(ierr);
716     ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void *)MatMultAdd_ML);CHKERRQ(ierr);
717     shellctx->A         = *newmat;
718     shellctx->mlmat     = mlmat;
719     ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
720     ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
721     ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
722     (*newmat)->ops->destroy = MatDestroy_ML;
723   }
724   PetscFunctionReturn(0);
725 }
726 
727 #undef __FUNCT__
728 #define __FUNCT__ "MatWrapML_MPIAIJ"
729 PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
730 {
731   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
732   PetscInt        *ml_cols=matdata->columns,*aj;
733   PetscScalar     *ml_vals=matdata->values,*aa;
734   PetscErrorCode  ierr;
735   PetscInt        i,j,k,*gordering;
736   PetscInt        m=mlmat->outvec_leng,n,nnzA[m],nnzB[m],nnz[m],nz_max,row;
737   Mat             A;
738 
739   PetscFunctionBegin;
740   if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
741   n = mlmat->invec_leng;
742   if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
743 
744   ierr = MatCreate(mlmat->comm->USR_comm,m,n,PETSC_DECIDE,PETSC_DECIDE,&A);CHKERRQ(ierr);
745   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
746   nz_max = 0;
747   for (i=0; i<m; i++){
748     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
749     if (nz_max < nnz[i]) nz_max = nnz[i];
750     nnzA[i] = 1; /* diag */
751     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
752       if (ml_cols[j] < m) nnzA[i]++;
753     }
754     nnzB[i] = nnz[i] - nnzA[i];
755   }
756   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
757 
758   /* insert mat values -- remap row and column indices */
759   nz_max++;
760   ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
761   aa = (PetscScalar*)(aj + nz_max);
762   ML_build_global_numbering(mlmat,mlmat->comm,&gordering);
763   for (i=0; i<m; i++){
764     row = gordering[i];
765     k = 0;
766     /* diagonal entry */
767     aj[k] = row; aa[k++] = ml_vals[i];
768     /* off diagonal entries */
769     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
770       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
771     }
772     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
773   }
774   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
775   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
776   *newmat = A;
777 
778   ierr = PetscFree(aj);CHKERRQ(ierr);
779   PetscFunctionReturn(0);
780 }
781