xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 18be62a5feccf172f7bc80c15c4be8f6d6443e8b)
1 #define PETSCKSP_DLL
2 
3 /*
4    Provides an interface to the ML 3.0 smoothed aggregation algebraic multigrid preconditioner
5 */
6 #include "src/ksp/pc/pcimpl.h"   /*I "petscpc.h" I*/
7 #include "src/ksp/pc/impls/mg/mgimpl.h"                    /*I "petscmg.h" I*/
8 #include "src/mat/impls/aij/seq/aij.h"
9 #include "src/mat/impls/aij/mpi/mpiaij.h"
10 
11 EXTERN_C_BEGIN
12 #include <math.h>
13 #include "ml_include.h"
14 EXTERN_C_END
15 
16 /* The context (data structure) at each grid level */
17 typedef struct {
18   Vec        x,b,r;           /* global vectors */
19   Mat        A,P,R;
20   KSP        ksp;
21 } GridCtx;
22 
23 /* The context used to input a PETSc matrix into ML at the fine grid */
24 typedef struct {
25   Mat          A,Aloc;
26   Vec          x,y;
27   ML_Operator  *mlmat;
28   PetscScalar  *pwork; /* tmp array used by PetscML_comm() */
29 } FineGridCtx;
30 
31 /* The context that associates an ML matrix with a PETSc shell matrix */
32 typedef struct {
33   Mat          A;       /* PETSc shell matrix associated with mlmat */
34   ML_Operator  *mlmat;  /* ML matrix associated with A */
35   Vec          y;
36 } Mat_MLShell;
37 
38 /* Private context for the ML preconditioner */
39 typedef struct {
40   ML           *ml_object;
41   ML_Aggregate *agg_object;
42   GridCtx      *gridctx;
43   FineGridCtx  *PetscMLdata;
44   PetscInt     fine_level,MaxNlevels,MaxCoarseSize,CoarsenScheme;
45   PetscReal    Threshold,DampingFactor;
46   PetscTruth   SpectralNormScheme_Anorm;
47   PetscMPIInt  size;
48   PetscErrorCode (*PCSetUp)(PC);
49   PetscErrorCode (*PCDestroy)(PC);
50 } PC_ML;
51 
52 extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
53    int allocated_space,int columns[],double values[],int row_lengths[]);
54 extern int PetscML_matvec(ML_Operator *ML_data, int in_length, double p[], int out_length,double ap[]);
55 extern int PetscML_comm(double x[], void *ML_data);
56 extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
57 extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
58 extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
59 extern PetscErrorCode MatDestroy_ML(Mat);
60 extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,Mat*);
61 extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
62 extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,Mat*);
63 
64 /* -------------------------------------------------------------------------- */
65 /*
66    PCSetUp_ML - Prepares for the use of the ML preconditioner
67                     by setting data structures and options.
68 
69    Input Parameter:
70 .  pc - the preconditioner context
71 
72    Application Interface Routine: PCSetUp()
73 
74    Notes:
75    The interface routine PCSetUp() is not usually called directly by
76    the user, but instead is called by PCApply() if necessary.
77 */
78 extern PetscErrorCode PCSetFromOptions_MG(PC);
79 #undef __FUNCT__
80 #define __FUNCT__ "PCSetUp_ML"
81 PetscErrorCode PCSetUp_ML(PC pc)
82 {
83   PetscErrorCode       ierr;
84   PetscMPIInt          size;
85   FineGridCtx          *PetscMLdata;
86   ML                   *ml_object;
87   ML_Aggregate         *agg_object;
88   ML_Operator          *mlmat;
89   PetscInt             nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
90   Mat                  A,Aloc;
91   GridCtx              *gridctx;
92   PC_ML                *pc_ml=PETSC_NULL;
93   PetscObjectContainer container;
94 
95   PetscFunctionBegin;
96   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
97   if (container) {
98     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
99   } else {
100     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
101   }
102 
103   /* set up the special features of PCML */
104   /*--------------------------------*/
105   /* convert A to Aloc to be used by ML at the fine grid */
106   A = pc->pmat;
107   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
108   pc_ml->size = size;
109   if (size > 1){
110     ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
111   } else {
112     Aloc = A;
113   }
114 
115   /* create and initialize struct 'PetscMLdata' */
116   ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
117   PetscMLdata->A    = A;
118   PetscMLdata->Aloc = Aloc;
119   ierr = PetscMalloc((Aloc->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
120   pc_ml->PetscMLdata = PetscMLdata;
121 
122   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
123   if (size == 1){
124     ierr = VecSetSizes(PetscMLdata->x,A->n,A->n);CHKERRQ(ierr);
125   } else {
126     ierr = VecSetSizes(PetscMLdata->x,Aloc->n,Aloc->n);CHKERRQ(ierr);
127   }
128   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
129 
130   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
131   ierr = VecSetSizes(PetscMLdata->y,A->m,PETSC_DECIDE);CHKERRQ(ierr);
132   ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
133 
134   /* create ML discretization matrix at fine grid */
135   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
136   ML_Create(&ml_object,pc_ml->MaxNlevels);
137   ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
138   ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
139   ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);
140 
141   /* aggregation */
142   ML_Aggregate_Create(&agg_object);
143   ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
144   /* set options */
145   switch (pc_ml->CoarsenScheme) {
146   case 1:
147     ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
148   case 2:
149     ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
150   case 3:
151     ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
152   }
153   ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
154   ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
155   if (pc_ml->SpectralNormScheme_Anorm){
156     ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
157   }
158 
159   Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
160   if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
161   ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
162   ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
163   pc_ml->ml_object  = ml_object;
164   pc_ml->agg_object = agg_object;
165 
166   ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
167   fine_level = Nlevels - 1;
168   pc_ml->gridctx = gridctx;
169   pc_ml->fine_level = fine_level;
170 
171   /* wrap the ML matrices on the coarsened grids as PETSc matrices.
172      Level 0 is the finest grid for ML, but the coarsest for PETSc! */
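  /* Illustrative note derived from the loops below: ML level mllevel ends up at PETSc level
     fine_level - mllevel = Nlevels - 1 - mllevel; e.g. with Nlevels = 3 the ML levels 0,1,2
     (finest to coarsest) correspond to PETSc levels 2,1,0. */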
173   gridctx[fine_level].A = A;
174   level = fine_level - 1;
175   if (size == 1){ /* convert ML P, R and A into seqaij format */
176     for (mllevel=1; mllevel<Nlevels; mllevel++){
177       mlmat  = &(ml_object->Pmat[mllevel]);
178       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].P);CHKERRQ(ierr);
179       mlmat  = &(ml_object->Amat[mllevel]);
180       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
181       mlmat  = &(ml_object->Rmat[mllevel-1]);
182       ierr = MatWrapML_SeqAIJ(mlmat,&gridctx[level].R);CHKERRQ(ierr);
183       level--;
184     }
185   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
186     for (mllevel=1; mllevel<Nlevels; mllevel++){
187       mlmat  = &(ml_object->Pmat[mllevel]);
188       ierr = MatWrapML_SHELL(mlmat,&gridctx[level].P);CHKERRQ(ierr);
189       mlmat  = &(ml_object->Rmat[mllevel-1]);
190       ierr = MatWrapML_SHELL(mlmat,&gridctx[level].R);CHKERRQ(ierr);
191       mlmat  = &(ml_object->Amat[mllevel]);
192       ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
193       level--;
194     }
195   }
196 
197   /* create the coarser-level work vectors and the interpolation between the levels */
198   for (level=0; level<fine_level; level++){
199     ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr);
200     ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->n,PETSC_DECIDE);CHKERRQ(ierr);
201     ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
202     ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
203 
204     ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr);
205     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr);
206     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
207     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
208 
209     level1 = level + 1;
210     ierr = VecCreate(gridctx[level1].A->comm,&gridctx[level1].r);CHKERRQ(ierr);
211     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->m,PETSC_DECIDE);CHKERRQ(ierr);
212     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
213     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
214 
215     ierr = PCMGSetInterpolate(pc,level1,gridctx[level].P);CHKERRQ(ierr);
216     ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
217 
218     if (level == 0){
219       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
220     } else {
221       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
222       ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
223     }
224     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
225   }
226   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
227   ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
228   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
229   ierr = KSPSetOptionsPrefix(gridctx[fine_level].ksp,"mg_fine_");CHKERRQ(ierr);
230 
231   /* now call PCSetUp_MG()         */
232   /*--------------------------------*/
233   ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
234   PetscFunctionReturn(0);
235 }
236 
237 #undef __FUNCT__
238 #define __FUNCT__ "PetscObjectContainerDestroy_PC_ML"
239 PetscErrorCode PetscObjectContainerDestroy_PC_ML(void *ptr)
240 {
241   PetscErrorCode       ierr;
242   PC_ML                *pc_ml = (PC_ML*)ptr;
243   PetscInt             level;
244 
245   PetscFunctionBegin;
246   if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
247   ML_Aggregate_Destroy(&pc_ml->agg_object);
248   ML_Destroy(&pc_ml->ml_object);
249 
250   ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
251   if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
252   if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
253   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
254 
255   for (level=0; level<pc_ml->fine_level; level++){
256     if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
257     if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
258     if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
259     if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
260     if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
261     if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
262   }
263   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
264   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
265   PetscFunctionReturn(0);
266 }
267 /* -------------------------------------------------------------------------- */
268 /*
269    PCDestroy_ML - Destroys the private context for the ML preconditioner
270    that was created with PCCreate_ML().
271 
272    Input Parameter:
273 .  pc - the preconditioner context
274 
275    Application Interface Routine: PCDestroy()
276 */
277 #undef __FUNCT__
278 #define __FUNCT__ "PCDestroy_ML"
279 PetscErrorCode PCDestroy_ML(PC pc)
280 {
281   PetscErrorCode       ierr;
282   PC_ML                *pc_ml=PETSC_NULL;
283   PetscObjectContainer container;
284 
285   PetscFunctionBegin;
286   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
287   if (container) {
288     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
289     pc->ops->destroy = pc_ml->PCDestroy;
290   } else {
291     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
292   }
293   /* detach pc and PC_ML and dereference container */
294   ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
295   ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);
296 
297   ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
298   PetscFunctionReturn(0);
299 }
300 
301 #undef __FUNCT__
302 #define __FUNCT__ "PCSetFromOptions_ML"
303 PetscErrorCode PCSetFromOptions_ML(PC pc)
304 {
305   PetscErrorCode       ierr;
306   PetscInt             indx,m,PrintLevel,MaxNlevels,MaxCoarseSize;
307   PetscReal            Threshold,DampingFactor;
308   PetscTruth           flg;
309   const char           *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
310   PC_ML                *pc_ml=PETSC_NULL;
311   PetscObjectContainer container;
312   PCMGType             mgtype;
313 
314   PetscFunctionBegin;
315   ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
316   if (container) {
317     ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
318   } else {
319     SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
320   }
321 
322   /* inherited MG options */
323   ierr = PetscOptionsHead("Multigrid options (inherited)");CHKERRQ(ierr);
324     ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","PCMGSetCycles",1,&m,&flg);CHKERRQ(ierr);
325     ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","PCMGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
326     ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","PCMGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
327     ierr = PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);CHKERRQ(ierr);
328   ierr = PetscOptionsTail();CHKERRQ(ierr);
329 
330   /* ML options */
331   ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
332   /* set defaults */
333   PrintLevel    = 0;
334   MaxNlevels    = 10;
335   MaxCoarseSize = 1;
336   indx          = 0;
337   Threshold     = 0.0;
338   DampingFactor = 4.0/3.0;
339 
340   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
341   ML_Set_PrintLevel(PrintLevel);
342 
343   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",MaxNlevels,&MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
344   pc_ml->MaxNlevels = MaxNlevels;
345 
346   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",MaxCoarseSize,&MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
347   pc_ml->MaxCoarseSize = MaxCoarseSize;
348 
349   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
350   pc_ml->CoarsenScheme = indx;
351 
352   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",DampingFactor,&DampingFactor,PETSC_NULL);CHKERRQ(ierr);
353   pc_ml->DampingFactor = DampingFactor;
354 
355   ierr = PetscOptionsReal("-pc_ml_Threshold","Aggregation drop tolerance","ML_Aggregate_Set_Threshold",Threshold,&Threshold,PETSC_NULL);CHKERRQ(ierr);
356   pc_ml->Threshold = Threshold;
357 
358   ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",PETSC_FALSE,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
359 
360   ierr = PetscOptionsTail();CHKERRQ(ierr);
361   PetscFunctionReturn(0);
362 }
363 
364 /* -------------------------------------------------------------------------- */
365 /*
366    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
367    and sets this as the private data within the generic preconditioning
368    context, PC, that was created within PCCreate().
369 
370    Input Parameter:
371 .  pc - the preconditioner context
372 
373    Application Interface Routine: PCCreate()
374 */
375 
376 /*MC
377      PCML - Use algebraic multigrid preconditioning from the ML package (smoothed aggregation). This
378        preconditioner requires that you provide the fine grid discretization matrix. The coarser grid
379        matrices and restriction/interpolation operators are computed by ML, with the matrices converted
380        to PETSc matrices in aij format and the restriction/interpolation operators wrapped as PETSc shell matrices.
381 
382    Options Database Keys:
383    Multigrid options(inherited)
384 +  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
385 .  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
386 .  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
387 -  -pc_mg_type <multiplicative> (one of) additive multiplicative full cascade kascade
388 
389    ML options
390 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
391 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
392 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
393 .  -pc_ml_CoarsenScheme <Uncoupled> (one of) Uncoupled Coupled MIS METIS
394 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
395 .  -pc_ml_Threshold <0>: Aggregation drop tolerance (ML_Aggregate_Set_Threshold)
396 -  -pc_ml_SpectralNormScheme_Anorm: <false> Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)
397 
398    Level: intermediate
399 
400   Concepts: multigrid
401 
402 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
403            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
404            PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
405            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
406            PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
407 M*/
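/*
   Illustrative usage sketch (an assumption about typical application code, not part of this interface):
   once this preconditioner is registered as the PC type "ml", an application that has assembled a matrix
   A, right-hand side b and solution vector x might select it roughly as follows; the runtime options
   shown are only examples.

      KSP ksp;
      PC  pc;
      ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
      ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
      ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
      ierr = PCSetType(pc,"ml");CHKERRQ(ierr);
      ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);   run with e.g. -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS
      ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
      ierr = KSPDestroy(ksp);CHKERRQ(ierr);
*/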
408 
409 EXTERN_C_BEGIN
410 #undef __FUNCT__
411 #define __FUNCT__ "PCCreate_ML"
412 PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
413 {
414   PetscErrorCode       ierr;
415   PC_ML                *pc_ml;
416   PetscObjectContainer container;
417 
418   PetscFunctionBegin;
419   /* initialize pc as PCMG */
420   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
421 
422   /* create a supporting struct and attach it to pc */
423   ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr);
424   ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
425   ierr = PetscObjectContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
426   ierr = PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_PC_ML);CHKERRQ(ierr);
427   ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);
428 
429   pc_ml->PCSetUp   = pc->ops->setup;
430   pc_ml->PCDestroy = pc->ops->destroy;
431 
432   /* override the PCMG function pointers with the PCML versions */
433   pc->ops->setfromoptions = PCSetFromOptions_ML;
434   pc->ops->setup          = PCSetUp_ML;
435   pc->ops->destroy        = PCDestroy_ML;
436   PetscFunctionReturn(0);
437 }
438 EXTERN_C_END
439 
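/* PetscML_getrow - getrow callback handed to ML via ML_Set_Amatrix_Getrow() above: it copies the
   requested rows of the local matrix Aloc into ML's columns[]/values[] buffers. Following the ML
   getrow convention assumed here, it returns 0 when allocated_space is too small and 1 on success. */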
440 int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],
441    int allocated_space, int columns[], double values[], int row_lengths[])
442 {
443   PetscErrorCode ierr;
444   Mat            Aloc;
445   Mat_SeqAIJ     *a;
446   PetscInt       m,i,j,k=0,row,*aj;
447   PetscScalar    *aa;
448   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
449 
450   Aloc = ml->Aloc;
451   a    = (Mat_SeqAIJ*)Aloc->data;
452   ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);
453 
454   for (i = 0; i<N_requested_rows; i++) {
455     row   = requested_rows[i];
456     row_lengths[i] = a->ilen[row];
457     if (allocated_space < k+row_lengths[i]) return(0);
458     if (row >= 0 && row <= m-1) {
459       aj = a->j + a->i[row];
460       aa = a->a + a->i[row];
461       for (j=0; j<row_lengths[i]; j++){
462         columns[k]  = aj[j];
463         values[k++] = aa[j];
464       }
465     }
466   }
467   return(1);
468 }
469 
470 int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
471 {
472   PetscErrorCode ierr;
473   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
474   Mat            A=ml->A, Aloc=ml->Aloc;
475   PetscMPIInt    size;
476   PetscScalar    *pwork=ml->pwork;
477   PetscInt       i;
478 
479   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
480   if (size == 1){
481     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
482   } else {
483     for (i=0; i<in_length; i++) pwork[i] = p[i];
484     PetscML_comm(pwork,ml);
485     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
486   }
487   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
488   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
489   return 0;
490 }
491 
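/* PetscML_comm - communication callback used by PetscML_getrow() and PetscML_matvec() in parallel:
   given the locally owned entries of a distributed vector in p[0..A->m-1], it fills
   p[A->m..Aloc->n-1] with the ghost (off-process) values using the VecScatter of the MPIAIJ matrix A. */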
492 int PetscML_comm(double p[],void *ML_data)
493 {
494   PetscErrorCode ierr;
495   FineGridCtx    *ml=(FineGridCtx*)ML_data;
496   Mat            A=ml->A;
497   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
498   PetscMPIInt    size;
499   PetscInt       i,in_length=A->m,out_length=ml->Aloc->n;
500   PetscScalar    *array;
501 
502   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
503   if (size == 1) return 0;
504 
505   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
506   ierr = VecScatterBegin(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
507   ierr = VecScatterEnd(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
508   ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
509   for (i=in_length; i<out_length; i++){
510     p[i] = array[i-in_length];
511   }
512   return 0;
513 }
514 #undef __FUNCT__
515 #define __FUNCT__ "MatMult_ML"
516 PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
517 {
518   PetscErrorCode   ierr;
519   Mat_MLShell      *shell;
520   PetscScalar      *xarray,*yarray;
521   PetscInt         x_length,y_length;
522 
523   PetscFunctionBegin;
524   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
525   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
526   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
527   x_length = shell->mlmat->invec_leng;
528   y_length = shell->mlmat->outvec_leng;
529 
530   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
531 
532   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
533   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
534   PetscFunctionReturn(0);
535 }
536 /* MatMultAdd_ML -  Compute y = w + A*x */
537 #undef __FUNCT__
538 #define __FUNCT__ "MatMultAdd_ML"
539 PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
540 {
541   PetscErrorCode    ierr;
542   Mat_MLShell       *shell;
543   PetscScalar       *xarray,*yarray;
544   const PetscScalar one=1.0;
545   PetscInt          x_length,y_length;
546 
547   PetscFunctionBegin;
548   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
549   ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
550   ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
551 
552   x_length = shell->mlmat->invec_leng;
553   y_length = shell->mlmat->outvec_leng;
554 
555   ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
556 
557   ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
558   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
559   ierr = VecAXPY(y,one,w);CHKERRQ(ierr);
560 
561   PetscFunctionReturn(0);
562 }
563 
564 /* newtype is ignored because "ml" is not listed as a PETSc MatType yet */
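/* MatConvert_MPIAIJ_ML - merges the local diagonal block and off-diagonal block of a parallel MPIAIJ
   matrix into a single sequential AIJ matrix Aloc: columns 0..n-1 are the diagonal-block columns and
   columns n..n+nghost-1 are the compressed off-process columns, matching the ghosted vector layout
   produced by PetscML_comm(). */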
565 #undef __FUNCT__
566 #define __FUNCT__ "MatConvert_MPIAIJ_ML"
567 PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
568 {
569   PetscErrorCode  ierr;
570   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
571   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
572   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
573   PetscScalar     *aa=a->a,*ba=b->a,*ca;
574   PetscInt        am=A->m,an=A->n,i,j,k;
575   PetscInt        *ci,*cj,ncols;
576 
577   PetscFunctionBegin;
578   if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
579 
580   if (scall == MAT_INITIAL_MATRIX){
581     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
582     ci[0] = 0;
583     for (i=0; i<am; i++){
584       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
585     }
586     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
587     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
588 
589     k = 0;
590     for (i=0; i<am; i++){
591       /* diagonal portion of A */
592       ncols = ai[i+1] - ai[i];
593       for (j=0; j<ncols; j++) {
594         cj[k]   = *aj++;
595         ca[k++] = *aa++;
596       }
597       /* off-diagonal portion of A */
598       ncols = bi[i+1] - bi[i];
599       for (j=0; j<ncols; j++) {
600         cj[k]   = an + (*bj); bj++;
601         ca[k++] = *ba++;
602       }
603     }
604     if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
605 
606     /* put together the new matrix */
607     an = mpimat->A->n+mpimat->B->n;
608     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
609 
610     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
611     /* Since these are PETSc arrays, change flags to free them as necessary. */
612     mat = (Mat_SeqAIJ*)(*Aloc)->data;
613     mat->freedata = PETSC_TRUE;
614     mat->nonew    = 0;
615   } else if (scall == MAT_REUSE_MATRIX){
616     mat=(Mat_SeqAIJ*)(*Aloc)->data;
617     ci = mat->i; cj = mat->j; ca = mat->a;
618     for (i=0; i<am; i++) {
619       /* diagonal portion of A */
620       ncols = ai[i+1] - ai[i];
621       for (j=0; j<ncols; j++) *ca++ = *aa++;
622       /* off-diagonal portion of A */
623       ncols = bi[i+1] - bi[i];
624       for (j=0; j<ncols; j++) *ca++ = *ba++;
625     }
626   } else {
627     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
628   }
629   PetscFunctionReturn(0);
630 }
631 extern PetscErrorCode MatDestroy_Shell(Mat);
632 #undef __FUNCT__
633 #define __FUNCT__ "MatDestroy_ML"
634 PetscErrorCode MatDestroy_ML(Mat A)
635 {
636   PetscErrorCode ierr;
637   Mat_MLShell    *shell;
638 
639   PetscFunctionBegin;
640   ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr);
641   ierr = VecDestroy(shell->y);CHKERRQ(ierr);
642   ierr = PetscFree(shell);CHKERRQ(ierr);
643   ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
644   PetscFunctionReturn(0);
645 }
646 
647 #undef __FUNCT__
648 #define __FUNCT__ "MatWrapML_SeqAIJ"
649 PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,Mat *newmat)
650 {
651   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
652   PetscErrorCode        ierr;
653   PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
654   PetscInt              *ml_cols=matdata->columns,*aj,i,j,k;
655   PetscScalar           *ml_vals=matdata->values,*aa;
656 
657   PetscFunctionBegin;
658   if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
659   if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
660     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
661     PetscFunctionReturn(0);
662   }
663 
664   /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */
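  /* In the MSR storage assumed here, ml_vals[0..m-1] hold the diagonal entries of rows 0..m-1, while
     ml_cols[i]..ml_cols[i+1]-1 index the off-diagonal column/value pairs of row i; the loops below
     interpret ml_cols[] and ml_vals[] accordingly. */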
665   ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
666   ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
667   ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
668   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
669 
670   nz_max = 0;
671   for (i=0; i<m; i++) {
672     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
673     if (nnz[i] > nz_max) nz_max = nnz[i];
674   }
675   ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
676   ierr = MatSetOption(*newmat,MAT_COLUMNS_SORTED);CHKERRQ(ierr); /* column indices are sorted below before MatSetValues() */
677 
678   nz_max++;
679   ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
680   aa = (PetscScalar*)(aj + nz_max);
681 
682   for (i=0; i<m; i++){
683     k = 0;
684     /* diagonal entry */
685     aj[k] = i; aa[k++] = ml_vals[i];
686     /* off diagonal entries */
687     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
688       aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
689     }
690     /* sort aj and aa */
691     ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
692     ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
693   }
694   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
695   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
696   ierr = PetscFree(aj);CHKERRQ(ierr);
697   ierr = PetscFree(nnz);CHKERRQ(ierr);
698   PetscFunctionReturn(0);
699 }
700 
701 #undef __FUNCT__
702 #define __FUNCT__ "MatWrapML_SHELL"
703 PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,Mat *newmat)
704 {
705   PetscErrorCode ierr;
706   PetscInt       m,n;
707   ML_Comm        *MLcomm;
708   Mat_MLShell    *shellctx;
709 
710   PetscFunctionBegin;
711   m = mlmat->outvec_leng;
712   n = mlmat->invec_leng;
713   if (!m || !n){
714     *newmat = PETSC_NULL;
715   } else {
716     MLcomm = mlmat->comm;
717     ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
718     ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
719     ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
720     ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
721     shellctx->A         = *newmat;
722     shellctx->mlmat     = mlmat;
723     ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
724     ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
725     ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
726     (*newmat)->ops->destroy = MatDestroy_ML;
727   }
728   PetscFunctionReturn(0);
729 }
730 
731 #undef __FUNCT__
732 #define __FUNCT__ "MatWrapML_MPIAIJ"
733 PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
734 {
735   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
736   PetscInt              *ml_cols=matdata->columns,*aj;
737   PetscScalar           *ml_vals=matdata->values,*aa;
738   PetscErrorCode        ierr;
739   PetscInt              i,j,k,*gordering;
740   PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
741   Mat                   A;
742 
743   PetscFunctionBegin;
744   if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
745   n = mlmat->invec_leng;
746   if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);
747 
748   ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
749   ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
750   ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
751   ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
752 
753   nz_max = 0;
754   for (i=0; i<m; i++){
755     nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
756     if (nz_max < nnz[i]) nz_max = nnz[i];
757     nnzA[i] = 1; /* diag */
758     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
759       if (ml_cols[j] < m) nnzA[i]++;
760     }
761     nnzB[i] = nnz[i] - nnzA[i];
762   }
763   ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
764 
765   /* insert mat values -- remap row and column indices */
766   nz_max++;
767   ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
768   aa = (PetscScalar*)(aj + nz_max);
769   ML_build_global_numbering(mlmat,mlmat->comm,&gordering);
770   for (i=0; i<m; i++){
771     row = gordering[i];
772     k = 0;
773     /* diagonal entry */
774     aj[k] = row; aa[k++] = ml_vals[i];
775     /* off diagonal entries */
776     for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
777       aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
778     }
779     ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
780   }
781   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
782   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
783   *newmat = A;
784 
785   ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
786   ierr = PetscFree(aj);CHKERRQ(ierr);
787   PetscFunctionReturn(0);
788 }
789