1 #define PETSCKSP_DLL 2 3 /* 4 Provides an interface to the ML 5.0 smoothed Aggregation 5 */ 6 #include "private/pcimpl.h" /*I "petscpc.h" I*/ 7 #include "src/ksp/pc/impls/mg/mgimpl.h" /*I "petscmg.h" I*/ 8 #include "src/mat/impls/aij/seq/aij.h" 9 #include "src/mat/impls/aij/mpi/mpiaij.h" 10 11 #include <math.h> 12 EXTERN_C_BEGIN 13 /* HAVE_CONFIG_H flag is required by ML include files */ 14 #if !defined(HAVE_CONFIG_H) 15 #define HAVE_CONFIG_H 16 #endif 17 #include "ml_include.h" 18 EXTERN_C_END 19 20 /* The context (data structure) at each grid level */ 21 typedef struct { 22 Vec x,b,r; /* global vectors */ 23 Mat A,P,R; 24 KSP ksp; 25 } GridCtx; 26 27 /* The context used to input PETSc matrix into ML at fine grid */ 28 typedef struct { 29 Mat A; /* Petsc matrix in aij format */ 30 Mat Aloc; /* local portion of A to be used by ML */ 31 Vec x,y; 32 ML_Operator *mlmat; 33 PetscScalar *pwork; /* tmp array used by PetscML_comm() */ 34 } FineGridCtx; 35 36 /* The context associates a ML matrix with a PETSc shell matrix */ 37 typedef struct { 38 Mat A; /* PETSc shell matrix associated with mlmat */ 39 ML_Operator *mlmat; /* ML matrix assorciated with A */ 40 Vec y; 41 } Mat_MLShell; 42 43 /* Private context for the ML preconditioner */ 44 typedef struct { 45 ML *ml_object; 46 ML_Aggregate *agg_object; 47 GridCtx *gridctx; 48 FineGridCtx *PetscMLdata; 49 PetscInt Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme; 50 PetscReal Threshold,DampingFactor; 51 PetscTruth SpectralNormScheme_Anorm; 52 PetscMPIInt size; /* size of communicator for pc->pmat */ 53 PetscErrorCode (*PCSetUp)(PC); 54 PetscErrorCode (*PCDestroy)(PC); 55 } PC_ML; 56 57 extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[], 58 int allocated_space,int columns[],double values[],int row_lengths[]); 59 extern int PetscML_matvec(ML_Operator *ML_data, int in_length, double p[], int out_length,double ap[]); 60 extern int PetscML_comm(double x[], void *ML_data); 61 extern PetscErrorCode MatMult_ML(Mat,Vec,Vec); 62 extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec); 63 extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*); 64 extern PetscErrorCode MatDestroy_ML(Mat); 65 extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*); 66 extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*); 67 extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*); 68 extern PetscErrorCode PetscContainerDestroy_PC_ML(void *); 69 70 /* -------------------------------------------------------------------------- */ 71 /* 72 PCSetUp_ML - Prepares for the use of the ML preconditioner 73 by setting data structures and options. 74 75 Input Parameter: 76 . pc - the preconditioner context 77 78 Application Interface Routine: PCSetUp() 79 80 Notes: 81 The interface routine PCSetUp() is not usually called directly by 82 the user, but instead is called by PCApply() if necessary. 83 */ 84 extern PetscErrorCode PCSetFromOptions_MG(PC); 85 #undef __FUNCT__ 86 #define __FUNCT__ "PCSetUp_ML" 87 PetscErrorCode PCSetUp_ML(PC pc) 88 { 89 PetscErrorCode ierr; 90 PetscMPIInt size; 91 FineGridCtx *PetscMLdata; 92 ML *ml_object; 93 ML_Aggregate *agg_object; 94 ML_Operator *mlmat; 95 PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level; 96 Mat A,Aloc; 97 GridCtx *gridctx; 98 PC_ML *pc_ml=PETSC_NULL; 99 PetscContainer container; 100 MatReuse reuse = MAT_INITIAL_MATRIX; 101 102 PetscFunctionBegin; 103 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 104 if (container) { 105 ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 106 } else { 107 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 108 } 109 110 if (pc->setupcalled){ 111 if (pc->flag == SAME_NONZERO_PATTERN){ 112 reuse = MAT_REUSE_MATRIX; 113 PetscMLdata = pc_ml->PetscMLdata; 114 gridctx = pc_ml->gridctx; 115 /* ML objects cannot be reused */ 116 ML_Destroy(&pc_ml->ml_object); 117 ML_Aggregate_Destroy(&pc_ml->agg_object); 118 } else { 119 PC_ML *pc_ml_new = PETSC_NULL; 120 PetscContainer container_new; 121 ierr = PetscNewLog(pc,PC_ML,&pc_ml_new);CHKERRQ(ierr); 122 ierr = PetscContainerCreate(PETSC_COMM_SELF,&container_new);CHKERRQ(ierr); 123 ierr = PetscContainerSetPointer(container_new,pc_ml_new);CHKERRQ(ierr); 124 ierr = PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);CHKERRQ(ierr); 125 ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);CHKERRQ(ierr); 126 127 ierr = PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));CHKERRQ(ierr); 128 ierr = PetscContainerDestroy(container);CHKERRQ(ierr); 129 pc_ml = pc_ml_new; 130 } 131 } 132 133 /* setup special features of PCML */ 134 /*--------------------------------*/ 135 /* covert A to Aloc to be used by ML at fine grid */ 136 A = pc->pmat; 137 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 138 pc_ml->size = size; 139 if (size > 1){ 140 if (reuse) Aloc = PetscMLdata->Aloc; 141 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);CHKERRQ(ierr); 142 } else { 143 Aloc = A; 144 } 145 146 /* create and initialize struct 'PetscMLdata' */ 147 if (!reuse){ 148 ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 149 pc_ml->PetscMLdata = PetscMLdata; 150 ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 151 152 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 153 ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr); 154 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 155 156 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 157 ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 158 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 159 } 160 PetscMLdata->A = A; 161 PetscMLdata->Aloc = Aloc; 162 163 /* create ML discretization matrix at fine grid */ 164 /* ML requires input of fine-grid matrix. It determines nlevels. */ 165 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 166 ML_Create(&ml_object,pc_ml->MaxNlevels); 167 pc_ml->ml_object = ml_object; 168 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 169 ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 170 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 171 172 /* aggregation */ 173 ML_Aggregate_Create(&agg_object); 174 pc_ml->agg_object = agg_object; 175 176 ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize); 177 /* set options */ 178 switch (pc_ml->CoarsenScheme) { 179 case 1: 180 ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break; 181 case 2: 182 ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break; 183 case 3: 184 ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break; 185 } 186 ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 187 ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 188 if (pc_ml->SpectralNormScheme_Anorm){ 189 ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object); 190 } 191 192 Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 193 if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 194 if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %d must be same", pc_ml->Nlevels,Nlevels); 195 pc_ml->Nlevels = Nlevels; 196 fine_level = Nlevels - 1; 197 if (!pc->setupcalled){ 198 ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 199 /* set default smoothers */ 200 KSP smoother; 201 PC subpc; 202 for (level=1; level<=fine_level; level++){ 203 if (size == 1){ 204 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 205 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 206 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 207 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 208 } else { 209 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 210 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 211 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 212 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 213 } 214 } 215 ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */ 216 } 217 218 if (!reuse){ 219 ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr); 220 pc_ml->gridctx = gridctx; 221 } 222 223 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 224 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 225 gridctx[fine_level].A = A; 226 227 level = fine_level - 1; 228 if (size == 1){ /* convert ML P, R and A into seqaij format */ 229 for (mllevel=1; mllevel<Nlevels; mllevel++){ 230 mlmat = &(ml_object->Pmat[mllevel]); 231 ierr = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr); 232 mlmat = &(ml_object->Rmat[mllevel-1]); 233 ierr = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr); 234 235 mlmat = &(ml_object->Amat[mllevel]); 236 if (reuse){ 237 /* ML matrix A changes sparse pattern although PETSc A doesn't, thus gridctx[level].A must be recreated! */ 238 ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr); 239 } 240 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 241 level--; 242 } 243 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 244 for (mllevel=1; mllevel<Nlevels; mllevel++){ 245 mlmat = &(ml_object->Pmat[mllevel]); 246 ierr = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr); 247 mlmat = &(ml_object->Rmat[mllevel-1]); 248 ierr = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr); 249 250 mlmat = &(ml_object->Amat[mllevel]); 251 if (reuse){ 252 ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr); 253 } 254 ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr); 255 level--; 256 } 257 } 258 259 /* create vectors and ksp at all levels */ 260 if (!reuse){ 261 for (level=0; level<fine_level; level++){ 262 level1 = level + 1; 263 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 264 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr); 265 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 266 ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 267 268 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 269 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 270 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 271 ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 272 273 ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 274 ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 275 ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 276 ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr); 277 278 if (level == 0){ 279 ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 280 } else { 281 ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 282 } 283 } 284 ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr); 285 } 286 287 /* create coarse level and the interpolation between the levels */ 288 for (level=0; level<fine_level; level++){ 289 level1 = level + 1; 290 ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr); 291 ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr); 292 if (level > 0){ 293 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 294 } 295 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 296 } 297 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 298 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 299 300 /* now call PCSetUp_MG() */ 301 /*-------------------------------*/ 302 ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr); 303 PetscFunctionReturn(0); 304 } 305 306 #undef __FUNCT__ 307 #define __FUNCT__ "PetscContainerDestroy_PC_ML" 308 PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr) 309 { 310 PetscErrorCode ierr; 311 PC_ML *pc_ml = (PC_ML*)ptr; 312 PetscInt level,fine_level=pc_ml->Nlevels-1; 313 314 PetscFunctionBegin; 315 ML_Aggregate_Destroy(&pc_ml->agg_object); 316 ML_Destroy(&pc_ml->ml_object); 317 318 if (pc_ml->PetscMLdata) { 319 ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr); 320 if (pc_ml->size > 1) {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);} 321 if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);} 322 if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);} 323 } 324 ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr); 325 326 for (level=0; level<fine_level; level++){ 327 if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);} 328 if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);} 329 if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);} 330 if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);} 331 if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);} 332 if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);} 333 } 334 ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr); 335 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 336 PetscFunctionReturn(0); 337 } 338 /* -------------------------------------------------------------------------- */ 339 /* 340 PCDestroy_ML - Destroys the private context for the ML preconditioner 341 that was created with PCCreate_ML(). 342 343 Input Parameter: 344 . pc - the preconditioner context 345 346 Application Interface Routine: PCDestroy() 347 */ 348 #undef __FUNCT__ 349 #define __FUNCT__ "PCDestroy_ML" 350 PetscErrorCode PCDestroy_ML(PC pc) 351 { 352 PetscErrorCode ierr; 353 PC_ML *pc_ml=PETSC_NULL; 354 PetscContainer container; 355 356 PetscFunctionBegin; 357 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 358 if (container) { 359 ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 360 pc->ops->destroy = pc_ml->PCDestroy; 361 } else { 362 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 363 } 364 /* detach pc and PC_ML and dereference container */ 365 ierr = PetscContainerDestroy(container);CHKERRQ(ierr); 366 ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr); 367 if (pc->ops->destroy) { 368 ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr); 369 } 370 PetscFunctionReturn(0); 371 } 372 373 #undef __FUNCT__ 374 #define __FUNCT__ "PCSetFromOptions_ML" 375 PetscErrorCode PCSetFromOptions_ML(PC pc) 376 { 377 PetscErrorCode ierr; 378 PetscInt indx,m,PrintLevel; 379 PetscTruth flg; 380 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 381 PC_ML *pc_ml=PETSC_NULL; 382 PetscContainer container; 383 PCMGType mgtype; 384 385 PetscFunctionBegin; 386 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 387 if (container) { 388 ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 389 } else { 390 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 391 } 392 393 /* inherited MG options */ 394 ierr = PetscOptionsHead("Multigrid options(inherited)");CHKERRQ(ierr); 395 ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr); 396 ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr); 397 ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr); 398 ierr = PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);CHKERRQ(ierr); 399 ierr = PetscOptionsTail();CHKERRQ(ierr); 400 401 /* ML options */ 402 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 403 /* set defaults */ 404 PrintLevel = 0; 405 indx = 0; 406 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr); 407 ML_Set_PrintLevel(PrintLevel); 408 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr); 409 ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr); 410 ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); /* ??? */ 411 pc_ml->CoarsenScheme = indx; 412 413 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr); 414 415 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr); 416 417 ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL); 418 419 ierr = PetscOptionsTail();CHKERRQ(ierr); 420 PetscFunctionReturn(0); 421 } 422 423 /* -------------------------------------------------------------------------- */ 424 /* 425 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 426 and sets this as the private data within the generic preconditioning 427 context, PC, that was created within PCCreate(). 428 429 Input Parameter: 430 . pc - the preconditioner context 431 432 Application Interface Routine: PCCreate() 433 */ 434 435 /*MC 436 PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide 437 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 438 operators are computed by ML, with the matrices coverted to PETSc matrices in aij format 439 and the restriction/interpolation operators wrapped as PETSc shell matrices. 440 441 Options Database Key: 442 Multigrid options(inherited) 443 + -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles) 444 . -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp) 445 . -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown) 446 - -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade 447 448 ML options: 449 + -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel) 450 . -pc_ml_maxNlevels <10>: Maximum number of levels (None) 451 . -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize) 452 . -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS 453 . -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor) 454 . -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold) 455 - -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm) 456 457 Level: intermediate 458 459 Concepts: multigrid 460 461 .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 462 PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), MPSetCycles(), PCMGSetNumberSmoothDown(), 463 PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(), 464 PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(), 465 PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR() 466 M*/ 467 468 EXTERN_C_BEGIN 469 #undef __FUNCT__ 470 #define __FUNCT__ "PCCreate_ML" 471 PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc) 472 { 473 PetscErrorCode ierr; 474 PC_ML *pc_ml; 475 PetscContainer container; 476 477 PetscFunctionBegin; 478 /* PCML is an inherited class of PCMG. Initialize pc as PCMG */ 479 ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */ 480 481 /* create a supporting struct and attach it to pc */ 482 ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr); 483 ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr); 484 ierr = PetscContainerSetPointer(container,pc_ml);CHKERRQ(ierr); 485 ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);CHKERRQ(ierr); 486 ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr); 487 488 pc_ml->ml_object = 0; 489 pc_ml->agg_object = 0; 490 pc_ml->gridctx = 0; 491 pc_ml->PetscMLdata = 0; 492 pc_ml->Nlevels = -1; 493 pc_ml->MaxNlevels = 10; 494 pc_ml->MaxCoarseSize = 1; 495 pc_ml->CoarsenScheme = 1; /* ??? */ 496 pc_ml->Threshold = 0.0; 497 pc_ml->DampingFactor = 4.0/3.0; 498 pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE; 499 pc_ml->size = 0; 500 501 pc_ml->PCSetUp = pc->ops->setup; 502 pc_ml->PCDestroy = pc->ops->destroy; 503 504 /* overwrite the pointers of PCMG by the functions of PCML */ 505 pc->ops->setfromoptions = PCSetFromOptions_ML; 506 pc->ops->setup = PCSetUp_ML; 507 pc->ops->destroy = PCDestroy_ML; 508 PetscFunctionReturn(0); 509 } 510 EXTERN_C_END 511 512 int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[], 513 int allocated_space, int columns[], double values[], int row_lengths[]) 514 { 515 PetscErrorCode ierr; 516 Mat Aloc; 517 Mat_SeqAIJ *a; 518 PetscInt m,i,j,k=0,row,*aj; 519 PetscScalar *aa; 520 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data); 521 522 Aloc = ml->Aloc; 523 a = (Mat_SeqAIJ*)Aloc->data; 524 ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr); 525 526 for (i = 0; i<N_requested_rows; i++) { 527 row = requested_rows[i]; 528 row_lengths[i] = a->ilen[row]; 529 if (allocated_space < k+row_lengths[i]) return(0); 530 if ( (row >= 0) || (row <= (m-1)) ) { 531 aj = a->j + a->i[row]; 532 aa = a->a + a->i[row]; 533 for (j=0; j<row_lengths[i]; j++){ 534 columns[k] = aj[j]; 535 values[k++] = aa[j]; 536 } 537 } 538 } 539 return(1); 540 } 541 542 int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]) 543 { 544 PetscErrorCode ierr; 545 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data); 546 Mat A=ml->A, Aloc=ml->Aloc; 547 PetscMPIInt size; 548 PetscScalar *pwork=ml->pwork; 549 PetscInt i; 550 551 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 552 if (size == 1){ 553 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 554 } else { 555 for (i=0; i<in_length; i++) pwork[i] = p[i]; 556 PetscML_comm(pwork,ml); 557 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 558 } 559 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 560 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 561 ierr = VecResetArray(ml->x);CHKERRQ(ierr); 562 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 563 return 0; 564 } 565 566 int PetscML_comm(double p[],void *ML_data) 567 { 568 PetscErrorCode ierr; 569 FineGridCtx *ml=(FineGridCtx*)ML_data; 570 Mat A=ml->A; 571 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 572 PetscMPIInt size; 573 PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n; 574 PetscScalar *array; 575 576 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 577 if (size == 1) return 0; 578 579 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 580 ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 581 ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 582 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 583 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 584 for (i=in_length; i<out_length; i++){ 585 p[i] = array[i-in_length]; 586 } 587 ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr); 588 return 0; 589 } 590 #undef __FUNCT__ 591 #define __FUNCT__ "MatMult_ML" 592 PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 593 { 594 PetscErrorCode ierr; 595 Mat_MLShell *shell; 596 PetscScalar *xarray,*yarray; 597 PetscInt x_length,y_length; 598 599 PetscFunctionBegin; 600 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 601 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 602 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 603 x_length = shell->mlmat->invec_leng; 604 y_length = shell->mlmat->outvec_leng; 605 606 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 607 608 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 609 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 610 PetscFunctionReturn(0); 611 } 612 /* MatMultAdd_ML - Compute y = w + A*x */ 613 #undef __FUNCT__ 614 #define __FUNCT__ "MatMultAdd_ML" 615 PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y) 616 { 617 PetscErrorCode ierr; 618 Mat_MLShell *shell; 619 PetscScalar *xarray,*yarray; 620 PetscInt x_length,y_length; 621 622 PetscFunctionBegin; 623 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 624 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 625 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 626 627 x_length = shell->mlmat->invec_leng; 628 y_length = shell->mlmat->outvec_leng; 629 630 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 631 632 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 633 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 634 ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr); 635 636 PetscFunctionReturn(0); 637 } 638 639 /* newtype is ignored because "ml" is not listed under Petsc MatType yet */ 640 #undef __FUNCT__ 641 #define __FUNCT__ "MatConvert_MPIAIJ_ML" 642 PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc) 643 { 644 PetscErrorCode ierr; 645 Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data; 646 Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data; 647 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j; 648 PetscScalar *aa=a->a,*ba=b->a,*ca; 649 PetscInt am=A->rmap->n,an=A->cmap->n,i,j,k; 650 PetscInt *ci,*cj,ncols; 651 652 PetscFunctionBegin; 653 if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an); 654 655 if (scall == MAT_INITIAL_MATRIX){ 656 ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 657 ci[0] = 0; 658 for (i=0; i<am; i++){ 659 ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]); 660 } 661 ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr); 662 ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr); 663 664 k = 0; 665 for (i=0; i<am; i++){ 666 /* diagonal portion of A */ 667 ncols = ai[i+1] - ai[i]; 668 for (j=0; j<ncols; j++) { 669 cj[k] = *aj++; 670 ca[k++] = *aa++; 671 } 672 /* off-diagonal portion of A */ 673 ncols = bi[i+1] - bi[i]; 674 for (j=0; j<ncols; j++) { 675 cj[k] = an + (*bj); bj++; 676 ca[k++] = *ba++; 677 } 678 } 679 if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]); 680 681 /* put together the new matrix */ 682 an = mpimat->A->cmap->n+mpimat->B->cmap->n; 683 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr); 684 685 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 686 /* Since these are PETSc arrays, change flags to free them as necessary. */ 687 mat = (Mat_SeqAIJ*)(*Aloc)->data; 688 mat->free_a = PETSC_TRUE; 689 mat->free_ij = PETSC_TRUE; 690 691 mat->nonew = 0; 692 } else if (scall == MAT_REUSE_MATRIX){ 693 mat=(Mat_SeqAIJ*)(*Aloc)->data; 694 ci = mat->i; cj = mat->j; ca = mat->a; 695 for (i=0; i<am; i++) { 696 /* diagonal portion of A */ 697 ncols = ai[i+1] - ai[i]; 698 for (j=0; j<ncols; j++) *ca++ = *aa++; 699 /* off-diagonal portion of A */ 700 ncols = bi[i+1] - bi[i]; 701 for (j=0; j<ncols; j++) *ca++ = *ba++; 702 } 703 } else { 704 SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall); 705 } 706 PetscFunctionReturn(0); 707 } 708 extern PetscErrorCode MatDestroy_Shell(Mat); 709 #undef __FUNCT__ 710 #define __FUNCT__ "MatDestroy_ML" 711 PetscErrorCode MatDestroy_ML(Mat A) 712 { 713 PetscErrorCode ierr; 714 Mat_MLShell *shell; 715 716 PetscFunctionBegin; 717 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 718 ierr = VecDestroy(shell->y);CHKERRQ(ierr); 719 ierr = PetscFree(shell);CHKERRQ(ierr); 720 ierr = MatDestroy_Shell(A);CHKERRQ(ierr); 721 ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr); 722 PetscFunctionReturn(0); 723 } 724 725 #undef __FUNCT__ 726 #define __FUNCT__ "MatWrapML_SeqAIJ" 727 PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 728 { 729 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 730 PetscErrorCode ierr; 731 PetscInt m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max; 732 PetscInt *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k; 733 PetscScalar *ml_vals=matdata->values,*aa; 734 735 PetscFunctionBegin; 736 if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 737 if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */ 738 if (reuse){ 739 Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data; 740 aij->i = ml_rowptr; 741 aij->j = ml_cols; 742 aij->a = ml_vals; 743 } else { 744 /* sort ml_cols and ml_vals */ 745 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 746 for (i=0; i<m; i++) { 747 nnz[i] = ml_rowptr[i+1] - ml_rowptr[i]; 748 } 749 aj = ml_cols; aa = ml_vals; 750 for (i=0; i<m; i++){ 751 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 752 aj += nnz[i]; aa += nnz[i]; 753 } 754 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 755 ierr = PetscFree(nnz);CHKERRQ(ierr); 756 } 757 PetscFunctionReturn(0); 758 } 759 760 /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */ 761 ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr); 762 ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 763 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 764 765 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 766 nz_max = 1; 767 for (i=0; i<m; i++) { 768 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 769 if (nnz[i] > nz_max) nz_max += nnz[i]; 770 } 771 772 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 773 ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr); 774 aa = (PetscScalar*)(aj + nz_max); 775 776 for (i=0; i<m; i++){ 777 k = 0; 778 /* diagonal entry */ 779 aj[k] = i; aa[k++] = ml_vals[i]; 780 /* off diagonal entries */ 781 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 782 aj[k] = ml_cols[j]; aa[k++] = ml_vals[j]; 783 } 784 /* sort aj and aa */ 785 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 786 ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr); 787 } 788 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 789 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 790 791 ierr = PetscFree(aj);CHKERRQ(ierr); 792 ierr = PetscFree(nnz);CHKERRQ(ierr); 793 PetscFunctionReturn(0); 794 } 795 796 #undef __FUNCT__ 797 #define __FUNCT__ "MatWrapML_SHELL" 798 PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 799 { 800 PetscErrorCode ierr; 801 PetscInt m,n; 802 ML_Comm *MLcomm; 803 Mat_MLShell *shellctx; 804 805 PetscFunctionBegin; 806 m = mlmat->outvec_leng; 807 n = mlmat->invec_leng; 808 if (!m || !n){ 809 newmat = PETSC_NULL; 810 PetscFunctionReturn(0); 811 } 812 813 if (reuse){ 814 ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr); 815 shellctx->mlmat = mlmat; 816 PetscFunctionReturn(0); 817 } 818 819 MLcomm = mlmat->comm; 820 ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr); 821 ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr); 822 ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr); 823 ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr); 824 shellctx->A = *newmat; 825 shellctx->mlmat = mlmat; 826 ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr); 827 ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr); 828 ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr); 829 (*newmat)->ops->destroy = MatDestroy_ML; 830 PetscFunctionReturn(0); 831 } 832 833 #undef __FUNCT__ 834 #define __FUNCT__ "MatWrapML_MPIAIJ" 835 PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat) 836 { 837 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 838 PetscInt *ml_cols=matdata->columns,*aj; 839 PetscScalar *ml_vals=matdata->values,*aa; 840 PetscErrorCode ierr; 841 PetscInt i,j,k,*gordering; 842 PetscInt m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row; 843 Mat A; 844 845 PetscFunctionBegin; 846 if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 847 n = mlmat->invec_leng; 848 if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n); 849 850 ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr); 851 ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 852 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 853 ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr); 854 855 nz_max = 0; 856 for (i=0; i<m; i++){ 857 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 858 if (nz_max < nnz[i]) nz_max = nnz[i]; 859 nnzA[i] = 1; /* diag */ 860 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 861 if (ml_cols[j] < m) nnzA[i]++; 862 } 863 nnzB[i] = nnz[i] - nnzA[i]; 864 } 865 ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr); 866 867 /* insert mat values -- remap row and column indices */ 868 nz_max++; 869 ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr); 870 aa = (PetscScalar*)(aj + nz_max); 871 /* create global row numbering for a ML_Operator */ 872 ML_build_global_numbering(mlmat,&gordering,"rows"); 873 for (i=0; i<m; i++){ 874 row = gordering[i]; 875 k = 0; 876 /* diagonal entry */ 877 aj[k] = row; aa[k++] = ml_vals[i]; 878 /* off diagonal entries */ 879 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 880 aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j]; 881 } 882 ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr); 883 } 884 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 885 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 886 *newmat = A; 887 888 ierr = PetscFree3(nnzA,nnzB,nnz); 889 ierr = PetscFree(aj);CHKERRQ(ierr); 890 PetscFunctionReturn(0); 891 } 892