#define PETSCKSP_DLL

/*
   Provides an interface to the ML 5.0 smoothed aggregation preconditioner.
*/
#include "private/pcimpl.h"   /*I "petscpc.h" I*/
#include "src/ksp/pc/impls/mg/mgimpl.h"   /*I "petscmg.h" I*/
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
#include "ml_config.h"
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;  /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal    Threshold,DampingFactor;
  PetscTruth   SpectralNormScheme_Anorm;
  PetscMPIInt  size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double x[],void *ML_data);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode PetscContainerDestroy_PC_ML(void*);

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  MatReuse       reuse = MAT_INITIAL_MATRIX;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN) {
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      PC_ML          *pc_ml_new = PETSC_NULL;
      PetscContainer container_new;
      ierr = PetscNewLog(pc,PC_ML,&pc_ml_new);CHKERRQ(ierr);
      ierr = PetscContainerCreate(PETSC_COMM_SELF,&container_new);CHKERRQ(ierr);
      ierr = PetscContainerSetPointer(container_new,pc_ml_new);CHKERRQ(ierr);
      ierr = PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
      ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);CHKERRQ(ierr);

      ierr = PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));CHKERRQ(ierr);
      ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
      pc_ml = pc_ml_new;
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A    = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  if (size > 1) {
    if (reuse) Aloc = PetscMLdata->Aloc;
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);CHKERRQ(ierr);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse) {
    ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
    pc_ml->PetscMLdata = PetscMLdata;
    ierr = PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create ML discretization matrix at the fine grid */
  /* ML requires the fine-grid matrix as input; from it ML determines the number of levels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm) {
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels <= 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %D must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  if (!pc->setupcalled) {
    KSP smoother;
    PC  subpc;
    ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
    /* set default smoothers: Richardson + SOR on every level above the coarsest */
    for (level=1; level<=fine_level; level++) {
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    }
    ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse) {
    ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
    pc_ml->gridctx = gridctx;
  }

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse) {
        /* The ML matrix A changes its sparsity pattern even when the PETSc A
           does not, so gridctx[level].A must be recreated. */
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse) {
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse) {
    for (level=0; level<fine_level; level++) {
      level1 = level + 1;
      ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

      ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

      ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

      if (level == 0) {
        ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
      } else {
        ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
      }
    }
    ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
  }

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* now call PCSetUp_MG() */
  /*-----------------------*/
  ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_PC_ML"
PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    if (pc_ml->size > 1) {
      ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    }
    if (pc_ml->PetscMLdata->x) {ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y) {ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++) {
    if (pc_ml->gridctx[level].A) {ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P) {ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R) {ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x) {ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b) {ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  /* detach pc and PC_ML and dereference container */
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
  if (pc->ops->destroy) {
    ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,m,PrintLevel;
  PetscTruth     flg;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  PCMGType       mgtype;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  ierr = PetscOptionsHead("Multigrid options (inherited)");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);

  /* ML options */
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  /* set defaults */
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); /* ??? */
  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
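/*
   A minimal usage sketch, assuming a linear system with an already assembled
   Mat A and Vecs b,x (names here are illustrative, not from this file):

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

   or select the preconditioner entirely at runtime, e.g.
     -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS
*/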
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PetscContainer container;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1; /* ??? */
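  /* CoarsenScheme indices follow the scheme[] list in PCSetFromOptions_ML():
     1 = Coupled, 2 = MIS, 3 = METIS; index 0 (Uncoupled) leaves ML's default
     scheme in place, since the switch in PCSetUp_ML() has no case 0 */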
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);

  for (i=0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++) {
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar    *array;

  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++) {
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  return 0;
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
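  /* apply the wrapped ML operator directly on the raw array data; the local
     input/output lengths are those recorded in the ML operator itself */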
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* newtype is ignored because "ml" is not listed under PETSc MatType yet */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) {
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A; shift local column indices past the diagonal block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap.n + mpimat->B->cmap.n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays.
       Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode ierr;
  PetscInt       m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt       *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar    *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into the SeqAIJ matrix */
    if (reuse) {
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++) {
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
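  /* In ML's MSR storage, ml_vals[i] holds the diagonal entry of row i (for i < m), and the
     off-diagonal entries of row i sit at indices ml_cols[i]..ml_cols[i+1]-1 of ml_cols/ml_vals */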
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++) {
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++) {
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n) {
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse) {
    ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt       *ml_cols=matdata->columns,*aj;
  PetscScalar    *ml_vals=matdata->values,*aa;
  PetscErrorCode ierr;
  PetscInt       i,j,k,*gordering;
  PetscInt       m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);
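  /* count the entries of each row that land in the local diagonal block (nnzA)
     versus the off-processor block (nnzB); every row owns one MSR diagonal entry */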
  nz_max = 0;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++) {
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa = (PetscScalar*)(aj + nz_max);
  /* create global row numbering for an ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++) {
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++) {
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree(aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}