#define PETSCKSP_DLL

/*
   Provides an interface to the ML 4.0 smoothed aggregation preconditioner.
*/
#include "private/pcimpl.h"   /*I "petscpc.h" I*/
#include "src/ksp/pc/impls/mg/mgimpl.h"   /*I "petscmg.h" I*/
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
#include "ml_config.h"
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;  /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  GridCtx        *gridctx;
  FineGridCtx    *PetscMLdata;
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal      Threshold,DampingFactor;
  PetscTruth     SpectralNormScheme_Anorm;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double x[],void *ML_data);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode PetscContainerDestroy_PC_ML(void*);
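/*
   Every routine in this file recovers the PC_ML context the same way: the
   context is stored in a PetscContainer composed with the PC under the key
   "PC_ML" (attached in PCCreate_ML(), freed by PetscContainerDestroy_PC_ML()).
   A minimal sketch of the query side of that pattern:

     PC_ML          *pc_ml;
     PetscContainer container;
     ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject*)&container);CHKERRQ(ierr);
     if (!container) SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
     ierr = PetscContainerGetPointer(container,(void**)&pc_ml);CHKERRQ(ierr);
*/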
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  MatReuse       reuse = MAT_INITIAL_MATRIX;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else { /* different nonzero pattern: rebuild PC_ML from scratch */
      PC_ML          *pc_ml_new = PETSC_NULL;
      PetscContainer container_new;
      ierr = PetscNew(PC_ML,&pc_ml_new);CHKERRQ(ierr);
      ierr = PetscLogObjectMemory(pc,sizeof(PC_ML));CHKERRQ(ierr);
      ierr = PetscContainerCreate(PETSC_COMM_SELF,&container_new);CHKERRQ(ierr);
      ierr = PetscContainerSetPointer(container_new,pc_ml_new);CHKERRQ(ierr);
      ierr = PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
      ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);CHKERRQ(ierr);

      ierr  = PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));CHKERRQ(ierr);
      ierr  = PetscContainerDestroy(container);CHKERRQ(ierr);
      pc_ml = pc_ml_new;
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A    = pc->pmat;
  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  if (size > 1){
    if (reuse) Aloc = PetscMLdata->Aloc;
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);CHKERRQ(ierr);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse){
    ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
    pc_ml->PetscMLdata = PetscMLdata;
    ierr = PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid:
     ML requires input of the fine-grid matrix; it determines the number of levels. */
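  /*
     For orientation (a sketch of the callback contract as this file uses it,
     not a copy of ML's headers): after the ML_Set_Amatrix_* registrations
     below, ML drives the fine-grid matrix exclusively through

       PetscML_getrow(ML_data,nrows,rows[],space,cols[],vals[],lens[])
         - fill cols/vals with the entries of the requested local rows and set
           lens[i]; return 0 when 'space' is too small so ML can retry;
       PetscML_matvec(ML_data,in_len,p[],out_len,ap[])  - compute ap = Aloc*p;
       PetscML_comm(p[],ML_data)                        - update ghost values of p.

     The implementations appear later in this file, after PCCreate_ML().
  */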
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %d must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  if (!pc->setupcalled){
    ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
    ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
    pc_ml->gridctx = gridctx;
  }
  fine_level = Nlevels - 1;

  /* wrap ML matrices by PETSc shell matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* the ML matrix A changes its sparsity pattern even though the PETSc A does not,
           so gridctx[level].A must be recreated */
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

      ierr = VecCreate(gridctx[level1].A->comm,&gridctx[level1].r);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

      if (level == 0){
        ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
      } else {
        ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
      }
    }
    ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
  }

  /* set the interpolation/restriction between the levels and the operators at each level */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* now call PCSetUp_MG() */
  /*-----------------------*/
  ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
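/*
   A sketch of the level numbering produced above for, say, Nlevels = 3
   (ML counts up from the fine grid, PCMG counts up from the coarse grid):

     ML level:     0 (fine)    1    2 (coarse)
     PETSc level:  2 (fine)    1    0 (coarse)

   so gridctx[level].A wraps ml_object->Amat[Nlevels-1-level], and the KSP on
   PETSc level 0 is the coarse solve obtained with PCMGGetCoarseSolve().
*/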
#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_PC_ML"
PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
  if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
  if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);

  /* detach PC_ML from pc and dereference the container */
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
  ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
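/*
   Example (a sketch; the executable and process count are hypothetical, the
   option names are the ones registered below and inherited from PCMG):

     mpiexec -n 4 ./ex -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS \
                       -pc_ml_DampingFactor 1.33333 -pc_mg_type multiplicative
*/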
#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,m,PrintLevel;
  PetscTruth     flg;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  PCMGType       mgtype;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  ierr = PetscOptionsHead("Multigrid options (inherited)");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);

  /* ML options */
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  /* set defaults */
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx; /* note: indx was initialized to 0 above, so running setfromoptions
                                  without this option selects Uncoupled even though PCCreate_ML()
                                  defaults to Coupled */

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you
     provide the fine-grid discretization matrix. The coarser-grid matrices and the
     restriction/interpolation operators are computed by ML, with the matrices converted
     to PETSc matrices in aij format and the restriction/interpolation operators wrapped
     as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kaskade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

   Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
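/*
   Typical usage (a minimal sketch, not a complete program; 'A', 'b' and 'x'
   are assumed to be an assembled AIJ matrix and matching vectors):

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);       * or run with -pc_type ml *
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/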
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PetscContainer container;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(pc,sizeof(PC_ML));CHKERRQ(ierr);
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1; /* Coupled; see the note in PCSetFromOptions_ML() */
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);

  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if (row >= 0 && row <= (m-1)) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar    *array;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = VecScatterEnd(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  return 0;
}
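/*
   The callbacks above avoid copies by wrapping ML's raw arrays in the
   pre-created work vectors. A minimal sketch of that zero-copy pattern
   (v is a Vec of local length n, buf a double array of the same length):

     ierr = VecPlaceArray(v,buf);CHKERRQ(ierr);   * v now reads/writes buf *
     ierr = MatMult(A,v,w);CHKERRQ(ierr);         * ... use v as usual ... *
     ierr = VecResetArray(v);CHKERRQ(ierr);       * restore v's own array *
*/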
#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* newtype is ignored because "ml" is not listed under PETSc MatType yet */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap.n+mpimat->B->cmap.n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc does not free the user's arrays;
       since these are PETSc arrays, change the flags so they are freed as necessary */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format; pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = matdata->rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format; copy its data into a SeqAIJ matrix */
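  /*
     For reference (a sketch of the MSR layout as consumed below, following the
     Aztec/ML convention): for a local matrix with m rows,

       ml_vals[i], 0 <= i < m    holds the diagonal entry of row i;
       ml_cols[i], 0 <= i <= m   points to the start of row i's off-diagonal
                                 entries, so for ml_cols[i] <= j < ml_cols[i+1]
                                 ml_cols[j]/ml_vals[j] give the column index and
                                 value of each off-diagonal entry of row i.

     Hence row i holds ml_cols[i+1]-ml_cols[i]+1 nonzeros including the diagonal.
  */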
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = MatSetOption(*newmat,MAT_COLUMNS_SORTED);CHKERRQ(ierr);

  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa   = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
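/*
   MatWrapML_MPIAIJ below copies a parallel ML operator into a true MPIAIJ
   matrix. ML stores column indices in a process-local numbering (owned
   columns first, then ghosts), while MatSetValues() expects global indices,
   so the routine asks ML for a local-to-global map and remaps every index:

     ML_build_global_numbering(mlmat,&gordering,"rows");
     global_row = gordering[local_row];
     global_col = gordering[local_col];
*/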
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diagonal entry */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa   = (PetscScalar*)(aj + nz_max);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree(aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}