/*
   Provides an interface to the ML smoothed Aggregation
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                    Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>   /*I "petscpcmg.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>

#include <math.h>
EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END

/* How the near-null space passed to ML's aggregation is chosen:
   AUTO picks USER if a MatNullSpace is attached, else BLOCK for bs>1, else SCALAR */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors: solution, rhs, residual */
  Mat A,P,R;                  /* operator, prolongation and restriction for this level */
  KSP ksp;                    /* smoother (or coarse solver) for this level */
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;              /* Petsc matrix in aij format */
  Mat         Aloc;           /* local portion of A to be used by ML */
  Vec         x,y;            /* work vectors wrapped around ML's raw arrays */
  ML_Operator *mlmat;         /* ML's view of the fine-grid operator */
  PetscScalar *pwork;         /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;              /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;         /* ML matrix associated with A */
  Vec         y, work;        /* work is created lazily by MatMultAdd_ML when y == w */
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;    /* ML hierarchy handle */
  ML_Aggregate *agg_object;   /* ML aggregation options/state */
  GridCtx      *gridctx;      /* per-level PETSc objects; index 0 is the coarsest for PETSc */
  FineGridCtx  *PetscMLdata;  /* glue between the PETSc fine-grid matrix and ML callbacks */
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal
Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold; 57 PetscBool SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux; 58 PetscBool reuse_interpolation; 59 PCMLNullSpaceType nulltype; 60 PetscMPIInt size; /* size of communicator for pc->pmat */ 61 PetscInt dim; /* data from PCSetCoordinates(_ML) */ 62 PetscInt nloc; 63 PetscReal *coords; /* ML has a grid object for each level: the finest grid will point into coords */ 64 } PC_ML; 65 66 #undef __FUNCT__ 67 #define __FUNCT__ "PetscML_getrow" 68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[]) 69 { 70 PetscErrorCode ierr; 71 PetscInt m,i,j,k=0,row,*aj; 72 PetscScalar *aa; 73 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data); 74 Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data; 75 76 ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0); 77 for (i = 0; i<N_requested_rows; i++) { 78 row = requested_rows[i]; 79 row_lengths[i] = a->ilen[row]; 80 if (allocated_space < k+row_lengths[i]) return(0); 81 if ( (row >= 0) || (row <= (m-1)) ) { 82 aj = a->j + a->i[row]; 83 aa = a->a + a->i[row]; 84 for (j=0; j<row_lengths[i]; j++){ 85 columns[k] = aj[j]; 86 values[k++] = aa[j]; 87 } 88 } 89 } 90 return(1); 91 } 92 93 #undef __FUNCT__ 94 #define __FUNCT__ "PetscML_comm" 95 static PetscErrorCode PetscML_comm(double p[],void *ML_data) 96 { 97 PetscErrorCode ierr; 98 FineGridCtx *ml=(FineGridCtx*)ML_data; 99 Mat A=ml->A; 100 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 101 PetscMPIInt size; 102 PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n; 103 PetscScalar *array; 104 105 PetscFunctionBegin; 106 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 107 if (size == 1) return 0; 108 109 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 110 ierr = 
VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 111 ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 112 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 113 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 114 for (i=in_length; i<out_length; i++){ 115 p[i] = array[i-in_length]; 116 } 117 ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr); 118 PetscFunctionReturn(0); 119 } 120 121 #undef __FUNCT__ 122 #define __FUNCT__ "PetscML_matvec" 123 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]) 124 { 125 PetscErrorCode ierr; 126 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data); 127 Mat A=ml->A, Aloc=ml->Aloc; 128 PetscMPIInt size; 129 PetscScalar *pwork=ml->pwork; 130 PetscInt i; 131 132 PetscFunctionBegin; 133 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 134 if (size == 1){ 135 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 136 } else { 137 for (i=0; i<in_length; i++) pwork[i] = p[i]; 138 PetscML_comm(pwork,ml); 139 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 140 } 141 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 142 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 143 ierr = VecResetArray(ml->x);CHKERRQ(ierr); 144 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 145 PetscFunctionReturn(0); 146 } 147 148 #undef __FUNCT__ 149 #define __FUNCT__ "MatMult_ML" 150 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 151 { 152 PetscErrorCode ierr; 153 Mat_MLShell *shell; 154 PetscScalar *xarray,*yarray; 155 PetscInt x_length,y_length; 156 157 PetscFunctionBegin; 158 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 159 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 160 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 161 x_length = shell->mlmat->invec_leng; 162 y_length = shell->mlmat->outvec_leng; 163 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 164 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 165 
ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 166 PetscFunctionReturn(0); 167 } 168 169 #undef __FUNCT__ 170 #define __FUNCT__ "MatMultAdd_ML" 171 /* Computes y = w + A * x 172 It is possible that w == y, but not x == y 173 */ 174 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y) 175 { 176 Mat_MLShell *shell; 177 PetscScalar *xarray,*yarray; 178 PetscInt x_length,y_length; 179 PetscErrorCode ierr; 180 181 PetscFunctionBegin; 182 ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr); 183 if (y == w) { 184 if (!shell->work) { 185 ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr); 186 } 187 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 188 ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr); 189 x_length = shell->mlmat->invec_leng; 190 y_length = shell->mlmat->outvec_leng; 191 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 192 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 193 ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr); 194 ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr); 195 } else { 196 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 197 ierr = VecGetArray(y, &yarray);CHKERRQ(ierr); 198 x_length = shell->mlmat->invec_leng; 199 y_length = shell->mlmat->outvec_leng; 200 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 201 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 202 ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr); 203 ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr); 204 } 205 PetscFunctionReturn(0); 206 } 207 208 /* newtype is ignored since only handles one case */ 209 #undef __FUNCT__ 210 #define __FUNCT__ "MatConvert_MPIAIJ_ML" 211 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc) 212 { 213 PetscErrorCode ierr; 214 Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data; 215 Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data; 216 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j; 217 PetscScalar *aa=a->a,*ba=b->a,*ca; 218 
PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    /* build the merged row pointer: each local row holds its diagonal-block
       entries followed by its off-diagonal-block entries */
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: columns are shifted past the local block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary.
*/
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    /* nonzero pattern is unchanged: overwrite values in place, same ordering
       (diagonal block then off-diagonal block per row) as the initial build */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
/* Destroy routine for the shell matrices created by MatWrapML_SHELL: frees the
   Mat_MLShell context (and its vectors) before delegating to MatDestroy_Shell. */
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
/* Wrap an ML operator as a SEQAIJ matrix.  Rectangular operators (ML P and R,
   stored in CSR) are wrapped by aliasing ML's arrays directly; square
   operators are copied row by row through ML's getrow interface. */
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format.
Pass array pointers into SeqAIJ matrix */ 307 if (reuse){ 308 Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data; 309 aij->i = ml_rowptr; 310 aij->j = ml_cols; 311 aij->a = ml_vals; 312 } else { 313 /* sort ml_cols and ml_vals */ 314 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 315 for (i=0; i<m; i++) { 316 nnz[i] = ml_rowptr[i+1] - ml_rowptr[i]; 317 } 318 aj = ml_cols; aa = ml_vals; 319 for (i=0; i<m; i++){ 320 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 321 aj += nnz[i]; aa += nnz[i]; 322 } 323 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 324 ierr = PetscFree(nnz);CHKERRQ(ierr); 325 } 326 PetscFunctionReturn(0); 327 } 328 329 nz_max = PetscMax(1,mlmat->max_nz_per_row); 330 ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr); 331 if (!reuse) { 332 ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr); 333 ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 334 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 335 /* keep track of block size for A matrices */ 336 ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr); 337 338 ierr = PetscMalloc(m*sizeof(PetscInt),&nnz);CHKERRQ(ierr); 339 for (i=0; i<m; i++){ 340 ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]); 341 } 342 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 343 } 344 for (i=0; i<m; i++) { 345 PetscInt ncols; 346 347 ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols); 348 ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 349 } 350 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 351 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 352 353 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 354 ierr = PetscFree(nnz);CHKERRQ(ierr); 355 PetscFunctionReturn(0); 356 } 357 358 #undef __FUNCT__ 359 #define __FUNCT__ "MatWrapML_SHELL" 360 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat 
*newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse){
    /* shell matrix already exists: just swap in the new ML operator */
    ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = PETSC_NULL;  /* created lazily by MatMultAdd_ML if needed */
  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  /* install custom destroy so the Mat_MLShell context is freed with the matrix */
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
/* Copy a square ML operator into a parallel MPIAIJ matrix via ML's getrow
   interface, translating ML's local column indices to global numbering. */
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;      /* ML-provided local-to-global row map */
  PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  if (reuse) {
    A = *newmat;
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
416 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 417 /* keep track of block size for A matrices */ 418 ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr); 419 ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr); 420 421 for (i=0; i<m; i++){ 422 ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]); 423 nnzA[i] = 0; 424 for (j=0; j<nnz[i]; j++){ 425 if (aj[j] < m) nnzA[i]++; 426 } 427 nnzB[i] = nnz[i] - nnzA[i]; 428 } 429 ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr); 430 ierr = PetscFree3(nnzA,nnzB,nnz); 431 } 432 /* create global row numbering for a ML_Operator */ 433 ML_build_global_numbering(mlmat,&gordering,"rows"); 434 for (i=0; i<m; i++) { 435 PetscInt ncols; 436 row = gordering[i]; 437 438 ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols); 439 for (j = 0; j < ncols; j++) { 440 aj[j] = gordering[aj[j]]; 441 } 442 ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 443 } 444 ML_free(gordering); 445 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 446 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 447 *newmat = A; 448 449 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 450 PetscFunctionReturn(0); 451 } 452 453 /* -------------------------------------------------------------------------- */ 454 /* 455 PCSetCoordinates_ML 456 457 Input Parameter: 458 . 
pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
/* Stores node coordinates in the PC_ML context, transposed to column-major
   (all x's, then all y's, ...), which is the layout ML expects.
   ndm is the spatial dimension, a_nloc the number of local nodes (matrix
   blocks), coords the caller's node-interleaved array of ndm*a_nloc reals. */
PETSC_EXTERN_C PetscErrorCode PCSetCoordinates_ML( PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords )
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific( Amat, MAT_CLASSID, 1 );
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ( ierr );

  ierr = MatGetOwnershipRange( Amat, &my0, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;  /* number of local blocks (nodes) in the matrix */

  if(nloc!=a_nloc)SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
  if((Iend-my0)%bs!=0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* create data - syntactic sugar that should be refactored at some point */
  /* (re)allocate only when the size changed; PetscFree(NULL) is a no-op */
  if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
    ierr = PetscFree( pc_ml->coords ); CHKERRQ(ierr);
    ierr = PetscMalloc( (arrsz)*sizeof(PetscReal), &pc_ml->coords ); CHKERRQ(ierr);
  }
  for(kk=0;kk<arrsz;kk++)pc_ml->coords[kk] = -999.;  /* sentinel fill; overwritten below */
  /* copy data in - column oriented */
  for( kk = 0 ; kk < nloc ; kk++ ){
    for( ii = 0 ; ii < ndm ; ii++ ) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }

  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/* Destroys the ML hierarchy and all per-level PETSc objects held in PC_ML so
   the preconditioner can be set up again from scratch. */
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

PetscFunctionBegin;
  if (dim) {  /* coordinates were supplied via PCSetCoordinates_ML */
    ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats *) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++){
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object);
  }
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    /* note: the fine-level A (level == fine_level) is the user's pc->pmat and
       is not destroyed here; r lives on levels 1..fine_level, hence level+1 */
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
      by setting data structures and options.

   Input Parameter:
.
pc - the preconditioner context 560 561 Application Interface Routine: PCSetUp() 562 563 Notes: 564 The interface routine PCSetUp() is not usually called directly by 565 the user, but instead is called by PCApply() if necessary. 566 */ 567 extern PetscErrorCode PCSetFromOptions_MG(PC); 568 extern PetscErrorCode PCReset_MG(PC); 569 570 #undef __FUNCT__ 571 #define __FUNCT__ "PCSetUp_ML" 572 PetscErrorCode PCSetUp_ML(PC pc) 573 { 574 PetscErrorCode ierr; 575 PetscMPIInt size; 576 FineGridCtx *PetscMLdata; 577 ML *ml_object; 578 ML_Aggregate *agg_object; 579 ML_Operator *mlmat; 580 PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs; 581 Mat A,Aloc; 582 GridCtx *gridctx; 583 PC_MG *mg = (PC_MG*)pc->data; 584 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 585 PetscBool isSeq, isMPI; 586 KSP smoother; 587 PC subpc; 588 PetscInt mesh_level, old_mesh_level; 589 590 PetscFunctionBegin; 591 A = pc->pmat; 592 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 593 594 if (pc->setupcalled) { 595 if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) { 596 /* 597 Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for 598 multiple solves in which the matrix is not changing too quickly. 599 */ 600 ml_object = pc_ml->ml_object; 601 gridctx = pc_ml->gridctx; 602 Nlevels = pc_ml->Nlevels; 603 fine_level = Nlevels - 1; 604 gridctx[fine_level].A = A; 605 606 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 607 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 608 if (isMPI){ 609 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 610 } else if (isSeq) { 611 Aloc = A; 612 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 613 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. 
ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 614 615 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 616 PetscMLdata = pc_ml->PetscMLdata; 617 ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr); 618 PetscMLdata->A = A; 619 PetscMLdata->Aloc = Aloc; 620 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 621 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 622 623 mesh_level = ml_object->ML_finest_level; 624 while (ml_object->SingleLevel[mesh_level].Rmat->to) { 625 old_mesh_level = mesh_level; 626 mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum; 627 628 /* clean and regenerate A */ 629 mlmat = &(ml_object->Amat[mesh_level]); 630 ML_Operator_Clean(mlmat); 631 ML_Operator_Init(mlmat,ml_object->comm); 632 ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level); 633 } 634 635 level = fine_level - 1; 636 if (size == 1) { /* convert ML P, R and A into seqaij format */ 637 for (mllevel=1; mllevel<Nlevels; mllevel++){ 638 mlmat = &(ml_object->Amat[mllevel]); 639 ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 640 level--; 641 } 642 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 643 for (mllevel=1; mllevel<Nlevels; mllevel++){ 644 mlmat = &(ml_object->Amat[mllevel]); 645 ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 646 level--; 647 } 648 } 649 650 for (level=0; level<fine_level; level++) { 651 if (level > 0){ 652 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 653 } 654 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 655 } 656 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 657 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 658 659 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 660 PetscFunctionReturn(0); 661 } else { 662 
/* since ML can change the size of vectors/matrices at any level we must destroy everything */ 663 ierr = PCReset_ML(pc);CHKERRQ(ierr); 664 ierr = PCReset_MG(pc);CHKERRQ(ierr); 665 } 666 } 667 668 /* setup special features of PCML */ 669 /*--------------------------------*/ 670 /* covert A to Aloc to be used by ML at fine grid */ 671 pc_ml->size = size; 672 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 673 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 674 if (isMPI){ 675 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 676 } else if (isSeq) { 677 Aloc = A; 678 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 679 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 680 681 /* create and initialize struct 'PetscMLdata' */ 682 ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 683 pc_ml->PetscMLdata = PetscMLdata; 684 ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 685 686 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 687 ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr); 688 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 689 690 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 691 ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 692 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 693 PetscMLdata->A = A; 694 PetscMLdata->Aloc = Aloc; 695 if (pc_ml->dim) { /* create vecs around the coordinate data given */ 696 PetscInt i,j,dim=pc_ml->dim; 697 PetscInt nloc = pc_ml->nloc,nlocghost; 698 PetscReal *ghostedcoords; 699 700 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 701 nlocghost = Aloc->cmap->n / bs; 702 ierr = PetscMalloc(dim*nlocghost*sizeof(PetscReal),&ghostedcoords);CHKERRQ(ierr); 703 
for (i = 0; i < dim; i++) { 704 /* copy coordinate values into first component of pwork */ 705 for (j = 0; j < nloc; j++) { 706 PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j]; 707 } 708 /* get the ghost values */ 709 ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr); 710 /* write into the vector */ 711 for (j = 0; j < nlocghost; j++) { 712 ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j]; 713 } 714 } 715 /* replace the original coords with the ghosted coords, because these are 716 * what ML needs */ 717 ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr); 718 pc_ml->coords = ghostedcoords; 719 } 720 721 /* create ML discretization matrix at fine grid */ 722 /* ML requires input of fine-grid matrix. It determines nlevels. */ 723 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 724 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 725 ML_Create(&ml_object,pc_ml->MaxNlevels); 726 ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm); 727 pc_ml->ml_object = ml_object; 728 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 729 ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 730 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 731 732 ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? 
ML_YES : ML_NO); 733 734 /* aggregation */ 735 ML_Aggregate_Create(&agg_object); 736 pc_ml->agg_object = agg_object; 737 738 { 739 MatNullSpace mnull; 740 ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr); 741 if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) { 742 if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER; 743 else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK; 744 else pc_ml->nulltype = PCML_NULLSPACE_SCALAR; 745 } 746 switch (pc_ml->nulltype) { 747 case PCML_NULLSPACE_USER: { 748 PetscScalar *nullvec; 749 const PetscScalar *v; 750 PetscBool has_const; 751 PetscInt i,j,mlocal,nvec,M; 752 const Vec *vecs; 753 if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space"); 754 ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr); 755 ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr); 756 ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr); 757 ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof(*nullvec),&nullvec);CHKERRQ(ierr); 758 if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M; 759 for (i=0; i<nvec; i++) { 760 ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr); 761 for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j]; 762 ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr); 763 } 764 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr); 765 ierr = PetscFree(nullvec);CHKERRQ(ierr); 766 } break; 767 case PCML_NULLSPACE_BLOCK: 768 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr); 769 break; 770 case PCML_NULLSPACE_SCALAR: 771 break; 772 default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type"); 773 } 774 } 775 ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize); 776 /* set options */ 777 switch (pc_ml->CoarsenScheme) { 778 case 1: 779 ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break; 780 case 2: 781 
ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break; 782 case 3: 783 ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break; 784 } 785 ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 786 ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 787 if (pc_ml->SpectralNormScheme_Anorm){ 788 ML_Set_SpectralNormScheme_Anorm(ml_object); 789 } 790 agg_object->keep_agg_information = (int)pc_ml->KeepAggInfo; 791 agg_object->keep_P_tentative = (int)pc_ml->Reusable; 792 agg_object->block_scaled_SA = (int)pc_ml->BlockScaling; 793 agg_object->minimizing_energy = (int)pc_ml->EnergyMinimization; 794 agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol; 795 agg_object->cheap_minimizing_energy = (int)pc_ml->EnergyMinimizationCheap; 796 797 if (pc_ml->Aux) { 798 if (!pc_ml->dim) { 799 SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Auxiliary matrix requires coordinates"); 800 } 801 ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold; 802 ml_object->Amat[0].aux_data->enable = 1; 803 ml_object->Amat[0].aux_data->max_level = 10; 804 ml_object->Amat[0].num_PDEs = bs; 805 } 806 807 if (pc_ml->dim) { 808 PetscInt i,dim = pc_ml->dim; 809 ML_Aggregate_Viz_Stats *grid_info; 810 PetscInt nlocghost; 811 812 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 813 nlocghost = Aloc->cmap->n / bs; 814 815 ML_Aggregate_VizAndStats_Setup(ml_object); /* create ml info for coords */ 816 grid_info = (ML_Aggregate_Viz_Stats *) ml_object->Grid[0].Grid; 817 for (i = 0; i < dim; i++) { 818 /* set the finest level coordinates to point to the column-order array 819 * in pc_ml */ 820 /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */ 821 switch (i) { 822 case 0: grid_info->x = pc_ml->coords + nlocghost * i; break; 823 case 1: grid_info->y = pc_ml->coords + nlocghost * i; break; 824 case 2: grid_info->z = pc_ml->coords + nlocghost * i; break; 825 default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 
3"); 826 } 827 } 828 grid_info->Ndim = dim; 829 } 830 831 /* repartitioning */ 832 if (pc_ml->Repartition) { 833 ML_Repartition_Activate (ml_object); 834 ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio); 835 ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc); 836 ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc); 837 #if 0 /* Function not yet defined in ml-6.2 */ 838 /* I'm not sure what compatibility issues might crop up if we partitioned 839 * on the finest level, so to be safe repartition starts on the next 840 * finest level (reflection default behavior in 841 * ml_MultiLevelPreconditioner) */ 842 ML_Repartition_Set_StartLevel(ml_object,1); 843 #endif 844 845 if (!pc_ml->RepartitionType) { 846 PetscInt i; 847 848 if (!pc_ml->dim) { 849 SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates"); 850 } 851 ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN); 852 ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim); 853 854 for (i = 0;i < ml_object->ML_num_levels;i++) { 855 ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Grid[i].Grid; 856 grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */ 857 /* defaults from ml_agg_info.c */ 858 grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */ 859 grid_info->zoltan_timers = 0; 860 grid_info->smoothing_steps = 4; /* only relevant to hypergraph / fast hypergraph */ 861 } 862 } 863 else { 864 ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS); 865 } 866 } 867 868 if (pc_ml->OldHierarchy) { 869 Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 870 } else { 871 Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 872 } 873 if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 874 pc_ml->Nlevels = Nlevels; 875 fine_level = 
Nlevels - 1; 876 877 ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 878 /* set default smoothers */ 879 for (level=1; level<=fine_level; level++){ 880 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 881 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 882 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 883 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 884 } 885 ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr); 886 ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */ 887 ierr = PetscOptionsEnd();CHKERRQ(ierr); 888 889 ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr); 890 pc_ml->gridctx = gridctx; 891 892 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 893 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 894 gridctx[fine_level].A = A; 895 896 level = fine_level - 1; 897 if (size == 1){ /* convert ML P, R and A into seqaij format */ 898 for (mllevel=1; mllevel<Nlevels; mllevel++){ 899 mlmat = &(ml_object->Pmat[mllevel]); 900 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 901 mlmat = &(ml_object->Rmat[mllevel-1]); 902 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 903 904 mlmat = &(ml_object->Amat[mllevel]); 905 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 906 level--; 907 } 908 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 909 for (mllevel=1; mllevel<Nlevels; mllevel++){ 910 mlmat = &(ml_object->Pmat[mllevel]); 911 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 912 mlmat = &(ml_object->Rmat[mllevel-1]); 913 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 914 915 mlmat = &(ml_object->Amat[mllevel]); 916 ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 917 
level--; 918 } 919 } 920 921 /* create vectors and ksp at all levels */ 922 for (level=0; level<fine_level; level++){ 923 level1 = level + 1; 924 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 925 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr); 926 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 927 ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 928 929 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 930 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 931 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 932 ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 933 934 ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 935 ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 936 ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 937 ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr); 938 939 if (level == 0){ 940 ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 941 } else { 942 ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 943 } 944 } 945 ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr); 946 947 /* create coarse level and the interpolation between the levels */ 948 for (level=0; level<fine_level; level++){ 949 level1 = level + 1; 950 ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr); 951 ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr); 952 if (level > 0){ 953 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 954 } 955 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 956 } 957 ierr = 
PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 958 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 959 960 /* put coordinate info in levels */ 961 if (pc_ml->dim) { 962 PetscInt i,j,dim = pc_ml->dim; 963 PetscInt bs, nloc; 964 PC subpc; 965 PetscReal *array; 966 967 level = fine_level; 968 for (mllevel = 0; mllevel < Nlevels; mllevel++) { 969 ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats *)ml_object->Amat[mllevel].to->Grid->Grid; 970 MPI_Comm comm = ((PetscObject)gridctx[level].A)->comm; 971 972 ierr = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr); 973 ierr = MatGetLocalSize (gridctx[level].A, PETSC_NULL, &nloc);CHKERRQ(ierr); 974 nloc /= bs; /* number of local nodes */ 975 976 ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr); 977 ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr); 978 ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr); 979 ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr); 980 for (j = 0; j < nloc; j++) { 981 for (i = 0; i < dim; i++) { 982 switch (i) { 983 case 0: array[dim * j + i] = grid_info->x[j]; break; 984 case 1: array[dim * j + i] = grid_info->y[j]; break; 985 case 2: array[dim * j + i] = grid_info->z[j]; break; 986 default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3"); 987 } 988 } 989 } 990 991 /* passing coordinates to smoothers/coarse solver, should they need them */ 992 ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr); 993 ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr); 994 ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr); 995 level--; 996 } 997 } 998 999 /* setupcalled is set to 0 so that MG is setup from scratch */ 1000 pc->setupcalled = 0; 1001 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 1002 PetscFunctionReturn(0); 1003 } 1004 1005 /* 
-------------------------------------------------------------------------- */ 1006 /* 1007 PCDestroy_ML - Destroys the private context for the ML preconditioner 1008 that was created with PCCreate_ML(). 1009 1010 Input Parameter: 1011 . pc - the preconditioner context 1012 1013 Application Interface Routine: PCDestroy() 1014 */ 1015 #undef __FUNCT__ 1016 #define __FUNCT__ "PCDestroy_ML" 1017 PetscErrorCode PCDestroy_ML(PC pc) 1018 { 1019 PetscErrorCode ierr; 1020 PC_MG *mg = (PC_MG*)pc->data; 1021 PC_ML *pc_ml= (PC_ML*)mg->innerctx; 1022 1023 PetscFunctionBegin; 1024 ierr = PCReset_ML(pc);CHKERRQ(ierr); 1025 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 1026 ierr = PCDestroy_MG(pc);CHKERRQ(ierr); 1027 ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","",PETSC_NULL);CHKERRQ(ierr); 1028 PetscFunctionReturn(0); 1029 } 1030 1031 #undef __FUNCT__ 1032 #define __FUNCT__ "PCSetFromOptions_ML" 1033 PetscErrorCode PCSetFromOptions_ML(PC pc) 1034 { 1035 PetscErrorCode ierr; 1036 PetscInt indx,PrintLevel,partindx; 1037 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 1038 const char *part[] = {"Zoltan","ParMETIS"}; 1039 #if defined(HAVE_ML_ZOLTAN) 1040 PetscInt zidx; 1041 const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"}; 1042 #endif 1043 PC_MG *mg = (PC_MG*)pc->data; 1044 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 1045 PetscMPIInt size; 1046 MPI_Comm comm = ((PetscObject)pc)->comm; 1047 1048 PetscFunctionBegin; 1049 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1050 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 1051 PrintLevel = 0; 1052 indx = 0; 1053 partindx = 0; 1054 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr); 1055 ML_Set_PrintLevel(PrintLevel); 1056 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr); 1057 ierr = 
PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr); 1058 ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); 1059 pc_ml->CoarsenScheme = indx; 1060 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr); 1061 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr); 1062 ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr); 1063 ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr); 1064 ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr); 1065 ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr); 1066 ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr); 1067 ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr); 1068 /* 1069 The following checks a number 
of conditions. If we let this stuff slip by, then ML's error handling will take over. 1070 This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions. 1071 1072 We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working 1073 combination of options and ML's exit(1) explanations don't help matters. 1074 */ 1075 if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4"); 1076 if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel"); 1077 if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);} 1078 if (pc_ml->EnergyMinimization) { 1079 ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr); 1080 } 1081 if (pc_ml->EnergyMinimization == 2) { 1082 /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */ 1083 ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr); 1084 } 1085 /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */ 1086 if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE; 1087 ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr); 1088 /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. 
*/ 1089 if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE; 1090 ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr); 1091 /* 1092 ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls 1093 ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function 1094 ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the 1095 functionality in the old function, so some users may still want to use it. Note that many options are ignored in 1096 this context, but ML doesn't provide a way to find out which ones. 1097 */ 1098 ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr); 1099 ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,PETSC_NULL);CHKERRQ(ierr); 1100 if (pc_ml->Repartition) { 1101 ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,PETSC_NULL);CHKERRQ(ierr); 1102 ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,PETSC_NULL);CHKERRQ(ierr); 1103 ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,PETSC_NULL);CHKERRQ(ierr); 1104 #if defined(HAVE_ML_ZOLTAN) 1105 partindx = 0; 1106 ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to 
use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,PETSC_NULL);CHKERRQ(ierr); 1107 pc_ml->RepartitionType = partindx; 1108 if (!partindx) { 1109 zindx = 0; 1110 ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,PETSC_NULL);CHKERRQ(ierr); 1111 pc_ml->ZoltanScheme = zindx; 1112 } 1113 #else 1114 partindx = 1; 1115 ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,PETSC_NULL);CHKERRQ(ierr); 1116 if (!partindx) { 1117 SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan"); 1118 } 1119 #endif 1120 ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,PETSC_NULL);CHKERRQ(ierr); 1121 ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,PETSC_NULL);CHKERRQ(ierr); 1122 } 1123 ierr = PetscOptionsTail();CHKERRQ(ierr); 1124 PetscFunctionReturn(0); 1125 } 1126 1127 /* -------------------------------------------------------------------------- */ 1128 /* 1129 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 1130 and sets this as the private data within the generic preconditioning 1131 context, PC, that was created within PCCreate(). 1132 1133 Input Parameter: 1134 . pc - the preconditioner context 1135 1136 Application Interface Routine: PCCreate() 1137 */ 1138 1139 /*MC 1140 PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide 1141 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 1142 operators are computed by ML, with the matrices coverted to PETSc matrices in aij format 1143 and the restriction/interpolation operators wrapped as PETSc shell matrices. 
1144 1145 Options Database Key: 1146 Multigrid options(inherited) 1147 + -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles) 1148 . -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp) 1149 . -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown) 1150 -pc_mg_type <multiplicative>: (one of) additive multiplicative full kascade 1151 ML options: 1152 . -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel) 1153 . -pc_ml_maxNlevels <10>: Maximum number of levels (None) 1154 . -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize) 1155 . -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS 1156 . -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor) 1157 . -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold) 1158 . -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm) 1159 . -pc_ml_repartition <false>: Allow ML to repartition levels of the heirarchy (ML_Repartition_Activate) 1160 . -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio) 1161 . -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc) 1162 . -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc) 1163 . -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner) 1164 . -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None) 1165 . 
-pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None) 1166 - -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None) 1167 1168 Level: intermediate 1169 1170 Concepts: multigrid 1171 1172 .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 1173 PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), MPSetCycles(), PCMGSetNumberSmoothDown(), 1174 PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(), 1175 PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(), 1176 PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR() 1177 M*/ 1178 1179 EXTERN_C_BEGIN 1180 #undef __FUNCT__ 1181 #define __FUNCT__ "PCCreate_ML" 1182 PetscErrorCode PCCreate_ML(PC pc) 1183 { 1184 PetscErrorCode ierr; 1185 PC_ML *pc_ml; 1186 PC_MG *mg; 1187 1188 PetscFunctionBegin; 1189 /* PCML is an inherited class of PCMG. Initialize pc as PCMG */ 1190 ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */ 1191 ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr); 1192 /* Since PCMG tries to use DM assocated with PC must delete it */ 1193 ierr = DMDestroy(&pc->dm);CHKERRQ(ierr); 1194 mg = (PC_MG*)pc->data; 1195 mg->galerkin = 2; /* Use Galerkin, but it is computed externally */ 1196 1197 /* create a supporting struct and attach it to pc */ 1198 ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr); 1199 mg->innerctx = pc_ml; 1200 1201 pc_ml->ml_object = 0; 1202 pc_ml->agg_object = 0; 1203 pc_ml->gridctx = 0; 1204 pc_ml->PetscMLdata = 0; 1205 pc_ml->Nlevels = -1; 1206 pc_ml->MaxNlevels = 10; 1207 pc_ml->MaxCoarseSize = 1; 1208 pc_ml->CoarsenScheme = 1; 1209 pc_ml->Threshold = 0.0; 1210 pc_ml->DampingFactor = 4.0/3.0; 1211 pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE; 1212 pc_ml->size = 0; 1213 pc_ml->dim = 0; 1214 pc_ml->nloc = 0; 1215 pc_ml->coords = 0; 1216 pc_ml->Repartition = PETSC_FALSE; 1217 pc_ml->MaxMinRatio = 1.3; 1218 
pc_ml->MinPerProc = 512; 1219 pc_ml->PutOnSingleProc = 5000; 1220 pc_ml->RepartitionType = 0; 1221 pc_ml->ZoltanScheme = 0; 1222 pc_ml->Aux = PETSC_FALSE; 1223 pc_ml->AuxThreshold = 0.0; 1224 1225 /* allow for coordinates to be passed */ 1226 ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCSetCoordinates_C","PCSetCoordinates_ML",PCSetCoordinates_ML);CHKERRQ(ierr); 1227 1228 /* overwrite the pointers of PCMG by the functions of PCML */ 1229 pc->ops->setfromoptions = PCSetFromOptions_ML; 1230 pc->ops->setup = PCSetUp_ML; 1231 pc->ops->reset = PCReset_ML; 1232 pc->ops->destroy = PCDestroy_ML; 1233 PetscFunctionReturn(0); 1234 } 1235 EXTERN_C_END 1236