
/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>                    /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscdm.h>            /* for DMDestroy(&pc->mg) hack */

EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END

typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                  /* global vectors */
  Mat A,P,R;
  KSP ksp;
  Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;       /* PETSc matrix in aij format */
  Mat         Aloc;    /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;  /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;
  ML_Aggregate      *agg_object;
  GridCtx           *gridctx;
  FineGridCtx       *PetscMLdata;
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation;
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc;
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;
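
/*
   The PC_ML fields above are set from the -pc_ml_* options in PCSetFromOptions_ML()
   and consumed in PCSetUp_ML(). A minimal sketch of typical command-line usage,
   assuming an application that calls KSPSetFromOptions() ("./app" is a placeholder
   for the user's executable):

     ./app -ksp_type cg -pc_type ml \
           -pc_ml_maxNlevels 10 -pc_ml_maxCoarseSize 1 \
           -pc_ml_CoarsenScheme Uncoupled -pc_ml_Threshold 0.0

   The option names and defaults are those registered in PCSetFromOptions_ML() and
   PCCreate_ML() below.
*/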

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++) {
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_data;
  Mat            A   = ml->A;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A   = ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
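
/*
   Note on the callbacks above: ML pulls matrix data through the getrow/comm/matvec
   triple registered in PCSetUp_ML(). PetscML_getrow() follows ML's getrow contract:
   it returns 0 when allocated_space is too small, in which case ML enlarges its
   buffer and calls again. A rough sketch of the caller side (not ML's actual
   source, just the contract):

     while (!getrow(op,1,&row,space,cols,vals,&len)) space = enlarge(space);

   PetscML_comm() fills the ghost entries of p[] (positions in_length..out_length-1)
   so that the local matrix Aloc can be applied to a fully ghosted vector.
*/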

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray));
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* newtype is ignored since this function only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscMalloc1((1+am),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc1((1+ci[am]),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc1((1+ci[am]),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    mat=(Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}
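
/*
   Local column layout produced above: columns 0..an-1 are the diagonal block in
   local numbering, followed by the ghost columns of the off-diagonal block shifted
   by an. A small illustration (hypothetical 2x2 local diagonal block with one
   ghost column): a local row (a_00 a_01 | b_0) becomes CSR entries with column
   indices {0, 1, 2}, where index 2 = an + 0 refers to the first ghost column.
*/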

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
    if (reuse) {
      Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc1((m+1),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++) {
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj  += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (!reuse) {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(*newmat,mlmat->num_PDEs);CHKERRQ(ierr);

    ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
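
/*
   Note on MatWrapML_SeqAIJ: for rectangular operators (ML's P and R) the CSR arrays
   owned by ML are handed directly to the SeqAIJ matrix, so no copy is made and the
   ML hierarchy must outlive the PETSc wrapper. For square operators (coarse-level A)
   the values are copied row by row through ML_Operator_Getrow() instead.
*/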

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);

  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = NULL;

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);

  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;
  PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (reuse) A = *newmat;
  else {
    PetscInt *nnzA,*nnzB,*nnz;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(A,mlmat->num_PDEs);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);

    for (i=0; i<m; i++) {
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
      nnzA[i] = 0;
      for (j=0; j<nnz[i]; j++) {
        if (aj[j] < m) nnzA[i]++;
      }
      nnzB[i] = nnz[i] - nnzA[i];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }
  /* create global row numbering for a ML_Operator */
  PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
    ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  PetscStackCall("ML_free",ML_free(gordering));
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_ML

   Input Parameter:
.  pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;

  if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Number of coordinate locations %d must equal the number of local blocks %d.",a_nloc,nloc);
  if ((Iend-my0)%bs!=0) SETERRQ1(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* create data - syntactic sugar that should be refactored at some point */
  if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    ierr = PetscMalloc1((arrsz), &pc_ml->coords);CHKERRQ(ierr);
  }
  for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
  /* copy data in - column oriented */
  for (kk = 0; kk < nloc; kk++) {
    for (ii = 0; ii < ndm; ii++) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }
  PetscFunctionReturn(0);
}
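
/*
   A minimal sketch of how an application would supply coordinates, assuming a 2d
   problem with nloc owned nodes and interlaced storage (x0,y0,x1,y1,...), which is
   what this routine expects via coords[kk*ndm + ii]:

     PetscReal *xy;    (length 2*nloc, filled by the application)
     ierr = PCSetCoordinates(pc,2,nloc,xy);CHKERRQ(ierr);

   PCSetCoordinates() dispatches here through the "PCSetCoordinates_C" composed
   function set in PCCreate_ML() below.
*/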

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
  }
  PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
  PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);

  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCReset_MG(PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size;
  FineGridCtx      *PetscMLdata;
  ML               *ml_object;
  ML_Aggregate     *agg_object;
  ML_Operator      *mlmat;
  PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat              A,Aloc;
  GridCtx          *gridctx;
  PC_MG            *mg    = (PC_MG*)pc->data;
  PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool        isSeq, isMPI;
  KSP              smoother;
  PC               subpc;
  PetscInt         mesh_level, old_mesh_level;
  MatInfo          info;
  static PetscBool cite = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n title = {{ML}3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n institution = {Sandia National Laboratories},\n number = {SAND2004-4821},\n year = 2004\n}\n",&cite);CHKERRQ(ierr);
  A    = pc->pmat;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
      */
      ml_object             = pc_ml->ml_object;
      gridctx               = pc_ml->gridctx;
      Nlevels               = pc_ml->Nlevels;
      fine_level            = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI) {
        ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata       = pc_ml->PetscMLdata;
      ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
      PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
        PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
        PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
      }

      level = fine_level - 1;
      if (size == 1) { /* convert ML A into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert ML A into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0) {
          ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at fine grid */
  pc_ml->size = size;
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI) {
    ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr               = PetscMalloc1((Aloc->cmap->n+1),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
  if (pc_ml->dim) { /* create vecs around the coordinate data given */
    PetscInt  i,j,dim=pc_ml->dim;
    PetscInt  nloc = pc_ml->nloc,nlocghost;
    PetscReal *ghostedcoords;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;
    ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
    for (i = 0; i < dim; i++) {
      /* copy coordinate values into first component of pwork */
      for (j = 0; j < nloc; j++) {
        PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
      }
      /* get the ghost values */
      ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
      /* write into the vector */
      for (j = 0; j < nlocghost; j++) {
        ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
      }
    }
    /* replace the original coords with the ghosted coords, because these are
     * what ML needs */
    ierr          = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    pc_ml->coords = ghostedcoords;
  }

  /* create ML discretization matrix at fine grid */
  /* ML requires input of fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
  PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
  pc_ml->ml_object = ml_object;
  PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
  PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
  PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

  PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));

  /* aggregation */
  PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar       *nullvec;
      const PetscScalar *v;
      PetscBool         has_const;
      PetscInt          i,j,mlocal,nvec,M;
      const Vec         *vecs;

      if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
    }
  }
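
  /*
     A minimal sketch of supplying a user null space for PCML_NULLSPACE_USER, assuming
     the application has already assembled rigid-body-mode vectors in Vec rbm[nrbm]
     (hypothetical names):

       MatNullSpace nsp;
       ierr = MatNullSpaceCreate(PetscObjectComm((PetscObject)A),PETSC_TRUE,nrbm,rbm,&nsp);CHKERRQ(ierr);
       ierr = MatSetNearNullSpace(A,nsp);CHKERRQ(ierr);
       ierr = MatNullSpaceDestroy(&nsp);CHKERRQ(ierr);

     combined with -pc_ml_nullspace USER (or the default AUTO, which selects USER
     whenever a near null space is attached to the matrix).
  */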
  PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
  case 2:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
  case 3:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
  }
  PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
  PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
  if (pc_ml->SpectralNormScheme_Anorm) {
    PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->Aux) {
    if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
    ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
    ml_object->Amat[0].aux_data->enable    = 1;
    ml_object->Amat[0].aux_data->max_level = 10;
    ml_object->Amat[0].num_PDEs            = bs;
  }

  ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
  ml_object->Amat[0].N_nonzeros = (int) info.nz_used;

  if (pc_ml->dim) {
    PetscInt               i,dim = pc_ml->dim;
    ML_Aggregate_Viz_Stats *grid_info;
    PetscInt               nlocghost;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;

    PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
    grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
    for (i = 0; i < dim; i++) {
      /* set the finest level coordinates to point to the column-order array
       * in pc_ml */
      /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
      switch (i) {
      case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
      case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
      case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
      default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
      }
    }
    grid_info->Ndim = dim;
  }

  /* repartitioning */
  if (pc_ml->Repartition) {
    PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
    PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
    PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
    PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
#if 0 /* Function not yet defined in ml-6.2 */
    /* I'm not sure what compatibility issues might crop up if we partitioned
     * on the finest level, so to be safe repartition starts on the next
     * finest level (reflecting the default behavior in
     * ml_MultiLevelPreconditioner) */
    PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
#endif

    if (!pc_ml->RepartitionType) {
      PetscInt i;

      if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
      PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));

      for (i = 0; i < ml_object->ML_num_levels; i++) {
        ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
        grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
        /* defaults from ml_agg_info.c */
        grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
        grid_info->zoltan_timers        = 0;
        grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
      }
    } else {
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
    }
  }

  if (pc_ml->OldHierarchy) {
    PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  } else {
    PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  }
  if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
  /* set default smoothers */
  for (level=1; level<=fine_level; level++) {
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);

  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0) {
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;

    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* put coordinate info in levels */
  if (pc_ml->dim) {
    PetscInt  i,j,dim = pc_ml->dim;
    PetscInt  bs, nloc;
    PC        subpc;
    PetscReal *array;

    level = fine_level;
    for (mllevel = 0; mllevel < Nlevels; mllevel++) {
      ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
      MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;

      ierr  = MatGetBlockSize(gridctx[level].A,&bs);CHKERRQ(ierr);
      ierr  = MatGetLocalSize(gridctx[level].A,NULL,&nloc);CHKERRQ(ierr);
      nloc /= bs; /* number of local nodes */

      ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
      ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      for (j = 0; j < nloc; j++) {
        for (i = 0; i < dim; i++) {
          switch (i) {
          case 0: array[dim * j + i] = grid_info->x[j]; break;
          case 1: array[dim * j + i] = grid_info->y[j]; break;
          case 2: array[dim * j + i] = grid_info->z[j]; break;
          default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
          }
        }
      }

      /* passing coordinates to smoothers/coarse solver, should they need them */
      ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
      ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
      ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      level--;
    }
  }

  /* setupcalled is set to 0 so that MG is setup from scratch */
  pc->setupcalled = 0;
  ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
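
/*
   A minimal sketch of the solve loop that benefits from -pc_ml_reuse_interpolation,
   assuming the application re-assembles A with the same nonzero pattern between
   solves (AssembleMatrix is a hypothetical application routine):

     for (step = 0; step < nsteps; step++) {
       ierr = AssembleMatrix(A,step);CHKERRQ(ierr);                        // same pattern each time
       ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
       ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);                             // hierarchy is updated, not rebuilt
     }

   With SAME_NONZERO_PATTERN and reuse_interpolation set, PCSetUp_ML() takes the
   early branch above that regenerates only the coarse operators via ML_Gen_AmatrixRAP().
*/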

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg   = (PC_MG*)pc->data;
  PC_ML          *pc_ml= (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel,partindx;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  const char     *part[]   = {"Zoltan","ParMETIS"};
#if defined(HAVE_ML_ZOLTAN)
  const char     *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
#endif
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);

  PrintLevel = 0;
  indx       = 0;
  partindx   = 0;

  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
  PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);

  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over.
    This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
    combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical stuff should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the
    functionality in the old function, so some users may still want to use it. Note that many options are ignored in
    this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
  if (pc_ml->Repartition) {
    ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
#if defined(HAVE_ML_ZOLTAN)
    partindx = 0;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);

    pc_ml->RepartitionType = partindx;
    if (!partindx) {
      PetscInt zindx = 0;

      ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);

      pc_ml->ZoltanScheme = zindx;
    }
#else
    partindx = 1;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
    if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
#endif
    ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
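
/*
   A minimal sketch of an option set exercising the checks above, assuming a
   sequential run (energy minimization type 4 is rejected in parallel):

     -pc_type ml -pc_ml_EnergyMinimization 2 -pc_ml_EnergyMinimizationDropTol 1e-4 \
                 -pc_ml_EnergyMinimizationCheap

   Setting -pc_ml_EnergyMinimization to a nonzero value also forces KeepAggInfo on,
   as noted in the comments above.
*/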

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates a ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use the algebraic multigrid preconditioning from the ML package. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Key:
   Multigrid options(inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
   ML options:
.  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
.  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
.  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
.  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
.  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
.  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
.  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
.  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
-  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
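
/*
   A minimal sketch of programmatic use, assuming ksp, the assembled operator A, and
   the vectors b and x already exist (the surrounding variables are hypothetical):

     PC pc;
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);
     ierr = PCSetCoordinates(pc,dim,nloc,coords);CHKERRQ(ierr); // optional: enables the auxiliary Laplacian and Zoltan repartitioning
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/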

#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg   = (PC_MG*)pc->data;
  mg->galerkin = 2;   /* Use Galerkin, but it is computed externally */

  /* create a supporting struct and attach it to pc */
  ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;
  pc_ml->dim                      = 0;
  pc_ml->nloc                     = 0;
  pc_ml->coords                   = 0;
  pc_ml->Repartition              = PETSC_FALSE;
  pc_ml->MaxMinRatio              = 1.3;
  pc_ml->MinPerProc               = 512;
  pc_ml->PutOnSingleProc          = 5000;
  pc_ml->RepartitionType          = 0;
  pc_ml->ZoltanScheme             = 0;
  pc_ml->Aux                      = PETSC_FALSE;
  pc_ml->AuxThreshold             = 0.0;

  /* allow for coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}