/*
   Provides an interface to the ML smoothed Aggregation
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>   /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscdm.h>   /* for DMDestroy(&pc->mg) hack */

EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END

/* How the near-null space handed to ML's smoothed aggregation is chosen.
   AUTO resolves (in PCSetUp_ML) to USER when a near-null space is attached to the matrix,
   else BLOCK when block size > 1, else SCALAR. */
typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;   /* global vectors */
  Mat A,P,R;
  KSP ksp;
  Vec coords;  /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;      /* Petsc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;    /* work vectors whose arrays are (re)placed around ML's raw buffers */
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context associates a ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y, work; /* work is lazily allocated by MatMultAdd_ML when y aliases w */
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt
Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme; 56 PetscReal Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold; 57 PetscBool SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux; 58 PetscBool reuse_interpolation; 59 PCMLNullSpaceType nulltype; 60 PetscMPIInt size; /* size of communicator for pc->pmat */ 61 PetscInt dim; /* data from PCSetCoordinates(_ML) */ 62 PetscInt nloc; 63 PetscReal *coords; /* ML has a grid object for each level: the finest grid will point into coords */ 64 } PC_ML; 65 66 #undef __FUNCT__ 67 #define __FUNCT__ "PetscML_getrow" 68 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[]) 69 { 70 PetscErrorCode ierr; 71 PetscInt m,i,j,k=0,row,*aj; 72 PetscScalar *aa; 73 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data); 74 Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data; 75 76 ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0); 77 for (i = 0; i<N_requested_rows; i++) { 78 row = requested_rows[i]; 79 row_lengths[i] = a->ilen[row]; 80 if (allocated_space < k+row_lengths[i]) return(0); 81 if ((row >= 0) || (row <= (m-1))) { 82 aj = a->j + a->i[row]; 83 aa = a->a + a->i[row]; 84 for (j=0; j<row_lengths[i]; j++) { 85 columns[k] = aj[j]; 86 values[k++] = aa[j]; 87 } 88 } 89 } 90 return(1); 91 } 92 93 #undef __FUNCT__ 94 #define __FUNCT__ "PetscML_comm" 95 static PetscErrorCode PetscML_comm(double p[],void *ML_data) 96 { 97 PetscErrorCode ierr; 98 FineGridCtx *ml = (FineGridCtx*)ML_data; 99 Mat A = ml->A; 100 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 101 PetscMPIInt size; 102 PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n; 103 PetscScalar *array; 104 105 PetscFunctionBegin; 106 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 107 if 
(size == 1) return 0; 108 109 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 110 ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 111 ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 112 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 113 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 114 for (i=in_length; i<out_length; i++) p[i] = array[i-in_length]; 115 ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr); 116 PetscFunctionReturn(0); 117 } 118 119 #undef __FUNCT__ 120 #define __FUNCT__ "PetscML_matvec" 121 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]) 122 { 123 PetscErrorCode ierr; 124 FineGridCtx *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data); 125 Mat A = ml->A, Aloc=ml->Aloc; 126 PetscMPIInt size; 127 PetscScalar *pwork=ml->pwork; 128 PetscInt i; 129 130 PetscFunctionBegin; 131 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 132 if (size == 1) { 133 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 134 } else { 135 for (i=0; i<in_length; i++) pwork[i] = p[i]; 136 ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr); 137 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 138 } 139 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 140 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 141 ierr = VecResetArray(ml->x);CHKERRQ(ierr); 142 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 143 PetscFunctionReturn(0); 144 } 145 146 #undef __FUNCT__ 147 #define __FUNCT__ "MatMult_ML" 148 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 149 { 150 PetscErrorCode ierr; 151 Mat_MLShell *shell; 152 PetscScalar *xarray,*yarray; 153 PetscInt x_length,y_length; 154 155 PetscFunctionBegin; 156 ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr); 157 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 158 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 159 x_length = shell->mlmat->invec_leng; 160 y_length = shell->mlmat->outvec_leng; 161 
PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray));
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
  if (y == w) {
    /* y aliases w: apply the operator into a scratch vector (lazily created,
       cached on the shell context) and then accumulate into y */
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    /* no aliasing: apply directly into y, then add w */
    ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* newtype is ignored since only handles one case */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
/* Merge the diagonal and off-diagonal blocks of an MPIAIJ matrix into one
   sequential AIJ matrix (the per-process "local" matrix handed to ML). */
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
Mat_MPIAIJ  *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ  *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt    *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar *aa=a->a,*ba=b->a,*ca;
  PetscInt    am =A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt    *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    /* row pointers of the merged matrix: each row holds diag entries then off-diag entries */
    ierr = PetscMalloc1((1+am),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc1((1+ci[am]),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc1((1+ci[am]),&ca);CHKERRQ(ierr);

    /* walk both CSR structures in lock-step, concatenating each row;
       the aj/aa/bj/ba pointers advance monotonically across rows */
    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A: local column indices shifted past the diagonal block */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change flags to free them as necessary.
*/
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    /* structure unchanged: only refresh the numerical values in place */
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
/* Destroy for the ML shell matrix: free the Mat_MLShell context and its
   vectors, then delegate to the generic shell destroy. */
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
/* Wrap an ML_Operator as a PETSc SeqAIJ matrix. Rectangular operators
   (P and R, stored by ML in CSR) share ML's arrays directly; square
   operators (coarse A) are copied row by row via ML's getrow. */
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format.
Pass array pointers into SeqAIJ matrix */ 303 if (reuse) { 304 Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data; 305 aij->i = ml_rowptr; 306 aij->j = ml_cols; 307 aij->a = ml_vals; 308 } else { 309 /* sort ml_cols and ml_vals */ 310 ierr = PetscMalloc1((m+1),&nnz); 311 for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i]; 312 aj = ml_cols; aa = ml_vals; 313 for (i=0; i<m; i++) { 314 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 315 aj += nnz[i]; aa += nnz[i]; 316 } 317 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 318 ierr = PetscFree(nnz);CHKERRQ(ierr); 319 } 320 PetscFunctionReturn(0); 321 } 322 323 nz_max = PetscMax(1,mlmat->max_nz_per_row); 324 ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr); 325 if (!reuse) { 326 ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr); 327 ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 328 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 329 /* keep track of block size for A matrices */ 330 ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr); 331 332 ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr); 333 for (i=0; i<m; i++) { 334 PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i])); 335 } 336 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 337 } 338 for (i=0; i<m; i++) { 339 PetscInt ncols; 340 341 PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols)); 342 ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 343 } 344 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 345 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 346 347 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 348 ierr = PetscFree(nnz);CHKERRQ(ierr); 349 PetscFunctionReturn(0); 350 } 351 352 #undef __FUNCT__ 353 #define __FUNCT__ "MatWrapML_SHELL" 354 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse 
reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    /* shell already exists: just re-point it at the new ML operator */
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);

  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = NULL; /* allocated lazily by MatMultAdd_ML if needed */

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);

  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
/* Wrap a square ML_Operator as a PETSc MPIAIJ matrix (parallel coarse A):
   build a global row numbering, preallocate from ML row lengths, then copy. */
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;
  PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);

  /* create global row numbering for a ML_Operator */
  PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));

  nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (reuse) {
    A = *newmat;
  } else {
PetscInt *nnzA,*nnzB,*nnz; 415 PetscInt rstart; 416 ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr); 417 ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 418 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 419 /* keep track of block size for A matrices */ 420 ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr); 421 ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr); 422 ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPIU_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr); 423 rstart -= m; 424 425 for (i=0; i<m; i++) { 426 row = gordering[i] - rstart; 427 PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i])); 428 nnzA[row] = 0; 429 for (j=0; j<nnz[i]; j++) { 430 if (aj[j] < m) nnzA[row]++; 431 } 432 nnzB[row] = nnz[i] - nnzA[row]; 433 } 434 ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr); 435 ierr = PetscFree3(nnzA,nnzB,nnz); 436 } 437 for (i=0; i<m; i++) { 438 PetscInt ncols; 439 row = gordering[i]; 440 441 PetscStackCall(",ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols)); 442 for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]]; 443 ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 444 } 445 PetscStackCall("ML_free",ML_free(gordering)); 446 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 447 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 448 *newmat = A; 449 450 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 451 PetscFunctionReturn(0); 452 } 453 454 /* -------------------------------------------------------------------------- */ 455 /* 456 PCSetCoordinates_ML 457 458 Input Parameter: 459 . 
pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
/* Stash node coordinates (ndm dimensions, a_nloc local nodes) in the PC_ML
   context, transposed to column-major (all x's, then all y's, ...), which is
   the layout ML's grid/viz structures expect. */
static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;  /* number of local node blocks */

  if (nloc!=a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Number of local blocks must locations = %d %d.",a_nloc,nloc);
  if ((Iend-my0)%bs!=0) SETERRQ1(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG, "Bad local size %d.",nloc);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* create data - syntactic sugar that should be refactored at some point;
     reallocate only when the required size changed */
  if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    ierr = PetscMalloc1((arrsz), &pc_ml->coords);CHKERRQ(ierr);
  }
  for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.; /* sentinel to expose unset entries */
  /* copy data in - column oriented (input is interleaved per node) */
  for (kk = 0; kk < nloc; kk++) {
    for (ii = 0; ii < ndm; ii++) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }
  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
/* Tear down everything built by PCSetUp_ML: the ML hierarchy, the fine-grid
   input context, per-level work objects, and stashed coordinates. */
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
if (dim) { 514 ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid; 515 516 for (level=0; level<=fine_level; level++) { 517 ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr); 518 } 519 520 grid_info->x = 0; /* do this so ML doesn't try to free coordinates */ 521 grid_info->y = 0; 522 grid_info->z = 0; 523 524 PetscStackCall("ML_Operator_Getrow",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object)); 525 } 526 PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object)); 527 PetscStackCall("ML_Aggregate_Destroy",ML_Destroy(&pc_ml->ml_object)); 528 529 if (pc_ml->PetscMLdata) { 530 ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr); 531 ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr); 532 ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr); 533 ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr); 534 } 535 ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr); 536 537 if (pc_ml->gridctx) { 538 for (level=0; level<fine_level; level++) { 539 if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);} 540 if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);} 541 if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);} 542 if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);} 543 if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);} 544 if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);} 545 } 546 } 547 ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr); 548 ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr); 549 550 pc_ml->dim = 0; 551 pc_ml->nloc = 0; 552 PetscFunctionReturn(0); 553 } 554 /* -------------------------------------------------------------------------- */ 555 /* 556 PCSetUp_ML - Prepares for the use of the ML preconditioner 557 by setting data structures and 
options. 558 559 Input Parameter: 560 . pc - the preconditioner context 561 562 Application Interface Routine: PCSetUp() 563 564 Notes: 565 The interface routine PCSetUp() is not usually called directly by 566 the user, but instead is called by PCApply() if necessary. 567 */ 568 extern PetscErrorCode PCSetFromOptions_MG(PC); 569 extern PetscErrorCode PCReset_MG(PC); 570 571 #undef __FUNCT__ 572 #define __FUNCT__ "PCSetUp_ML" 573 PetscErrorCode PCSetUp_ML(PC pc) 574 { 575 PetscErrorCode ierr; 576 PetscMPIInt size; 577 FineGridCtx *PetscMLdata; 578 ML *ml_object; 579 ML_Aggregate *agg_object; 580 ML_Operator *mlmat; 581 PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs; 582 Mat A,Aloc; 583 GridCtx *gridctx; 584 PC_MG *mg = (PC_MG*)pc->data; 585 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 586 PetscBool isSeq, isMPI; 587 KSP smoother; 588 PC subpc; 589 PetscInt mesh_level, old_mesh_level; 590 MatInfo info; 591 static PetscBool cite = PETSC_FALSE; 592 593 PetscFunctionBegin; 594 ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n title = {{ML}3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n institution = {Sandia National Laboratories},\n number = {SAND2004-4821},\n year = 2004\n}\n",&cite);CHKERRQ(ierr); 595 A = pc->pmat; 596 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 597 598 if (pc->setupcalled) { 599 if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) { 600 /* 601 Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for 602 multiple solves in which the matrix is not changing too quickly. 
603 */ 604 ml_object = pc_ml->ml_object; 605 gridctx = pc_ml->gridctx; 606 Nlevels = pc_ml->Nlevels; 607 fine_level = Nlevels - 1; 608 gridctx[fine_level].A = A; 609 610 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 611 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 612 if (isMPI) { 613 ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 614 } else if (isSeq) { 615 Aloc = A; 616 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 617 } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 618 619 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 620 PetscMLdata = pc_ml->PetscMLdata; 621 ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr); 622 PetscMLdata->A = A; 623 PetscMLdata->Aloc = Aloc; 624 PetscStackCall("ML_Aggregate_Destroy",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata)); 625 PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec)); 626 627 mesh_level = ml_object->ML_finest_level; 628 while (ml_object->SingleLevel[mesh_level].Rmat->to) { 629 old_mesh_level = mesh_level; 630 mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum; 631 632 /* clean and regenerate A */ 633 mlmat = &(ml_object->Amat[mesh_level]); 634 PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat)); 635 PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm)); 636 PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level)); 637 } 638 639 level = fine_level - 1; 640 if (size == 1) { /* convert ML P, R and A into seqaij format */ 641 for (mllevel=1; mllevel<Nlevels; mllevel++) { 642 mlmat = &(ml_object->Amat[mllevel]); 643 ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 644 level--; 645 } 646 } else { /* convert ML P and R into shell 
format, ML A into mpiaij format */ 647 for (mllevel=1; mllevel<Nlevels; mllevel++) { 648 mlmat = &(ml_object->Amat[mllevel]); 649 ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 650 level--; 651 } 652 } 653 654 for (level=0; level<fine_level; level++) { 655 if (level > 0) { 656 ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr); 657 } 658 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr); 659 } 660 ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr); 661 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A);CHKERRQ(ierr); 662 663 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 664 PetscFunctionReturn(0); 665 } else { 666 /* since ML can change the size of vectors/matrices at any level we must destroy everything */ 667 ierr = PCReset_ML(pc);CHKERRQ(ierr); 668 ierr = PCReset_MG(pc);CHKERRQ(ierr); 669 } 670 } 671 672 /* setup special features of PCML */ 673 /*--------------------------------*/ 674 /* covert A to Aloc to be used by ML at fine grid */ 675 pc_ml->size = size; 676 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 677 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 678 if (isMPI) { 679 ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 680 } else if (isSeq) { 681 Aloc = A; 682 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 683 } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. 
ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 684 685 /* create and initialize struct 'PetscMLdata' */ 686 ierr = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr); 687 pc_ml->PetscMLdata = PetscMLdata; 688 ierr = PetscMalloc1((Aloc->cmap->n+1),&PetscMLdata->pwork);CHKERRQ(ierr); 689 690 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 691 ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr); 692 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 693 694 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 695 ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 696 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 697 PetscMLdata->A = A; 698 PetscMLdata->Aloc = Aloc; 699 if (pc_ml->dim) { /* create vecs around the coordinate data given */ 700 PetscInt i,j,dim=pc_ml->dim; 701 PetscInt nloc = pc_ml->nloc,nlocghost; 702 PetscReal *ghostedcoords; 703 704 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 705 nlocghost = Aloc->cmap->n / bs; 706 ierr = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr); 707 for (i = 0; i < dim; i++) { 708 /* copy coordinate values into first component of pwork */ 709 for (j = 0; j < nloc; j++) { 710 PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j]; 711 } 712 /* get the ghost values */ 713 ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr); 714 /* write into the vector */ 715 for (j = 0; j < nlocghost; j++) { 716 ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j]; 717 } 718 } 719 /* replace the original coords with the ghosted coords, because these are 720 * what ML needs */ 721 ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr); 722 pc_ml->coords = ghostedcoords; 723 } 724 725 /* create ML discretization matrix at fine grid */ 726 /* ML requires input of fine-grid matrix. It determines nlevels. 
*/ 727 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 728 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 729 PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels)); 730 PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A))); 731 pc_ml->ml_object = ml_object; 732 PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata)); 733 PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols)); 734 PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec)); 735 736 PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO)); 737 738 /* aggregation */ 739 PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object)); 740 pc_ml->agg_object = agg_object; 741 742 { 743 MatNullSpace mnull; 744 ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr); 745 if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) { 746 if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER; 747 else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK; 748 else pc_ml->nulltype = PCML_NULLSPACE_SCALAR; 749 } 750 switch (pc_ml->nulltype) { 751 case PCML_NULLSPACE_USER: { 752 PetscScalar *nullvec; 753 const PetscScalar *v; 754 PetscBool has_const; 755 PetscInt i,j,mlocal,nvec,M; 756 const Vec *vecs; 757 758 if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space"); 759 ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr); 760 ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr); 761 ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr); 762 ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr); 763 if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M; 764 for (i=0; i<nvec; i++) { 765 ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr); 766 for (j=0; j<mlocal; j++) 
nullvec[(i+!!has_const)*mlocal + j] = v[j]; 767 ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr); 768 } 769 PetscStackCall("ML_Aggregate_Create",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr)); 770 ierr = PetscFree(nullvec);CHKERRQ(ierr); 771 } break; 772 case PCML_NULLSPACE_BLOCK: 773 PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr)); 774 break; 775 case PCML_NULLSPACE_SCALAR: 776 break; 777 default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type"); 778 } 779 } 780 PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize)); 781 /* set options */ 782 switch (pc_ml->CoarsenScheme) { 783 case 1: 784 PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break; 785 case 2: 786 PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break; 787 case 3: 788 PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break; 789 } 790 PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold)); 791 PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor)); 792 if (pc_ml->SpectralNormScheme_Anorm) { 793 PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object)); 794 } 795 agg_object->keep_agg_information = (int)pc_ml->KeepAggInfo; 796 agg_object->keep_P_tentative = (int)pc_ml->Reusable; 797 agg_object->block_scaled_SA = (int)pc_ml->BlockScaling; 798 agg_object->minimizing_energy = (int)pc_ml->EnergyMinimization; 799 agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol; 800 agg_object->cheap_minimizing_energy = (int)pc_ml->EnergyMinimizationCheap; 801 802 if (pc_ml->Aux) { 803 if 
(!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates"); 804 ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold; 805 ml_object->Amat[0].aux_data->enable = 1; 806 ml_object->Amat[0].aux_data->max_level = 10; 807 ml_object->Amat[0].num_PDEs = bs; 808 } 809 810 ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr); 811 ml_object->Amat[0].N_nonzeros = (int) info.nz_used; 812 813 if (pc_ml->dim) { 814 PetscInt i,dim = pc_ml->dim; 815 ML_Aggregate_Viz_Stats *grid_info; 816 PetscInt nlocghost; 817 818 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 819 nlocghost = Aloc->cmap->n / bs; 820 821 PetscStackCall("ML_Aggregate_VizAndStats_Setup(",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */ 822 grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid; 823 for (i = 0; i < dim; i++) { 824 /* set the finest level coordinates to point to the column-order array 825 * in pc_ml */ 826 /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */ 827 switch (i) { 828 case 0: grid_info->x = pc_ml->coords + nlocghost * i; break; 829 case 1: grid_info->y = pc_ml->coords + nlocghost * i; break; 830 case 2: grid_info->z = pc_ml->coords + nlocghost * i; break; 831 default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3"); 832 } 833 } 834 grid_info->Ndim = dim; 835 } 836 837 /* repartitioning */ 838 if (pc_ml->Repartition) { 839 PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object)); 840 PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio)); 841 PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc)); 842 PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc)); 843 #if 0 /* Function not yet defined in ml-6.2 */ 844 /* I'm not sure 
what compatibility issues might crop up if we partitioned 845 * on the finest level, so to be safe repartition starts on the next 846 * finest level (reflection default behavior in 847 * ml_MultiLevelPreconditioner) */ 848 PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1)); 849 #endif 850 851 if (!pc_ml->RepartitionType) { 852 PetscInt i; 853 854 if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates"); 855 PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN)); 856 PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim)); 857 858 for (i = 0; i < ml_object->ML_num_levels; i++) { 859 ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid; 860 grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */ 861 /* defaults from ml_agg_info.c */ 862 grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */ 863 grid_info->zoltan_timers = 0; 864 grid_info->smoothing_steps = 4; /* only relevant to hypergraph / fast hypergraph */ 865 } 866 } else { 867 PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS)); 868 } 869 } 870 871 if (pc_ml->OldHierarchy) { 872 PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object)); 873 } else { 874 PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object)); 875 } 876 if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 877 pc_ml->Nlevels = Nlevels; 878 fine_level = Nlevels - 1; 879 880 ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr); 881 /* set default smoothers */ 882 for (level=1; 
level<=fine_level; level++) { 883 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 884 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 885 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 886 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 887 } 888 ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr); 889 ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */ 890 ierr = PetscOptionsEnd();CHKERRQ(ierr); 891 892 ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr); 893 894 pc_ml->gridctx = gridctx; 895 896 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 897 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 898 gridctx[fine_level].A = A; 899 900 level = fine_level - 1; 901 if (size == 1) { /* convert ML P, R and A into seqaij format */ 902 for (mllevel=1; mllevel<Nlevels; mllevel++) { 903 mlmat = &(ml_object->Pmat[mllevel]); 904 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 905 mlmat = &(ml_object->Rmat[mllevel-1]); 906 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 907 908 mlmat = &(ml_object->Amat[mllevel]); 909 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 910 level--; 911 } 912 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 913 for (mllevel=1; mllevel<Nlevels; mllevel++) { 914 mlmat = &(ml_object->Pmat[mllevel]); 915 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 916 mlmat = &(ml_object->Rmat[mllevel-1]); 917 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 918 919 mlmat = &(ml_object->Amat[mllevel]); 920 ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 921 level--; 922 } 923 } 924 925 /* create vectors and ksp at all levels */ 926 for (level=0; level<fine_level; level++) { 927 level1 = level 
+ 1; 928 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 929 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr); 930 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 931 ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 932 933 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 934 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 935 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 936 ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 937 938 ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 939 ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 940 ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 941 ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr); 942 943 if (level == 0) { 944 ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 945 } else { 946 ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 947 } 948 } 949 ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr); 950 951 /* create coarse level and the interpolation between the levels */ 952 for (level=0; level<fine_level; level++) { 953 level1 = level + 1; 954 ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr); 955 ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr); 956 if (level > 0) { 957 ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr); 958 } 959 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr); 960 } 961 ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr); 962 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A);CHKERRQ(ierr); 963 964 /* put coordinate info in levels */ 965 if 
(pc_ml->dim) { 966 PetscInt i,j,dim = pc_ml->dim; 967 PetscInt bs, nloc; 968 PC subpc; 969 PetscReal *array; 970 971 level = fine_level; 972 for (mllevel = 0; mllevel < Nlevels; mllevel++) { 973 ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid; 974 MPI_Comm comm = ((PetscObject)gridctx[level].A)->comm; 975 976 ierr = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr); 977 ierr = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr); 978 nloc /= bs; /* number of local nodes */ 979 980 ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr); 981 ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr); 982 ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr); 983 ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr); 984 for (j = 0; j < nloc; j++) { 985 for (i = 0; i < dim; i++) { 986 switch (i) { 987 case 0: array[dim * j + i] = grid_info->x[j]; break; 988 case 1: array[dim * j + i] = grid_info->y[j]; break; 989 case 2: array[dim * j + i] = grid_info->z[j]; break; 990 default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3"); 991 } 992 } 993 } 994 995 /* passing coordinates to smoothers/coarse solver, should they need them */ 996 ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr); 997 ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr); 998 ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr); 999 level--; 1000 } 1001 } 1002 1003 /* setupcalled is set to 0 so that MG is setup from scratch */ 1004 pc->setupcalled = 0; 1005 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 1006 PetscFunctionReturn(0); 1007 } 1008 1009 /* -------------------------------------------------------------------------- */ 1010 /* 1011 PCDestroy_ML - Destroys the private context for the ML preconditioner 1012 that was created with PCCreate_ML(). 1013 1014 Input Parameter: 1015 . 
pc - the preconditioner context 1016 1017 Application Interface Routine: PCDestroy() 1018 */ 1019 #undef __FUNCT__ 1020 #define __FUNCT__ "PCDestroy_ML" 1021 PetscErrorCode PCDestroy_ML(PC pc) 1022 { 1023 PetscErrorCode ierr; 1024 PC_MG *mg = (PC_MG*)pc->data; 1025 PC_ML *pc_ml= (PC_ML*)mg->innerctx; 1026 1027 PetscFunctionBegin; 1028 ierr = PCReset_ML(pc);CHKERRQ(ierr); 1029 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 1030 ierr = PCDestroy_MG(pc);CHKERRQ(ierr); 1031 ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr); 1032 PetscFunctionReturn(0); 1033 } 1034 1035 #undef __FUNCT__ 1036 #define __FUNCT__ "PCSetFromOptions_ML" 1037 PetscErrorCode PCSetFromOptions_ML(PC pc) 1038 { 1039 PetscErrorCode ierr; 1040 PetscInt indx,PrintLevel,partindx; 1041 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 1042 const char *part[] = {"Zoltan","ParMETIS"}; 1043 #if defined(HAVE_ML_ZOLTAN) 1044 PetscInt zidx; 1045 const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"}; 1046 #endif 1047 PC_MG *mg = (PC_MG*)pc->data; 1048 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 1049 PetscMPIInt size; 1050 MPI_Comm comm; 1051 1052 PetscFunctionBegin; 1053 ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr); 1054 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 1055 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 1056 1057 PrintLevel = 0; 1058 indx = 0; 1059 partindx = 0; 1060 1061 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr); 1062 PetscStackCall("ML_Set_PrintLeve",ML_Set_PrintLevel(PrintLevel)); 1063 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr); 1064 ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr); 1065 ierr = 
PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr); 1066 1067 pc_ml->CoarsenScheme = indx; 1068 1069 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr); 1070 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr); 1071 ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr); 1072 ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr); 1073 ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr); 1074 ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr); 1075 ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr); 1076 ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr); 1077 /* 1078 The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over. 1079 This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions. 
1080 1081 We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working 1082 combination of options and ML's exit(1) explanations don't help matters. 1083 */ 1084 if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4"); 1085 if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel"); 1086 if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);} 1087 if (pc_ml->EnergyMinimization) { 1088 ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr); 1089 } 1090 if (pc_ml->EnergyMinimization == 2) { 1091 /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */ 1092 ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr); 1093 } 1094 /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */ 1095 if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE; 1096 ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr); 1097 /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. 
*/ 1098 if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE; 1099 ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr); 1100 /* 1101 ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls 1102 ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function 1103 ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the 1104 functionality in the old function, so some users may still want to use it. Note that many options are ignored in 1105 this context, but ML doesn't provide a way to find out which ones. 1106 */ 1107 ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr); 1108 ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the heirarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr); 1109 if (pc_ml->Repartition) { 1110 ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr); 1111 ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr); 1112 ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr); 1113 #if defined(HAVE_ML_ZOLTAN) 1114 partindx = 0; 1115 ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to 
use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr); 1116 1117 pc_ml->RepartitionType = partindx; 1118 if (!partindx) { 1119 PetscInt zindx = 0; 1120 1121 ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr); 1122 1123 pc_ml->ZoltanScheme = zindx; 1124 } 1125 #else 1126 partindx = 1; 1127 ierr = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr); 1128 if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan"); 1129 #endif 1130 ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr); 1131 ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr); 1132 } 1133 ierr = PetscOptionsTail();CHKERRQ(ierr); 1134 PetscFunctionReturn(0); 1135 } 1136 1137 /* -------------------------------------------------------------------------- */ 1138 /* 1139 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 1140 and sets this as the private data within the generic preconditioning 1141 context, PC, that was created within PCCreate(). 1142 1143 Input Parameter: 1144 . pc - the preconditioner context 1145 1146 Application Interface Routine: PCCreate() 1147 */ 1148 1149 /*MC 1150 PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide 1151 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 1152 operators are computed by ML, with the matrices coverted to PETSc matrices in aij format 1153 and the restriction/interpolation operators wrapped as PETSc shell matrices. 

   Options Database Key:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
.  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
.  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
.  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
.  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
.  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
.  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
.
-pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
-  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/

#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG; initialize pc as a PCMG first */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* PCMG tries to use any DM associated with the PC, so it must be deleted here */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);

  mg           = (PC_MG*)pc->data;
  mg->galerkin = 2; /* Use Galerkin, but it is computed externally (by ML) */

  /* create the supporting PC_ML struct and hang it off the PCMG data */
  ierr = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);

  mg->innerctx = pc_ml;

  /* ML objects are built later, in PCSetUp_ML(); none exist yet */
  pc_ml->ml_object   = 0;
  pc_ml->agg_object  = 0;
  pc_ml->gridctx     = 0;
  pc_ml->PetscMLdata = 0;

  /* hierarchy / aggregation defaults */
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  /* coordinate data; filled in if PCSetCoordinates() is called */
  pc_ml->dim    = 0;
  pc_ml->nloc   = 0;
  pc_ml->coords = 0;

  /* repartitioning defaults */
  pc_ml->Repartition     = PETSC_FALSE;
  pc_ml->MaxMinRatio     = 1.3;
  pc_ml->MinPerProc      = 512;
  pc_ml->PutOnSingleProc = 5000;
  pc_ml->RepartitionType = 0;
  pc_ml->ZoltanScheme    = 0;

  /* auxiliary coordinate-based Laplacian aggregation defaults */
  pc_ml->Aux          = PETSC_FALSE;
  pc_ml->AuxThreshold = 0.0;

  /* allow for coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the PCMG function table entries with the PCML versions */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}