/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include <petsc-private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>   /*I "petscksp.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscdm.h>   /* for DMDestroy(&pc->mg) hack */

EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
#include <ml_viz_stats.h>
EXTERN_C_END

typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;   /* global vectors */
  Mat A,P,R;
  KSP ksp;
  Vec coords;  /* projected by ML, if PCSetCoordinates is called; values packed by node */
} GridCtx;

/* The context used to input PETSc matrix into ML at fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML                *ml_object;
  ML_Aggregate      *agg_object;
  GridCtx           *gridctx;
  FineGridCtx       *PetscMLdata;
  PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
  PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
  PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
  PetscBool         reuse_interpolation;
  PCMLNullSpaceType nulltype;
  PetscMPIInt       size; /* size of communicator for pc->pmat */
  PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
  PetscInt          nloc;
  PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
} PC_ML;

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a  = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {   /* was ||, which is always true; the intent is a bounds check */
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++) {
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
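  /* Update routine that ML calls before applying the local operator: the owned
     entries of p are scattered through the MPIAIJ communication context so the
     tail of p also holds current values for the ghost (off-process) columns of
     Aloc. With one process there is nothing to communicate. */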
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_data;
  Mat            A   = ml->A;
  Mat_MPIAIJ     *a  = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0); /* plain return 0 would unbalance the PetscFunctionBegin stack */

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A   = ml->A, Aloc = ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork = ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr     = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray));
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A * x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
    }
    ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
  } else {
    ierr     = VecGetArray(x, &xarray);CHKERRQ(ierr);
    ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray));
    ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* newtype is ignored because this function only handles one conversion */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a = (Mat_SeqAIJ*)(mpimat->A)->data,*b = (Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai = a->i,*aj = a->j,*bi = b->i,*bj = b->j;
  PetscScalar    *aa = a->a,*ba = b->a,*ca;
  PetscInt       am = A->rmap->n,an = A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscMalloc1((1+am),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    ierr = PetscMalloc1((1+ci[am]),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc1((1+ci[am]),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n + mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX) {
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci  = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m = mlmat->outvec_leng,n = mlmat->invec_leng,*nnz = NULL,nz_max;
  PetscInt              *ml_cols = matdata->columns,*ml_rowptr = matdata->rowptr,*aj,i;
  PetscScalar           *ml_vals = matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
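    /* Rectangular operators (P and R) share ML's CSR arrays in place rather
       than copying them; on first creation the column indices of each row are
       sorted below, since ML does not guarantee sorted rows while the SeqAIJ
       routines that consume these arrays expect them. */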
    if (reuse) {
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc1((m+1),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++) {
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj  += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  nz_max = PetscMax(1,mlmat->max_nz_per_row);
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (!reuse) {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(*newmat,mlmat->num_PDEs);CHKERRQ(ierr);

    ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;

  if (reuse) {
    ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;

  ierr = PetscNew(&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);

  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = NULL;

  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);

  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt       *aj;
  PetscScalar    *aa;
  PetscErrorCode ierr;
  PetscInt       i,j,*gordering;
  PetscInt       m = mlmat->outvec_leng,n,nz_max,row;
  Mat            A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  /* create global row numbering for an ML_Operator */
  PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));

  nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
  ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
  if (reuse) {
    A = *newmat;
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    PetscInt rstart;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    /* keep track of block size for A matrices */
    ierr = MatSetBlockSize(A,mlmat->num_PDEs);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
    ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPIU_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
    rstart -= m;

    for (i=0; i<m; i++) {
      row = gordering[i] - rstart;
      PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
      nnzA[row] = 0;
      for (j=0; j<nnz[i]; j++) {
        if (aj[j] < m) nnzA[row]++;
      }
      nnzB[row] = nnz[i] - nnzA[row];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];

    PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
    for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
    ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  PetscStackCall("ML_free",ML_free(gordering));
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_ML

   Input Parameter:
.  pc - the preconditioner context
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_ML"
static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
{
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend;
  Mat            Amat = pc->pmat;

  /* this function copied and modified from PCSetCoordinates_GEO -TGI */
  PetscFunctionBegin;
  PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
  ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);

  ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;

  if (nloc != a_nloc) SETERRQ2(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG,"Number of local blocks %d must equal the number of node locations %d.",nloc,a_nloc);
  if ((Iend-my0)%bs != 0) SETERRQ1(PetscObjectComm((PetscObject)Amat),PETSC_ERR_ARG_WRONG,"Bad local size %d.",nloc);

  oldarrsz    = pc_ml->dim * pc_ml->nloc;
  pc_ml->dim  = ndm;
  pc_ml->nloc = a_nloc;
  arrsz       = ndm * a_nloc;

  /* create data - syntactic sugar that should be refactored at some point */
  if (!pc_ml->coords || (oldarrsz != arrsz)) {
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
  }
  for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
  /* copy data in - column oriented */
  for (kk = 0; kk < nloc; kk++) {
    for (ii = 0; ii < ndm; ii++) {
      pc_ml->coords[ii*nloc + kk] = coords[kk*ndm + ii];
    }
  }
  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
extern PetscErrorCode PCReset_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;

  PetscFunctionBegin;
  if (dim) {
    ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)pc_ml->ml_object->Grid[0].Grid;

    for (level=0; level<=fine_level; level++) {
      ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
    }

    grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
    grid_info->y = 0;
    grid_info->z = 0;

    PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
  }
  PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
  PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);

  pc_ml->dim  = 0;
  pc_ml->nloc = 0;
  ierr = PCReset_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCReset_MG(PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size;
  FineGridCtx      *PetscMLdata;
  ML               *ml_object;
  ML_Aggregate     *agg_object;
  ML_Operator      *mlmat;
  PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat              A,Aloc;
  GridCtx          *gridctx;
  PC_MG            *mg    = (PC_MG*)pc->data;
  PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool        isSeq, isMPI;
  KSP              smoother;
  PC               subpc;
  PetscInt         mesh_level, old_mesh_level;
  MatInfo          info;
  static PetscBool cite = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML} 3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution = {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
  A    = pc->pmat;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
         Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less
         expensive for multiple solves in which the matrix is not changing too quickly.
      */
      ml_object             = pc_ml->ml_object;
      gridctx               = pc_ml->gridctx;
      Nlevels               = pc_ml->Nlevels;
      fine_level            = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI) {
        ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr        = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
      PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
        PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
        PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
      }

      level = fine_level - 1;
      if (size == 1) { /* refresh the coarse-level A matrices in seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* refresh the coarse-level A matrices in mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++) {
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0) {
          ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at fine grid */
  pc_ml->size = size;
  ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI) {
    ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
  /* create and initialize struct 'PetscMLdata' */
  ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr               = PetscMalloc1((Aloc->cmap->n+1),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
  if (pc_ml->dim) { /* create vecs around the coordinate data given */
    PetscInt  i,j,dim = pc_ml->dim;
    PetscInt  nloc = pc_ml->nloc,nlocghost;
    PetscReal *ghostedcoords;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;
    ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
    for (i = 0; i < dim; i++) {
      /* copy coordinate values into first component of pwork */
      for (j = 0; j < nloc; j++) {
        PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
      }
      /* get the ghost values */
      ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
      /* write into the vector */
      for (j = 0; j < nlocghost; j++) {
        ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
      }
    }
    /* replace the original coords with the ghosted coords, because these are
     * what ML needs */
    ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
    pc_ml->coords = ghostedcoords;
  }

  /* create ML discretization matrix at fine grid */
  /* ML requires input of the fine-grid matrix; it determines the number of levels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
  PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
  pc_ml->ml_object = ml_object;
  PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
  PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
  PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));

  PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
  /* aggregation */
  PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar       *nullvec;
      const PetscScalar *v;
      PetscBool         has_const;
      PetscInt          i,j,mlocal,nvec,M;
      const Vec         *vecs;

      if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
    }
  }
  PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
  case 2:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
  case 3:
    PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
  }
  PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
  PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
  if (pc_ml->SpectralNormScheme_Anorm) {
    PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->Aux) {
    if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
    ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
    ml_object->Amat[0].aux_data->enable    = 1;
    ml_object->Amat[0].aux_data->max_level = 10;
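    /* num_PDEs tells ML how many degrees of freedom share each node, so the
       auxiliary distance Laplacian is built per node rather than per dof */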
    ml_object->Amat[0].num_PDEs = bs;
  }

  ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
  ml_object->Amat[0].N_nonzeros = (int) info.nz_used;

  if (pc_ml->dim) {
    PetscInt               i,dim = pc_ml->dim;
    ML_Aggregate_Viz_Stats *grid_info;
    PetscInt               nlocghost;

    ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
    nlocghost = Aloc->cmap->n / bs;

    PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
    grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
    for (i = 0; i < dim; i++) {
      /* set the finest level coordinates to point to the column-order array
       * in pc_ml */
      /* NOTE: must point away before VizAndStats_Clean so ML doesn't free */
      switch (i) {
      case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
      case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
      case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
      default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
      }
    }
    grid_info->Ndim = dim;
  }

  /* repartitioning */
  if (pc_ml->Repartition) {
    PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
    PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
    PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
    PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
#if 0 /* Function not yet defined in ml-6.2 */
    /* I'm not sure what compatibility issues might crop up if we partitioned
     * on the finest level, so to be safe repartitioning starts on the next
     * finest level (reflecting the default behavior in
     * ml_MultiLevelPreconditioner) */
    PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
#endif

    if (!pc_ml->RepartitionType) {
      PetscInt i;

      if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
      PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));

      for (i = 0; i < ml_object->ML_num_levels; i++) {
        ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
        grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
        /* defaults from ml_agg_info.c */
        grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
        grid_info->zoltan_timers        = 0;
        grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
      }
    } else {
      PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
    }
  }

  if (pc_ml->OldHierarchy) {
    PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  } else {
    PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
  }
  if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
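  /* ML numbers its levels 0 (finest) .. Nlevels-1 (coarsest), while PCMG uses
     the opposite convention, 0 (coarsest) .. Nlevels-1 (finest); everything
     below maps between the two numberings */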
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
  /* set default smoothers */
  for (level=1; level<=fine_level; level++) {
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);

  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1) { /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++) {
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
    ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0) {
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++) {
    level1 = level + 1;
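    /* the operator stored at gridctx[level] interpolates from this (coarser)
       level up to level1; its restriction counterpart maps back down */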
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0) {
      ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);

  /* put coordinate info in levels */
  if (pc_ml->dim) {
    PetscInt  i,j,dim = pc_ml->dim;
    PetscInt  bs, nloc;
    PC        subpc;
    PetscReal *array;

    level = fine_level;
    for (mllevel = 0; mllevel < Nlevels; mllevel++) {
      ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
      MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;

      ierr  = MatGetBlockSize(gridctx[level].A,&bs);CHKERRQ(ierr);
      ierr  = MatGetLocalSize(gridctx[level].A,NULL,&nloc);CHKERRQ(ierr);
      nloc /= bs; /* number of local nodes */

      ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
      ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      for (j = 0; j < nloc; j++) {
        for (i = 0; i < dim; i++) {
          switch (i) {
          case 0: array[dim * j + i] = grid_info->x[j]; break;
          case 1: array[dim * j + i] = grid_info->y[j]; break;
          case 2: array[dim * j + i] = grid_info->z[j]; break;
          default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
          }
        }
      }

      /* pass coordinates to the smoothers/coarse solver, should they need them */
      ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
      ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
      ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
      level--;
    }
  }

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel,partindx;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  const char     *part[]   = {"Zoltan","ParMETIS"};
#if defined(HAVE_ML_ZOLTAN)
  const char     *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
#endif
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);

  PrintLevel = 0;
  indx       = 0;
  partindx   = 0;

  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
  PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);

  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take
    over. This is suboptimal because it amounts to calling exit(1), so we check for the most common conditions.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a
    working combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation(), which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the
    functionality of the old function, so some users may still want to use it. Note that many options are ignored
    in this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_repartition","Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
  if (pc_ml->Repartition) {
    ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio","Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc","Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
    ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc","Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
#if defined(HAVE_ML_ZOLTAN)
    partindx = 0;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType","Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);

    pc_ml->RepartitionType = partindx;
    if (!partindx) {
      PetscInt zindx = 0;

      ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme","Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);

      pc_ml->ZoltanScheme = zindx;
    }
#else
    partindx = 1;
    ierr     = PetscOptionsEList("-pc_ml_repartitionType","Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
    if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
#endif
  }
  /* auxiliary aggregation is independent of repartitioning, so these options are parsed unconditionally */
  ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates a ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
.  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
.  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
.  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
.  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
.  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
.  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
.  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
.  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based laplacian (None)
-  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
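/*
   Typical usage (a sketch; the executable name and the choice of smoother
   options below are illustrative, not prescriptive):

     mpiexec -n 4 ./ex1 -ksp_type cg -pc_type ml -pc_ml_maxNlevels 3 \
         -mg_levels_ksp_type richardson -mg_levels_pc_type sor

   or, from application code, simply

     PCSetType(pc,PCML);

   after which the per-level smoothers and the coarse solve can be configured
   through the usual PCMG option prefixes -mg_levels_ and -mg_coarse_.
*/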
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg   = (PC_MG*)pc->data;
  mg->galerkin = 2; /* Use Galerkin, but it is computed externally by ML */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1;
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;
  pc_ml->dim                      = 0;
  pc_ml->nloc                     = 0;
  pc_ml->coords                   = 0;
  pc_ml->Repartition              = PETSC_FALSE;
  pc_ml->MaxMinRatio              = 1.3;
  pc_ml->MinPerProc               = 512;
  pc_ml->PutOnSingleProc          = 5000;
  pc_ml->RepartitionType          = 0;
  pc_ml->ZoltanScheme             = 0;
  pc_ml->Aux                      = PETSC_FALSE;
  pc_ml->AuxThreshold             = 0.0;

  /* allow for coordinates to be passed */
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}