#include <../src/ksp/pc/impls/is/nn/nn.h>

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_NN"
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    ierr = PCISSetUp(pc);CHKERRQ(ierr);
    /* Create the coarse matrix. */
    ierr = PCNNCreateCoarseMatrix(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
+  pc - the preconditioner context
-  r - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
*/
#undef __FUNCT__
#define __FUNCT__ "PCApply_NN"
static PetscErrorCode PCApply_NN(PC pc,Vec r,Vec z)
{
  PC_IS          *pcis = (PC_IS*)(pc->data);
  PetscErrorCode ierr;
  PetscScalar    m_one = -1.0;
  Vec            w     = pcis->vec1_global;

  PetscFunctionBegin;
  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)}r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  ierr = VecScatterBegin(pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,r,pcis->vec1_D,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)}r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  ierr = MatMult(pcis->A_BI,pcis->vec2_D,pcis->vec1_B);CHKERRQ(ierr);
  ierr = VecScale(pcis->vec1_B,m_one);CHKERRQ(ierr);
  ierr = VecCopy(r,w);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,pcis->vec1_B,w,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);

  /*
    Apply the interface preconditioner.
  */
  ierr = PCNNApplyInterfacePreconditioner(pc,w,z,pcis->work_N,pcis->vec1_B,pcis->vec2_B,pcis->vec3_B,pcis->vec1_D,
                                          pcis->vec3_D,pcis->vec1_N,pcis->vec2_N);CHKERRQ(ierr);

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  ierr = VecScatterBegin(pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,pcis->vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = MatMult(pcis->A_IB,pcis->vec1_B,pcis->vec1_D);CHKERRQ(ierr);

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)}t_I^{(i)} $ and inserting into the global vector the blocks
    $ B_I^{(i)}r_I^{(i)} - B_I^{(i)}t_I^{(i)} $.
  */
  ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,INSERT_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = KSPSolve(pcis->ksp_D,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
  ierr = VecScale(pcis->vec2_D,m_one);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_D,pcis->vec2_D,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
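/*
   To summarize the four steps above in one place (using the same notation as
   the step comments), PCApply_NN computes

     $ z_B = M^{-1} ( r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} B_I^{(j)} r_I^{(j)} ) $
     $ z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B ) $

   where $ M^{-1} $ denotes the (balanced) interface preconditioner applied by
   PCNNApplyInterfacePreconditioner().
*/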
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
                  that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_NN"
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN          *pcnn = (PC_NN*)pc->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCISDestroy(pc);CHKERRQ(ierr);

  ierr = MatDestroy(&pcnn->coarse_mat);CHKERRQ(ierr);
  ierr = VecDestroy(&pcnn->coarse_x);CHKERRQ(ierr);
  ierr = VecDestroy(&pcnn->coarse_b);CHKERRQ(ierr);
  ierr = KSPDestroy(&pcnn->ksp_coarse);CHKERRQ(ierr);
  if (pcnn->DZ_IN) {
    ierr = PetscFree(pcnn->DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscFree(pcnn->DZ_IN);CHKERRQ(ierr);
  }

  /*
    Free the private data structure that was hanging off the PC.
  */
  ierr = PetscFree(pc->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*MC
   PCNN - Balancing Neumann-Neumann for scalar elliptic PDEs.

   Options Database Keys:
+  -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                     (this skips the first coarse grid solve in the preconditioner)
.  -pc_nn_turn_off_second_balancing - do not balance the solution after solving the local Neumann problems
                                      (this skips the second coarse grid solve in the preconditioner)
.  -pc_is_damp_fixed <fact> -
.  -pc_is_remove_nullspace_fixed -
.  -pc_is_set_damping_factor_floating <fact> -
.  -pc_is_not_damp_floating -
-  -pc_is_not_remove_nullspace_floating -

   Level: intermediate

   Notes:
   The matrix used with this preconditioner must be of type MATIS.

   Unlike more 'conventional' Neumann-Neumann preconditioners, this one iterates over ALL the
   degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
   on the subdomains, though in our experience using approximate solvers is slower).

   Options for the coarse grid preconditioner can be set with -nn_coarse_pc_xxx.
   Options for the Dirichlet subproblem preconditioner can be set with -is_localD_pc_xxx.
   Options for the Neumann subproblem preconditioner can be set with -is_localN_pc_xxx.

   Contributed by Paulo Goldfeld

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, MATIS
M*/
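/*
   A minimal usage sketch (illustrative only; it assumes a MATIS matrix A and
   vectors b and x with compatible layouts have already been created):

     KSP ksp;
     PC  pc;
     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,SAME_PRECONDITIONER);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCNN);CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

   The same selection can be made at run time with -pc_type nn.
*/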
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_NN"
PetscErrorCode PCCreate_NN(PC pc)
{
  PetscErrorCode ierr;
  PC_NN          *pcnn;

  PetscFunctionBegin;
  /*
    Create the private data structure for this preconditioner and
    attach it to the PC object.
  */
  ierr = PetscNewLog(pc,PC_NN,&pcnn);CHKERRQ(ierr);
  pc->data = (void*)pcnn;

  ierr = PCISCreate(pc);CHKERRQ(ierr);
  pcnn->coarse_mat = 0;
  pcnn->coarse_x   = 0;
  pcnn->coarse_b   = 0;
  pcnn->ksp_coarse = 0;
  pcnn->DZ_IN      = 0;

  /*
    Set the pointers for the functions that are provided above.
    Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
    are called, they will automatically call these functions.  Note we
    choose not to provide a couple of these functions since they are
    not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = 0;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = 0;
  pc->ops->applyrichardson     = 0;
  pc->ops->applysymmetricleft  = 0;
  pc->ops->applysymmetricright = 0;
  PetscFunctionReturn(0);
}
EXTERN_C_END

/* -------------------------------------------------------------------------- */
/*
   PCNNCreateCoarseMatrix -
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNCreateCoarseMatrix"
PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
{
  MPI_Request    *send_request, *recv_request;
  PetscErrorCode ierr;
  PetscInt       i, j, k;
  PetscScalar    *mat;     /* Sub-matrix with this subdomain's contribution to the coarse matrix */
  PetscScalar    **DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS          *pcis     = (PC_IS*)(pc->data);
  PC_NN          *pcnn     = (PC_NN*)pc->data;
  PetscInt       n_neigh   = pcis->n_neigh;
  PetscInt       *neigh    = pcis->neigh;
  PetscInt       *n_shared = pcis->n_shared;
  PetscInt       **shared  = pcis->shared;
  PetscScalar    **DZ_IN;  /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  ierr = PetscMalloc((n_neigh*n_neigh+1)*sizeof(PetscScalar),&mat);CHKERRQ(ierr);

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    ierr  = PetscMalloc((n_neigh+1)*sizeof(PetscScalar*),&pcnn->DZ_IN);CHKERRQ(ierr);
    DZ_IN = pcnn->DZ_IN;
    ierr  = PetscMalloc((n_neigh+1)*sizeof(PetscScalar*),&DZ_OUT);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) size_of_Z += n_shared[i];
    ierr = PetscMalloc((size_of_Z+1)*sizeof(PetscScalar),&DZ_IN[0]);CHKERRQ(ierr);
    ierr = PetscMalloc((size_of_Z+1)*sizeof(PetscScalar),&DZ_OUT[0]);CHKERRQ(ierr);
  }
  for (i=1; i<n_neigh; i++) {
    DZ_IN[i]  = DZ_IN [i-1] + n_shared[i-1];
    DZ_OUT[i] = DZ_OUT[i-1] + n_shared[i-1];
  }

  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  ierr = PCISScatterArrayNToVecB(pcis->work_N,pcis->D,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (i=1; i<n_neigh; i++) {
    for (j=0; j<n_shared[i]; j++) {
      DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
    }
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    ierr = PetscObjectGetNewTag((PetscObject)pc,&tag);CHKERRQ(ierr);
    ierr = PetscMalloc((2*(n_neigh)+1)*sizeof(MPI_Request),&send_request);CHKERRQ(ierr);
    recv_request = send_request + (n_neigh);
    for (i=1; i<n_neigh; i++) {
      ierr = MPI_Isend((void*)(DZ_OUT[i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(send_request[i]));CHKERRQ(ierr);
      ierr = MPI_Irecv((void*)(DZ_IN [i]),n_shared[i],MPIU_SCALAR,neigh[i],tag,((PetscObject)pc)->comm,&(recv_request[i]));CHKERRQ(ierr);
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0]==rank, always) */
  for (j=0; j<n_shared[0]; j++) DZ_IN[0][j] = pcis->work_N[shared[0][j]];

  /* Start computing with local D*Z while communication goes on.   */
  /* Apply Schur complement. The result is "stored" in vec (more   */
  /* precisely, vec points to the result, stored in pc_nn->vec1_B) */
  /* and also scattered to pcnn->work_N.                           */
  ierr = PCNNApplySchurToChunk(pc,n_shared[0],shared[0],DZ_IN[0],pcis->work_N,pcis->vec1_B,
                               pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);

  /* Compute the first column, while completing the receiving. */
  for (i=0; i<n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind=0;
    if (i>0) { ierr = MPI_Waitany(n_neigh-1,recv_request+1,&ind,&stat);CHKERRQ(ierr); ind++;}
    mat[ind*n_neigh+0] = 0.0;
    for (k=0; k<n_shared[ind]; k++) mat[ind*n_neigh+0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
  }

  /* Compute the remaining columns. */
  for (j=1; j<n_neigh; j++) {
    ierr = PCNNApplySchurToChunk(pc,n_shared[j],shared[j],DZ_IN[j],pcis->work_N,pcis->vec1_B,
                                 pcis->vec2_B,pcis->vec1_D,pcis->vec2_D);CHKERRQ(ierr);
    for (i=0; i<n_neigh; i++) {
      mat[i*n_neigh+j] = 0.0;
      for (k=0; k<n_shared[i]; k++) mat[i*n_neigh+j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
    }
  }

  /* Complete the sending. */
  if (n_neigh>1) {
    MPI_Status *stat;
    ierr = PetscMalloc((n_neigh-1)*sizeof(MPI_Status),&stat);CHKERRQ(ierr);
    if (n_neigh-1) {ierr = MPI_Waitall(n_neigh-1,&(send_request[1]),stat);CHKERRQ(ierr);}
    ierr = PetscFree(stat);CHKERRQ(ierr);
  }

  /* Free the memory for the MPI requests */
  ierr = PetscFree(send_request);CHKERRQ(ierr);

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    ierr = PetscFree(DZ_OUT[0]);CHKERRQ(ierr);
    ierr = PetscFree(DZ_OUT);CHKERRQ(ierr);
  }
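  /*
     At this point mat[] holds this subdomain's local contribution to the
     coarse matrix: roughly speaking, mat[i*n_neigh+j] is the inner product,
     over the interface nodes shared with neigh[i], of D_i Z_i with the local
     Schur complement applied to D_j Z_j (via PCNNApplySchurToChunk() above).
     The contributions from all subdomains are summed below with ADD_VALUES.
  */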
  {
    PetscMPIInt size;
    ierr = MPI_Comm_size(((PetscObject)pc)->comm,&size);CHKERRQ(ierr);

    /* Create the global coarse vectors (rhs and solution). */
    ierr = VecCreateMPI(((PetscObject)pc)->comm,1,size,&(pcnn->coarse_b));CHKERRQ(ierr);
    ierr = VecDuplicate(pcnn->coarse_b,&(pcnn->coarse_x));CHKERRQ(ierr);

    /* Create and set the global coarse AIJ matrix. */
    ierr = MatCreate(((PetscObject)pc)->comm,&(pcnn->coarse_mat));CHKERRQ(ierr);
    ierr = MatSetSizes(pcnn->coarse_mat,1,1,size,size);CHKERRQ(ierr);
    ierr = MatSetType(pcnn->coarse_mat,MATAIJ);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(pcnn->coarse_mat,1,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
    ierr = MatSetValues(pcnn->coarse_mat,n_neigh,neigh,n_neigh,neigh,mat,ADD_VALUES);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd  (pcnn->coarse_mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) {  /* does NOT zero the row; it passes an empty index set. The reason is that MatZeroRows() is collective. */
      ierr = MatZeroRows(pcnn->coarse_mat,0,PETSC_NULL,one,0,0);CHKERRQ(ierr);
    } else {  /* here it DOES zero the row, since it's not a floating subdomain. */
      PetscInt row = (PetscInt)rank;
      ierr = MatZeroRows(pcnn->coarse_mat,1,&row,one,0,0);CHKERRQ(ierr);
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    ierr = KSPCreate(((PetscObject)pc)->comm,&pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse,(PetscObject)pc,2);CHKERRQ(ierr);
    ierr = KSPSetOperators(pcnn->ksp_coarse,pcnn->coarse_mat,pcnn->coarse_mat,SAME_PRECONDITIONER);CHKERRQ(ierr);
    ierr = KSPGetPC(pcnn->ksp_coarse,&pc_ctx);CHKERRQ(ierr);
    ierr = PCSetType(pc_ctx,PCREDUNDANT);CHKERRQ(ierr);
    ierr = KSPSetType(pcnn->ksp_coarse,KSPPREONLY);CHKERRQ(ierr);
    ierr = PCRedundantGetKSP(pc_ctx,&inner_ksp);CHKERRQ(ierr);
    ierr = KSPGetPC(inner_ksp,&inner_pc);CHKERRQ(ierr);
    ierr = PCSetType(inner_pc,PCLU);CHKERRQ(ierr);
    ierr = KSPSetOptionsPrefix(pcnn->ksp_coarse,"nn_coarse_");CHKERRQ(ierr);
    ierr = KSPSetFromOptions(pcnn->ksp_coarse);CHKERRQ(ierr);
    ierr = KSPSetUp(pcnn->ksp_coarse);CHKERRQ(ierr);
  }

  /* Free the memory for mat */
  ierr = PetscFree(mat);CHKERRQ(ierr);

  /* For DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_save_coarse_matrix",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (flg) {
      PetscViewer viewer;
      ierr = PetscViewerASCIIOpen(PETSC_COMM_WORLD,"coarse.m",&viewer);CHKERRQ(ierr);
      ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_ASCII_MATLAB);CHKERRQ(ierr);
      ierr = MatView(pcnn->coarse_mat,viewer);CHKERRQ(ierr);
      ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
    }
  }

  /* Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(0);
}
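/*
   Design note on the coarse solver set up above: the coarse matrix has only
   one row per process, so a KSPPREONLY/PCREDUNDANT/PCLU combination is used;
   by default each process ends up with its own factored copy of the (tiny)
   coarse matrix, so the repeated coarse solves performed in PCNNBalancing()
   stay cheap.
*/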
/* -------------------------------------------------------------------------- */
/*
   PCNNApplySchurToChunk -

   Input parameters:
+  pcnn - the preconditioner context
.  n - size of chunk
.  idx - indices of chunk
-  chunk - values

   Output parameters:
+  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B - result of Schur complement applied to chunk
.  vec2_B - garbage (used as work space)
.  vec1_D - garbage (used as work space)
-  vec2_D - garbage (used as work space)
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplySchurToChunk"
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt *idx, PetscScalar *chunk, PetscScalar *array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscErrorCode ierr;
  PetscInt       i;
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscMemzero((void*)array_N, pcis->n*sizeof(PetscScalar));CHKERRQ(ierr);
  for (i=0; i<n; i++) array_N[idx[i]] = chunk[i];
  ierr = PCISScatterArrayNToVecB(array_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(array_N,vec1_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
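/*
   This routine is the building block of PCNNCreateCoarseMatrix(): it is
   called once with chunk = DZ_IN[0] and then once per remaining neighbour
   with chunk = DZ_IN[j], producing the local Schur complement applied to
   D_j Z_j, from which the coarse-matrix columns are accumulated.
*/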
/* -------------------------------------------------------------------------- */
/*
   PCNNApplyInterfacePreconditioner - Applies the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input parameter:
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output parameters:
+  z - global vector of interior and interface nodes. The values on the interface are the result of
       the application of the interface preconditioner to the interface part of r. The values on the
       interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
-  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNApplyInterfacePreconditioner"
PetscErrorCode PCNNApplyInterfacePreconditioner(PC pc, Vec r, Vec z, PetscScalar *work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PetscErrorCode ierr;
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_turn_off_first_balancing",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,(Vec)0,z,vec1_B,vec2_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecCopy(r,z);CHKERRQ(ierr);
    }
  }

  /*
    Extract the local interface part of z and scale it by D.
  */
  ierr = VecScatterBegin(pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,vec1_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);

  /* Neumann Solver */
  ierr = PCISApplyInvSchur(pc,vec2_B,vec1_B,vec1_N,vec2_N);CHKERRQ(ierr);

  /*
    Second balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    /* Note: the option key checked here previously read "-pc_turn_off_second_balancing",
       which did not match the documented "-pc_nn_turn_off_second_balancing". */
    ierr = PetscOptionsGetBool(PETSC_NULL,"-pc_nn_turn_off_second_balancing",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (!flg) {
      ierr = PCNNBalancing(pc,r,vec1_B,z,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D,work_N);CHKERRQ(ierr);
    } else {
      ierr = VecPointwiseMult(vec2_B,pcis->D,vec1_B);CHKERRQ(ierr);
      ierr = VecSet(z,0.0);CHKERRQ(ierr);
      ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
      ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
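/*
   Roughly speaking, the routine above applies the balanced Neumann-Neumann
   interface preconditioner as

     $ z_B = \mathrm{Balance}( D S^{+} D \, \mathrm{Balance}(r_B) ) $

   i.e., an optional coarse-grid balancing of the residual, scaling by the
   partition-of-unity matrix D, local Neumann solves (application of the
   pseudo-inverses of the local Schur complements via PCISApplyInvSchur()),
   and a second optional balancing of the solution; the trailing D scaling
   is folded into the second step (see PCNNBalancing() and the else branch
   above).
*/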
/* -------------------------------------------------------------------------- */
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
+  pcnn - NN preconditioner context.
.  r - MPI vector of all nodes (interior and interface). It's preserved.
-  u - (Optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
+  z - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - Sequential vector of local interface nodes. Workspace.
.  vec2_B - Sequential vector of local interface nodes. Workspace.
.  vec3_B - (Optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - Sequential vector of local interior nodes. Workspace.
.  vec2_D - Sequential vector of local interior nodes. Workspace.
-  work_N - Array of all local nodes (interior and interface). Workspace.
*/
#undef __FUNCT__
#define __FUNCT__ "PCNNBalancing"
PetscErrorCode PCNNBalancing(PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscErrorCode ierr;
  PetscInt       k;
  PetscScalar    value;
  PetscScalar    *lambda;
  PC_NN          *pcnn = (PC_NN*)(pc->data);
  PC_IS          *pcis = (PC_IS*)(pc->data);

  PetscFunctionBegin;
  ierr = PetscLogEventBegin(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  if (u) {
    if (!vec3_B) vec3_B = u;
    ierr = VecPointwiseMult(vec1_B,pcis->D,u);CHKERRQ(ierr);
    ierr = VecSet(z,0.0);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec3_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecScale(vec3_B,-1.0);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
    ierr = VecScatterBegin(pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,vec3_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_REVERSE,pc);CHKERRQ(ierr);
  for (k=0, value=0.0; k<pcis->n_shared[0]; k++) value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]];
  value *= pcnn->factor_coarse_rhs;  /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    PetscMPIInt rank;
    ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
    ierr = VecSetValue(pcnn->coarse_b,rank,value,INSERT_VALUES);CHKERRQ(ierr);
    /*
       Since we are only inserting local values (one value, actually), we don't need the
       reduction that tells us there is no data to be moved. Hence we comment out these calls:
       ierr = VecAssemblyBegin(pcnn->coarse_b);CHKERRQ(ierr);
       ierr = VecAssemblyEnd  (pcnn->coarse_b);CHKERRQ(ierr);
    */
  }
  ierr = KSPSolve(pcnn->ksp_coarse,pcnn->coarse_b,pcnn->coarse_x);CHKERRQ(ierr);
  if (!u) { ierr = VecScale(pcnn->coarse_x,-1.0);CHKERRQ(ierr); }
  ierr = VecGetArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  for (k=0; k<pcis->n_shared[0]; k++) work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k];
  ierr = VecRestoreArray(pcnn->coarse_x,&lambda);CHKERRQ(ierr);
  ierr = PCISScatterArrayNToVecB(work_N,vec2_B,INSERT_VALUES,SCATTER_FORWARD,pc);CHKERRQ(ierr);
  ierr = VecSet(z,0.0);CHKERRQ(ierr);
  ierr = VecScatterBegin(pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,vec2_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  if (!u) {
    ierr = VecScatterBegin(pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd  (pcis->global_to_B,z,vec2_B,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = PCISApplySchur(pc,vec2_B,vec1_B,(Vec)0,vec1_D,vec2_D);CHKERRQ(ierr);
    ierr = VecCopy(r,z);CHKERRQ(ierr);
  }
  ierr = VecScatterBegin(pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = VecScatterEnd  (pcis->global_to_B,vec1_B,z,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PC_ApplyCoarse,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
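/*
   An operational recap of PCNNBalancing(): when u is given, z is first set to
   r minus the Schur complement applied to the D-scaled, subassembled u; the
   coarse right-hand side entry owned by this process is then the inner
   product of D_i Z_i with the interface part of z (scaled by
   factor_coarse_rhs); the one-row-per-process coarse problem is solved with
   ksp_coarse; and the resulting coefficient lambda is used to add
   lambda * D_i Z_i (plus, when u is absent, a further Schur-complement
   correction) back into z.
*/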
#undef __FUNCT__



/* ------- E N D   O F   T H E   C O D E ------- */
/*                                                */
/* From now on, "footnotes" (or "historical notes"). */
/*                                                */
/* ------------------------------------------------- */



/* --------------------------------------------------------------------------
   Historical note 01
   -------------------------------------------------------------------------- */
/*
   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the not pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/



/* --------------------------------------------------------------------------
   Historical note 02
   -------------------------------------------------------------------------- */
/*
   We tried an alternative coarse problem, one that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/