#include <../src/ksp/pc/impls/is/nn/nn.h>

/*
   PCSetUp_NN - Prepares for the use of the NN preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Note:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
static PetscErrorCode PCSetUp_NN(PC pc)
{
  PetscFunctionBegin;
  if (!pc->setupcalled) {
    /* Set up all the "iterative substructuring" common block */
    PetscCall(PCISSetUp(pc, PETSC_TRUE, PETSC_TRUE));
    /* Create the coarse matrix. */
    PetscCall(PCNNCreateCoarseMatrix(pc));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCApply_NN - Applies the NN preconditioner to a vector.

   Input Parameters:
+  pc - the preconditioner context
-  r  - input vector (global)

   Output Parameter:
.  z - output vector (global)

   Application Interface Routine: PCApply()
*/
static PetscErrorCode PCApply_NN(PC pc, Vec r, Vec z)
{
  PC_IS      *pcis  = (PC_IS *)pc->data;
  PetscScalar m_one = -1.0;
  Vec         w     = pcis->vec1_global;

  PetscFunctionBegin;
  /*
    Dirichlet solvers.
    Solving $ B_I^{(i)} r_I^{(i)} $ at each processor.
    Storing the local results in vec2_D.
  */
  PetscCall(VecScatterBegin(pcis->global_to_D, r, pcis->vec1_D, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(pcis->global_to_D, r, pcis->vec1_D, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(KSPSolve(pcis->ksp_D, pcis->vec1_D, pcis->vec2_D));

  /*
    Computing $ r_B - \sum_j \tilde R_j^T A_{BI}^{(j)} (B_I^{(j)} r_I^{(j)}) $.
    Storing the result in the interface portion of the global vector w.
  */
  PetscCall(MatMult(pcis->A_BI, pcis->vec2_D, pcis->vec1_B));
  PetscCall(VecScale(pcis->vec1_B, m_one));
  PetscCall(VecCopy(r, w));
  PetscCall(VecScatterBegin(pcis->global_to_B, pcis->vec1_B, w, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(pcis->global_to_B, pcis->vec1_B, w, ADD_VALUES, SCATTER_REVERSE));

  /*
    Apply the interface preconditioner
  */
  PetscCall(PCNNApplyInterfacePreconditioner(pc, w, z, pcis->work_N, pcis->vec1_B, pcis->vec2_B, pcis->vec3_B, pcis->vec1_D, pcis->vec3_D, pcis->vec1_N, pcis->vec2_N));

  /*
    Computing $ t_I^{(i)} = A_{IB}^{(i)} \tilde R_i z_B $.
    The result is stored in vec1_D.
  */
  PetscCall(VecScatterBegin(pcis->global_to_B, z, pcis->vec1_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(pcis->global_to_B, z, pcis->vec1_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(MatMult(pcis->A_IB, pcis->vec1_B, pcis->vec1_D));

  /*
    Dirichlet solvers.
    Computing $ B_I^{(i)} t_I^{(i)} $ and sticking into the global vector the blocks
    $ B_I^{(i)} r_I^{(i)} - B_I^{(i)} t_I^{(i)} $.
  */
  PetscCall(VecScatterBegin(pcis->global_to_D, pcis->vec2_D, z, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(pcis->global_to_D, pcis->vec2_D, z, INSERT_VALUES, SCATTER_REVERSE));
  PetscCall(KSPSolve(pcis->ksp_D, pcis->vec1_D, pcis->vec2_D));
  PetscCall(VecScale(pcis->vec2_D, m_one));
  PetscCall(VecScatterBegin(pcis->global_to_D, pcis->vec2_D, z, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(pcis->global_to_D, pcis->vec2_D, z, ADD_VALUES, SCATTER_REVERSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}
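/*
   In matrix terms, the three stages above are a sketch of a block elimination
   (with $ B_I^{(i)} $ the approximate local Dirichlet solve for $ A_{II}^{(i)} $,
   and M_B a shorthand, not defined elsewhere in this file, for the interface
   preconditioner applied by PCNNApplyInterfacePreconditioner()):

     z_B       = M_B ( r_B - \sum_i \tilde R_i^T A_{BI}^{(i)} B_I^{(i)} r_I^{(i)} )
     z_I^{(i)} = B_I^{(i)} ( r_I^{(i)} - A_{IB}^{(i)} \tilde R_i z_B )

   i.e., an (approximate) block LDU application for the interior/interface
   splitting of the global problem.
*/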
/*
   PCDestroy_NN - Destroys the private context for the NN preconditioner
   that was created with PCCreate_NN().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
static PetscErrorCode PCDestroy_NN(PC pc)
{
  PC_NN *pcnn = (PC_NN *)pc->data;

  PetscFunctionBegin;
  PetscCall(PCISReset(pc));

  PetscCall(MatDestroy(&pcnn->coarse_mat));
  PetscCall(VecDestroy(&pcnn->coarse_x));
  PetscCall(VecDestroy(&pcnn->coarse_b));
  PetscCall(KSPDestroy(&pcnn->ksp_coarse));
  if (pcnn->DZ_IN) {
    PetscCall(PetscFree(pcnn->DZ_IN[0]));
    PetscCall(PetscFree(pcnn->DZ_IN));
  }

  /*
     Free the private data structure that was hanging off the PC
  */
  PetscCall(PetscFree(pc->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   PCNN - Balancing Neumann-Neumann for scalar elliptic PDEs.

   Options Database Keys:
+  -pc_nn_turn_off_first_balancing - do not balance the residual before solving the local Neumann problems
                                     (this skips the first coarse grid solve in the preconditioner)
.  -pc_nn_turn_off_second_balancing - do not balance the solution obtained from the local Neumann problems
                                      (this skips the second coarse grid solve in the preconditioner)
.  -pc_is_damp_fixed <fact> -
.  -pc_is_remove_nullspace_fixed -
.  -pc_is_set_damping_factor_floating <fact> -
.  -pc_is_not_damp_floating -
-  -pc_is_not_remove_nullspace_floating -

   Options Database prefixes for the subsolvers this preconditioner uses:
+  -nn_coarse_pc_ - for the coarse grid preconditioner
.  -is_localD_pc_ - for the Dirichlet subproblem preconditioner
-  -is_localN_pc_ - for the Neumann subproblem preconditioner

   Level: intermediate

   Notes:
   The matrix used with this preconditioner must be of type `MATIS`.

   Unlike more 'conventional' Neumann-Neumann preconditioners, this iterates over ALL the
   degrees of freedom, NOT just those on the interface (this allows the use of approximate solvers
   on the subdomains, though in our experience using approximate solvers is slower).

   Contributed by Paulo Goldfeld

.seealso: [](ch_ksp), `PCCreate()`, `PCSetType()`, `PCType`, `PC`, `MATIS`, `PCBDDC`
M*/
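/*
   A minimal usage sketch (not part of the original source): it assumes a matrix
   A of type MATIS and vectors b, x have already been created on PETSC_COMM_WORLD.

     KSP ksp;
     PC  pc;
     PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
     PetscCall(KSPSetOperators(ksp, A, A));
     PetscCall(KSPGetPC(ksp, &pc));
     PetscCall(PCSetType(pc, PCNN));
     PetscCall(KSPSetFromOptions(ksp));
     PetscCall(KSPSolve(ksp, b, x));

   Equivalently, select the preconditioner at run time with -pc_type nn.
*/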
PETSC_EXTERN PetscErrorCode PCCreate_NN(PC pc)
{
  PC_NN *pcnn;

  PetscFunctionBegin;
  /*
     Create the private data structure for this preconditioner and
     attach it to the PC object.
  */
  PetscCall(PetscNew(&pcnn));
  pc->data = (void *)pcnn;

  PetscCall(PCISInitialize(pc));
  pcnn->coarse_mat = NULL;
  pcnn->coarse_x   = NULL;
  pcnn->coarse_b   = NULL;
  pcnn->ksp_coarse = NULL;
  pcnn->DZ_IN      = NULL;

  /*
      Set the pointers for the functions that are provided above.
      Now when the user-level routines (such as PCApply(), PCDestroy(), etc.)
      are called, they will automatically call these functions. Note we
      choose not to provide a couple of these functions since they are
      not needed.
  */
  pc->ops->apply               = PCApply_NN;
  pc->ops->applytranspose      = NULL;
  pc->ops->setup               = PCSetUp_NN;
  pc->ops->destroy             = PCDestroy_NN;
  pc->ops->view                = NULL;
  pc->ops->applyrichardson     = NULL;
  pc->ops->applysymmetricleft  = NULL;
  pc->ops->applysymmetricright = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCNNCreateCoarseMatrix - Creates and assembles the coarse matrix (one degree
   of freedom per subdomain) used in the balancing steps, and sets up the
   coarse linear solver.
*/
PetscErrorCode PCNNCreateCoarseMatrix(PC pc)
{
  MPI_Request  *send_request, *recv_request;
  PetscInt      i, j, k;
  PetscScalar  *mat;    /* Sub-matrix with this subdomain's contribution to the coarse matrix */
  PetscScalar **DZ_OUT; /* proc[k].DZ_OUT[i][] = bit of vector to be sent from processor k to processor i */

  /* aliasing some names */
  PC_IS        *pcis     = (PC_IS *)pc->data;
  PC_NN        *pcnn     = (PC_NN *)pc->data;
  PetscInt      n_neigh  = pcis->n_neigh;
  PetscInt     *neigh    = pcis->neigh;
  PetscInt     *n_shared = pcis->n_shared;
  PetscInt    **shared   = pcis->shared;
  PetscScalar **DZ_IN; /* Must be initialized after memory allocation. */

  PetscFunctionBegin;
  /* Allocate memory for mat (the +1 is to handle the case n_neigh equal to zero) */
  PetscCall(PetscMalloc1(n_neigh * n_neigh + 1, &mat));

  /* Allocate memory for DZ */
  /* Notice that DZ_OUT[0] is allocated some space that is never used. */
  /* This is just so that DZ_OUT and DZ_IN have exactly the same form. */
  {
    PetscInt size_of_Z = 0;
    PetscCall(PetscMalloc1(n_neigh + 1, &pcnn->DZ_IN));
    DZ_IN = pcnn->DZ_IN;
    PetscCall(PetscMalloc1(n_neigh + 1, &DZ_OUT));
    for (i = 0; i < n_neigh; i++) size_of_Z += n_shared[i];
    PetscCall(PetscMalloc1(size_of_Z + 1, &DZ_IN[0]));
    PetscCall(PetscMalloc1(size_of_Z + 1, &DZ_OUT[0]));
  }
  for (i = 1; i < n_neigh; i++) {
    DZ_IN[i]  = DZ_IN[i - 1] + n_shared[i - 1];
    DZ_OUT[i] = DZ_OUT[i - 1] + n_shared[i - 1];
  }
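  /*
     A sketch of the resulting layout (not in the original source): each DZ_IN[i]
     (and likewise DZ_OUT[i]) is a window of length n_shared[i] into the single
     contiguous buffer DZ_IN[0] (resp. DZ_OUT[0]):

       DZ_IN[0]                DZ_IN[1]          DZ_IN[2]  ...
       |<--- n_shared[0] --->|<- n_shared[1] ->|<- ... ->|

     so the single PetscFree(DZ_IN[0]) in PCDestroy_NN() releases all the row storage.
  */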
  /* Set the values of DZ_OUT, in order to send this info to the neighbours */
  /* First, set the auxiliary array pcis->work_N. */
  PetscCall(PCISScatterArrayNToVecB(pc, pcis->work_N, pcis->D, INSERT_VALUES, SCATTER_REVERSE));
  for (i = 1; i < n_neigh; i++) {
    for (j = 0; j < n_shared[i]; j++) DZ_OUT[i][j] = pcis->work_N[shared[i][j]];
  }

  /* Non-blocking send/receive the common-interface chunks of scaled nullspaces */
  /* Notice that send_request[] and recv_request[] could have one less element. */
  /* We make them longer to have request[i] corresponding to neigh[i].          */
  {
    PetscMPIInt tag;
    PetscCall(PetscObjectGetNewTag((PetscObject)pc, &tag));
    PetscCall(PetscMalloc2(n_neigh + 1, &send_request, n_neigh + 1, &recv_request));
    for (i = 1; i < n_neigh; i++) {
      PetscCallMPI(MPI_Isend((void *)DZ_OUT[i], n_shared[i], MPIU_SCALAR, neigh[i], tag, PetscObjectComm((PetscObject)pc), &send_request[i]));
      PetscCallMPI(MPI_Irecv((void *)DZ_IN[i], n_shared[i], MPIU_SCALAR, neigh[i], tag, PetscObjectComm((PetscObject)pc), &recv_request[i]));
    }
  }

  /* Set DZ_IN[0][] (recall that neigh[0] == rank, always) */
  for (j = 0; j < n_shared[0]; j++) DZ_IN[0][j] = pcis->work_N[shared[0][j]];

  /* Start computing with local D*Z while communication goes on.     */
  /* Apply Schur complement. The result is stored in pcis->vec1_B    */
  /* and also scattered to pcis->work_N.                             */
  PetscCall(PCNNApplySchurToChunk(pc, n_shared[0], shared[0], DZ_IN[0], pcis->work_N, pcis->vec1_B, pcis->vec2_B, pcis->vec1_D, pcis->vec2_D));

  /* Compute the first column, while completing the receiving. */
  for (i = 0; i < n_neigh; i++) {
    MPI_Status  stat;
    PetscMPIInt ind = 0;
    if (i > 0) {
      PetscCallMPI(MPI_Waitany(n_neigh - 1, recv_request + 1, &ind, &stat));
      ind++;
    }
    mat[ind * n_neigh + 0] = 0.0;
    for (k = 0; k < n_shared[ind]; k++) mat[ind * n_neigh + 0] += DZ_IN[ind][k] * pcis->work_N[shared[ind][k]];
  }

  /* Compute the remaining columns. */
  for (j = 1; j < n_neigh; j++) {
    PetscCall(PCNNApplySchurToChunk(pc, n_shared[j], shared[j], DZ_IN[j], pcis->work_N, pcis->vec1_B, pcis->vec2_B, pcis->vec1_D, pcis->vec2_D));
    for (i = 0; i < n_neigh; i++) {
      mat[i * n_neigh + j] = 0.0;
      for (k = 0; k < n_shared[i]; k++) mat[i * n_neigh + j] += DZ_IN[i][k] * pcis->work_N[shared[i][k]];
    }
  }

  /* Complete the sending. */
  if (n_neigh > 1) {
    MPI_Status *stat;
    PetscCall(PetscMalloc1(n_neigh - 1, &stat));
    PetscCallMPI(MPI_Waitall(n_neigh - 1, &send_request[1], stat));
    PetscCall(PetscFree(stat));
  }

  /* Free the memory for the MPI requests */
  PetscCall(PetscFree2(send_request, recv_request));

  /* Free the memory for DZ_OUT */
  if (DZ_OUT) {
    PetscCall(PetscFree(DZ_OUT[0]));
    PetscCall(PetscFree(DZ_OUT));
  }
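  /*
     A sketch of what was just computed (not in the original source): with
     z_i = DZ_IN[i][] the scaled local nullspace chunk shared with neigh[i], and
     S the local Schur complement applied through PCNNApplySchurToChunk(),

       mat[i * n_neigh + j] = z_i^T (S z_j) ,

     i.e., this subdomain's contribution to the coarse matrix entry at global
     position (neigh[i], neigh[j]). The block below sums these contributions
     over all subdomains via ADD_VALUES.
  */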
  {
    PetscMPIInt size;
    PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)pc), &size));
    /* Create the global coarse vectors (rhs and solution). */
    PetscCall(VecCreateMPI(PetscObjectComm((PetscObject)pc), 1, size, &pcnn->coarse_b));
    PetscCall(VecDuplicate(pcnn->coarse_b, &pcnn->coarse_x));
    /* Create and set the global coarse AIJ matrix. */
    PetscCall(MatCreate(PetscObjectComm((PetscObject)pc), &pcnn->coarse_mat));
    PetscCall(MatSetSizes(pcnn->coarse_mat, 1, 1, size, size));
    PetscCall(MatSetType(pcnn->coarse_mat, MATAIJ));
    PetscCall(MatSeqAIJSetPreallocation(pcnn->coarse_mat, 1, NULL));
    PetscCall(MatMPIAIJSetPreallocation(pcnn->coarse_mat, 1, NULL, n_neigh, NULL));
    PetscCall(MatSetOption(pcnn->coarse_mat, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE));
    PetscCall(MatSetOption(pcnn->coarse_mat, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE));
    PetscCall(MatSetValues(pcnn->coarse_mat, n_neigh, neigh, n_neigh, neigh, mat, ADD_VALUES));
    PetscCall(MatAssemblyBegin(pcnn->coarse_mat, MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(pcnn->coarse_mat, MAT_FINAL_ASSEMBLY));
  }

  {
    PetscMPIInt rank;
    PetscScalar one = 1.0;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc), &rank));
    /* "Zero out" rows of not-purely-Neumann subdomains */
    if (pcis->pure_neumann) { /* does NOT zero the row; pass an empty index set. The reason is that MatZeroRows() is collective. */
      PetscCall(MatZeroRows(pcnn->coarse_mat, 0, NULL, one, NULL, NULL));
    } else { /* here it DOES zero the row, since it is not a floating subdomain. */
      PetscInt row = (PetscInt)rank;
      PetscCall(MatZeroRows(pcnn->coarse_mat, 1, &row, one, NULL, NULL));
    }
  }

  /* Create the coarse linear solver context */
  {
    PC  pc_ctx, inner_pc;
    KSP inner_ksp;

    PetscCall(KSPCreate(PetscObjectComm((PetscObject)pc), &pcnn->ksp_coarse));
    PetscCall(KSPSetNestLevel(pcnn->ksp_coarse, pc->kspnestlevel));
    PetscCall(PetscObjectIncrementTabLevel((PetscObject)pcnn->ksp_coarse, (PetscObject)pc, 2));
    PetscCall(KSPSetOperators(pcnn->ksp_coarse, pcnn->coarse_mat, pcnn->coarse_mat));
    PetscCall(KSPGetPC(pcnn->ksp_coarse, &pc_ctx));
    PetscCall(PCSetType(pc_ctx, PCREDUNDANT));
    PetscCall(KSPSetType(pcnn->ksp_coarse, KSPPREONLY));
    PetscCall(PCRedundantGetKSP(pc_ctx, &inner_ksp));
    PetscCall(KSPGetPC(inner_ksp, &inner_pc));
    PetscCall(PCSetType(inner_pc, PCLU));
    PetscCall(KSPSetOptionsPrefix(pcnn->ksp_coarse, "nn_coarse_"));
    PetscCall(KSPSetFromOptions(pcnn->ksp_coarse));
    PetscCall(KSPSetUp(pcnn->ksp_coarse));
  }

  /* Free the memory for mat */
  PetscCall(PetscFree(mat));

  /* for DEBUGGING, save the coarse matrix to a file. */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_nn_save_coarse_matrix", &flg, NULL));
    if (flg) {
      PetscViewer viewer;
      PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD, "coarse.m", &viewer));
      PetscCall(PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_MATLAB));
      PetscCall(MatView(pcnn->coarse_mat, viewer));
      PetscCall(PetscViewerPopFormat(viewer));
      PetscCall(PetscViewerDestroy(&viewer));
    }
  }

  /* Set the variable pcnn->factor_coarse_rhs. */
  pcnn->factor_coarse_rhs = (pcis->pure_neumann) ? 1.0 : 0.0;

  /* See historical note 02, at the bottom of this file. */
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCNNApplySchurToChunk - Applies the local Schur complement to a chunk of
   values given on a set of local interface indices.

   Input Parameters:
+  pc    - the preconditioner context
.  n     - size of chunk
.  idx   - indices of chunk
-  chunk - values

   Output Parameters:
+  array_N - result of Schur complement applied to chunk, scattered to big array
.  vec1_B  - result of Schur complement applied to chunk
.  vec2_B  - garbage (used as work space)
.  vec1_D  - garbage (used as work space)
-  vec2_D  - garbage (used as work space)
*/
PetscErrorCode PCNNApplySchurToChunk(PC pc, PetscInt n, PetscInt *idx, PetscScalar *chunk, PetscScalar *array_N, Vec vec1_B, Vec vec2_B, Vec vec1_D, Vec vec2_D)
{
  PetscInt i;
  PC_IS   *pcis = (PC_IS *)pc->data;

  PetscFunctionBegin;
  PetscCall(PetscArrayzero(array_N, pcis->n));
  for (i = 0; i < n; i++) array_N[idx[i]] = chunk[i];
  PetscCall(PCISScatterArrayNToVecB(pc, array_N, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(PCISApplySchur(pc, vec2_B, vec1_B, (Vec)0, vec1_D, vec2_D));
  PetscCall(PCISScatterArrayNToVecB(pc, array_N, vec1_B, INSERT_VALUES, SCATTER_REVERSE));
  PetscFunctionReturn(PETSC_SUCCESS);
}
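/*
   In formulas (a sketch, not in the original source): if v denotes the local
   vector with v[idx[i]] = chunk[i] and zeros elsewhere, the routine returns

     vec1_B  = S v_B                       (S applied by PCISApplySchur())
     array_N = S v_B, scattered back to local (interior + interface) numbering

   which is exactly the product needed for one column of the coarse matrix
   assembled in PCNNCreateCoarseMatrix().
*/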
/*
   PCNNApplyInterfacePreconditioner - Applies the interface preconditioner, i.e.,
                                      the preconditioner for the Schur complement.

   Input Parameter:
.  r - global vector of interior and interface nodes. The values on the interior nodes are NOT used.

   Output Parameters:
+  z      - global vector of interior and interface nodes. The values on the interface are the result of
            the application of the interface preconditioner to the interface part of r. The values on the
            interior nodes are garbage.
.  work_N - array of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
.  vec1_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec2_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec3_B - vector of local interface nodes (including ghosts); returns garbage (used as work space)
.  vec1_D - vector of local interior nodes; returns garbage (used as work space)
.  vec2_D - vector of local interior nodes; returns garbage (used as work space)
.  vec1_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
-  vec2_N - vector of local nodes (interior and interface, including ghosts); returns garbage (used as work space)
*/
PetscErrorCode PCNNApplyInterfacePreconditioner(PC pc, Vec r, Vec z, PetscScalar *work_N, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, Vec vec1_N, Vec vec2_N)
{
  PC_IS *pcis = (PC_IS *)pc->data;

  PetscFunctionBegin;
  /*
    First balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_nn_turn_off_first_balancing", &flg, NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc, r, (Vec)0, z, vec1_B, vec2_B, (Vec)0, vec1_D, vec2_D, work_N));
    } else {
      PetscCall(VecCopy(r, z));
    }
  }

  /*
    Extract the local interface part of z and scale it by D
  */
  PetscCall(VecScatterBegin(pcis->global_to_B, z, vec1_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(pcis->global_to_B, z, vec1_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecPointwiseMult(vec2_B, pcis->D, vec1_B));

  /* Neumann Solver */
  PetscCall(PCISApplyInvSchur(pc, vec2_B, vec1_B, vec1_N, vec2_N));

  /*
    Second balancing step.
  */
  {
    PetscBool flg = PETSC_FALSE;
    PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_nn_turn_off_second_balancing", &flg, NULL));
    if (!flg) {
      PetscCall(PCNNBalancing(pc, r, vec1_B, z, vec2_B, vec3_B, (Vec)0, vec1_D, vec2_D, work_N));
    } else {
      PetscCall(VecPointwiseMult(vec2_B, pcis->D, vec1_B));
      PetscCall(VecSet(z, 0.0));
      PetscCall(VecScatterBegin(pcis->global_to_B, vec2_B, z, ADD_VALUES, SCATTER_REVERSE));
      PetscCall(VecScatterEnd(pcis->global_to_B, vec2_B, z, ADD_VALUES, SCATTER_REVERSE));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
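/*
   In operator form (a sketch following the balancing construction of [Man93];
   the symbols below are not defined elsewhere in this file): with S the
   assembled Schur complement, D the diagonal scaling, and Q the coarse solve
   performed inside PCNNBalancing(), the routine above applies, approximately,

     M^{-1} = Q + (I - Q S) ( \sum_i \tilde R_i^T D_i S_i^+ D_i \tilde R_i ) (I - S Q)

   where the middle factor is the scaled local Neumann solves performed by
   PCISApplyInvSchur(), and either balancing factor can be switched off with
   the options tested above.
*/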
/*
   PCNNBalancing - Computes z, as given in equations (15) and (16) (if the
                   input argument u is provided), or s, as given in equations
                   (12) and (13), if the input argument u is a null vector.
                   Notice that the input argument u plays the role of u_i in
                   equation (14). The equation numbers refer to [Man93].

   Input Parameters:
+  pc - the preconditioner context
.  r  - MPI vector of all nodes (interior and interface). It's preserved.
-  u  - (optional) sequential vector of local interface nodes. It's preserved UNLESS vec3_B is null.

   Output Parameters:
+  z      - MPI vector of interior and interface nodes. Returns s or z (see description above).
.  vec1_B - sequential vector of local interface nodes. Workspace.
.  vec2_B - sequential vector of local interface nodes. Workspace.
.  vec3_B - (optional) sequential vector of local interface nodes. Workspace.
.  vec1_D - sequential vector of local interior nodes. Workspace.
.  vec2_D - sequential vector of local interior nodes. Workspace.
-  work_N - array of all local nodes (interior and interface). Workspace.
*/
PetscErrorCode PCNNBalancing(PC pc, Vec r, Vec u, Vec z, Vec vec1_B, Vec vec2_B, Vec vec3_B, Vec vec1_D, Vec vec2_D, PetscScalar *work_N)
{
  PetscInt     k;
  PetscScalar  value;
  PetscScalar *lambda;
  PC_NN       *pcnn = (PC_NN *)pc->data;
  PC_IS       *pcis = (PC_IS *)pc->data;

  PetscFunctionBegin;
  PetscCall(PetscLogEventBegin(PC_ApplyCoarse, pc, 0, 0, 0));
  if (u) {
    if (!vec3_B) vec3_B = u;
    PetscCall(VecPointwiseMult(vec1_B, pcis->D, u));
    PetscCall(VecSet(z, 0.0));
    PetscCall(VecScatterBegin(pcis->global_to_B, vec1_B, z, ADD_VALUES, SCATTER_REVERSE));
    PetscCall(VecScatterEnd(pcis->global_to_B, vec1_B, z, ADD_VALUES, SCATTER_REVERSE));
    PetscCall(VecScatterBegin(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecScatterEnd(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc, vec2_B, vec3_B, (Vec)0, vec1_D, vec2_D));
    PetscCall(VecScale(vec3_B, -1.0));
    PetscCall(VecCopy(r, z));
    PetscCall(VecScatterBegin(pcis->global_to_B, vec3_B, z, ADD_VALUES, SCATTER_REVERSE));
    PetscCall(VecScatterEnd(pcis->global_to_B, vec3_B, z, ADD_VALUES, SCATTER_REVERSE));
  } else {
    PetscCall(VecCopy(r, z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecScatterEnd(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(PCISScatterArrayNToVecB(pc, work_N, vec2_B, INSERT_VALUES, SCATTER_REVERSE));
  for (k = 0, value = 0.0; k < pcis->n_shared[0]; k++) value += pcnn->DZ_IN[0][k] * work_N[pcis->shared[0][k]];
  value *= pcnn->factor_coarse_rhs; /* This factor is set in PCNNCreateCoarseMatrix(). */
  {
    PetscMPIInt rank;
    PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)pc), &rank));
    PetscCall(VecSetValue(pcnn->coarse_b, rank, value, INSERT_VALUES));
    /*
       Since we are only inserting local values (one value actually) we don't need to do the
       reduction that tells us there is no data that needs to be moved. Hence we comment out these:
       PetscCall(VecAssemblyBegin(pcnn->coarse_b));
       PetscCall(VecAssemblyEnd(pcnn->coarse_b));
    */
  }
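  /*
     In formulas (a sketch, not in the original source): the scalar just inserted is

       coarse_b[rank] = factor_coarse_rhs * (DZ_IN[0])^T (interface part of z on this subdomain)

     i.e., the local inner product of the scaled nullspace chunk with the current
     residual. The coarse solve below then yields the multiplier lambda used to
     expand the coarse correction along the same basis vector DZ_IN[0][].
  */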
  PetscCall(KSPSolve(pcnn->ksp_coarse, pcnn->coarse_b, pcnn->coarse_x));
  if (!u) PetscCall(VecScale(pcnn->coarse_x, -1.0));
  PetscCall(VecGetArray(pcnn->coarse_x, &lambda));
  for (k = 0; k < pcis->n_shared[0]; k++) work_N[pcis->shared[0][k]] = *lambda * pcnn->DZ_IN[0][k];
  PetscCall(VecRestoreArray(pcnn->coarse_x, &lambda));
  PetscCall(PCISScatterArrayNToVecB(pc, work_N, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
  PetscCall(VecSet(z, 0.0));
  PetscCall(VecScatterBegin(pcis->global_to_B, vec2_B, z, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(pcis->global_to_B, vec2_B, z, ADD_VALUES, SCATTER_REVERSE));
  if (!u) {
    PetscCall(VecScatterBegin(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(VecScatterEnd(pcis->global_to_B, z, vec2_B, INSERT_VALUES, SCATTER_FORWARD));
    PetscCall(PCISApplySchur(pc, vec2_B, vec1_B, (Vec)0, vec1_D, vec2_D));
    PetscCall(VecCopy(r, z));
  }
  PetscCall(VecScatterBegin(pcis->global_to_B, vec1_B, z, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(VecScatterEnd(pcis->global_to_B, vec1_B, z, ADD_VALUES, SCATTER_REVERSE));
  PetscCall(PetscLogEventEnd(PC_ApplyCoarse, pc, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* */
/* From now on, "footnotes" (or "historical notes"). */
/* */

/*
   Historical note 01

   We considered the possibility of an alternative D_i that would still
   provide a partition of unity (i.e., $ \sum_i N_i D_i N_i^T = I $).
   The basic principle was still the pseudo-inverse of the counting
   function; the difference was that we would not count subdomains
   that do not contribute to the coarse space (i.e., not pure-Neumann
   subdomains).

   This turned out to be a bad idea: we would solve trivial Neumann
   problems in the non-pure-Neumann subdomains, since we would be scaling
   the balanced residual by zero.
*/

/*
   Historical note 02

   We tried an alternative coarse problem, one that would eliminate exactly a
   constant error. It turned out not to improve the overall convergence.
*/