static char help[] = "This example shows 1) how to transfer vectors from a parent communicator to vectors on a child communicator and vice versa;\n\
  2) how to transfer vectors from a subcommunicator to vectors on another subcommunicator. The two subcommunicators are not\n\
  required to cover all processes in PETSC_COMM_WORLD; 3) how to copy a vector from a parent communicator to vectors on its child communicators.\n\
  To run any example with VECCUDA vectors, add -vectype cuda to the argument list\n\n";

#include <petscvec.h>
int main(int argc,char **argv)
{
  PetscMPIInt nproc,grank,mycolor;
  PetscInt    i,n,N = 20,low,high;
  MPI_Comm    subcomm;
  Vec         x  = PETSC_NULL;           /* global vector on PETSC_COMM_WORLD */
  Vec         yg = PETSC_NULL;           /* global vector on PETSC_COMM_WORLD */
  VecScatter  vscat;
  IS          ix,iy;
  PetscBool   iscuda = PETSC_FALSE;      /* Option to use VECCUDA vectors */
  PetscBool   optionflag,compareflag;
  char        vectypename[PETSC_MAX_PATH_LEN];
  PetscBool   world2sub  = PETSC_FALSE;  /* Copy a vector from WORLD to a subcomm? */
  PetscBool   sub2sub    = PETSC_FALSE;  /* Copy a vector from a subcomm to another subcomm? */
  PetscBool   world2subs = PETSC_FALSE;  /* Copy a vector from WORLD to multiple subcomms? */

  CHKERRQ(PetscInitialize(&argc,&argv,(char*)0,help));
  CHKERRMPI(MPI_Comm_size(PETSC_COMM_WORLD,&nproc));
  CHKERRMPI(MPI_Comm_rank(PETSC_COMM_WORLD,&grank));

  PetscCheckFalse(nproc < 2,PETSC_COMM_WORLD,PETSC_ERR_ARG_SIZ,"This test must have at least two processes to run");

  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-world2sub",&world2sub,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-sub2sub",&sub2sub,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-world2subs",&world2subs,NULL));
  CHKERRQ(PetscOptionsGetString(NULL,NULL,"-vectype",vectypename,sizeof(vectypename),&optionflag));
  if (optionflag) {
    CHKERRQ(PetscStrncmp(vectypename, "cuda", (size_t)4, &compareflag));
    if (compareflag) iscuda = PETSC_TRUE;
  }

  /* Split PETSC_COMM_WORLD into three subcomms. Each process can only see the subcomm it belongs to */
  mycolor = grank % 3;
  CHKERRMPI(MPI_Comm_split(PETSC_COMM_WORLD,mycolor,grank,&subcomm));

  /*===========================================================================
   *  Transfer a vector x defined on PETSC_COMM_WORLD to a vector y defined on
   *  a subcommunicator of PETSC_COMM_WORLD and vice versa.
   *===========================================================================*/
  if (world2sub) {
    CHKERRQ(VecCreate(PETSC_COMM_WORLD, &x));
    CHKERRQ(VecSetSizes(x, PETSC_DECIDE, N));
    if (iscuda) {
      CHKERRQ(VecSetType(x, VECCUDA));
    } else {
      CHKERRQ(VecSetType(x, VECSTANDARD));
    }
    CHKERRQ(VecSetUp(x));
    CHKERRQ(PetscObjectSetName((PetscObject)x,"x_commworld")); /* Give a name to view x clearly */

    /* Initialize x to [-0.0, -1.0, -2.0, ..., -19.0] */
    CHKERRQ(VecGetOwnershipRange(x,&low,&high));
    for (i=low; i<high; i++) {
      PetscScalar val = -i;
      CHKERRQ(VecSetValue(x,i,val,INSERT_VALUES));
    }
    CHKERRQ(VecAssemblyBegin(x));
    CHKERRQ(VecAssemblyEnd(x));

    /* Transfer x to a vector y only defined on subcomm0 and vice versa */
    if (mycolor == 0) { /* subcomm0 contains ranks 0, 3, 6, ... in PETSC_COMM_WORLD */
      Vec          y;
      PetscScalar *yvalue;

      CHKERRQ(VecCreate(subcomm, &y));
      CHKERRQ(VecSetSizes(y, PETSC_DECIDE, N));
      if (iscuda) {
        CHKERRQ(VecSetType(y, VECCUDA));
      } else {
        CHKERRQ(VecSetType(y, VECSTANDARD));
      }
      CHKERRQ(VecSetUp(y));
      CHKERRQ(PetscObjectSetName((PetscObject)y,"y_subcomm_0")); /* Give a name to view y clearly */
      CHKERRQ(VecGetLocalSize(y,&n));
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDAGetArray(y,&yvalue));
#endif
      } else {
        CHKERRQ(VecGetArray(y,&yvalue));
      }
      /* Create yg on PETSC_COMM_WORLD and alias yg with y. They share the memory pointed to by yvalue.
         Note this is a collective call. All processes have to call it and supply a consistent N.
       */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,n,N,yvalue,&yg));
#endif
      } else {
        CHKERRQ(VecCreateMPIWithArray(PETSC_COMM_WORLD,1,n,N,yvalue,&yg));
      }

      /* Create an identity map that makes yg[i] = x[i], i=0..N-1 */
      CHKERRQ(VecGetOwnershipRange(yg,&low,&high)); /* low, high are global indices */
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,high-low,low,1,&ix));
      CHKERRQ(ISDuplicate(ix,&iy));

      /* Union of ix's on subcomm0 covers the full range of [0,N) */
      CHKERRQ(VecScatterCreate(x,ix,yg,iy,&vscat));
      CHKERRQ(VecScatterBegin(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));
      CHKERRQ(VecScatterEnd(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));

      /* Once yg has received the data from x, we return yvalue to y so that we can use y in other operations.
         VecGetArray must be paired with VecRestoreArray.
       */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDARestoreArray(y,&yvalue));
#endif
      } else {
        CHKERRQ(VecRestoreArray(y,&yvalue));
      }

      /* Libraries on subcomm0 can safely use y now, for example, view and scale it */
      CHKERRQ(VecView(y,PETSC_VIEWER_STDOUT_(subcomm)));
      CHKERRQ(VecScale(y,2.0));

      /* Send the new y back to x */
      CHKERRQ(VecGetArray(y,&yvalue)); /* If VecScale was done on the GPU, PETSc will prepare a valid yvalue for access */
      /* Supply the new yvalue to yg without copying memory */
      CHKERRQ(VecPlaceArray(yg,yvalue));
      CHKERRQ(VecScatterBegin(vscat,yg,x,INSERT_VALUES,SCATTER_REVERSE));
      CHKERRQ(VecScatterEnd(vscat,yg,x,INSERT_VALUES,SCATTER_REVERSE));
      CHKERRQ(VecResetArray(yg));
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDARestoreArray(y,&yvalue));
#endif
      } else {
        CHKERRQ(VecRestoreArray(y,&yvalue));
      }

      CHKERRQ(VecDestroy(&y));
    } else {
      /* Ranks outside of subcomm0 do not supply values to yg */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,0/*n*/,N,NULL,&yg));
#endif
      } else {
        CHKERRQ(VecCreateMPIWithArray(PETSC_COMM_WORLD,1,0/*n*/,N,NULL,&yg));
      }

      /* Ranks in subcomm0 already specified the full range of the identity map. The remaining
         ranks just need to create empty ISes to cheat VecScatterCreate.
       */
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,0,0,1,&ix));
      CHKERRQ(ISDuplicate(ix,&iy));

      CHKERRQ(VecScatterCreate(x,ix,yg,iy,&vscat));
      CHKERRQ(VecScatterBegin(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));
      CHKERRQ(VecScatterEnd(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));

      /* Send the new y back to x. Ranks outside of subcomm0 actually have nothing to send.
         But they have to call VecScatterBegin/End since these routines are collective.
       */
      CHKERRQ(VecScatterBegin(vscat,yg,x,INSERT_VALUES,SCATTER_REVERSE));
      CHKERRQ(VecScatterEnd(vscat,yg,x,INSERT_VALUES,SCATTER_REVERSE));
    }

    CHKERRQ(VecView(x,PETSC_VIEWER_STDOUT_WORLD));
    CHKERRQ(ISDestroy(&ix));
    CHKERRQ(ISDestroy(&iy));
    CHKERRQ(VecDestroy(&x));
    CHKERRQ(VecDestroy(&yg));
    CHKERRQ(VecScatterDestroy(&vscat));
  } /* world2sub */

  /*===========================================================================
   *  Transfer a vector x defined on subcomm0 to a vector y defined on
   *  subcomm1. The two subcomms are not overlapping and their union is
   *  not necessarily equal to PETSC_COMM_WORLD.
   *===========================================================================*/
  if (sub2sub) {
    if (mycolor == 0) {
      /* Intentionally declare N as a local variable so that processes in subcomm1 do not know its value */
      PetscInt           n,N = 22;
      Vec                x,xg,yg;
      IS                 ix,iy;
      VecScatter         vscat;
      const PetscScalar *xvalue;
      MPI_Comm           intercomm,parentcomm;
      PetscMPIInt        lrank;

      CHKERRMPI(MPI_Comm_rank(subcomm,&lrank));
      /* x is on subcomm */
      CHKERRQ(VecCreate(subcomm, &x));
      CHKERRQ(VecSetSizes(x, PETSC_DECIDE, N));
      if (iscuda) {
        CHKERRQ(VecSetType(x, VECCUDA));
      } else {
        CHKERRQ(VecSetType(x, VECSTANDARD));
      }
      CHKERRQ(VecSetUp(x));
      CHKERRQ(VecGetOwnershipRange(x,&low,&high));

      /* Initialize x = [0.0, 1.0, 2.0, ..., 21.0] */
      for (i=low; i<high; i++) {
        PetscScalar val = i;
        CHKERRQ(VecSetValue(x,i,val,INSERT_VALUES));
      }
      CHKERRQ(VecAssemblyBegin(x));
      CHKERRQ(VecAssemblyEnd(x));

      CHKERRMPI(MPI_Intercomm_create(subcomm,0,PETSC_COMM_WORLD/*peer_comm*/,1/*remote_leader*/,100/*tag*/,&intercomm));

      /* Tell rank 0 of subcomm1 the global size of x */
      if (!lrank) CHKERRMPI(MPI_Send(&N,1,MPIU_INT,0/*receiver's rank in remote comm, i.e., subcomm1*/,200/*tag*/,intercomm));

      /* Create an intracomm that PETSc can work on. Ranks in subcomm0 are ordered before ranks in subcomm1 in parentcomm.
         But this order actually does not matter, since what we care about is vector y, which is defined on subcomm1.
       */
      CHKERRMPI(MPI_Intercomm_merge(intercomm,0/*low*/,&parentcomm));

      /* Create a vector xg on parentcomm, which shares memory with x */
      CHKERRQ(VecGetLocalSize(x,&n));
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDAGetArrayRead(x,&xvalue));
        CHKERRQ(VecCreateMPICUDAWithArray(parentcomm,1,n,N,xvalue,&xg));
#endif
      } else {
        CHKERRQ(VecGetArrayRead(x,&xvalue));
        CHKERRQ(VecCreateMPIWithArray(parentcomm,1,n,N,xvalue,&xg));
      }

      /* Ranks in subcomm0 have nothing on yg, so they simply have n=0, array=NULL */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCreateMPICUDAWithArray(parentcomm,1,0/*n*/,N,NULL/*array*/,&yg));
#endif
      } else {
        CHKERRQ(VecCreateMPIWithArray(parentcomm,1,0/*n*/,N,NULL/*array*/,&yg));
      }

      /* Create the vecscatter, which does an identity map by setting yg[i] = xg[i], i=0..N-1 */
      CHKERRQ(VecGetOwnershipRange(xg,&low,&high)); /* low, high are global indices of xg */
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,high-low,low,1,&ix));
      CHKERRQ(ISDuplicate(ix,&iy));
      CHKERRQ(VecScatterCreate(xg,ix,yg,iy,&vscat));

      /* Scatter values from xg to yg */
      CHKERRQ(VecScatterBegin(vscat,xg,yg,INSERT_VALUES,SCATTER_FORWARD));
      CHKERRQ(VecScatterEnd(vscat,xg,yg,INSERT_VALUES,SCATTER_FORWARD));

      /* After the VecScatter is done, xg is idle so we can safely return xvalue to x */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDARestoreArrayRead(x,&xvalue));
#endif
      } else {
        CHKERRQ(VecRestoreArrayRead(x,&xvalue));
      }
      CHKERRQ(VecDestroy(&x));
      CHKERRQ(ISDestroy(&ix));
      CHKERRQ(ISDestroy(&iy));
      CHKERRQ(VecDestroy(&xg));
      CHKERRQ(VecDestroy(&yg));
      CHKERRQ(VecScatterDestroy(&vscat));
      CHKERRMPI(MPI_Comm_free(&intercomm));
      CHKERRMPI(MPI_Comm_free(&parentcomm));
    } else if (mycolor == 1) { /* subcomm1, containing ranks 1, 4, 7, ... in PETSC_COMM_WORLD */
      PetscInt     n,N;
      Vec          y,xg,yg;
      IS           ix,iy;
      VecScatter   vscat;
      PetscScalar *yvalue;
      MPI_Comm     intercomm,parentcomm;
      PetscMPIInt  lrank;

      CHKERRMPI(MPI_Comm_rank(subcomm,&lrank));
      CHKERRMPI(MPI_Intercomm_create(subcomm,0,PETSC_COMM_WORLD/*peer_comm*/,0/*remote_leader*/,100/*tag*/,&intercomm));

      /* The two rank-0 processes are talking */
      if (!lrank) CHKERRMPI(MPI_Recv(&N,1,MPIU_INT,0/*sender's rank in remote comm, i.e. subcomm0*/,200/*tag*/,intercomm,MPI_STATUS_IGNORE));
      /* Rank 0 of subcomm1 bcasts N to its members */
      CHKERRMPI(MPI_Bcast(&N,1,MPIU_INT,0/*local root*/,subcomm));

      /* Create an intracomm that PETSc can work on */
      CHKERRMPI(MPI_Intercomm_merge(intercomm,1/*high*/,&parentcomm));

      /* Ranks in subcomm1 have nothing on xg, so they simply have n=0, array=NULL */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCreateMPICUDAWithArray(parentcomm,1/*bs*/,0/*n*/,N,NULL/*array*/,&xg));
#endif
      } else {
        CHKERRQ(VecCreateMPIWithArray(parentcomm,1/*bs*/,0/*n*/,N,NULL/*array*/,&xg));
      }

      CHKERRQ(VecCreate(subcomm, &y));
      CHKERRQ(VecSetSizes(y, PETSC_DECIDE, N));
      if (iscuda) {
        CHKERRQ(VecSetType(y, VECCUDA));
      } else {
        CHKERRQ(VecSetType(y, VECSTANDARD));
      }
      CHKERRQ(VecSetUp(y));

      CHKERRQ(PetscObjectSetName((PetscObject)y,"y_subcomm_1")); /* Give a name to view y clearly */
      CHKERRQ(VecGetLocalSize(y,&n));
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDAGetArray(y,&yvalue));
#endif
      } else {
        CHKERRQ(VecGetArray(y,&yvalue));
      }
      /* Create a vector yg on parentcomm, which shares memory with y. xg and yg must be
         created in the same order on subcomm0 and subcomm1. For example, we cannot reverse
         the order of creating xg and yg in subcomm1.
       */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCreateMPICUDAWithArray(parentcomm,1/*bs*/,n,N,yvalue,&yg));
#endif
      } else {
        CHKERRQ(VecCreateMPIWithArray(parentcomm,1/*bs*/,n,N,yvalue,&yg));
      }

      /* Ranks in subcomm0 already specified the full range of the identity map.
         Ranks in subcomm1 just need to create empty ISes to cheat VecScatterCreate.
       */
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,0,0,1,&ix));
      CHKERRQ(ISDuplicate(ix,&iy));
      CHKERRQ(VecScatterCreate(xg,ix,yg,iy,&vscat));

      /* Scatter values from xg to yg */
      CHKERRQ(VecScatterBegin(vscat,xg,yg,INSERT_VALUES,SCATTER_FORWARD));
      CHKERRQ(VecScatterEnd(vscat,xg,yg,INSERT_VALUES,SCATTER_FORWARD));

      /* After the VecScatter is done, values in yg are available. y is our interest, so we return yvalue to y */
      if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
        CHKERRQ(VecCUDARestoreArray(y,&yvalue));
#endif
      } else {
        CHKERRQ(VecRestoreArray(y,&yvalue));
      }

      /* Libraries on subcomm1 can safely use y now, for example, view it */
      CHKERRQ(VecView(y,PETSC_VIEWER_STDOUT_(subcomm)));

      CHKERRQ(VecDestroy(&y));
      CHKERRQ(ISDestroy(&ix));
      CHKERRQ(ISDestroy(&iy));
      CHKERRQ(VecDestroy(&xg));
      CHKERRQ(VecDestroy(&yg));
      CHKERRQ(VecScatterDestroy(&vscat));
      CHKERRMPI(MPI_Comm_free(&intercomm));
      CHKERRMPI(MPI_Comm_free(&parentcomm));
    } else if (mycolor == 2) { /* subcomm2 */
      /* Processes in subcomm2 do not participate in the VecScatter. They can freely do unrelated things on subcomm2 */
    }
  } /* sub2sub */

  /*===========================================================================
   *  Copy a vector x defined on PETSC_COMM_WORLD to vectors y defined on
   *  every subcommunicator of PETSC_COMM_WORLD. We could use multiple transfers
   *  as we did in case 1, but that is not efficient. Instead, we use one vecscatter
   *  to achieve that.
   *===========================================================================*/
  if (world2subs) {
    Vec          y;
    PetscInt     n,N=15,xstart,ystart,low,high;
    PetscScalar *yvalue;

    /* Initialize x to [0, 1, 2, 3, ..., N-1] */
    CHKERRQ(VecCreate(PETSC_COMM_WORLD, &x));
    CHKERRQ(VecSetSizes(x, PETSC_DECIDE, N));
    if (iscuda) {
      CHKERRQ(VecSetType(x, VECCUDA));
    } else {
      CHKERRQ(VecSetType(x, VECSTANDARD));
    }
    CHKERRQ(VecSetUp(x));
    CHKERRQ(VecGetOwnershipRange(x,&low,&high));
    for (i=low; i<high; i++) CHKERRQ(VecSetValue(x,i,(PetscScalar)i,INSERT_VALUES));
    CHKERRQ(VecAssemblyBegin(x));
    CHKERRQ(VecAssemblyEnd(x));

    /* Every subcomm has a y as long as x */
    CHKERRQ(VecCreate(subcomm, &y));
    CHKERRQ(VecSetSizes(y, PETSC_DECIDE, N));
    if (iscuda) {
      CHKERRQ(VecSetType(y, VECCUDA));
    } else {
      CHKERRQ(VecSetType(y, VECSTANDARD));
    }
    CHKERRQ(VecSetUp(y));
    CHKERRQ(VecGetLocalSize(y,&n));

    /* Create a global vector yg on PETSC_COMM_WORLD using y's memory. yg's global size = N*(number of subcommunicators).
       Each rank in the subcomms contributes a piece to construct the global yg. Keep in mind that pieces from a subcomm are not
       necessarily consecutive in yg. That depends on how PETSC_COMM_WORLD is split. In our case, subcomm0 is made of ranks
       0, 3, 6, etc. of PETSC_COMM_WORLD, so subcomm0's pieces are interleaved with pieces from the other subcomms in yg.
     */
    if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
      CHKERRQ(VecCUDAGetArray(y,&yvalue));
      CHKERRQ(VecCreateMPICUDAWithArray(PETSC_COMM_WORLD,1,n,PETSC_DECIDE,yvalue,&yg));
#endif
    } else {
      CHKERRQ(VecGetArray(y,&yvalue));
      CHKERRQ(VecCreateMPIWithArray(PETSC_COMM_WORLD,1,n,PETSC_DECIDE,yvalue,&yg));
    }
    CHKERRQ(PetscObjectSetName((PetscObject)yg,"yg_on_subcomms")); /* Give a name to view yg clearly */

    /* The following two lines are key. From xstart, we know where to pull entries from x.
       Note that we get xstart from y, since the first entry of y on this rank comes from x[xstart].
       From ystart, we know where to put entries into yg.
     */
    CHKERRQ(VecGetOwnershipRange(y,&xstart,NULL));
    CHKERRQ(VecGetOwnershipRange(yg,&ystart,NULL));

    CHKERRQ(ISCreateStride(PETSC_COMM_SELF,n,xstart,1,&ix));
    CHKERRQ(ISCreateStride(PETSC_COMM_SELF,n,ystart,1,&iy));
    CHKERRQ(VecScatterCreate(x,ix,yg,iy,&vscat));
    CHKERRQ(VecScatterBegin(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));
    CHKERRQ(VecScatterEnd(vscat,x,yg,INSERT_VALUES,SCATTER_FORWARD));

    /* View yg on PETSC_COMM_WORLD before destroying it. We shall see the interleaving effect in the output. */
    CHKERRQ(VecView(yg,PETSC_VIEWER_STDOUT_WORLD));
    CHKERRQ(VecDestroy(&yg));

    /* Restore yvalue so that processes in subcomm can use y from now on. */
    if (iscuda) {
#if defined(PETSC_HAVE_CUDA)
      CHKERRQ(VecCUDARestoreArray(y,&yvalue));
#endif
    } else {
      CHKERRQ(VecRestoreArray(y,&yvalue));
    }
    CHKERRQ(VecScale(y,3.0));

    CHKERRQ(ISDestroy(&ix)); /* One can also destroy ix, iy immediately after VecScatterCreate() */
    CHKERRQ(ISDestroy(&iy));
    CHKERRQ(VecDestroy(&x));
    CHKERRQ(VecDestroy(&y));
    CHKERRQ(VecScatterDestroy(&vscat));
  } /* world2subs */

  CHKERRMPI(MPI_Comm_free(&subcomm));
  CHKERRQ(PetscFinalize());
  return 0;
}

/*TEST

   build:
     requires: !defined(PETSC_HAVE_MPIUNI)

   testset:
     nsize: 7

     test:
       suffix: 1
       args: -world2sub

     test:
       suffix: 2
       args: -sub2sub
       # deadlocks with NECMPI and INTELMPI (20210400300)
       requires: !defined(PETSC_HAVE_NECMPI) !defined(PETSC_HAVE_I_MPI_NUMVERSION)

     test:
       suffix: 3
       args: -world2subs

     test:
       suffix: 4
       args: -world2sub -vectype cuda
       requires: cuda

     test:
       suffix: 5
       args: -sub2sub -vectype cuda
       requires: cuda

     test:
       suffix: 6
       args: -world2subs -vectype cuda
       requires: cuda

     test:
       suffix: 7
       args: -world2sub -sf_type neighbor
       output_file: output/ex9_1.out
       # OpenMPI has a bug wrt MPI_Neighbor_alltoallv etc. (https://github.com/open-mpi/ompi/pull/6782). Once the patch is in, we can remove !defined(PETSC_HAVE_OMPI_MAJOR_VERSION)
       # segfaults with NECMPI
       requires: defined(PETSC_HAVE_MPI_NEIGHBORHOOD_COLLECTIVES) !defined(PETSC_HAVE_OMPI_MAJOR_VERSION) !defined(PETSC_HAVE_NECMPI)
TEST*/