#include <petscsys.h>        /*I  "petscsys.h"  I*/

PetscLogEvent PETSC_BuildTwoSided,PETSC_BuildTwoSidedF;

const char *const PetscBuildTwoSidedTypes[] = {
  "ALLREDUCE",
  "IBARRIER",
  "REDSCATTER",
  "PetscBuildTwoSidedType",
  "PETSC_BUILDTWOSIDED_",
  0
};

static PetscBuildTwoSidedType _twosided_type = PETSC_BUILDTWOSIDED_NOTSET;

#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedSetType"
/*@
   PetscCommBuildTwoSidedSetType - set algorithm to use when building two-sided communication

   Logically Collective

   Input Arguments:
+  comm - communicator (currently must be PETSC_COMM_WORLD since the setting is global)
-  twosided - algorithm to use in subsequent calls to PetscCommBuildTwoSided()

   Level: developer

   Note:
   This option is currently global, but could be made per-communicator.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedGetType()
@*/
PetscErrorCode PetscCommBuildTwoSidedSetType(MPI_Comm comm,PetscBuildTwoSidedType twosided)
{
  PetscFunctionBegin;
#if defined(PETSC_USE_DEBUG)
  {                             /* We don't have a PetscObject so can't use PetscValidLogicalCollectiveEnum */
    PetscMPIInt ierr;
    PetscMPIInt b1[2],b2[2];
    b1[0] = -(PetscMPIInt)twosided;
    b1[1] = (PetscMPIInt)twosided;
    ierr  = MPIU_Allreduce(b1,b2,2,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
    if (-b2[0] != b2[1]) SETERRQ(comm,PETSC_ERR_ARG_WRONG,"Enum value must be same on all processes");
  }
#endif
  _twosided_type = twosided;
  PetscFunctionReturn(0);
}
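/* A minimal usage sketch (not part of the library): select the Ibarrier algorithm
 * programmatically, equivalent to running with -build_twosided ibarrier.
 *
 *   ierr = PetscCommBuildTwoSidedSetType(PETSC_COMM_WORLD,PETSC_BUILDTWOSIDED_IBARRIER);CHKERRQ(ierr);
 */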
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedGetType"
/*@
   PetscCommBuildTwoSidedGetType - get algorithm to use when building two-sided communication

   Logically Collective

   Input Arguments:
.  comm - communicator on which to query algorithm

   Output Arguments:
.  twosided - algorithm to use for PetscCommBuildTwoSided()

   Level: developer

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedSetType()
@*/
PetscErrorCode PetscCommBuildTwoSidedGetType(MPI_Comm comm,PetscBuildTwoSidedType *twosided)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  *twosided = PETSC_BUILDTWOSIDED_NOTSET;
  if (_twosided_type == PETSC_BUILDTWOSIDED_NOTSET) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
# if defined(PETSC_HAVE_MPICH_CH3_SOCK) && !defined(PETSC_HAVE_MPICH_CH3_SOCK_FIXED_NBC_PROGRESS)
    /* Deadlock in Ibarrier: http://trac.mpich.org/projects/mpich/ticket/1785 */
    _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
# else
    _twosided_type = PETSC_BUILDTWOSIDED_IBARRIER;
# endif
#else
    _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
#endif
    ierr = PetscOptionsGetEnum(NULL,NULL,"-build_twosided",PetscBuildTwoSidedTypes,(PetscEnum*)&_twosided_type,NULL);CHKERRQ(ierr);
  }
  *twosided = _twosided_type;
  PetscFunctionReturn(0);
}
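/* The Ibarrier variant below implements the nonblocking consensus (NBX) algorithm of
 * Hoefler, Siebert, and Lumsdaine (2010), cited in the man pages below: post synchronous
 * sends (MPI_Issend), poll for incoming messages with MPI_Iprobe, and once the local sends
 * have completed (hence have been matched by receives), start an MPI_Ibarrier; when the
 * barrier completes, no further rendezvous messages can be in flight. */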
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)

#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_Ibarrier"
static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,barrier;
  PetscSegBuffer segrank,segdata;
  PetscBool      barrier_started;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  /* MPICH-3.2 sometimes does not create a request in some "optimized" cases.  This is arguably a standard violation,
   * but we need to work around it. */
  barrier_started = PETSC_FALSE;
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      nrecvs++;
    }
    if (!barrier_started) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        barrier_started = PETSC_TRUE;
        ierr = PetscFree(sendreqs);CHKERRQ(ierr);
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  ierr   = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif
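/* The Allreduce variant below counts incoming messages by reducing a length-size flag
 * array through PetscGatherNumberOfMessages(), so it needs O(size) memory per rank but
 * does not require MPI-3 nonblocking collectives. */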
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_Allreduce"
static PetscErrorCode PetscCommBuildTwoSided_Allreduce(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,*iflags,nrecvs,tag,*franks,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata,*fdata;
  MPI_Request    *reqs,*sendreqs;
  MPI_Status     *statuses;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscCalloc1(size,&iflags);CHKERRQ(ierr);
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  ierr = PetscGatherNumberOfMessages(comm,iflags,NULL,&nrecvs);CHKERRQ(ierr);
  ierr = PetscFree(iflags);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr  = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata = (char*)todata;
  ierr  = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;
  *(void**)fromdata = fdata;
  PetscFunctionReturn(0);
}
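/* The RedScatter variant below computes the number of incoming messages directly with
 * MPI_Reduce_scatter_block (MPI-2.2) instead of PetscGatherNumberOfMessages(); the
 * subsequent payload exchange is identical to the Allreduce variant. */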
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_RedScatter"
static PetscErrorCode PetscCommBuildTwoSided_RedScatter(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,*iflags,nrecvs,tag,*franks,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata,*fdata;
  MPI_Request    *reqs,*sendreqs;
  MPI_Status     *statuses;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc1(size,&iflags);CHKERRQ(ierr);
  ierr = PetscMemzero(iflags,size*sizeof(*iflags));CHKERRQ(ierr);
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  ierr = MPI_Reduce_scatter_block(iflags,&nrecvs,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  ierr = PetscFree(iflags);CHKERRQ(ierr);

  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr  = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata = (char*)todata;
  ierr  = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;
  *(void**)fromdata = fdata;
  PetscFunctionReturn(0);
}
#endif
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided"
/*@C
   PetscCommBuildTwoSided - discovers communicating ranks given one-sided information, moving constant-sized data in the process (often message lengths)

   Collective on MPI_Comm

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
-  todata - data to send to each rank (packed)

   Output Arguments:
+  nfrom - number of ranks receiving messages from
.  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Options Database Keys:
.  -build_twosided <allreduce|ibarrier|redscatter> - algorithm to set up two-sided communication

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other constant-size data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
   The MPI_Ibarrier implementation uses the algorithm in Hoefler, Siebert and Lumsdaine,
   Scalable communication protocols for dynamic sparse data exchange, 2010.

.seealso: PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSided(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode         ierr;
  PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;

  PetscFunctionBegin;
  ierr = PetscSysInitializePackage();CHKERRQ(ierr);
  ierr = PetscLogEventBegin(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
  switch (buildtype) {
  case PETSC_BUILDTWOSIDED_IBARRIER:
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)
    ierr = PetscCommBuildTwoSided_Ibarrier(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
#endif
    break;
  case PETSC_BUILDTWOSIDED_ALLREDUCE:
    ierr = PetscCommBuildTwoSided_Allreduce(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
    break;
  case PETSC_BUILDTWOSIDED_REDSCATTER:
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
    ierr = PetscCommBuildTwoSided_RedScatter(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Reduce_scatter_block (part of MPI-2.2)");
#endif
    break;
  default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
  }
  ierr = PetscLogEventEnd(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
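/* A minimal usage sketch (hypothetical variable names): each rank tells the ranks in
 * toranks[] how many entries it will send in some later exchange, and discovers the
 * corresponding information about its incoming messages.
 *
 *   PetscMPIInt nto = 2,toranks[2] = {left,right},tolen[2] = {n0,n1};  // left,right,n0,n1 assumed known
 *   PetscMPIInt nfrom,*fromranks,*fromlen;
 *   ierr = PetscCommBuildTwoSided(comm,1,MPI_INT,nto,toranks,tolen,&nfrom,&fromranks,&fromlen);CHKERRQ(ierr);
 *   // fromranks[i] will send fromlen[i] entries; the caller frees both arrays
 *   ierr = PetscFree(fromranks);CHKERRQ(ierr);
 *   ierr = PetscFree(fromlen);CHKERRQ(ierr);
 */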
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedFReq_Reference"
static PetscErrorCode PetscCommBuildTwoSidedFReq_Reference(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                           PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                           PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                           PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    i,*tag;
  MPI_Aint       lb,unitbytes;
  MPI_Request    *sendreq,*recvreq;

  PetscFunctionBegin;
  ierr = PetscMalloc1(ntags,&tag);CHKERRQ(ierr);
  if (ntags > 0) {
    ierr = PetscCommDuplicate(comm,&comm,&tag[0]);CHKERRQ(ierr);
  }
  for (i=1; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tag[i]);CHKERRQ(ierr);
  }

  /* Perform complete initial rendezvous */
  ierr = PetscCommBuildTwoSided(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);

  ierr = PetscMalloc1(nto*ntags,&sendreq);CHKERRQ(ierr);
  ierr = PetscMalloc1(*nfrom*ntags,&recvreq);CHKERRQ(ierr);

  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  for (i=0; i<nto; i++) {
    PetscMPIInt k;
    for (k=0; k<ntags; k++) sendreq[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*send)(comm,tag,i,toranks[i],((char*)todata)+count*unitbytes*i,sendreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  for (i=0; i<*nfrom; i++) {
    void *header = (*(char**)fromdata) + count*unitbytes*i;
    PetscMPIInt k;
    for (k=0; k<ntags; k++) recvreq[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*recv)(comm,tag,(*fromranks)[i],header,recvreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  ierr = PetscFree(tag);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  *toreqs   = sendreq;
  *fromreqs = recvreq;
  PetscFunctionReturn(0);
}
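/* Like PetscCommBuildTwoSided_Ibarrier() above, but the payload sends are posted (via the
 * send callback) immediately after the rendezvous Issends, and each payload receive is
 * posted from the recv callback as soon as the matching rendezvous message is probed, so
 * the payload exchange overlaps the discovery phase instead of following it. */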
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)

#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedFReq_Ibarrier"
static PetscErrorCode PetscCommBuildTwoSidedFReq_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                          PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                          PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                          PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,*tags,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,*usendreqs,*req,barrier;
  PetscSegBuffer segrank,segdata,segreq;
  PetscBool      barrier_started;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = PetscMalloc1(ntags,&tags);CHKERRQ(ierr);
  for (i=0; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tags[i]);CHKERRQ(ierr);
  }
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  ierr  = PetscMalloc1(nto*ntags,&usendreqs);CHKERRQ(ierr);
  /* Post synchronous sends */
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  /* Post actual payloads.  These are typically larger messages.  Hopefully sending these later does not slow down the
   * synchronous messages above. */
  for (i=0; i<nto; i++) {
    PetscMPIInt k;
    for (k=0; k<ntags; k++) usendreqs[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*send)(comm,tags,i,toranks[i],tdata+count*unitbytes*i,usendreqs+i*ntags,ctx);CHKERRQ(ierr);
  }

  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(sizeof(MPI_Request),4,&segreq);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  /* MPICH-3.2 sometimes does not create a request in some "optimized" cases.  This is arguably a standard violation,
   * but we need to work around it. */
  barrier_started = PETSC_FALSE;
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank,k;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segreq,ntags,&req);CHKERRQ(ierr);
      for (k=0; k<ntags; k++) req[k] = MPI_REQUEST_NULL;
      ierr = (*recv)(comm,tags,status.MPI_SOURCE,buf,req,ctx);CHKERRQ(ierr);
      nrecvs++;
    }
    if (!barrier_started) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        barrier_started = PETSC_TRUE;
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }

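  /* The barrier has completed, so every synchronous send has been matched by a receive
   * and no further rendezvous messages can arrive; package the results. */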
  *nfrom = nrecvs;
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  *toreqs = usendreqs;
  ierr    = PetscSegBufferExtractAlloc(segreq,fromreqs);CHKERRQ(ierr);
  ierr    = PetscSegBufferDestroy(&segreq);CHKERRQ(ierr);
  ierr    = PetscFree(sendreqs);CHKERRQ(ierr);
  ierr    = PetscFree(tags);CHKERRQ(ierr);
  ierr    = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedF"
/*@C
   PetscCommBuildTwoSidedF - discovers communicating ranks given one-sided information, calling user-defined functions during rendezvous

   Collective on MPI_Comm

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive in initial rendezvous (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
.  todata - data to send to each rank (packed)
.  ntags - number of tags needed by send/recv callbacks
.  send - callback invoked on sending process when ready to send primary payload
.  recv - callback invoked on receiving process after delivery of rendezvous message
-  ctx - context for callbacks

   Output Arguments:
+  nfrom - number of ranks receiving messages from
.  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
   The MPI_Ibarrier implementation uses the algorithm in Hoefler, Siebert and Lumsdaine,
   Scalable communication protocols for dynamic sparse data exchange, 2010.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedFReq(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSidedF(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,
                                       PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                       PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  MPI_Request    *toreqs,*fromreqs;

  PetscFunctionBegin;
  ierr = PetscCommBuildTwoSidedFReq(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,&toreqs,&fromreqs,send,recv,ctx);CHKERRQ(ierr);
  ierr = MPI_Waitall(nto*ntags,toreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = MPI_Waitall(*nfrom*ntags,fromreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = PetscFree(toreqs);CHKERRQ(ierr);
  ierr = PetscFree(fromreqs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
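/* A usage sketch with hypothetical callbacks (MyCtx, mysend, and myrecv are illustrative,
 * not part of PETSc): the rendezvous message carries the payload length and each callback
 * posts one nonblocking payload message, so ntags == 1.
 *
 *   static PetscErrorCode mysend(MPI_Comm comm,const PetscMPIInt tags[],PetscMPIInt i,PetscMPIInt rank,void *data,MPI_Request req[],void *ctx)
 *   {
 *     MyCtx          *c = (MyCtx*)ctx;   // c->payload[i]/c->len[i]: payload for the i-th destination
 *     PetscErrorCode ierr;
 *
 *     PetscFunctionBegin;
 *     ierr = MPI_Isend(c->payload[i],c->len[i],MPI_DOUBLE,rank,tags[0],comm,&req[0]);CHKERRQ(ierr);
 *     PetscFunctionReturn(0);
 *   }
 *
 * and symmetrically for myrecv(), which reads the incoming length from *(PetscMPIInt*)data,
 * allocates a buffer, and posts an MPI_Irecv() with tags[0] into req[0].  Then:
 *
 *   ierr = PetscCommBuildTwoSidedF(comm,1,MPI_INT,nto,toranks,lens,&nfrom,&fromranks,&fromlens,1,mysend,myrecv,&myctx);CHKERRQ(ierr);
 */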
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedFReq"
/*@C
   PetscCommBuildTwoSidedFReq - discovers communicating ranks given one-sided information, calling user-defined functions during rendezvous, returns requests

   Collective on MPI_Comm

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive in initial rendezvous (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
.  todata - data to send to each rank (packed)
.  ntags - number of tags needed by send/recv callbacks
.  send - callback invoked on sending process when ready to send primary payload
.  recv - callback invoked on receiving process after delivery of rendezvous message
-  ctx - context for callbacks

   Output Arguments:
+  nfrom - number of ranks receiving messages from
.  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
.  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())
.  toreqs - array of nto*ntags sender requests (caller must wait on these, then PetscFree())
-  fromreqs - array of nfrom*ntags receiver requests (caller must wait on these, then PetscFree())

   Level: developer

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
   The MPI_Ibarrier implementation uses the algorithm in Hoefler, Siebert and Lumsdaine,
   Scalable communication protocols for dynamic sparse data exchange, 2010.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedF(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSidedFReq(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                          PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                          PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                          PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr,(*f)(MPI_Comm,PetscMPIInt,MPI_Datatype,PetscMPIInt,const PetscMPIInt[],const void*,
                           PetscMPIInt*,PetscMPIInt**,void*,PetscMPIInt,MPI_Request**,MPI_Request**,
                           PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                           PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx);
  PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;
  PetscMPIInt            i,size;

  PetscFunctionBegin;
  ierr = PetscSysInitializePackage();CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    if (toranks[i] < 0 || size <= toranks[i]) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"toranks[%d] %d not in comm size %d",i,toranks[i],size);
  }
  ierr = PetscLogEventBegin(PETSC_BuildTwoSidedF,0,0,0,0);CHKERRQ(ierr);
  ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
  switch (buildtype) {
  case PETSC_BUILDTWOSIDED_IBARRIER:
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)
    f = PetscCommBuildTwoSidedFReq_Ibarrier;
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
#endif
    break;
  case PETSC_BUILDTWOSIDED_ALLREDUCE:
  case PETSC_BUILDTWOSIDED_REDSCATTER:
    f = PetscCommBuildTwoSidedFReq_Reference;
    break;
  default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
  }
  ierr = (*f)(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,toreqs,fromreqs,send,recv,ctx);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PETSC_BuildTwoSidedF,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}