xref: /petsc/src/sys/utils/mpits.c (revision 6afbe7ddf4b0c8d21e60d8c450d907cb7de56747)
1f6ced4a3SJed Brown #include <petscsys.h>        /*I  "petscsys.h"  I*/
2f6ced4a3SJed Brown 
/* Logging event for the rendezvous performed by PetscCommBuildTwoSided() */
PetscLogEvent PETSC_BuildTwoSided;

/* String names for PetscBuildTwoSidedType in the layout PetscOptionsGetEnum()
   expects: one string per enum value, then the enum type name, then the common
   prefix of the enum constants, then a NULL sentinel. */
const char *const PetscBuildTwoSidedTypes[] = {
  "ALLREDUCE",
  "IBARRIER",
  "REDSCATTER",
  "PetscBuildTwoSidedType",
  "PETSC_BUILDTWOSIDED_",
  0
};

/* Process-global algorithm selection; stays NOTSET until
   PetscCommBuildTwoSidedSetType() is called or the first
   PetscCommBuildTwoSidedGetType() picks a default. */
static PetscBuildTwoSidedType _twosided_type = PETSC_BUILDTWOSIDED_NOTSET;
15f6ced4a3SJed Brown 
16f6ced4a3SJed Brown #undef __FUNCT__
176145cd65SJed Brown #define __FUNCT__ "PetscCommBuildTwoSidedSetType"
186145cd65SJed Brown /*@
196145cd65SJed Brown    PetscCommBuildTwoSidedSetType - set algorithm to use when building two-sided communication
206145cd65SJed Brown 
216145cd65SJed Brown    Logically Collective
226145cd65SJed Brown 
236145cd65SJed Brown    Input Arguments:
246145cd65SJed Brown +  comm - PETSC_COMM_WORLD
256145cd65SJed Brown -  twosided - algorithm to use in subsequent calls to PetscCommBuildTwoSided()
266145cd65SJed Brown 
276145cd65SJed Brown    Level: developer
286145cd65SJed Brown 
296145cd65SJed Brown    Note:
306145cd65SJed Brown    This option is currently global, but could be made per-communicator.
316145cd65SJed Brown 
326145cd65SJed Brown .seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedGetType()
336145cd65SJed Brown @*/
346145cd65SJed Brown PetscErrorCode PetscCommBuildTwoSidedSetType(MPI_Comm comm,PetscBuildTwoSidedType twosided)
356145cd65SJed Brown {
366145cd65SJed Brown   PetscFunctionBegin;
376145cd65SJed Brown #if defined(PETSC_USE_DEBUG)
386145cd65SJed Brown   {                             /* We don't have a PetscObject so can't use PetscValidLogicalCollectiveEnum */
396145cd65SJed Brown     PetscMPIInt ierr;
406145cd65SJed Brown     PetscMPIInt b1[2],b2[2];
416145cd65SJed Brown     b1[0] = -(PetscMPIInt)twosided;
426145cd65SJed Brown     b1[1] = (PetscMPIInt)twosided;
436145cd65SJed Brown     ierr  = MPI_Allreduce(b1,b2,2,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
446145cd65SJed Brown     if (-b2[0] != b2[1]) SETERRQ(comm,PETSC_ERR_ARG_WRONG,"Enum value must be same on all processes");
456145cd65SJed Brown   }
466145cd65SJed Brown #endif
476145cd65SJed Brown   _twosided_type = twosided;
486145cd65SJed Brown   PetscFunctionReturn(0);
496145cd65SJed Brown }
506145cd65SJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedGetType"
/*@
   PetscCommBuildTwoSidedGetType - get algorithm to use when building two-sided communication

   Logically Collective

   Input Arguments:
.  comm - communicator on which to query algorithm

   Output Arguments:
.  twosided - algorithm to use for PetscCommBuildTwoSided()

   Level: developer

   Note:
   On first call with no type set, a default is chosen based on MPI capabilities
   and may be overridden with the -build_twosided option.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedSetType()
@*/
PetscErrorCode PetscCommBuildTwoSidedGetType(MPI_Comm comm,PetscBuildTwoSidedType *twosided)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  *twosided = PETSC_BUILDTWOSIDED_NOTSET;
  if (_twosided_type == PETSC_BUILDTWOSIDED_NOTSET) {
    /* Pick a compile-time default: prefer Ibarrier when MPI-3 provides it,
       except on MPICH ch3:sock builds with the known nonblocking-collective
       progress bug, where Allreduce is used instead. */
#if defined(PETSC_HAVE_MPI_IBARRIER)
#  if defined(PETSC_HAVE_MPICH_CH3_SOCK) && !defined(PETSC_HAVE_MPICH_CH3_SOCK_FIXED_NBC_PROGRESS)
    /* Deadlock in Ibarrier: http://trac.mpich.org/projects/mpich/ticket/1785 */
    _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
#  else
    _twosided_type = PETSC_BUILDTWOSIDED_IBARRIER;
#  endif
#else
    _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE;
#endif
    /* Allow run-time override from the options database */
    ierr = PetscOptionsGetEnum(NULL,"-build_twosided",PetscBuildTwoSidedTypes,(PetscEnum*)&_twosided_type,NULL);CHKERRQ(ierr);
  }
  *twosided = _twosided_type;
  PetscFunctionReturn(0);
}
88f6ced4a3SJed Brown 
89f6ced4a3SJed Brown #if defined(PETSC_HAVE_MPI_IBARRIER)
90f6ced4a3SJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_Ibarrier"
/* Scalable rendezvous using the nonblocking-consensus (NBX) algorithm of
 * Hoefler, Siebert and Lumsdaine (2010): post synchronous sends, service
 * incoming probes, and enter a nonblocking barrier once all local sends have
 * completed; the exchange is done when the barrier completes on all ranks.
 * Receives of unknown count are accumulated in segmented buffers. */
static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,barrier;
  PetscSegBuffer segrank,segdata;

  PetscFunctionBegin;
  /* Duplicate so probes on our private tag cannot match unrelated traffic */
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  /* Synchronous sends: completion implies the matching receive has started */
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      nrecvs++;
    }
    if (barrier == MPI_REQUEST_NULL) {
      /* All local sends delivered: join the nonblocking barrier */
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
        ierr = PetscFree(sendreqs);CHKERRQ(ierr);
      }
    } else {
      /* Barrier completion on all ranks means no more messages are in flight */
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  /* Hand segmented buffers to the caller as contiguous PetscFree-able arrays */
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  ierr   = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
149f6ced4a3SJed Brown #endif
150f6ced4a3SJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_Allreduce"
/* Rendezvous by first counting incoming messages collectively (via
 * PetscGatherNumberOfMessages over a 0/1 destination-flag array), then posting
 * matching nonblocking receives and sends.  Memory is O(size) per rank for the
 * flag array, unlike the Ibarrier variant. */
static PetscErrorCode PetscCommBuildTwoSided_Allreduce(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,*iflags,nrecvs,tag,*franks,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata,*fdata;
  MPI_Request    *reqs,*sendreqs;
  MPI_Status     *statuses;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscCalloc1(size,&iflags);CHKERRQ(ierr);
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  ierr = PetscGatherNumberOfMessages(comm,iflags,NULL,&nrecvs);CHKERRQ(ierr);
  ierr = PetscFree(iflags);CHKERRQ(ierr);

  /* Private communicator/tag so our wildcard receives cannot match other traffic */
  ierr     = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr     = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr     = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata    = (char*)todata;
  ierr     = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;     /* receive requests occupy the first nrecvs slots */
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  /* Sender ranks come from the statuses of the wildcard receives */
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;       /* caller frees */
  *(void**)fromdata = fdata;        /* caller frees */
  PetscFunctionReturn(0);
}
193f6ced4a3SJed Brown 
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided_RedScatter"
/* Rendezvous by counting incoming messages with MPI_Reduce_scatter_block over a
 * 0/1 destination-flag array, then posting matching nonblocking receives and
 * sends.  Memory is O(size) per rank for the flag array. */
static PetscErrorCode PetscCommBuildTwoSided_RedScatter(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    size,*iflags,nrecvs,tag,*franks,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata,*fdata;
  MPI_Request    *reqs,*sendreqs;
  MPI_Status     *statuses;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* Zeroed allocation in one call, consistent with the Allreduce variant
     (replaces separate PetscMalloc1 + PetscMemzero) */
  ierr = PetscCalloc1(size,&iflags);CHKERRQ(ierr);
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  /* Each rank receives the sum of the flags targeting it = its receive count */
  ierr = MPI_Reduce_scatter_block(iflags,&nrecvs,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  ierr = PetscFree(iflags);CHKERRQ(ierr);

  /* Private communicator/tag so our wildcard receives cannot match other traffic */
  ierr     = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr     = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr     = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata    = (char*)todata;
  ierr     = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;     /* receive requests occupy the first nrecvs slots */
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  /* Sender ranks come from the statuses of the wildcard receives */
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;       /* caller frees */
  *(void**)fromdata = fdata;        /* caller frees */
  PetscFunctionReturn(0);
}
#endif
2391654bf6bSJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSided"
/*@C
   PetscCommBuildTwoSided - discovers communicating ranks given one-sided information, moving constant-sized data in the process (often message lengths)

   Collective on MPI_Comm

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
-  todata - data to send to each rank (packed)

   Output Arguments:
+  nfrom - number of ranks receiving messages from
.  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Options Database Keys:
.  -build_twosided <allreduce|ibarrier|redscatter> - algorithm to set up two-sided communication

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other constant-size data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
   The MPI_Ibarrier implementation uses the algorithm in
   Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.

.seealso: PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSided(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode         ierr;
  PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;

  PetscFunctionBegin;
  ierr = PetscSysInitializePackage();CHKERRQ(ierr);
  ierr = PetscLogEventBegin(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  /* Resolve the (possibly defaulted / option-overridden) algorithm choice */
  ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
  switch (buildtype) {
  case PETSC_BUILDTWOSIDED_IBARRIER:
#if defined(PETSC_HAVE_MPI_IBARRIER)
    ierr = PetscCommBuildTwoSided_Ibarrier(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
#else
    /* Selected explicitly (e.g., via -build_twosided) but unsupported by this MPI */
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
#endif
    break;
  case PETSC_BUILDTWOSIDED_ALLREDUCE:
    ierr = PetscCommBuildTwoSided_Allreduce(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
    break;
  case PETSC_BUILDTWOSIDED_REDSCATTER:
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
    ierr = PetscCommBuildTwoSided_RedScatter(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Reduce_scatter_block (part of MPI-2.2)");
#endif
    break;
  default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
  }
  ierr = PetscLogEventEnd(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
309d815da10SJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedFReq_Reference"
/* Reference implementation of the callback/request variant: performs a full
 * PetscCommBuildTwoSided() rendezvous first, then invokes the user's send/recv
 * callbacks for the payload phase, returning the resulting requests.  Requests
 * (nto*ntags sends, *nfrom*ntags receives) are returned to the caller, who must
 * wait on and free them. */
static PetscErrorCode PetscCommBuildTwoSidedFReq_Reference(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                           PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                           PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                           PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt i,*tag;
  MPI_Aint    lb,unitbytes;
  MPI_Request *sendreq,*recvreq;

  PetscFunctionBegin;
  ierr = PetscMalloc1(ntags,&tag);CHKERRQ(ierr);
  /* tag[0] comes with the duplicated communicator; the rest are extra tags on it */
  if (ntags > 0) {
    ierr = PetscCommDuplicate(comm,&comm,&tag[0]);CHKERRQ(ierr);
  }
  for (i=1; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tag[i]);CHKERRQ(ierr);
  }

  /* Perform complete initial rendezvous */
  ierr = PetscCommBuildTwoSided(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);

  ierr = PetscMalloc1(nto*ntags,&sendreq);CHKERRQ(ierr);
  ierr = PetscMalloc1(*nfrom*ntags,&recvreq);CHKERRQ(ierr);

  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  /* Payload phase: each callback fills ntags requests per peer */
  for (i=0; i<nto; i++) {
    ierr = (*send)(comm,tag,i,toranks[i],((char*)todata)+count*unitbytes*i,sendreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  for (i=0; i<*nfrom; i++) {
    void *header = (*(char**)fromdata) + count*unitbytes*i;
    ierr = (*recv)(comm,tag,(*fromranks)[i],header,recvreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  ierr = PetscFree(tag);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  *toreqs = sendreq;
  *fromreqs = recvreq;
  PetscFunctionReturn(0);
}
352d815da10SJed Brown 
#if defined(PETSC_HAVE_MPI_IBARRIER)

#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedFReq_Ibarrier"
/* Nonblocking-consensus (Ibarrier) variant of the callback/request interface.
 * Like PetscCommBuildTwoSided_Ibarrier, but the user send callback is invoked
 * up front for the payloads and the recv callback is invoked as each rendezvous
 * message is discovered, so payload transfers overlap the consensus phase.
 * Returned requests (nto*ntags sends, *nfrom*ntags receives) must be waited on
 * and freed by the caller. */
static PetscErrorCode PetscCommBuildTwoSidedFReq_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                          PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                          PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                          PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,*tags,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,*usendreqs,*req,barrier;
  PetscSegBuffer segrank,segdata,segreq;

  PetscFunctionBegin;
  /* tag is for the rendezvous messages; tags[] are handed to the callbacks */
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = PetscMalloc1(ntags,&tags);CHKERRQ(ierr);
  for (i=0; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tags[i]);CHKERRQ(ierr);
  }
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);       /* internal rendezvous sends */
  ierr = PetscMalloc1(nto*ntags,&usendreqs);CHKERRQ(ierr); /* user payload sends, returned to caller */
  /* Post synchronous sends */
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  /* Post actual payloads.  These are typically larger messages.  Hopefully sending these later does not slow down the
   * synchronous messages above. */
  for (i=0; i<nto; i++) {
    ierr = (*send)(comm,tags,i,toranks[i],tdata+count*unitbytes*i,usendreqs+i*ntags,ctx);CHKERRQ(ierr);
  }

  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(sizeof(MPI_Request),4,&segreq);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      /* Immediately start the payload receive from this newly-discovered sender */
      ierr = PetscSegBufferGet(segreq,ntags,&req);CHKERRQ(ierr);
      ierr = (*recv)(comm,tags,status.MPI_SOURCE,buf,req,ctx);CHKERRQ(ierr);
      nrecvs++;
    }
    if (barrier == MPI_REQUEST_NULL) {
      /* All local rendezvous sends delivered: join the nonblocking barrier */
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  *toreqs = usendreqs;
  ierr = PetscSegBufferExtractAlloc(segreq,fromreqs);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segreq);CHKERRQ(ierr);
  ierr = PetscFree(sendreqs);CHKERRQ(ierr);
  ierr = PetscFree(tags);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif
436afb254cdSJed Brown 
#undef __FUNCT__
#define __FUNCT__ "PetscCommBuildTwoSidedF"
/*@C
   PetscCommBuildTwoSidedF - discovers communicating ranks given one-sided information, calling user-defined functions during rendezvous

   Collective on MPI_Comm

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive in initial rendezvous (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
.  todata - data to send to each rank (packed)
.  ntags - number of tags needed by send/recv callbacks
.  send - callback invoked on sending process when ready to send primary payload
.  recv - callback invoked on receiving process after delivery of rendezvous message
-  ctx - context for callbacks

   Output Arguments:
+  nfrom - number of ranks receiving messages from
.  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
   The MPI_Ibarrier implementation uses the algorithm in
   Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedFReq(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSidedF(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,
                                       PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                       PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  MPI_Request    *toreqs,*fromreqs;

  PetscFunctionBegin;
  /* Blocking convenience wrapper: run the request-returning variant, then wait
     on and release all of its payload requests before returning */
  ierr = PetscCommBuildTwoSidedFReq(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,&toreqs,&fromreqs,send,recv,ctx);CHKERRQ(ierr);
  ierr = MPI_Waitall(nto*ntags,toreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = MPI_Waitall(*nfrom*ntags,fromreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = PetscFree(toreqs);CHKERRQ(ierr);
  ierr = PetscFree(fromreqs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
490*6afbe7ddSJed Brown 
491*6afbe7ddSJed Brown #undef __FUNCT__
492*6afbe7ddSJed Brown #define __FUNCT__ "PetscCommBuildTwoSidedFReq"
493*6afbe7ddSJed Brown /*@C
494*6afbe7ddSJed Brown    PetscCommBuildTwoSidedFReq - discovers communicating ranks given one-sided information, calling user-defined functions during rendezvous, returns requests
495*6afbe7ddSJed Brown 
496*6afbe7ddSJed Brown    Collective on MPI_Comm
497*6afbe7ddSJed Brown 
498*6afbe7ddSJed Brown    Input Arguments:
499*6afbe7ddSJed Brown +  comm - communicator
500*6afbe7ddSJed Brown .  count - number of entries to send/receive in initial rendezvous (must match on all ranks)
501*6afbe7ddSJed Brown .  dtype - datatype to send/receive from each rank (must match on all ranks)
502*6afbe7ddSJed Brown .  nto - number of ranks to send data to
503*6afbe7ddSJed Brown .  toranks - ranks to send to (array of length nto)
504*6afbe7ddSJed Brown .  todata - data to send to each rank (packed)
505*6afbe7ddSJed Brown .  ntags - number of tags needed by send/recv callbacks
506*6afbe7ddSJed Brown .  send - callback invoked on sending process when ready to send primary payload
507*6afbe7ddSJed Brown .  recv - callback invoked on receiving process after delivery of rendezvous message
508*6afbe7ddSJed Brown -  ctx - context for callbacks
509*6afbe7ddSJed Brown 
510*6afbe7ddSJed Brown    Output Arguments:
511*6afbe7ddSJed Brown +  nfrom - number of ranks receiving messages from
512*6afbe7ddSJed Brown .  fromranks - ranks receiving messages from (length nfrom; caller should PetscFree())
513*6afbe7ddSJed Brown .  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())
514*6afbe7ddSJed Brown .  toreqs - array of nto*ntags sender requests (caller must wait on these, then PetscFree())
515*6afbe7ddSJed Brown -  fromreqs - array of nfrom*ntags receiver requests (caller must wait on these, then PetscFree())
516*6afbe7ddSJed Brown 
517*6afbe7ddSJed Brown    Level: developer
518*6afbe7ddSJed Brown 
519*6afbe7ddSJed Brown    Notes:
520*6afbe7ddSJed Brown    This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
521*6afbe7ddSJed Brown    PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.
522*6afbe7ddSJed Brown 
523*6afbe7ddSJed Brown    Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.
524*6afbe7ddSJed Brown 
525*6afbe7ddSJed Brown    References:
526*6afbe7ddSJed Brown    The MPI_Ibarrier implementation uses the algorithm in
527*6afbe7ddSJed Brown    Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.
528*6afbe7ddSJed Brown 
529*6afbe7ddSJed Brown .seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedF(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
530*6afbe7ddSJed Brown @*/
531*6afbe7ddSJed Brown PetscErrorCode PetscCommBuildTwoSidedFReq(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
532*6afbe7ddSJed Brown                                           PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
533*6afbe7ddSJed Brown                                           PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
534*6afbe7ddSJed Brown                                           PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
535*6afbe7ddSJed Brown {
536*6afbe7ddSJed Brown   PetscErrorCode         ierr,(*f)(MPI_Comm,PetscMPIInt,MPI_Datatype,PetscMPIInt,const PetscMPIInt[],const void*,
537*6afbe7ddSJed Brown                                    PetscMPIInt*,PetscMPIInt**,void*,PetscMPIInt,MPI_Request**,MPI_Request**,
538d815da10SJed Brown                                    PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
539d815da10SJed Brown                                    PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx);
540d815da10SJed Brown   PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;
541d815da10SJed Brown 
542d815da10SJed Brown   PetscFunctionBegin;
543d815da10SJed Brown   ierr = PetscSysInitializePackage();CHKERRQ(ierr);
544d815da10SJed Brown   ierr = PetscLogEventBegin(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
545d815da10SJed Brown   ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
546d815da10SJed Brown   switch (buildtype) {
547d815da10SJed Brown   case PETSC_BUILDTWOSIDED_IBARRIER:
548afb254cdSJed Brown #if defined(PETSC_HAVE_MPI_IBARRIER)
549*6afbe7ddSJed Brown     f = PetscCommBuildTwoSidedFReq_Ibarrier;
550afb254cdSJed Brown #else
551afb254cdSJed Brown     SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
552afb254cdSJed Brown #endif
553afb254cdSJed Brown     break;
554d815da10SJed Brown   case PETSC_BUILDTWOSIDED_ALLREDUCE:
555d815da10SJed Brown   case PETSC_BUILDTWOSIDED_REDSCATTER:
556*6afbe7ddSJed Brown     f = PetscCommBuildTwoSidedFReq_Reference;
557d815da10SJed Brown     break;
558d815da10SJed Brown   default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
559d815da10SJed Brown   }
560*6afbe7ddSJed Brown   ierr = (*f)(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,toreqs,fromreqs,send,recv,ctx);CHKERRQ(ierr);
561d815da10SJed Brown   ierr = PetscLogEventEnd(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
562d815da10SJed Brown   PetscFunctionReturn(0);
563d815da10SJed Brown }
564