/* xref: /petsc/src/sys/utils/mpits.c (revision b458e8f169278db94fa1d489e1d3db410fc8a4c8) */
#include <petscsys.h>        /*I  "petscsys.h"  I*/
#include <petsc/private/petscimpl.h>

PetscLogEvent PETSC_BuildTwoSided;
PetscLogEvent PETSC_BuildTwoSidedF;

const char *const PetscBuildTwoSidedTypes[] = {
  "ALLREDUCE",
  "IBARRIER",
  "REDSCATTER",
  "PetscBuildTwoSidedType",
  "PETSC_BUILDTWOSIDED_",
  NULL
};

static PetscBuildTwoSidedType _twosided_type = PETSC_BUILDTWOSIDED_NOTSET;

/*@
   PetscCommBuildTwoSidedSetType - set the algorithm to use when building two-sided communication

   Logically Collective

   Input Arguments:
+  comm - communicator (the setting is currently global, so this is used only for collective error checking)
-  twosided - algorithm to use in subsequent calls to PetscCommBuildTwoSided()

   Level: developer

   Note:
   This option is currently global, but could be made per-communicator.
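
   Example Usage:
   A minimal sketch (assuming PETSc has already been initialized) that forces the Ibarrier algorithm for all
   subsequent calls; the same effect can be obtained with the command-line option -build_twosided ibarrier:
.vb
   PetscErrorCode ierr;
   ierr = PetscCommBuildTwoSidedSetType(PETSC_COMM_WORLD,PETSC_BUILDTWOSIDED_IBARRIER);CHKERRQ(ierr);
.ve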

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedGetType()
@*/
PetscErrorCode PetscCommBuildTwoSidedSetType(MPI_Comm comm,PetscBuildTwoSidedType twosided)
{
  PetscFunctionBegin;
  if (PetscDefined(USE_DEBUG)) {                             /* We don't have a PetscObject so can't use PetscValidLogicalCollectiveEnum */
    PetscMPIInt ierr;
    PetscMPIInt b1[2],b2[2];
    b1[0] = -(PetscMPIInt)twosided;
    b1[1] = (PetscMPIInt)twosided;
    ierr  = MPIU_Allreduce(b1,b2,2,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
    if (-b2[0] != b2[1]) SETERRQ(comm,PETSC_ERR_ARG_WRONG,"Enum value must be the same on all processes");
  }
  _twosided_type = twosided;
  PetscFunctionReturn(0);
}

/*@
   PetscCommBuildTwoSidedGetType - get the algorithm used when building two-sided communication

   Logically Collective

   Input Argument:
.  comm - communicator on which to query the algorithm

   Output Argument:
.  twosided - algorithm that will be used by PetscCommBuildTwoSided()

   Level: developer

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedSetType()
@*/
PetscErrorCode PetscCommBuildTwoSidedGetType(MPI_Comm comm,PetscBuildTwoSidedType *twosided)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  *twosided = PETSC_BUILDTWOSIDED_NOTSET;
  if (_twosided_type == PETSC_BUILDTWOSIDED_NOTSET) {
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    _twosided_type = PETSC_BUILDTWOSIDED_ALLREDUCE; /* default for small comms, see https://gitlab.com/petsc/petsc/-/merge_requests/2611 */
#if defined(PETSC_HAVE_MPI_IBARRIER)
    if (size > 1024) _twosided_type = PETSC_BUILDTWOSIDED_IBARRIER;
#endif
    ierr = PetscOptionsGetEnum(NULL,NULL,"-build_twosided",PetscBuildTwoSidedTypes,(PetscEnum*)&_twosided_type,NULL);CHKERRQ(ierr);
  }
  *twosided = _twosided_type;
  PetscFunctionReturn(0);
}

#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)

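/* The Ibarrier variant implements the nonblocking-consensus (NBX) dynamic sparse data exchange of Hoefler,
 * Siebert and Lumsdaine (2010): each rank posts synchronous sends (MPI_Issend) for its outgoing messages and
 * then loops, probing for and receiving incoming messages.  Once all of a rank's own sends have completed it
 * enters a nonblocking barrier; the loop ends when that barrier completes, at which point every message has
 * been matched, so the set of senders to this rank is known without any global counting step. */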
static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,barrier;
  PetscSegBuffer segrank,segdata;
  PetscBool      barrier_started;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  /* MPICH-3.2 sometimes does not create a request in some "optimized" cases.  This is arguably a standard violation,
   * but we need to work around it. */
  barrier_started = PETSC_FALSE;
  for (done=0; !done;) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      nrecvs++;
    }
    if (!barrier_started) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        barrier_started = PETSC_TRUE;
        ierr = PetscFree(sendreqs);CHKERRQ(ierr);
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  ierr   = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif

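/* The Allreduce variant counts messages globally: each rank marks its destinations in a size-long flag array,
 * an in-place MPI_Allreduce sums the arrays, and entry [rank] of the result is the number of messages this
 * rank will receive.  The flag array is cached on the inner communicator (counter->iflags) so repeated calls
 * do not reallocate it. */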
static PetscErrorCode PetscCommBuildTwoSided_Allreduce(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size,rank,*iflags,nrecvs,tag,*franks,i,flg;
  MPI_Aint         lb,unitbytes;
  char             *tdata,*fdata;
  MPI_Request      *reqs,*sendreqs;
  MPI_Status       *statuses;
  PetscCommCounter *counter;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
  if (!counter->iflags) {
    ierr   = PetscCalloc1(size,&counter->iflags);CHKERRQ(ierr);
    iflags = counter->iflags;
  } else {
    iflags = counter->iflags;
    ierr   = PetscArrayzero(iflags,size);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  ierr     = MPIU_Allreduce(MPI_IN_PLACE,iflags,size,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  nrecvs   = iflags[rank];
  ierr     = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr     = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata    = (char*)todata;
  ierr     = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;
  *(void**)fromdata = fdata;
  PetscFunctionReturn(0);
}

#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
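/* The RedScatter variant is like the Allreduce one, but it uses MPI_Reduce_scatter_block so that each rank
 * receives only its own message count instead of the full summed flag array. */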
static PetscErrorCode PetscCommBuildTwoSided_RedScatter(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode   ierr;
  PetscMPIInt      size,*iflags,nrecvs,tag,*franks,i,flg;
  MPI_Aint         lb,unitbytes;
  char             *tdata,*fdata;
  MPI_Request      *reqs,*sendreqs;
  MPI_Status       *statuses;
  PetscCommCounter *counter;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Comm_get_attr(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
  if (!counter->iflags) {
    ierr   = PetscCalloc1(size,&counter->iflags);CHKERRQ(ierr);
    iflags = counter->iflags;
  } else {
    iflags = counter->iflags;
    ierr   = PetscArrayzero(iflags,size);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) iflags[toranks[i]] = 1;
  ierr     = MPI_Reduce_scatter_block(iflags,&nrecvs,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  ierr     = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  ierr     = PetscMalloc(nrecvs*count*unitbytes,&fdata);CHKERRQ(ierr);
  tdata    = (char*)todata;
  ierr     = PetscMalloc2(nto+nrecvs,&reqs,nto+nrecvs,&statuses);CHKERRQ(ierr);
  sendreqs = reqs + nrecvs;
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv((void*)(fdata+count*unitbytes*i),count,dtype,MPI_ANY_SOURCE,tag,comm,reqs+i);CHKERRQ(ierr);
  }
  for (i=0; i<nto; i++) {
    ierr = MPI_Isend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = MPI_Waitall(nto+nrecvs,reqs,statuses);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvs,&franks);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) franks[i] = statuses[i].MPI_SOURCE;
  ierr = PetscFree2(reqs,statuses);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);

  *nfrom            = nrecvs;
  *fromranks        = franks;
  *(void**)fromdata = fdata;
  PetscFunctionReturn(0);
}
#endif

/*@C
   PetscCommBuildTwoSided - discovers communicating ranks given one-sided information, moving constant-sized data in the process (often message lengths)

   Collective

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
-  todata - data to send to each rank (packed)

   Output Arguments:
+  nfrom - number of ranks from which messages will be received
.  fromranks - ranks from which messages will be received (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Options Database Keys:
.  -build_twosided <allreduce|ibarrier|redscatter> - algorithm to use when setting up two-sided communication. The default is allreduce for communicators with <= 1024 ranks, otherwise ibarrier (when available).

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other constant-size data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.
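
   Example Usage:
   A minimal sketch (assuming a PetscErrorCode ierr and an MPI_Comm comm with at least two ranks are in scope;
   the destination ranks and payload values are purely illustrative) in which every rank announces one
   PetscMPIInt, here a message length, to each of its receivers and discovers who will be sending to it:
.vb
   PetscMPIInt nto = 2,toranks[2] = {0,1},todata[2] = {17,42};
   PetscMPIInt nfrom,*fromranks,*fromdata;
   ierr = PetscCommBuildTwoSided(comm,1,MPI_INT,nto,toranks,todata,&nfrom,&fromranks,&fromdata);CHKERRQ(ierr);
   /* fromdata[i] is the length announced by rank fromranks[i] */
   ierr = PetscFree(fromranks);CHKERRQ(ierr);
   ierr = PetscFree(fromdata);CHKERRQ(ierr);
.ve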

   References:
.  1. - Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.
   The MPI_Ibarrier implementation uses the algorithm from this reference.

.seealso: PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSided(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode         ierr;
  PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;

  PetscFunctionBegin;
  ierr = PetscSysInitializePackage();CHKERRQ(ierr);
  ierr = PetscLogEventSync(PETSC_BuildTwoSided,comm);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
  switch (buildtype) {
  case PETSC_BUILDTWOSIDED_IBARRIER:
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)
    ierr = PetscCommBuildTwoSided_Ibarrier(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
    break;
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
#endif
  case PETSC_BUILDTWOSIDED_ALLREDUCE:
    ierr = PetscCommBuildTwoSided_Allreduce(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
    break;
  case PETSC_BUILDTWOSIDED_REDSCATTER:
#if defined(PETSC_HAVE_MPI_REDUCE_SCATTER_BLOCK)
    ierr = PetscCommBuildTwoSided_RedScatter(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);
    break;
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Reduce_scatter_block (part of MPI-2.2)");
#endif
  default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
  }
  ierr = PetscLogEventEnd(PETSC_BuildTwoSided,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

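/* Reference implementation of the callback variant: performs a complete rendezvous with
 * PetscCommBuildTwoSided() first, then posts the user-provided send/recv callbacks for the larger payloads
 * using freshly obtained tags. */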
static PetscErrorCode PetscCommBuildTwoSidedFReq_Reference(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                           PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                           PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                           PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt i,*tag;
  MPI_Aint    lb,unitbytes;
  MPI_Request *sendreq,*recvreq;

  PetscFunctionBegin;
  ierr = PetscMalloc1(ntags,&tag);CHKERRQ(ierr);
  if (ntags > 0) {
    ierr = PetscCommDuplicate(comm,&comm,&tag[0]);CHKERRQ(ierr);
  }
  for (i=1; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tag[i]);CHKERRQ(ierr);
  }

  /* Perform complete initial rendezvous */
  ierr = PetscCommBuildTwoSided(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata);CHKERRQ(ierr);

  ierr = PetscMalloc1(nto*ntags,&sendreq);CHKERRQ(ierr);
  ierr = PetscMalloc1(*nfrom*ntags,&recvreq);CHKERRQ(ierr);

  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  for (i=0; i<nto; i++) {
    PetscMPIInt k;
    for (k=0; k<ntags; k++) sendreq[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*send)(comm,tag,i,toranks[i],((char*)todata)+count*unitbytes*i,sendreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  for (i=0; i<*nfrom; i++) {
    void *header = (*(char**)fromdata) + count*unitbytes*i;
    PetscMPIInt k;
    for (k=0; k<ntags; k++) recvreq[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*recv)(comm,tag,(*fromranks)[i],header,recvreq+i*ntags,ctx);CHKERRQ(ierr);
  }
  ierr = PetscFree(tag);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  *toreqs = sendreq;
  *fromreqs = recvreq;
  PetscFunctionReturn(0);
}

#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)

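/* Ibarrier implementation of the callback variant: like PetscCommBuildTwoSided_Ibarrier, but the user send
 * callbacks are posted up front and the user recv callback is invoked as soon as each rendezvous message
 * arrives, so payload transfers overlap with the discovery loop. */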
static PetscErrorCode PetscCommBuildTwoSidedFReq_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                                          PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                                          PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                                          PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,*tags,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,*usendreqs,*req,barrier;
  PetscSegBuffer segrank,segdata,segreq;
  PetscBool      barrier_started;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = PetscMalloc1(ntags,&tags);CHKERRQ(ierr);
  for (i=0; i<ntags; i++) {
    ierr = PetscCommGetNewTag(comm,&tags[i]);CHKERRQ(ierr);
  }
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  ierr = PetscMalloc1(nto*ntags,&usendreqs);CHKERRQ(ierr);
  /* Post synchronous sends */
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  /* Post actual payloads.  These are typically larger messages.  Hopefully sending these later does not slow down the
   * synchronous messages above. */
  for (i=0; i<nto; i++) {
    PetscMPIInt k;
    for (k=0; k<ntags; k++) usendreqs[i*ntags+k] = MPI_REQUEST_NULL;
    ierr = (*send)(comm,tags,i,toranks[i],tdata+count*unitbytes*i,usendreqs+i*ntags,ctx);CHKERRQ(ierr);
  }

  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(sizeof(MPI_Request),4,&segreq);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
  /* MPICH-3.2 sometimes does not create a request in some "optimized" cases.  This is arguably a standard violation,
   * but we need to work around it. */
  barrier_started = PETSC_FALSE;
  for (done=0; !done;) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank,k;
      void        *buf;
      ierr = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      ierr = PetscSegBufferGet(segreq,ntags,&req);CHKERRQ(ierr);
      for (k=0; k<ntags; k++) req[k] = MPI_REQUEST_NULL;
      ierr = (*recv)(comm,tags,status.MPI_SOURCE,buf,req,ctx);CHKERRQ(ierr);
      nrecvs++;
    }
    if (!barrier_started) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        barrier_started = PETSC_TRUE;
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  *toreqs = usendreqs;
  ierr = PetscSegBufferExtractAlloc(segreq,fromreqs);CHKERRQ(ierr);
  ierr = PetscSegBufferDestroy(&segreq);CHKERRQ(ierr);
  ierr = PetscFree(sendreqs);CHKERRQ(ierr);
  ierr = PetscFree(tags);CHKERRQ(ierr);
  ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif

/*@C
   PetscCommBuildTwoSidedF - discovers communicating ranks given one-sided information, calling user-defined functions during the rendezvous

   Collective

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive in the initial rendezvous (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
.  todata - data to send to each rank (packed)
.  ntags - number of tags needed by the send/recv callbacks
.  send - callback invoked on the sending process when it is ready to send the primary payload
.  recv - callback invoked on the receiving process after delivery of the rendezvous message
-  ctx - context for the callbacks

   Output Arguments:
+  nfrom - number of ranks from which messages will be received
.  fromranks - ranks from which messages will be received (length nfrom; caller should PetscFree())
-  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())

   Level: developer

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.
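
   Example Usage:
   A sketch of the callback signatures this routine expects (the names MySend, MyRecv, dest_index, dest_rank
   and src_rank are purely illustrative); each callback may post up to ntags nonblocking operations using the
   supplied tags and must place any requests it creates in the provided array, whose slots are preset to
   MPI_REQUEST_NULL:
.vb
   PetscErrorCode MySend(MPI_Comm comm,const PetscMPIInt tags[],PetscMPIInt dest_index,PetscMPIInt dest_rank,void *header,MPI_Request reqs[],void *ctx);
   PetscErrorCode MyRecv(MPI_Comm comm,const PetscMPIInt tags[],PetscMPIInt src_rank,void *header,MPI_Request reqs[],void *ctx);
.ve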

   References:
.  1. - Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.
   The MPI_Ibarrier implementation uses the algorithm from this reference.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedFReq(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSidedF(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,
                                       PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                       PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode ierr;
  MPI_Request    *toreqs,*fromreqs;

  PetscFunctionBegin;
  ierr = PetscCommBuildTwoSidedFReq(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,&toreqs,&fromreqs,send,recv,ctx);CHKERRQ(ierr);
  ierr = MPI_Waitall(nto*ntags,toreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = MPI_Waitall(*nfrom*ntags,fromreqs,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
  ierr = PetscFree(toreqs);CHKERRQ(ierr);
  ierr = PetscFree(fromreqs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   PetscCommBuildTwoSidedFReq - discovers communicating ranks given one-sided information, calling user-defined functions during the rendezvous and returning the requests it posts

   Collective

   Input Arguments:
+  comm - communicator
.  count - number of entries to send/receive in the initial rendezvous (must match on all ranks)
.  dtype - datatype to send/receive from each rank (must match on all ranks)
.  nto - number of ranks to send data to
.  toranks - ranks to send to (array of length nto)
.  todata - data to send to each rank (packed)
.  ntags - number of tags needed by the send/recv callbacks
.  send - callback invoked on the sending process when it is ready to send the primary payload
.  recv - callback invoked on the receiving process after delivery of the rendezvous message
-  ctx - context for the callbacks

   Output Arguments:
+  nfrom - number of ranks from which messages will be received
.  fromranks - ranks from which messages will be received (length nfrom; caller should PetscFree())
.  fromdata - packed data from each rank, each with count entries of type dtype (length nfrom, caller responsible for PetscFree())
.  toreqs - array of nto*ntags sender requests (caller must wait on these, then PetscFree())
-  fromreqs - array of nfrom*ntags receiver requests (caller must wait on these, then PetscFree())

   Level: developer

   Notes:
   This memory-scalable interface is an alternative to calling PetscGatherNumberOfMessages() and
   PetscGatherMessageLengths(), possibly with a subsequent round of communication to send other data.

   Basic data types as well as contiguous types are supported, but non-contiguous (e.g., strided) types are not.

   References:
.  1. - Hoefler, Siebert and Lumsdaine, Scalable communication protocols for dynamic sparse data exchange, 2010.
   The MPI_Ibarrier implementation uses the algorithm from this reference.

.seealso: PetscCommBuildTwoSided(), PetscCommBuildTwoSidedF(), PetscGatherNumberOfMessages(), PetscGatherMessageLengths()
@*/
PetscErrorCode PetscCommBuildTwoSidedFReq(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,
                                          PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata,PetscMPIInt ntags,MPI_Request **toreqs,MPI_Request **fromreqs,
                                          PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                          PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx)
{
  PetscErrorCode         ierr,(*f)(MPI_Comm,PetscMPIInt,MPI_Datatype,PetscMPIInt,const PetscMPIInt[],const void*,
                                   PetscMPIInt*,PetscMPIInt**,void*,PetscMPIInt,MPI_Request**,MPI_Request**,
                                   PetscErrorCode (*send)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,PetscMPIInt,void*,MPI_Request[],void*),
                                   PetscErrorCode (*recv)(MPI_Comm,const PetscMPIInt[],PetscMPIInt,void*,MPI_Request[],void*),void *ctx);
  PetscBuildTwoSidedType buildtype = PETSC_BUILDTWOSIDED_NOTSET;
  PetscMPIInt            i,size;

  PetscFunctionBegin;
  ierr = PetscSysInitializePackage();CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    if (toranks[i] < 0 || size <= toranks[i]) SETERRQ3(comm,PETSC_ERR_ARG_OUTOFRANGE,"toranks[%d] %d not in comm size %d",i,toranks[i],size);
  }
  ierr = PetscLogEventSync(PETSC_BuildTwoSidedF,comm);CHKERRQ(ierr);
  ierr = PetscLogEventBegin(PETSC_BuildTwoSidedF,0,0,0,0);CHKERRQ(ierr);
  ierr = PetscCommBuildTwoSidedGetType(comm,&buildtype);CHKERRQ(ierr);
  switch (buildtype) {
  case PETSC_BUILDTWOSIDED_IBARRIER:
#if defined(PETSC_HAVE_MPI_IBARRIER) || defined(PETSC_HAVE_MPIX_IBARRIER)
    f = PetscCommBuildTwoSidedFReq_Ibarrier;
    break;
#else
    SETERRQ(comm,PETSC_ERR_PLIB,"MPI implementation does not provide MPI_Ibarrier (part of MPI-3)");
#endif
  case PETSC_BUILDTWOSIDED_ALLREDUCE:
  case PETSC_BUILDTWOSIDED_REDSCATTER:
    f = PetscCommBuildTwoSidedFReq_Reference;
    break;
  default: SETERRQ(comm,PETSC_ERR_PLIB,"Unknown method for building two-sided communication");
  }
  ierr = (*f)(comm,count,dtype,nto,toranks,todata,nfrom,fromranks,fromdata,ntags,toreqs,fromreqs,send,recv,ctx);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(PETSC_BuildTwoSidedF,0,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}