/* src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.c */
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (PetscMPIInt i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
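
/* Illustration of the lazily built graph above (hypothetical numbers, not from this file):
   with size = 2 and a layout owning 2 and 3 roots, range = {0, 2, 5} and iremote becomes
     leaf j : 0 1 2 3 4
     rank   : 0 0 1 1 1
     index  : 0 1 0 1 2
   i.e., every rank's contiguous leaves [0, nleaves) cover the concatenation of all ranks' roots. */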

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;
  MPI_Comm            comm;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscBool isallgatherv = PETSC_FALSE;

    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }

    /* Check if we actually have a one-to-all pattern */
    PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFALLGATHERV, &isallgatherv));
    if (isallgatherv) {
      PetscMPIInt rank, nRanksWithZeroRoots;

      nRanksWithZeroRoots = (sf->nroots == 0) ? 1 : 0; /* I have no roots */
      PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, comm));
      if (nRanksWithZeroRoots == size - 1) { /* Only one rank has roots, which indicates a bcast pattern */
        dat->bcast_pattern = PETSC_TRUE;
        PetscCallMPI(MPI_Comm_rank(comm, &rank));
        dat->bcast_root = sf->nroots > 0 ? rank : -1;
        PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &dat->bcast_root, 1, MPI_INT, MPI_MAX, comm));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
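
/* A standalone sketch (plain C, hypothetical numbers) of the recvcounts/displs setup above:
   these are exactly the arrays later handed to MPIU_Iallgatherv, so rank i's roots land at
   leaf offset displs[i] on every rank.

     const int range[] = {0, 2, 5, 9}; // layout ranges of a 3-rank layout, size + 1 entries
     int       recvcounts[3], displs[3];
     for (int i = 0; i < 3; i++) {
       displs[i]     = range[i];                // leaf offset of rank i's roots
       recvcounts[i] = range[i + 1] - range[i]; // number of roots owned by rank i
     }
     // recvcounts = {2, 3, 4}, displs = {0, 2, 5}
*/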

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount, rank, nleaves;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req = NULL;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));

  if (dat->bcast_pattern && rank == dat->bcast_root) PetscCall((*link->Memcpy)(link, link->leafmtype_mpi, leafbuf, link->rootmtype_mpi, rootbuf, (size_t)sendcount * link->unitbytes));
  /* Ready the buffers for MPI */
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCall(PetscMPIIntCast(sf->nleaves, &nleaves));
  if (dat->bcast_pattern) PetscCallMPI(MPIU_Ibcast(leafbuf, nleaves, unit, dat->bcast_root, comm, req));
  else PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
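
/* Caller-side usage sketch (hypothetical buffers; sf of type PETSCSFALLGATHERV): each rank
   supplies its nroots entries in rootdata and receives the concatenation of all roots in
   leafdata; the MPIU_Ibcast/MPIU_Iallgatherv posted above stays in flight until BcastEnd.

     PetscScalar *rootdata, *leafdata; // sized sf->nroots and sf->nleaves respectively
     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     // ... independent computation can overlap the communication here ...
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
*/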

static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    if (dat->bcast_pattern) {
      PetscInt     nleaves = sf->nleaves;
      PetscInt     nreal;
      PetscMPIInt  nleavesi;
      MPI_Datatype baseunit = unit;

      PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nreal));
      if (nreal > 0) {
        baseunit = MPIU_REAL;
        nleaves *= nreal;
#if PetscDefined(HAVE_COMPLEX)
      } else {
        PetscInt ncomplex;

        PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &ncomplex));
        if (ncomplex > 0) {
          baseunit = MPIU_COMPLEX;
          nleaves *= ncomplex;
        }
#endif
      }
      PetscCall(PetscMPIIntCast(nleaves, &nleavesi));
#if defined(PETSC_HAVE_OPENMPI) /* Workaround: cuda-aware Open MPI 4.1.3 does not support MPI_Ireduce() with device buffers */
      *req = MPI_REQUEST_NULL;  /* Set NULL so that we can safely MPI_Wait(req) */
      PetscCallMPI(MPIU_Reduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm));
#else
      PetscCallMPI(MPIU_Ireduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm, req));
#endif
    } else { /* Reduce leafdata, then scatter to rootdata */
      PetscCallMPI(MPI_Comm_rank(comm, &rank));
      PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
      /* Allocate a separate leaf buffer on rank 0 */
      if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
      /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
      if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
      PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
      PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do reduce with MPI builtin datatype basicunit */
      PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
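
/* Caller-side sketch (hypothetical buffers): summing all ranks' leaf copies back into roots.
   With op = MPI_REPLACE the Begin above degenerates to a local copy of each rank's slice of
   leafdata into rootdata, since all ranks are assumed to hold identical leafdata.

     PetscCall(PetscSFReduceBegin(sf, MPIU_SCALAR, leafdata, rootdata, MPIU_SUM));
     PetscCall(PetscSFReduceEnd(sf, MPIU_SCALAR, leafdata, rootdata, MPIU_SUM));
*/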

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v),
       we did a device-to-device copy and in effect finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees a rootbuf and hence calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device memory copy on rootbuf, wrongly overwriting the results. So we do not defer to
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
     */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink         link;
  PetscMPIInt         rank;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;
  void               *rootbuf = NULL, *leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request        *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, 0 /* rank 0 */, comm, req));

  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This routine is very tricky (it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then
   updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end,
   leafupdate on ranks 0,1,2 is 1,3,6 respectively, and the root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}
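
/* Caller-side sketch matching the worked example above (hypothetical buffers, op = MPIU_SUM):
   with root {1} on rank 0 and leaves 2,3,4 on ranks 0,1,2, this implementation leaves
   leafupdate = 1,3,6 on ranks 0,1,2 and the root as 10.

     PetscCall(PetscSFFetchAndOpBegin(sf, MPIU_SCALAR, rootdata, leafdata, leafupdate, MPIU_SUM));
     PetscCall(PetscSFFetchAndOpEnd(sf, MPIU_SCALAR, rootdata, leafdata, leafupdate, MPIU_SUM));
*/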

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscMPIInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (PetscMPIInt i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (PetscInt i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (PetscMPIInt i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscMPIInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self as the first and
     sort the other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (PetscMPIInt i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNew */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (PetscMPIInt i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (PetscMPIInt i = 0; i < size; i++) {
        for (PetscInt j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
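
/* Illustration of the iranks ordering above (hypothetical numbers): with size = 4, rank 2
   reports iranks = {2, 0, 1, 3}: self first, then the remaining ranks in increasing order. */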

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  dat->bcast_root = -1;
  sf->data        = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
459