#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

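/* SFALLGATHERV: every rank's roots become leaves on all ranks, laid out by the PetscLayout sf->map,
   so root-to-leaf Bcast maps onto MPI_Allgatherv and leaf-to-root Reduce onto its transpose.

   A minimal usage sketch (assuming the usual public PetscSF API; 'map' is a layout describing how
   the roots are partitioned across the communicator):

     PetscSF sf;
     PetscCall(PetscSFCreate(comm, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFDestroy(&sf));
*/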
/* PetscSFGetGraph() is non-collective; an implementation should not make collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The "&& sf->nleaves" lets SFGATHERV inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (PetscMPIInt i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

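/* SetUp precomputes the recvcounts[]/displs[] arrays that MPI_Allgatherv needs from the root
   layout, and detects the degenerate one-to-all case in which exactly one rank owns all roots,
   so Bcast/Reduce can be lowered to cheaper MPI_Ibcast/MPI_(I)reduce calls. For example, with
   root counts {3, 0, 0} on three ranks, recvcounts = {3, 0, 0}, displs = {0, 3, 3}, and
   bcast_root becomes 0. */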
PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;
  MPI_Comm            comm;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (sf->nleaves) { /* This test lets SFGATHERV inherit this routine */
    PetscBool isallgatherv = PETSC_FALSE;

    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }

    /* Check whether we actually have a one-to-all pattern */
    PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFALLGATHERV, &isallgatherv));
    if (isallgatherv) {
      PetscMPIInt rank, nRanksWithZeroRoots;

      nRanksWithZeroRoots = (sf->nroots == 0) ? 1 : 0; /* I have no roots */
      PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, comm));
      if (nRanksWithZeroRoots == size - 1) { /* Exactly one rank has roots, i.e., a bcast pattern */
        dat->bcast_pattern = PETSC_TRUE;
        PetscCallMPI(MPI_Comm_rank(comm, &rank));
        dat->bcast_root = sf->nroots > 0 ? rank : -1;
        PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &dat->bcast_root, 1, MPI_INT, MPI_MAX, comm));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

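/* Reset frees the lazily built rank arrays and the allgatherv metadata, and destroys all cached
   communication links; Destroy below additionally frees the type-specific data itself. */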
PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

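/* Bcast roots to leaves: pack the local roots into rootbuf, then either (a) bcast_pattern: the
   bcast root seeds its own leaf buffer with a local copy and all ranks join one MPI_Ibcast on
   the leaf buffers, or (b) the general case: one MPI_Iallgatherv using the recvcounts/displs
   computed in SetUp. The matching wait/unpack is done by PetscSFBcastEnd_Basic. */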
static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount, rank, nleaves;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req = NULL;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));

  if (dat->bcast_pattern && rank == dat->bcast_root) PetscCall((*link->Memcpy)(link, link->leafmtype_mpi, leafbuf, link->rootmtype_mpi, rootbuf, (size_t)sendcount * link->unitbytes));
  /* Ready the buffers for MPI */
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCall(PetscMPIIntCast(sf->nleaves, &nleaves));
  if (dat->bcast_pattern) PetscCallMPI(MPIU_Ibcast(leafbuf, nleaves, unit, dat->bcast_root, comm, req));
  else PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}

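/* Reduce leaves to roots, in one of three ways:
   - op == MPI_REPLACE: all ranks hold identical leafdata, so each rank just copies its owned
     slice [rstart, rstart + nroots) of the local leafdata into rootdata; no messages are needed.
   - bcast_pattern: reduce all leaf buffers directly onto the single root rank with MPI_(I)reduce,
     splitting the unit into a known builtin type (MPIU_REAL/MPIU_COMPLEX) when possible.
   - general case: MPI_Reduce everything onto rank 0, then MPI_Iscatterv the result back out
     according to the root layout. */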
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    if (dat->bcast_pattern) {
      PetscInt     nleaves = sf->nleaves;
      PetscInt     nreal;
      PetscMPIInt  nleavesi;
      MPI_Datatype baseunit = unit;

      PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nreal));
      if (nreal > 0) {
        baseunit = MPIU_REAL;
        nleaves *= nreal;
#if PetscDefined(HAVE_COMPLEX)
      } else {
        PetscInt ncomplex;

        PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &ncomplex));
        if (ncomplex > 0) {
          baseunit = MPIU_COMPLEX;
          nleaves *= ncomplex;
        }
#endif
      }
      PetscCall(PetscMPIIntCast(nleaves, &nleavesi));
#if defined(PETSC_HAVE_OPENMPI) /* Workaround: CUDA-aware Open MPI 4.1.3 does not support MPI_Ireduce() with device buffers */
      *req = MPI_REQUEST_NULL;  /* Set to MPI_REQUEST_NULL so a subsequent MPI_Wait(req) is safe */
      PetscCallMPI(MPIU_Reduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm));
#else
      PetscCallMPI(MPIU_Ireduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm, req));
#endif
    } else { /* Reduce leafdata, then scatter to rootdata */
      PetscCallMPI(MPI_Comm_rank(comm, &rank));
      PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
      /* Allocate a separate leaf buffer on rank 0 */
      if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
      /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
      if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
      PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
      PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must reduce with the MPI builtin datatype basicunit */
      PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case arises when op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy, which in effect finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees that a rootbuf exists and therefore calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf, wrongly overwriting the results. So we do not defer to
       PetscSFReduceEnd_Basic() in this case and just reclaim the link.
     */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

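/* Gather all roots onto rank 0's leaf buffer with MPI_Igatherv; rank 0's leafdata receives the
   concatenation of every rank's roots in layout order. Used by FetchAndOp below. */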
static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink         link;
  PetscMPIInt         rank;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;
  void               *rootbuf = NULL, *leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request        *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, 0 /* rank 0 */, comm, req));

  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1, 2 each have a leaf with value 2, 3, 4 respectively. The leaves are connected
   to the root on rank 0. Suppose op = MPI_SUM and ranks 0, 1, 2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then
   updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end, leafupdate
   on ranks 0, 1, 2 is 1, 3, 6 respectively, and the root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate, then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;

    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this call lets Allgather SFs inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

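/* Example for PetscSFGetRootRanks_Allgatherv: with root layout {2, 1, 1} on three ranks,
   ranks = {0, 1, 2}, roffset = {0, 2, 3, 4}, rmine = {0, 1, 2, 3}, and rremote = {0, 1, 0, 0};
   that is, leaf j is rooted at local index rremote[j] on the rank owning that layout slot. */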
/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscMPIInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0; the sf->nranks test makes this routine also work for SFGATHERV */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (PetscMPIInt i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (PetscInt i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL, even for contiguous leaves */
    for (PetscMPIInt i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(PETSC_SUCCESS);
}

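/* Example for PetscSFGetLeafRanks_Allgatherv: with nroots = 2 per rank on three ranks, each rank
   reports niranks = 3, ioffset = {0, 2, 4, 6}, and irootloc = {0, 1, 0, 1, 0, 1}: every leaf rank
   accesses the same local roots 0..nroots-1. iranks lists self first, then the rest in increasing order. */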
/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscMPIInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (PetscMPIInt i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNew() */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (PetscMPIInt i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (PetscMPIInt i = 0; i < size; i++) {
        for (PetscInt j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

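/* The local (on-process) part of an allgatherv SF is an identity map: the nroots owned roots
   connect to the contiguous leaf slots [rstart, rstart + nroots) that mirror them. */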
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(PETSC_SUCCESS);
}

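/* Constructor: fill the ops table. BcastEnd is inherited from SFBASIC since BcastBegin posted a
   standard nonblocking collective; ReduceEnd needs the Allgatherv-specific variant above. */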
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  dat->bcast_root = -1;
  sf->data        = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}