/* xref: /petsc/src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.c (revision 7c441f3aff93c611491d4ea0564d57010b1fd4e9) */
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        i, j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
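      /* Leaves are contiguous: leaf j in [range[i], range[i+1]) points to root rank i at local root index j - range[i] */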
      for (i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

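    /* recvcounts[i] is rank i's root count and displs[i] its offset in the assembled leaf array, taken straight from the layout ranges */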
    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(0);
}

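/* Bcast roots to leaves: each rank contributes its sf->nroots local roots, and the Iallgatherv below assembles the
   full root array into every rank's (contiguous) leaf buffer using the recvcounts/displs computed in SetUp. */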
static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(0);
}

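/* Reduce leaves to roots. With MPI_REPLACE each rank simply copies its own slice of leafdata into rootdata; otherwise
   all leaf copies are MPI_Reduce'd to rank 0 and the reduced array is Iscatterv'ed back to the owning root ranks. */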
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do the reduction with the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v),
       we did a device-to-device copy and in effect finished the communication. But PetscSFLinkFinishCommunication() in
       PetscSFReduceEnd_Basic() would see that there is a rootbuf and call PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf and wrongly overwrites the results. So we do not call
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
     */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gatherv(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(0);
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so just provide a simple but not-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1, 2 each have a leaf with value 2, 3, 4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0, 1, 2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
   root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10.  At the end, leafupdate on ranks
   0, 1, 2 is 1, 3, 6 respectively, and root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
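/* A minimal usage sketch of the scenario above (illustrative only; `sf` is assumed to be the 3-rank allgatherv SF of
   the tables, with one root on rank 0 and one leaf per rank; array sizes are per-rank local sizes):

     PetscInt rootdata[1] = {1};           // only rank 0 owns a root; unused elsewhere
     PetscInt leafdata[1], leafupdate[1];  // one leaf per rank, holding 2, 3, 4
     PetscCall(PetscSFFetchAndOpBegin(sf, MPIU_INT, rootdata, leafdata, leafupdate, MPI_SUM));
     PetscCall(PetscSFFetchAndOpEnd(sf, MPIU_INT, rootdata, leafdata, leafupdate, MPI_SUM));

   Afterwards leafupdate holds the root value each rank fetched (1, 3, 6 on ranks 0, 1, 2 with this scheme) and
   rootdata on rank 0 holds the fully reduced value 10. */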
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        i, j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
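    /* With contiguous leaves, leaf j is stored locally at index j (rmine), and corresponds to local root index j - range[i] on owner rank i (rremote) */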
    for (i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine are never NULL even for contiguous leaves */
    for (i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;
  PetscInt            i, j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self as the first and
     sort other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNew() in PetscSFCreate_Allgatherv() */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

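  /* Every remote rank references all of my local roots in order, so irootloc consists of size copies of 0..sf->nroots-1 */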
  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (i = 0; i < size; i++) {
        for (j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}

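/* The local SF connects the on-process block of leaves (global indices rstart..rstart+nroots-1) back to the local roots 0..nroots-1 on PETSC_COMM_SELF */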
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(0);
}