#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastAndOpBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

/*===================================================================================*/
/*              Internal routines for PetscSFPack                                    */
/*===================================================================================*/
PETSC_INTERN PetscErrorCode PetscSFPackGet_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscSFPack *mylink)
{
  PetscErrorCode         ierr;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscSFPack            link,*p;
  PetscBool              match;
  PetscInt               i,j;

  PetscFunctionBegin;
  ierr = PetscSFPackSetErrorOnUnsupportedOverlap(sf,unit,rootdata,leafdata);CHKERRQ(ierr);
  /* Look for types in cache */
  for (p=&dat->avail; (link=*p); p=&link->next) {
    ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr);
    if (match) {
      *p = link->next; /* Remove from available list */
      goto found;
    }
  }

  ierr = PetscNew(&link);CHKERRQ(ierr);
  ierr = PetscSFPackSetUp_Host(sf,link,unit);CHKERRQ(ierr);

  link->rootbuflen = sf->nroots;
  link->leafbuflen = sf->nleaves;
  link->nrootreqs  = 1;
  link->nleafreqs  = 0;
  ierr = PetscMalloc1(4,&link->reqs);CHKERRQ(ierr); /* 4 = (nrootreqs+nleafreqs)*4 */
  for (i=0; i<4; i++) link->reqs[i] = MPI_REQUEST_NULL; /* Initialized to MPI_REQUEST_NULL so that we know which requests need to be freed in Destroy */

  for (i=0; i<2; i++) {
    for (j=0; j<2; j++) {
      link->rootreqs[i][j] = link->reqs + (2*i+j);
      link->leafreqs[i][j] = NULL; /* Leaf requests are not needed. Set them to NULL so that accidental use segfaults */
    }
  }

  /* DO NOT allocate link->rootbuf[]/leafbuf[]. We use lazy allocation since these buffers are likely not needed */
found:
  link->rootmtype = rootmtype;
  link->leafmtype = leafmtype;
#if defined(PETSC_HAVE_CUDA)
  ierr = PetscSFPackSetUp_Device(sf,link,unit);CHKERRQ(ierr);
#endif
  link->rootdata  = rootdata;
  link->leafdata  = leafdata;
  link->next      = dat->inuse;
  dat->inuse      = link;

  *mylink         = link;
  PetscFunctionReturn(0);
}
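
/* Illustration only (not called anywhere): the lifecycle of a link obtained from the
   routine above, as realized by the Begin/End pairs later in this file:

     PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link); // reuse a cached link or create one
     MPIU_Iallgatherv(...,link->rootreqs[PETSCSF_ROOT2LEAF_BCAST][rootmtype_mpi]);   // post the nonblocking collective
     ...
     PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);         // look the link up again in End
     PetscSFPackWaitall(link,PETSCSF_ROOT2LEAF_BCAST);                               // wait on the requests stored in it
     PetscSFPackReclaim(sf,&link);                                                   // move the link from dat->inuse back to dat->avail
*/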

/*===================================================================================*/
/*              Implementations of SF public APIs                                    */
/*===================================================================================*/

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscErrorCode ierr;
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);CHKERRQ(ierr);
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);
      ierr = PetscMalloc1(sf->nleaves,&sf->remote);CHKERRQ(ierr);
      sf->remote_alloc = sf->remote;
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscMPIInt        size;
  PetscInt           i;
  const PetscInt     *range;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);CHKERRQ(ierr);
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    ierr = PetscMalloc1(size,&dat->recvcounts);CHKERRQ(ierr);
    ierr = PetscMalloc1(size,&dat->displs);CHKERRQ(ierr);
    ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);

    for (i=0; i<size; i++) {
      ierr = PetscMPIIntCast(range[i],&dat->displs[i]);CHKERRQ(ierr);
      ierr = PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
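
/* Worked example for the setup above (hypothetical layout): with 3 ranks owning 2, 3 and 1
   roots respectively, PetscLayoutGetRanges() yields range = {0,2,5,6}, so every rank computes
     recvcounts = {2,3,1} and displs = {0,2,5},
   exactly the per-rank counts and offsets that MPIU_Iallgatherv/MPIU_Iscatterv expect below. */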

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscErrorCode         ierr;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  ierr = PetscFree(dat->iranks);CHKERRQ(ierr);
  ierr = PetscFree(dat->ioffset);CHKERRQ(ierr);
  ierr = PetscFree(dat->irootloc);CHKERRQ(ierr);
  ierr = PetscFree(dat->recvcounts);CHKERRQ(ierr);
  ierr = PetscFree(dat->displs);CHKERRQ(ierr);
  if (dat->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
  ierr = PetscSFPackDestroyAvailable(sf,&dat->avail);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSFReset_Allgatherv(sf);CHKERRQ(ierr);
  ierr = PetscFree(sf->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Prepare the rootbuf, leafbuf etc. used by MPI in PetscSFBcastAndOpBegin.

Input Arguments:
+ sf    - the star forest
. link  - the link PetscSFBcastAndOp is currently using
- op    - the reduction op

Output Arguments:
+ rootmtype_mpi  - memtype of rootbuf_mpi
. rootbuf_mpi    - root buffer used by MPI in the following MPI call
. leafmtype_mpi  - memtype of leafbuf_mpi
- leafbuf_mpi    - leaf buffer used by MPI in the following MPI call

Notes:
  This function was created because things become complex when rootdata or leafdata is on device but the user does not want to use GPU-aware MPI.
  We have to copy data from device to host before doing MPI. This function encapsulates all varieties and is reused by Allgatherv & Allgather.
*/
PETSC_INTERN PetscErrorCode PetscSFBcastPrepareMPIBuffers_Allgatherv(PetscSF sf,PetscSFPack link,MPI_Op op,PetscMemType *rootmtype_mpi,const void **rootbuf_mpi,PetscMemType *leafmtype_mpi, void **leafbuf_mpi)
{
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  /* If rootdata is on device but there is no GPU-aware MPI, we need to copy rootdata to rootbuf on host before the bcast; otherwise we directly bcast from rootdata */
  if (link->rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
    if (!link->rootbuf[PETSC_MEMTYPE_HOST]) {ierr = PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->rootbuflen*link->unitbytes,(void**)&link->rootbuf[PETSC_MEMTYPE_HOST]);CHKERRQ(ierr);}
    ierr           = PetscMemcpyWithMemType(PETSC_MEMTYPE_HOST,PETSC_MEMTYPE_DEVICE,link->rootbuf[PETSC_MEMTYPE_HOST],link->rootdata,link->rootbuflen*link->unitbytes);CHKERRQ(ierr);
    *rootbuf_mpi   = link->rootbuf[PETSC_MEMTYPE_HOST];
    *rootmtype_mpi = PETSC_MEMTYPE_HOST;
  } else {
    *rootbuf_mpi   = link->rootdata;
    *rootmtype_mpi = link->rootmtype;
  }

  if (link->leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) { /* If leafdata is on device but there is no GPU-aware MPI, we need a leafbuf on host to receive the bcast'ed data */
    if (!link->leafbuf[PETSC_MEMTYPE_HOST]) {ierr = PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[PETSC_MEMTYPE_HOST]);CHKERRQ(ierr);}
    *leafbuf_mpi   = link->leafbuf[PETSC_MEMTYPE_HOST];
    *leafmtype_mpi = PETSC_MEMTYPE_HOST;
  } else if (op == MPIU_REPLACE) { /* If op is MPIU_REPLACE, we can directly bcast to leafdata. No intermediate buffer is needed. */
    *leafbuf_mpi   = (char *)link->leafdata;
    *leafmtype_mpi = link->leafmtype;
  } else { /* Otherwise, op is a reduction. We have to allocate a buffer alongside leafdata to apply the op. The buffer is either on host or device, depending on where leafdata is. */
    if (!link->leafbuf[link->leafmtype]) {ierr = PetscMallocWithMemType(link->leafmtype,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[link->leafmtype]);CHKERRQ(ierr);}
    *leafbuf_mpi   = link->leafbuf[link->leafmtype];
    *leafmtype_mpi = link->leafmtype;
  }
  PetscFunctionReturn(0);
}
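
/* Reading aid for the routine above (no new logic): the buffers handed to MPI are

     rootdata memtype   GPU-aware MPI   rootbuf_mpi
     host               either          rootdata itself
     device             yes             rootdata itself
     device             no              host rootbuf, rootdata copied into it

     leafdata memtype   GPU-aware MPI   op              leafbuf_mpi
     device             no              any             host leafbuf
     otherwise          -               MPIU_REPLACE    leafdata itself
     otherwise          -               reduction       leafbuf in leafdata's memtype */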

static PetscErrorCode PetscSFBcastAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;
  PetscMPIInt            sendcount;
  MPI_Comm               comm;
  const void             *rootbuf_mpi = NULL; /* buffer used by MPI */
  void                   *leafbuf_mpi = NULL;
  PetscMemType           rootmtype_mpi = PETSC_MEMTYPE_HOST,leafmtype_mpi = PETSC_MEMTYPE_HOST; /* Seen by MPI */
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  ierr = PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(sf->nroots,&sendcount);CHKERRQ(ierr);
  ierr = PetscSFBcastPrepareMPIBuffers_Allgatherv(sf,link,op,&rootmtype_mpi,&rootbuf_mpi,&leafmtype_mpi,&leafbuf_mpi);CHKERRQ(ierr);
  ierr = MPIU_Iallgatherv(rootbuf_mpi,sendcount,unit,leafbuf_mpi,dat->recvcounts,dat->displs,unit,comm,link->rootreqs[PETSCSF_ROOT2LEAF_BCAST][rootmtype_mpi]);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFBcastAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;

  PetscFunctionBegin;
  ierr = PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFPackWaitall(link,PETSCSF_ROOT2LEAF_BCAST);CHKERRQ(ierr);
  if (op != MPIU_REPLACE) {
    /* A separate leaf buffer alongside leafdata holds the bcast'ed data; unpack it into leafdata with op */
    ierr = PetscSFUnpackAndOpLeafData(sf,link,NULL,leafdata,op,PETSC_FALSE);CHKERRQ(ierr);
  } else if (leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
    /* Just need to copy the data in leafbuf on host to leafdata on device */
    ierr = PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,leafdata,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafbuflen*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFPackReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
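
/* A user-level sketch of the path implemented by the two routines above (illustration only;
   shown for PetscScalar data, i.e. the MPIU_SCALAR datatype):

     PetscSFBcastAndOpBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPIU_SUM); // posts MPI_Iallgatherv
     // ... unrelated computation can overlap with communication here ...
     PetscSFBcastAndOpEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPIU_SUM);   // waits, then leafdata += bcast'ed roots

   With MPIU_REPLACE the collective targets leafdata directly and End only performs the
   host-to-device copy, if one is needed. */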

/*
  Prepare the rootbuf, leafbuf etc. used by MPI in PetscSFReduceBegin.

Input Arguments:
+ sf    - the star forest
. link  - the link PetscSFReduceBegin is currently using
- op    - the reduction op

Output Arguments:
+ rootmtype_mpi  - memtype of rootbuf_mpi
. rootbuf_mpi    - root buffer used by MPI in the following MPI call
. leafmtype_mpi  - memtype of leafbuf_mpi
- leafbuf_mpi    - leaf buffer used by MPI in the following MPI call

Notes: This function is called assuming op != MPIU_REPLACE.
*/
PETSC_INTERN PetscErrorCode PetscSFReducePrepareMPIBuffers_Allgatherv(PetscSF sf,PetscSFPack link,MPI_Op op,PetscMemType *rootmtype_mpi,void **rootbuf_mpi,PetscMemType *leafmtype_mpi,const void **leafbuf_mpi)
{
  PetscErrorCode         ierr;
  PetscMPIInt            rank,count;
  MPI_Comm               comm;
  const void             *leafdata_mpi;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* Step 1: Reduce leafdata on all ranks to leafbuf on rank 0 */
  if (link->leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) { /* Need to copy leafdata to leafbuf on every rank */
    if (!link->leafbuf[PETSC_MEMTYPE_HOST]) {ierr = PetscMallocWithMemType(PETSC_MEMTYPE_HOST,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[PETSC_MEMTYPE_HOST]);CHKERRQ(ierr);}
    ierr = PetscMemcpyWithMemType(PETSC_MEMTYPE_HOST,PETSC_MEMTYPE_DEVICE,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafdata,link->leafbuflen*link->unitbytes);CHKERRQ(ierr);
    leafdata_mpi   = !rank ? MPI_IN_PLACE : link->leafbuf[PETSC_MEMTYPE_HOST];
    *leafmtype_mpi = PETSC_MEMTYPE_HOST;
  } else { /* Only need to allocate a leafbuf on rank 0. Then directly reduce leafdata to that leafbuf */
    if (!rank && !link->leafbuf[link->leafmtype]) {ierr = PetscMallocWithMemType(link->leafmtype,link->leafbuflen*link->unitbytes,(void**)&link->leafbuf[link->leafmtype]);CHKERRQ(ierr);}
    leafdata_mpi   = link->leafdata;
    *leafmtype_mpi = link->leafmtype;
  }
  *leafbuf_mpi = (const char*)link->leafbuf[*leafmtype_mpi];
  ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
  ierr = MPI_Reduce(leafdata_mpi,(void*)(*leafbuf_mpi),count,link->basicunit,op,0,comm);CHKERRQ(ierr); /* Must do the reduction with the MPI built-in datatype basicunit */

  /* Step 2: Prepare the root buffer (we'll scatter the reduction result to it in a moment) */
  if (link->rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) *rootmtype_mpi = PETSC_MEMTYPE_HOST;
  else *rootmtype_mpi = link->rootmtype;

  if (!link->rootbuf[*rootmtype_mpi]) {ierr = PetscMallocWithMemType(*rootmtype_mpi,link->rootbuflen*link->unitbytes,(void**)&link->rootbuf[*rootmtype_mpi]);CHKERRQ(ierr);}
  *rootbuf_mpi = link->rootbuf[*rootmtype_mpi];
  PetscFunctionReturn(0);
}
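
/* Dataflow of the two steps above, illustrated with op = MPI_SUM on a hypothetical 3-rank
   layout owning 2, 3 and 1 roots (every rank holds all 6 leaves):

     leafdata on ranks 0..2  --MPI_Reduce(SUM,root=0)-->  leafbuf on rank 0 (elementwise sums)
     leafbuf on rank 0       --MPIU_Iscatterv--------->   rootbuf: 2 entries to rank 0, 3 to rank 1, 1 to rank 2

   The Iscatterv is posted by the caller (PetscSFReduceBegin_Allgatherv below); ReduceEnd then
   unpacks rootbuf into rootdata with op. */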

static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt               rstart;
  PetscMPIInt            rank,recvcount;
  MPI_Comm               comm;
  const void             *leafbuf_mpi;
  void                   *rootbuf_mpi;
  PetscMemType           leafmtype_mpi,rootmtype_mpi; /* Seen by MPI */

  PetscFunctionBegin;
  ierr = PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (op == MPIU_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    ierr = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);
    ierr = PetscMemcpyWithMemType(rootmtype,leafmtype,rootdata,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);CHKERRQ(ierr);
  } else {
    ierr = PetscMPIIntCast(sf->nroots,&recvcount);CHKERRQ(ierr);
    ierr = PetscSFReducePrepareMPIBuffers_Allgatherv(sf,link,op,&rootmtype_mpi,&rootbuf_mpi,&leafmtype_mpi,&leafbuf_mpi);CHKERRQ(ierr);
    ierr = MPIU_Iscatterv(leafbuf_mpi,dat->recvcounts,dat->displs,unit,rootbuf_mpi,recvcount,unit,0,comm,link->rootreqs[PETSCSF_LEAF2ROOT_REDUCE][rootmtype_mpi]);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;

  PetscFunctionBegin;
  ierr = PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFPackWaitall(link,PETSCSF_LEAF2ROOT_REDUCE);CHKERRQ(ierr);
  if (op != MPIU_REPLACE) {
    ierr = PetscSFUnpackAndOpRootData(sf,link,NULL,rootdata,op,PETSC_FALSE);CHKERRQ(ierr);
  } else if (rootmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
    ierr = PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,rootdata,link->rootbuf[PETSC_MEMTYPE_HOST],link->rootbuflen*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFPackReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;
  PetscMPIInt            rank;

  PetscFunctionBegin;
  ierr = PetscSFBcastAndOpBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFPackWaitall(link,PETSCSF_ROOT2LEAF_BCAST);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRQ(ierr);
  if (!rank && leafmtype == PETSC_MEMTYPE_DEVICE && !use_gpu_aware_mpi) {
    ierr = PetscMemcpyWithMemType(PETSC_MEMTYPE_DEVICE,PETSC_MEMTYPE_HOST,leafdata,link->leafbuf[PETSC_MEMTYPE_HOST],link->leafbuflen*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFPackReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* This routine is very tricky. We believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation.

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1 and 2 each have a leaf, with values 2, 3 and 4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0, 1 and 2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then updates
   the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end, leafupdate on ranks
   0, 1 and 2 is 1, 3 and 6 respectively, and the root is 10.

   One optimized implementation could be: starting from the initial state:
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4

   Shift leaves rightwards into leafupdate; rank 0 instead gathers the root value into its leafupdate. We have:
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       2         3

   Then, do MPI_Scan on leafupdate and get:
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Rank 2 sums its leaf and leafupdate, scatters the result to the root, and gets
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFPack            link;
  MPI_Comm               comm;
  PetscMPIInt            count;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  if (!use_gpu_aware_mpi && (rootmtype == PETSC_MEMTYPE_DEVICE || leafmtype == PETSC_MEMTYPE_DEVICE)) SETERRQ(comm,PETSC_ERR_SUP,"No support for FetchAndOp on device data without GPU-aware MPI"); /* No known uses */
  /* Copy leafdata to leafupdate */
  ierr = PetscSFPackGet_Allgatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,&link);CHKERRQ(ierr);
  ierr = PetscMemcpyWithMemType(leafmtype,leafmtype,leafupdate,leafdata,sf->nleaves*link->unitbytes);CHKERRQ(ierr);
  ierr = PetscSFPackGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  ierr = PetscMPIIntCast(sf->nleaves,&count);CHKERRQ(ierr);
  if (op == MPIU_REPLACE) {
    PetscMPIInt size,rank,prev,next;
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
    prev = rank ?            rank-1 : MPI_PROC_NULL;
    next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
    ierr = MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr); /* MPI_Sendrecv_replace takes a single status, hence MPI_STATUS_IGNORE */
  } else {ierr = MPI_Exscan(MPI_IN_PLACE,leafupdate,count,unit,op,comm);CHKERRQ(ierr);}
  ierr = PetscSFPackReclaim(sf,&link);CHKERRQ(ierr);
  ierr = PetscSFBcastAndOpBegin(sf,unit,rootdata,leafupdate,op);CHKERRQ(ierr);
  ierr = PetscSFBcastAndOpEnd(sf,unit,rootdata,leafupdate,op);CHKERRQ(ierr);

  /* Bcast roots to rank 0's leafupdate */
  ierr = PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate);CHKERRQ(ierr); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  ierr = PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
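
/* Sanity check of the Exscan path above against the 3-rank example in the long comment:
   MPI_Exscan with MPI_IN_PLACE and MPI_SUM leaves rank 0's buffer untouched and gives rank i
   the sum over ranks 0..i-1, so leafupdate {2,3,4} becomes {2,2,5}; BcastAndOp with the root
   value 1 gives {3,3,6}; BcastToZero then restores the original root on rank 0, producing the
   required fetch results {1,3,6}; finally the reduction updates the root to 1+2+3+4 = 10. */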

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  ierr = PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
{
  PetscErrorCode ierr;
  PetscInt       i,j,k,size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* For sfgatherv, sf->nranks = 0 on ranks != 0; the sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);
    ierr = PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);CHKERRQ(ierr);
    for (i=0; i<size; i++) sf->ranks[i] = i;
    ierr = PetscArraycpy(sf->roffset,range,size+1);CHKERRQ(ierr);
    for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (i=0; i<size; i++) {
      for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
    }
  }

  if (nranks)  *nranks  = sf->nranks;
  if (ranks)   *ranks   = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine)   *rmine   = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
   */
  if (iranks) {
    if (!dat->iranks) {
      ierr = PetscMalloc1(size,&dat->iranks);CHKERRQ(ierr);
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      ierr = PetscMalloc1(size+1,&dat->ioffset);CHKERRQ(ierr);
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      ierr = PetscMalloc1(sf->nleaves,&dat->irootloc);CHKERRQ(ierr);
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}
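
/* Example of the lazily built arrays above (hypothetical: size=3, rank=1, nroots=2, nleaves=6):
     iranks   = {1,0,2}        self first, the other ranks in increasing order
     ioffset  = {0,2,4,6}      every leaf rank accesses all nroots roots
     irootloc = {0,1,0,1,0,1}  root indices, repeated once per leaf rank */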

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
{
  PetscInt       i,nroots,nleaves,rstart,*ilocal;
  PetscSFNode    *iremote;
  PetscSF        lsf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  ierr    = PetscMalloc1(nleaves,&ilocal);CHKERRQ(ierr);
  ierr    = PetscMalloc1(nleaves,&iremote);CHKERRQ(ierr);
  ierr    = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);

  for (i=0; i<nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  ierr = PetscSFCreate(PETSC_COMM_SELF,&lsf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetUp(lsf);CHKERRQ(ierr);
  *out = lsf;
  PetscFunctionReturn(0);
}
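
/* Example for the routine above (hypothetical 3-rank layout owning 2, 3 and 1 roots): on
   rank 1, rstart=2 and nroots=nleaves=3, so the local SF on PETSC_COMM_SELF keeps the global
   leaf indices and maps them to local roots: ilocal={2,3,4}, iremote={(0,0),(0,1),(0,2)}. */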

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Allgatherv;
  sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  ierr = PetscNewLog(sf,&dat);CHKERRQ(ierr);
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}