/* /petsc/src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.c (revision efa12513287cff49a2b9648ae83199dcbfaad71a) */
#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastAndOpBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscErrorCode ierr;
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);CHKERRMPI(ierr);
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);
      ierr = PetscMalloc1(sf->nleaves,&sf->remote);CHKERRQ(ierr);
      sf->remote_alloc = sf->remote;
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}
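
/* For illustration only (hypothetical sizes, not taken from this file): with three ranks owning
   nroots = 2, 1, 3 roots respectively, the layout ranges are {0,2,3,6} and every rank builds the
   same contiguous remote array of sf->nleaves = 6 entries,

     leaf j           0  1  2  3  4  5
     remote[j].rank   0  0  1  2  2  2
     remote[j].index  0  1  0  0  1  2

   i.e., leaf j is connected to root k on the rank whose ownership range contains j. */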

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscMPIInt        size;
  PetscInt           i;
  const PetscInt     *range;

  PetscFunctionBegin;
  ierr = PetscSFSetUp_Allgather(sf);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size);CHKERRMPI(ierr);
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    ierr = PetscMalloc1(size,&dat->recvcounts);CHKERRQ(ierr);
    ierr = PetscMalloc1(size,&dat->displs);CHKERRQ(ierr);
    ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);

    for (i=0; i<size; i++) {
      ierr = PetscMPIIntCast(range[i],&dat->displs[i]);CHKERRQ(ierr);
      ierr = PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
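
/* A small worked example (hypothetical layout, for illustration only): with the ranges {0,2,3,6}
   of a three-rank layout owning 2, 1 and 3 roots, the MPI_Allgatherv parameters become

     displs     = {0, 2, 3}
     recvcounts = {2, 1, 3}

   i.e., rank i contributes recvcounts[i] = range[i+1]-range[i] roots, which land at offset
   displs[i] = range[i] in every rank's leaf buffer. */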

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscErrorCode         ierr;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  ierr = PetscFree(dat->iranks);CHKERRQ(ierr);
  ierr = PetscFree(dat->ioffset);CHKERRQ(ierr);
  ierr = PetscFree(dat->irootloc);CHKERRQ(ierr);
  ierr = PetscFree(dat->recvcounts);CHKERRQ(ierr);
  ierr = PetscFree(dat->displs);CHKERRQ(ierr);
  if (dat->inuse) SETERRQ(PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
  ierr = PetscSFLinkDestroy(sf,&dat->avail);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscSFReset_Allgatherv(sf);CHKERRQ(ierr);
  ierr = PetscFree(sf->data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFLink            link;
  PetscMPIInt            sendcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(sf->nroots,&sendcount);CHKERRQ(ierr);
  ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
  ierr = MPIU_Iallgatherv(rootbuf,sendcount,unit,leafbuf,dat->recvcounts,dat->displs,unit,comm,req);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFLink            link;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt               rstart;
  PetscMPIInt            rank,count,recvcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);CHKERRQ(ierr);
  if (op == MPIU_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    ierr = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);
    ierr = (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);CHKERRQ(ierr);
#if defined(PETSC_HAVE_DEVICE)
    if (PetscMemTypeHost(rootmtype) && PetscMemTypeDevice(leafmtype)) {ierr = (*link->d_SyncStream)(link);CHKERRQ(ierr);}
#endif
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr);
    ierr = PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);CHKERRQ(ierr);
    ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
    ierr = PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);CHKERRQ(ierr);
    /* Allocate a separate leaf buffer on rank 0 */
    if (!rank && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      ierr = PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);CHKERRQ(ierr);
    }
    /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (!rank && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
    ierr = MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm);CHKERRMPI(ierr);
    ierr = MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],dat->recvcounts,dat->displs,unit,rootbuf,recvcount,unit,0,comm,req);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode         ierr;
  PetscSFLink            link;
  PetscMPIInt            rank;

  PetscFunctionBegin;
  ierr = PetscSFBcastAndOpBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkMPIWaitall(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRMPI(ierr);
  if (!rank && leafmtype == PETSC_MEMTYPE_DEVICE && !sf->use_gpu_aware_mpi) {
    ierr = (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFLinkReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1, 2 each have a leaf with value 2, 3, 4 respectively. The leaves are
   connected to the root on rank 0. Suppose op=MPI_SUM and ranks 0, 1, 2 get the root state in their rank order. By definition of this routine,
   rank 0 sees 1 in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate,
   then updates root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10. At the end, leafupdate
   on ranks 0, 1, 2 is 1, 3, 6 respectively, and root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode         ierr;
  PetscSFLink            link;
  MPI_Comm               comm;
  PetscMPIInt            count;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  if (!sf->use_gpu_aware_mpi && (rootmtype == PETSC_MEMTYPE_DEVICE || leafmtype == PETSC_MEMTYPE_DEVICE)) SETERRQ(comm,PETSC_ERR_SUP,"Cannot do FetchAndOp on device data without GPU-aware MPI");
  /* Copy leafdata to leafupdate */
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_FETCH,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);CHKERRQ(ierr); /* Sync the device */
  ierr = (*link->Memcpy)(link,leafmtype,leafupdate,leafmtype,leafdata,sf->nleaves*link->unitbytes);CHKERRQ(ierr);
  ierr = PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPIU_REPLACE) {
    PetscMPIInt size,rank,prev,next;
    ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr);
    ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
    prev = rank ?            rank-1 : MPI_PROC_NULL;
    next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
    ierr = PetscMPIIntCast(sf->nleaves,&count);CHKERRQ(ierr);
    ierr = MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE);CHKERRMPI(ierr);
  } else {
    ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
    ierr = MPI_Exscan(MPI_IN_PLACE,leafupdate,count,link->basicunit,op,comm);CHKERRMPI(ierr);
  }
  ierr = PetscSFLinkReclaim(sf,&link);CHKERRQ(ierr);
  ierr = PetscSFBcastAndOpBegin(sf,unit,rootdata,leafupdate,op);CHKERRQ(ierr);
  ierr = PetscSFBcastAndOpEnd(sf,unit,rootdata,leafupdate,op);CHKERRQ(ierr);

  /* Bcast roots to rank 0's leafupdate */
  ierr = PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate);CHKERRQ(ierr); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  ierr = PetscSFReduceBegin(sf,unit,leafdata,rootdata,op);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscErrorCode         ierr;

  PetscFunctionBegin;
  ierr = PetscSFReduceEnd(sf,unit,leafdata,rootdata,op);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
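
/* A minimal usage sketch (not part of this library source; the communicator, array sizes and the
   MPIU_INT datatype are illustrative assumptions, and error checking with CHKERRQ is omitted).
   It shows how the FetchAndOp pair implemented above is normally driven through the generic
   PetscSF interface:

     PetscSF  sf;
     PetscInt rootdata[2],leafdata[6],leafupdate[6];

     PetscSFCreate(PETSC_COMM_WORLD,&sf);
     PetscSFSetType(sf,PETSCSFALLGATHERV);
     ... set the graph (e.g., with PetscSFSetGraphWithPattern) and fill rootdata/leafdata ...
     PetscSFFetchAndOpBegin(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
     PetscSFFetchAndOpEnd(sf,MPIU_INT,rootdata,leafdata,leafupdate,MPI_SUM);
     ... leafupdate now holds the root values each leaf saw before its own contribution was
         added, and rootdata holds the fully reduced result ...
     PetscSFDestroy(&sf);
*/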

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
{
  PetscErrorCode ierr;
  PetscInt       i,j,k,size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    ierr = PetscLayoutGetRanges(sf->map,&range);CHKERRQ(ierr);
    ierr = PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote);CHKERRQ(ierr);
    for (i=0; i<size; i++) sf->ranks[i] = i;
    ierr = PetscArraycpy(sf->roffset,range,size+1);CHKERRQ(ierr);
    for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (i=0; i<size; i++) {
      for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
    }
  }

  if (nranks)  *nranks  = sf->nranks;
  if (ranks)   *ranks   = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine)   *rmine   = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr);
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self first and
     sort the other ranks. See the comments about MatGetBrowsOfAoCols_MPIAIJ in PetscSFSetUp_Basic for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      ierr = PetscMalloc1(size,&dat->iranks);CHKERRQ(ierr);
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      ierr = PetscMalloc1(size+1,&dat->ioffset);CHKERRQ(ierr);
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      ierr = PetscMalloc1(sf->nleaves,&dat->irootloc);CHKERRQ(ierr);
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}
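
/* For illustration only (hypothetical 3-rank layout, not taken from this file): on a rank owning
   sf->nroots = 2 of the 6 global roots, the lazily built arrays would be

     iranks   = {rank, remaining ranks in increasing order}   (self is always put first)
     ioffset  = {0, 2, 4, 6}                                  (ioffset[i] = i*sf->nroots)
     irootloc = {0, 1, 0, 1, 0, 1}                            (each leaf rank accesses every local root)
 */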

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
{
  PetscInt       i,nroots,nleaves,rstart,*ilocal;
  PetscSFNode    *iremote;
  PetscSF        lsf;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  ierr    = PetscMalloc1(nleaves,&ilocal);CHKERRQ(ierr);
  ierr    = PetscMalloc1(nleaves,&iremote);CHKERRQ(ierr);
  ierr    = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);

  for (i=0; i<nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  ierr = PetscSFCreate(PETSC_COMM_SELF,&lsf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetUp(lsf);CHKERRQ(ierr);
  *out = lsf;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscErrorCode     ierr;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastAndOpEnd   = PetscSFBcastAndOpEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Basic;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastAndOpBegin = PetscSFBcastAndOpBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  ierr = PetscNewLog(sf,&dat);CHKERRQ(ierr);
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}