xref: /petsc/src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.c (revision 28b400f66ebc7ae0049166a2294dfcd3df27e64b)
1 #include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>
2 
3 PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);
4 
5 /* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  PetscFunctionBegin;
  CHKERRMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size)); /* local query; does not violate the non-collective contract */
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      /* Lazily materialize the iremote array: leaf j is served by (rank i, local root index k)
         according to the root layout sf->map */
      CHKERRQ(PetscLayoutGetRanges(sf->map,&range));
      CHKERRQ(PetscMalloc1(sf->nleaves,&sf->remote));
      sf->remote_alloc = sf->remote; /* record ownership so the SF frees this array on destroy */
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}
33 
34 PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
35 {
36   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
37   PetscMPIInt        size;
38   PetscInt           i;
39   const PetscInt     *range;
40 
41   PetscFunctionBegin;
42   CHKERRQ(PetscSFSetUp_Allgather(sf));
43   CHKERRMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size));
44   if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
45     CHKERRQ(PetscMalloc1(size,&dat->recvcounts));
46     CHKERRQ(PetscMalloc1(size,&dat->displs));
47     CHKERRQ(PetscLayoutGetRanges(sf->map,&range));
48 
49     for (i=0; i<size; i++) {
50       CHKERRQ(PetscMPIIntCast(range[i],&dat->displs[i]));
51       CHKERRQ(PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]));
52     }
53   }
54   PetscFunctionReturn(0);
55 }
56 
57 PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
58 {
59   PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
60   PetscSFLink            link = dat->avail,next;
61 
62   PetscFunctionBegin;
63   CHKERRQ(PetscFree(dat->iranks));
64   CHKERRQ(PetscFree(dat->ioffset));
65   CHKERRQ(PetscFree(dat->irootloc));
66   CHKERRQ(PetscFree(dat->recvcounts));
67   CHKERRQ(PetscFree(dat->displs));
68   PetscCheck(!dat->inuse,PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
69   for (; link; link=next) {next = link->next; CHKERRQ(PetscSFLinkDestroy(sf,link));}
70   dat->avail = NULL;
71   PetscFunctionReturn(0);
72 }
73 
/* Destroy the SF: reset (frees lazily-built arrays and cached links) then free the type-specific data */
PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  CHKERRQ(PetscSFReset_Allgatherv(sf));
  CHKERRQ(PetscFree(sf->data));
  PetscFunctionReturn(0);
}
81 
/* Begin broadcasting rootdata to leafdata: every rank contributes its sf->nroots roots and
   receives the concatenation of all roots into its leaf buffer via a nonblocking allgatherv */
static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscSFLink            link;
  PetscMPIInt            sendcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link));
  CHKERRQ(PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata));
  CHKERRQ(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */));
  CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
  CHKERRQ(PetscMPIIntCast(sf->nroots,&sendcount));
  CHKERRQ(PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL));
  CHKERRQ(PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF));
  /* recvcounts/displs were precomputed from the root layout in PetscSFSetUp_Allgatherv */
  CHKERRMPI(MPIU_Iallgatherv(rootbuf,sendcount,unit,leafbuf,dat->recvcounts,dat->displs,unit,comm,req));
  PetscFunctionReturn(0);
}
102 
/* Begin reducing leafdata to rootdata. For MPI_REPLACE we can copy locally from the matching
   slice of leafdata; otherwise we reduce all leaves onto rank 0 and scatter the result back
   to the roots according to the root layout */
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscSFLink            link;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt               rstart;
  PetscMPIInt            rank,count,recvcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;

  PetscFunctionBegin;
  CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    CHKERRQ(PetscLayoutGetRange(sf->map,&rstart,NULL));
    CHKERRQ((*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes));
    /* A device-to-host copy may be asynchronous; sync before the caller can touch rootdata */
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) CHKERRQ((*link->SyncStream)(link));
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
    CHKERRMPI(MPI_Comm_rank(comm,&rank));
    CHKERRQ(PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata));
    CHKERRQ(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */));
    CHKERRQ(PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL));
    CHKERRQ(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount));
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      CHKERRQ(PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    CHKERRQ(PetscMPIIntCast(sf->nleaves*link->bs,&count));
    CHKERRQ(PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT));
    CHKERRMPI(MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm)); /* Must do reduce with MPI builtin datatype basicunit */
    CHKERRMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],dat->recvcounts,dat->displs,unit,rootbuf,recvcount,unit,0,comm,req));
  }
  PetscFunctionReturn(0);
}
141 
/* Finish a reduce started by PetscSFReduceBegin_Allgatherv. MPI_REPLACE finished the work in
   Begin (it was a plain local copy), so here we only reclaim the link in that case */
PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscSFLink           link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE, using GPUs but no GPU aware MPI. In PetscSFReduceBegin_Allgather(v),
      we did a device to device copy and in effect finished the communication. But in PetscSFLinkFinishCommunication()
      of PetscSFReduceEnd_Basic(), it thinks since there is rootbuf, it calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI().
      It does a host to device memory copy on rootbuf, wrongly overwriting the results. So we don't overload
      PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
     */
    CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));
    CHKERRQ(PetscSFLinkReclaim(sf,&link));
  } else {
    CHKERRQ(PetscSFReduceEnd_Basic(sf,unit,leafdata,rootdata,op));
  }
  PetscFunctionReturn(0);
}
161 
/* Broadcast rootdata, but only rank 0 needs the result in its leafdata (a gatherv in effect).
   This blocks until the communication completes. */
static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscSFLink            link;
  PetscMPIInt            rank;

  PetscFunctionBegin;
  CHKERRQ(PetscSFBcastBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE));
  CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));
  CHKERRQ(PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF));
  CHKERRMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank));
  /* Without GPU-aware MPI the received data landed in the host leaf buffer; copy it to the
     device leafdata on rank 0 (the only rank that needs the result) */
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    CHKERRQ((*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes));
  }
  CHKERRQ(PetscSFLinkReclaim(sf,&link));
  PetscFunctionReturn(0);
}
178 
179 /* This routine is very tricky (I believe it is rarely used with this kind of graph so just provide a simple but not-optimal implementation).
180 
181    Suppose we have three ranks. Rank 0 has a root with value 1. Rank 0,1,2 has a leaf with value 2,3,4 respectively. The leaves are connected
182    to the root on rank 0. Suppose op=MPI_SUM and rank 0,1,2 gets root state in their rank order. By definition of this routine, rank 0 sees 1
   in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
   root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10.  At the end, leafupdate on rank
185    0,1,2 is 1,3,6 respectively. root is 10.
186 
187    We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
188              rank-0   rank-1    rank-2
189         Root     1
190         Leaf     2       3         4
191      Leafupdate  2       3         4
192 
193    Do MPI_Exscan on leafupdate,
194              rank-0   rank-1    rank-2
195         Root     1
196         Leaf     2       3         4
197      Leafupdate  2       2         5
198 
199    BcastAndOp from root to leafupdate,
200              rank-0   rank-1    rank-2
201         Root     1
202         Leaf     2       3         4
203      Leafupdate  3       3         6
204 
205    Copy root to leafupdate on rank-0
206              rank-0   rank-1    rank-2
207         Root     1
208         Leaf     2       3         4
209      Leafupdate  1       3         6
210 
211    Reduce from leaf to root,
212              rank-0   rank-1    rank-2
213         Root     10
214         Leaf     2       3         4
215      Leafupdate  1       3         6
216 */
217 PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
218 {
219   PetscSFLink            link;
220   MPI_Comm               comm;
221   PetscMPIInt            count;
222 
223   PetscFunctionBegin;
224   CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
225   PetscCheckFalse(PetscMemTypeDevice(rootmtype) || PetscMemTypeDevice(leafmtype),comm,PETSC_ERR_SUP,"Do FetchAndOp on device");
226   /* Copy leafdata to leafupdate */
227   CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_FETCH,&link));
228   CHKERRQ(PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata)); /* Sync the device */
229   CHKERRQ((*link->Memcpy)(link,leafmtype,leafupdate,leafmtype,leafdata,sf->nleaves*link->unitbytes));
230   CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));
231 
232   /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
233   if (op == MPI_REPLACE) {
234     PetscMPIInt size,rank,prev,next;
235     CHKERRMPI(MPI_Comm_rank(comm,&rank));
236     CHKERRMPI(MPI_Comm_size(comm,&size));
237     prev = rank ?            rank-1 : MPI_PROC_NULL;
238     next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
239     CHKERRQ(PetscMPIIntCast(sf->nleaves,&count));
240     CHKERRMPI(MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUSES_IGNORE));
241   } else {
242     CHKERRQ(PetscMPIIntCast(sf->nleaves*link->bs,&count));
243     CHKERRMPI(MPI_Exscan(MPI_IN_PLACE,leafupdate,count,link->basicunit,op,comm));
244   }
245   CHKERRQ(PetscSFLinkReclaim(sf,&link));
246   CHKERRQ(PetscSFBcastBegin(sf,unit,rootdata,leafupdate,op));
247   CHKERRQ(PetscSFBcastEnd(sf,unit,rootdata,leafupdate,op));
248 
249   /* Bcast roots to rank 0's leafupdate */
250   CHKERRQ(PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */
251 
252   /* Reduce leafdata to rootdata */
253   CHKERRQ(PetscSFReduceBegin(sf,unit,leafdata,rootdata,op));
254   PetscFunctionReturn(0);
255 }
256 
/* Finish a fetch-and-op: only the trailing leaf-to-root reduce started in Begin remains pending */
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscFunctionBegin;
  CHKERRQ(PetscSFReduceEnd(sf,unit,leafdata,rootdata,op));
  PetscFunctionReturn(0);
}
263 
264 /* Get root ranks accessing my leaves */
265 PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
266 {
267   PetscInt       i,j,k,size;
268   const PetscInt *range;
269 
270   PetscFunctionBegin;
271   /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
272   if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also works for sfgatherv */
273     size = sf->nranks;
274     CHKERRQ(PetscLayoutGetRanges(sf->map,&range));
275     CHKERRQ(PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote));
276     for (i=0; i<size; i++) sf->ranks[i] = i;
277     CHKERRQ(PetscArraycpy(sf->roffset,range,size+1));
278     for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /*rmine are never NULL even for contiguous leaves */
279     for (i=0; i<size; i++) {
280       for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
281     }
282   }
283 
284   if (nranks)  *nranks  = sf->nranks;
285   if (ranks)   *ranks   = sf->ranks;
286   if (roffset) *roffset = sf->roffset;
287   if (rmine)   *rmine   = sf->rmine;
288   if (rremote) *rremote = sf->rremote;
289   PetscFunctionReturn(0);
290 }
291 
292 /* Get leaf ranks accessing my roots */
/* Get leaf ranks accessing my roots. In an allgatherv graph every rank accesses every root,
   so these arrays are fully dense and constructed analytically. */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
  CHKERRMPI(MPI_Comm_size(comm,&size));
  CHKERRMPI(MPI_Comm_rank(comm,&rank));
  if (niranks) *niranks = size; /* all ranks are leaf ranks */

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self as the first and
     sort other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
   */
  if (iranks) {
    if (!dat->iranks) {
      /* self first, then the remaining ranks in ascending order */
      CHKERRQ(PetscMalloc1(size,&dat->iranks));
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      /* each leaf rank accesses all sf->nroots of my roots, so offsets are uniform */
      CHKERRQ(PetscMalloc1(size+1,&dat->ioffset));
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      /* for every leaf rank, the accessed local root indices are simply 0..nroots-1 */
      CHKERRQ(PetscMalloc1(sf->nleaves,&dat->irootloc));
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}
338 
339 PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
340 {
341   PetscInt       i,nroots,nleaves,rstart,*ilocal;
342   PetscSFNode    *iremote;
343   PetscSF        lsf;
344 
345   PetscFunctionBegin;
346   nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
347   nroots  = nleaves;
348   CHKERRQ(PetscMalloc1(nleaves,&ilocal));
349   CHKERRQ(PetscMalloc1(nleaves,&iremote));
350   CHKERRQ(PetscLayoutGetRange(sf->map,&rstart,NULL));
351 
352   for (i=0; i<nleaves; i++) {
353     ilocal[i]        = rstart + i; /* lsf does not change leave indices */
354     iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
355     iremote[i].index = i;          /* root index */
356   }
357 
358   CHKERRQ(PetscSFCreate(PETSC_COMM_SELF,&lsf));
359   CHKERRQ(PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER));
360   CHKERRQ(PetscSFSetUp(lsf));
361   *out = lsf;
362   PetscFunctionReturn(0);
363 }
364 
365 PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
366 {
367   PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
368 
369   PetscFunctionBegin;
370   sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
371   sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;
372 
373   sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
374   sf->ops->Reset           = PetscSFReset_Allgatherv;
375   sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
376   sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
377   sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
378   sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
379   sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
380   sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
381   sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
382   sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
383   sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
384   sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;
385 
386   CHKERRQ(PetscNewLog(sf,&dat));
387   sf->data = (void*)dat;
388   PetscFunctionReturn(0);
389 }
390