#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the Allgatherv type. The difference is that some fields (e.g., displs, recvcounts) are unused by Allgather, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;
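
/* A sketch of the communication pattern assumed here (inferred from the code below): the root space
   is distributed across ranks while every rank holds leaves for the whole root space, so a bcast maps
   to MPI_Iallgather and a (non-REPLACE) reduce maps to an MPI_Reduce onto rank 0 followed by a scatter
   of each rank's owned block. */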

PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt           i;
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}
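
/* Note: the Iallgather above is nonblocking; its request is stored in the link and is completed by the
   BcastEnd callback (PetscSFBcastEnd_Basic, set in PetscSFCreate_Allgather below). A caller-side sketch
   of the pairing, assuming an already set-up SF of this type and valid rootdata/leafdata arrays:

     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     ... overlap local computation here ...
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
*/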

static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* Sync the device-to-host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do the reduce with the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /*rank 0*/, comm, req));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
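
/* Note: as coded above, a reduce with an op other than MPI_REPLACE is done in two steps: all leaf
   contributions are combined onto rank 0 with a blocking MPI_Reduce (into a leaf-sized buffer allocated
   on rank 0 only), and the result is then scattered so each rank receives the block matching its own
   roots. The nonblocking scatter's request is stored in the link and is completed by the ReduceEnd
   callback (PetscSFReduceEnd_Allgatherv, set in PetscSFCreate_Allgather below). */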

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink  link;
  PetscMPIInt  rank;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igather(rootbuf == leafbuf ? MPI_IN_PLACE : rootbuf, sendcount, unit, leafbuf, sendcount, unit, 0 /*rank 0*/, comm, req));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
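  /* When MPI is not GPU-aware, the gather above landed in a host staging buffer, so on rank 0 copy the
     result up to the device leafdata */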
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
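
/* A minimal caller-side sketch (not part of this file): one common way to obtain an SF that uses this
   implementation is PetscSFSetGraphWithPattern() with PETSCSF_PATTERN_ALLGATHER. Here `map` (a layout
   of the root space), `rootdata`, and `leafdata` are placeholders the caller must provide:

     PetscSF     sf;
     PetscLayout map;
     PetscCall(PetscSFCreate(PETSC_COMM_WORLD, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFSetUp(sf));
     PetscCall(PetscSFBcastBegin(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(sf, MPIU_SCALAR, rootdata, leafdata, MPI_REPLACE));
     PetscCall(PetscSFDestroy(&sf));
*/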