#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the type. The difference is some fields (i.e., displs, recvcounts) are not used in Allgather on rank != 0, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gather(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);
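
/*
  How an SF of this type is typically obtained: rather than an explicit graph, one gives
  PetscSF an allgather pattern, which selects the PETSCSFALLGATHER type implemented in
  this file. A minimal usage sketch (comm, nroots, sf, map are illustrative names; error
  handling elided):

    PetscSF     sf;
    PetscLayout map; /* root-space layout: nroots entries owned by this rank */

    PetscCall(PetscSFCreate(comm, &sf));
    PetscCall(PetscLayoutCreateFromSizes(comm, nroots, PETSC_DECIDE, 1, &map));
    PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
    PetscCall(PetscSFSetUp(sf)); /* dispatches to PetscSFSetUp_Allgather() below */
    PetscCall(PetscLayoutDestroy(&map));
*/
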
PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf) {
  PetscInt           i;
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  for (i = PETSCSF_LOCAL; i <= PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent                  = PETSC_FALSE;
  sf->nleafreqs                   = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs                  = 1;
  PetscFunctionReturn(0);
}
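
/*
  Note on the sizes above: for this pattern every root/leaf is treated as remote, so the
  PETSCSF_LOCAL (self-to-self) buffers stay empty. Each rank's leaf space spans all global
  roots, hence leafbuflen[PETSCSF_REMOTE] = sf->nleaves covers the whole gathered array,
  while rootbuflen[PETSCSF_REMOTE] = sf->nroots is only the locally owned slice. The single
  root request is the one used for the MPI collective issued in the Begin routines below.
*/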

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op) {
  PetscSFLink  link;
  PetscMPIInt  sendcount;
  MPI_Comm     comm;
  void        *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgather(rootbuf, sendcount, unit, leafbuf, sendcount, unit, comm, req));
  PetscFunctionReturn(0);
}
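
/*
  Net effect of the broadcast: each rank contributes its sf->nroots root values and, after
  PetscSFBcastEnd(), every rank's leafdata holds the concatenation of all root buffers, as
  in MPI_Allgather. A hedged sketch with two ranks and one PetscInt root per rank
  (illustrative values only):

    rank 0: rootdata = {10}      rank 1: rootdata = {11}

    PetscCall(PetscSFBcastBegin(sf, MPIU_INT, rootdata, leafdata, MPI_REPLACE));
    PetscCall(PetscSFBcastEnd(sf, MPIU_INT, rootdata, leafdata, MPI_REPLACE));

    both ranks: leafdata = {10, 11}
*/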

static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op) {
  PetscSFLink        link;
  PetscInt           rstart;
  MPI_Comm           comm;
  PetscMPIInt        rank, count, recvcount;
  void              *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;
  MPI_Request       *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link)); /* Sync the device-to-host memcpy */
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do the reduction with the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], recvcount, unit, rootbuf, recvcount, unit, 0 /*rank 0*/, comm, req));
  }
  PetscFunctionReturn(0);
}
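
/*
  The non-REPLACE path avoids an Allreduce: leaves from all ranks are first combined onto
  rank 0 with MPI_Reduce (using the builtin basicunit so MPI can apply op), then the reduced
  buffer is scattered so each rank receives the slice corresponding to its own roots. A
  hedged sketch with two ranks, one root per rank and MPIU_INT leaves (illustrative values):

    both ranks: leafdata = {1, 2}

    PetscCall(PetscSFReduceBegin(sf, MPIU_INT, leafdata, rootdata, MPI_SUM));
    PetscCall(PetscSFReduceEnd(sf, MPIU_INT, leafdata, rootdata, MPI_SUM));

    rank 0: rootdata = {2}       rank 1: rootdata = {4}
*/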

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata) {
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gather(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(0);
}
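
/*
  BcastToZero is a restricted broadcast in which only rank 0 needs valid leafdata afterwards,
  so the routine above reuses the Gather bcast kernel (roots flow to rank 0 only) and, when
  rank 0's leaves live on the device while MPI is not GPU-aware, copies the received host
  buffer back to device memory before reclaiming the link.
*/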

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf) {
  PetscSF_Allgather *dat = (PetscSF_Allgather *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  PetscCall(PetscNewLog(sf, &dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(0);
}