#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote) {
  PetscInt        i, j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The "&& sf->nleaves" makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf) {
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf) {
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf) {
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op) {
  PetscSFLink         link;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(0);
}
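/* A hedged illustration of the Iallgatherv above (hypothetical values, not part of the implementation):
   with three ranks owning 2, 1 and 3 roots respectively, PetscSFSetUp_Allgatherv() computes from the layout

     recvcounts = {2, 1, 3}   displs = {0, 2, 3}

   With rootbufs {a,b} on rank 0, {c} on rank 1 and {d,e,f} on rank 2, once the Iallgatherv request
   completes, every rank's leafbuf holds all roots in rank order:

     leafbuf = {a, b, c, d, e, f}

   i.e., leaf displs[i]+k is connected to root k on rank i, matching PetscSFGetGraph_Allgatherv() above. */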
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op) {
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    /* Allocate a separate leaf buffer on rank 0 to receive the reduction result */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    /* If we already copied leafdata from device to host (i.e., not using GPU-aware MPI), rank 0's send
       buffer coincides with the receive buffer, so it must pass MPI_IN_PLACE to MPI_Reduce */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must reduce with the MPI builtin datatype basicunit */
    PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
  }
  PetscFunctionReturn(0);
}
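/* A hedged illustration of the non-REPLACE branch above (hypothetical values): with op = MPI_SUM,
   three ranks and the {2, 1, 3} layout from the previous sketch, each rank contributes a full
   leaf vector of length 6. MPI_Reduce sums them elementwise into rank 0's buffer, and the
   Iscatterv returns segment i (recvcounts[i] entries starting at displs[i]) to rank i's roots:

     leafbufs: {1,1,1,1,1,1} on rank 0, {2,2,2,2,2,2} on rank 1, {3,3,3,3,3,3} on rank 2
     after MPI_Reduce, rank 0 holds {6,6,6,6,6,6}
     after Iscatterv:  rank 0 roots = {6,6}, rank 1 roots = {6}, rank 2 roots = {6,6,6} */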
PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op) {
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case: op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy, which in effect finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees the rootbuf and calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf, wrongly overwriting the result. So we do not defer to
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
    */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata) {
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gatherv(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(0);
}
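/* A hedged note on the semantics (hypothetical values): BcastToZero behaves like a Bcast with
   MPI_REPLACE except that only rank 0's leafdata is updated, since the underlying Gatherv collects
   all roots onto rank 0. With the {2, 1, 3} layout and roots {a,b,c,d,e,f}, after the call rank 0's
   leafdata is {a,b,c,d,e,f} while leafdata on the other ranks is untouched. The trailing Memcpy
   covers the case where MPI delivered into a host buffer but the user's leafdata lives on the device. */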
/* This routine is very tricky (I believe it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0, 1, 2 have a leaf with value 2, 3, 4 respectively. The leaves are connected
   to the root on rank 0. Suppose op = MPI_SUM and ranks 0, 1, 2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then
   updates the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end,
   leafupdate on ranks 0, 1, 2 is 1, 3, 6 respectively, and the root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
                 rank-0   rank-1   rank-2
      Root          1
      Leaf          2        3        4
      Leafupdate    2        3        4

   Do MPI_Exscan on leafupdate,
                 rank-0   rank-1   rank-2
      Root          1
      Leaf          2        3        4
      Leafupdate    2        2        5

   BcastAndOp from root to leafupdate,
                 rank-0   rank-1   rank-2
      Root          1
      Leaf          2        3        4
      Leafupdate    3        3        6

   Copy root to leafupdate on rank-0
                 rank-0   rank-1   rank-2
      Root          1
      Leaf          2        3        4
      Leafupdate    1        3        6

   Reduce from leaf to root,
                 rank-0   rank-1   rank-2
      Root         10
      Leaf          2        3        4
      Leafupdate    1        3        6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op) {
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, (size_t)sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op) {
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}
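/* A hedged usage sketch tying the worked example above to the public API (hypothetical values;
   sf is assumed to be an SF where every rank has one leaf connected to a single root on rank 0):

     PetscScalar root = 1;        // significant on rank 0 only
     PetscScalar leaf = rank + 2; // 2, 3, 4 on ranks 0, 1, 2
     PetscScalar leafupdate;
     PetscCall(PetscSFFetchAndOpBegin(sf, MPIU_SCALAR, &root, &leaf, &leafupdate, MPI_SUM));
     PetscCall(PetscSFFetchAndOpEnd(sf, MPIU_SCALAR, &root, &leaf, &leafupdate, MPI_SUM));
     // leafupdate is now 1, 3, 6 on ranks 0, 1, 2 and root is 10 on rank 0
*/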
/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote) {
  PetscInt        i, j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL, even for contiguous leaves */
    for (i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc) {
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;
  PetscInt            i, j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (i = 0; i < size; i++) {
        for (j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}
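/* A hedged illustration of the arrays built above, assuming three ranks and sf->nroots = 2 on the
   calling rank. As seen from rank 1: self comes first in iranks, every leaf rank accesses every
   local root (which is what an allgather means), and the blocks of irootloc are uniform:

     iranks   = {1, 0, 2}
     ioffset  = {0, 2, 4, 6}
     irootloc = {0, 1,  0, 1,  0, 1} */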
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out) {
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf) {
  PetscSF_Allgatherv *dat;

  PetscFunctionBegin;
  sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  PetscCall(PetscNewLog(sf, &dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(0);
}
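/* A hedged usage sketch: rather than calling PetscSFSetType() directly, SFs of the allgather(v)
   family are typically obtained by describing the graph with a pattern, e.g.

     PetscSF     sf;
     PetscLayout map; // layout of the roots over the communicator, created beforehand
     PetscCall(PetscSFCreate(comm, &sf));
     PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER));
     PetscCall(PetscSFSetUp(sf)); // dispatches to PetscSFSetUp_Allgatherv() above
*/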