#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* Reuse the type. The difference is that some fields (i.e., displs, recvcounts) are not used by Allgather, which is not a big deal */
typedef PetscSF_Allgatherv PetscSF_Allgather;

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gather(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

PetscErrorCode PetscSFSetUp_Allgather(PetscSF sf)
{
  PetscInt          i;
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;

  PetscFunctionBegin;
  for (i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
    sf->leafbuflen[i]  = 0;
    sf->leafstart[i]   = 0;
    sf->leafcontig[i]  = PETSC_TRUE;
    sf->leafdups[i]    = PETSC_FALSE;
    dat->rootbuflen[i] = 0;
    dat->rootstart[i]  = 0;
    dat->rootcontig[i] = PETSC_TRUE;
    dat->rootdups[i]   = PETSC_FALSE;
  }

  sf->leafbuflen[PETSCSF_REMOTE]  = sf->nleaves;
  dat->rootbuflen[PETSCSF_REMOTE] = sf->nroots;
  sf->persistent = PETSC_FALSE;
  sf->nleafreqs  = 0; /* MPI collectives only need one request. We treat it as a root request. */
  dat->nrootreqs = 1;
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  PetscMPIInt    sendcount;
  MPI_Comm       comm;
  void           *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  MPI_Request    *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata);CHKERRQ(ierr);
  ierr = PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
  ierr = PetscMPIIntCast(sf->nroots,&sendcount);CHKERRQ(ierr);
  ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
  ierr = PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPIU_Iallgather(rootbuf,sendcount,unit,leafbuf,sendcount,unit,comm,req);CHKERRMPI(ierr);
  PetscFunctionReturn(0);
}
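/* Illustrative sketch, not part of the implementation: with an assumed 3 ranks and
   sf->nroots = 2 on each, the MPIU_Iallgather() above gives every rank the concatenation
   of all root buffers, which is exactly the Bcast semantics of this SF type:

     rank 0 roots: {a0,a1}   rank 1 roots: {b0,b1}   rank 2 roots: {c0,c1}
     leafdata on every rank after PetscSFBcastEnd(): {a0,a1,b0,b1,c0,c1}
*/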
static PetscErrorCode PetscSFReduceBegin_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscErrorCode    ierr;
  PetscSFLink       link;
  PetscInt          rstart;
  MPI_Comm          comm;
  PetscMPIInt       rank,count,recvcount;
  void              *rootbuf = NULL,*leafbuf = NULL; /* buffers seen by MPI */
  PetscSF_Allgather *dat = (PetscSF_Allgather*)sf->data;
  MPI_Request       *req;

  PetscFunctionBegin;
  ierr = PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link);CHKERRQ(ierr);
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce.
       Therefore copying from the local leafdata is fine */
    ierr = PetscLayoutGetRange(sf->map,&rstart,NULL);CHKERRQ(ierr);
    ierr = (*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes);CHKERRQ(ierr);
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) {ierr = (*link->SyncStream)(link);CHKERRQ(ierr);} /* Sync the device-to-host memcpy */
  } else {
    ierr = PetscObjectGetComm((PetscObject)sf,&comm);CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRMPI(ierr);
    ierr = PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata);CHKERRQ(ierr);
    ierr = PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */);CHKERRQ(ierr);
    ierr = PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL);CHKERRQ(ierr);
    ierr = PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount);CHKERRQ(ierr);
    /* Lazily allocate a separate leaf buffer on rank 0 to receive the result of MPI_Reduce */
    if (!rank && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      ierr = PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]);CHKERRQ(ierr);
    }
    if (!rank && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    ierr = PetscMPIIntCast(sf->nleaves*link->bs,&count);CHKERRQ(ierr);
    ierr = PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT);CHKERRQ(ierr);
    ierr = MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm);CHKERRMPI(ierr); /* Must do the reduce with the MPI builtin datatype basicunit */
    ierr = MPIU_Iscatter(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],recvcount,unit,rootbuf,recvcount,unit,0/*rank 0*/,comm,req);CHKERRMPI(ierr);
  }
  PetscFunctionReturn(0);
}

static PetscErrorCode PetscSFBcastToZero_Allgather(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscErrorCode ierr;
  PetscSFLink    link;
  PetscMPIInt    rank;

  PetscFunctionBegin;
  ierr = PetscSFBcastBegin_Gather(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link);CHKERRQ(ierr);
  ierr = PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank);CHKERRMPI(ierr);
  if (!rank && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    ierr = (*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes);CHKERRQ(ierr);
  }
  ierr = PetscSFLinkReclaim(sf,&link);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
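/* Illustrative sketch, not part of the implementation: for op != MPI_REPLACE, the Reduce
   path above first combines the leaf buffers of all ranks onto rank 0 with MPI_Reduce()
   using the builtin datatype, then MPIU_Iscatter() hands each rank the segment for the
   roots it owns. With an assumed 2 ranks, nroots = 1 each, and op = MPI_SUM:

     rank 0 leaves: {x0,x1}   rank 1 leaves: {y0,y1}
     after MPI_Reduce on rank 0:  {x0+y0, x1+y1}
     after MPIU_Iscatter:         rootdata = {x0+y0} on rank 0, {x1+y1} on rank 1
*/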
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgather(PetscSF sf)
{
  PetscErrorCode    ierr;
  PetscSF_Allgather *dat;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Basic;

  /* Inherit from Allgatherv */
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;

  /* Allgather stuff */
  sf->ops->SetUp       = PetscSFSetUp_Allgather;
  sf->ops->BcastBegin  = PetscSFBcastBegin_Allgather;
  sf->ops->ReduceBegin = PetscSFReduceBegin_Allgather;
  sf->ops->BcastToZero = PetscSFBcastToZero_Allgather;

  ierr     = PetscNewLog(sf,&dat);CHKERRQ(ierr);
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}
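/* A minimal usage sketch, not compiled here, showing one way to reach this implementation
   through PETSc's public SF API (PetscSFSetGraphWithPattern() with
   PETSCSF_PATTERN_ALLGATHER). Sizes and variable names are illustrative assumptions, and
   error checking is omitted for brevity:

     PetscSF     sf;
     PetscLayout map;
     PetscInt    nroots = 2;              illustrative local root count
     PetscScalar rootdata[2],*leafdata;   leafdata must hold nroots*size entries

     PetscLayoutCreate(PETSC_COMM_WORLD,&map);
     PetscLayoutSetLocalSize(map,nroots);
     PetscLayoutSetUp(map);
     PetscSFCreate(PETSC_COMM_WORLD,&sf);
     PetscSFSetGraphWithPattern(sf,map,PETSCSF_PATTERN_ALLGATHER);
     PetscSFSetUp(sf);
     ... fill rootdata and allocate leafdata ...
     PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE);
     PetscSFDestroy(&sf);
     PetscLayoutDestroy(&map);
*/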