/*
  Defines projective product routines where A is an MPIAIJ matrix:
        C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>
#include <petsc/private/hashmapiv.h>
#include <petsc/private/hashseti.h>
#include <petscsf.h>

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI         *ptap=a->ap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  if (!ptap) {
    /* hack: MatDuplicate() sets oldmat->ops->view on newmat, which is a base mat class with a NULL ptap! */
    A->ops->view = MatView_MPIAIJ;
    ierr = (A->ops->view)(A,viewer);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 2) {
        ierr = PetscViewerASCIIPrintf(viewer,"using allatonce MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 3) {
        ierr = PetscViewerASCIIPrintf(viewer,"using merged allatonce MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI           *ptap=a->ap;
  Mat_Merge_SeqsToMPI *merge;

  PetscFunctionBegin;
  if (!ptap) PetscFunctionReturn(0);

  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
  ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
  if (ptap->AP_loc) { /* used by alg_rap */
    Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
    ierr = PetscFree(ap->i);CHKERRQ(ierr);
    ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
  } else { /* used by alg_ptap */
    ierr = PetscFree(ptap->api);CHKERRQ(ierr);
    ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
  if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);

  merge = ptap->merge;
  if (merge) { /* used by alg_ptap */
    ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->bi);CHKERRQ(ierr);
    ierr = PetscFree(merge->bj);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
    ierr = PetscFree(merge->coi);CHKERRQ(ierr);
    ierr = PetscFree(merge->coj);CHKERRQ(ierr);
    ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
    ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
  }
  ierr = ISLocalToGlobalMappingDestroy(&ptap->ltog);CHKERRQ(ierr);

  ierr = PetscSFDestroy(&ptap->sf);CHKERRQ(ierr);
  ierr = PetscFree(ptap->c_othi);CHKERRQ(ierr);
  ierr = PetscFree(ptap->c_rmti);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
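/*
  Note: MatFreeIntermediateDataStructures_MPIAIJ_AP() above releases the Mat_APMPI cache
  (P_loc, P_oth, AP_loc, Rd, Ro, C_loc, C_oth, the SF, ...) that the numeric phase needs in
  order to recompute C = P^T*A*P for new values in A or P. As the numeric routines below note,
  this cache costs roughly as much memory as C itself; freeing it (MatFreeIntermediateDataStructures()
  or -mat_freeintermediatedatastructures) trades away the ability to reuse C with MAT_REUSE_MATRIX.
*/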
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI      *ptap=a->ap;

  PetscFunctionBegin;
  ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
  ierr = (*ptap->destroy)(A);CHKERRQ(ierr); /* MatDestroy_MPIAIJ(A) */
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
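/*
  A minimal caller-side sketch (not from this file) of how the routines below are reached
  through the public API, assuming the usual PETSc calling sequence; 2.0 is a made-up fill
  estimate:

      Mat C;
      // first call: symbolic + numeric phase
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
      // values (but not the nonzero pattern) of A or P changed: numeric phase only
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);

  Run with, e.g., -matptap_via scalable to force a particular implementation from the list
  registered below. MAT_REUSE_MATRIX requires the intermediate data structures to still be
  attached to C.
*/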
PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char     *algTypes[4] = {"scalable","nonscalable","allatonce","allatonce_merged"};
  PetscInt       nalg=4;
#else
  const char     *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","hypre"};
  PetscInt       nalg=5;
#endif
  PetscInt       pN=P->cmap->N,alg=1; /* set nonscalable algorithm as default */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to the scalable algorithm as default */
      MatInfo   Ainfo,Pinfo;
      PetscInt  nz_local;
      PetscBool alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one, but uses far less memory */
      }
    }

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P -- nonscalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    case 2:
      /* compute C=P^T*A*P allatonce */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    case 3:
      /* compute C=P^T*A*P allatonce, merged version */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 4:
      /* use boomerAMGBuildCoarseOperator */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P -- scalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }

    if (alg == 0 || alg == 1 || alg == 2 || alg == 3) {
      Mat_MPIAIJ *c  = (Mat_MPIAIJ*)(*C)->data;
      Mat_APMPI  *ap = c->ap;
      ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)(*C)),((PetscObject)(*C))->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
      ap->freestruct = PETSC_FALSE;
      ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures","MatFreeIntermediateDataStructures",ap->freestruct,&ap->freestruct,NULL);CHKERRQ(ierr);
      ierr = PetscOptionsEnd();CHKERRQ(ierr);
    }
  }

  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
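/*
  The scalable algorithm (algType 0) in outline. With the local part of P split into its
  diagonal block Pd and off-diagonal block Po, and R = P^T so that Rd = Pd^T, Ro = Po^T:

      C = P^T*A*P = R*(A*P),  AP_loc = Ad*P_loc + Ao*P_oth

  AP_loc is entirely local once the needed remote rows of P (P_oth) have been fetched.
  Rd*AP_loc yields the rows of C owned by this rank (C_loc); Ro*AP_loc yields contributions
  to rows owned by other ranks (C_oth), which are communicated and added during assembly.
*/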
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz,nout;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap->AP_loc) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  api = ap->i;
  apj = ap->j;
  ierr = ISLocalToGlobalMappingApply(ptap->ltog,api[AP_loc->rmap->n],apj,apj);CHKERRQ(ierr);
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    apnz = api[i+1] - api[i];
    apa  = ap->a + api[i];
    ierr = PetscArrayzero(apa,apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
  }
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,api[AP_loc->rmap->n],apj,&nout,apj);CHKERRQ(ierr);
  if (api[AP_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",api[AP_loc->rmap->n],nout);

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  /* Always use the scalable version since we are in the MPI scalable version */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_loc->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat() can only be used when C->was_assembled
     is PETSC_FALSE and there are no off-processor parts. If was_assembled is true, the statement
     aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat()
     no longer holds. Then the more general MatSetValues_MPIAIJ() has to be used, where the
     column index is looked up from a table and other, more expensive work is done. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_loc->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
  if (c_seq->i[C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_loc->rmap->n],nout);

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  ierr  = ISLocalToGlobalMappingApply(ptap->ltog,c_seq->i[C_oth->rmap->n],c_seq->j,c_seq->j);CHKERRQ(ierr);
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_seq->i[C_oth->rmap->n],c_seq->j,&nout,c_seq->j);CHKERRQ(ierr);
  if (c_seq->i[C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_seq->i[C_oth->rmap->n],nout);

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
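/*
  Symbolic/numeric split: MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable() below computes only the
  sparsity patterns of AP_loc, C_loc, C_oth and C and preallocates them; the numeric routine
  above then fills in values and can be re-run cheaply when A or P change values but not
  pattern. The fill argument is the caller's estimate of nnz growth; with -info the symbolic
  phase reports the fill that was actually needed, which can be fed back into MatPtAP() to
  avoid reallocations.
*/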
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_APMPI          *ptap;
  Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  Mat                Cmpi,P_loc,P_oth;
  PetscFreeSpaceList free_space=NULL,current_space=NULL;
  PetscInt           am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt           *lnk,i,k,pnz,row,nsend;
  PetscMPIInt        tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt           **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt           len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt           nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request        *swaits,*rwaits;
  MPI_Status         *sstatus,rstatus;
  PetscLayout        rowmap;
  PetscInt           *owners_co,*coi,*coj; /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt        *len_r,*id_r;         /* arrays of length comm size; store send/recv matrix values */
  PetscInt           *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi,nout;
  Mat_SeqAIJ         *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar        *apv;
  PetscTable         ta;
  MatType            mtype;
  const char         *prefix;
#if defined(PETSC_USE_INFO)
  PetscReal          apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* create struct Mat_APMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
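  /* The sparsity of each row AP[i,:] is the union of the (sorted) column lists of the rows
     of P_loc/P_oth picked out by the nonzeros of A[i,:]. The union is built with a condensed
     linked list (PetscLLCondensed*_Scalable) whose capacity only needs to be Crmax, an upper
     bound obtained via a PetscTable counting the distinct columns that appear in any row of
     P_loc and P_oth. E.g. merging sorted lists {0,3,7} and {3,5} costs O(nnz) and yields
     {0,3,5,7}. */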
  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and
     destroy list of free space and other temporary array(s) */
  ierr = PetscCalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
  ierr = MatSeqAIJCompactOutExtraColumns_SeqAIJ(ptap->AP_loc,&ptap->ltog);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc */
  /* ------------------------------------- */
  ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_s,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_si,size);CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  ierr = ISLocalToGlobalMappingApply(ptap->ltog,coi[con],coj,coj);CHKERRQ(ierr);
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }
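  /* Communication/computation overlap: the Isends of coj above are left in flight while the
     symbolic product C_loc = Rd*AP_loc is computed below; only then do we wait on the receives. */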
  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
  ierr = ISLocalToGlobalMappingApply(ptap->ltog,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,c_loc->j);CHKERRQ(ierr);

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi */
  /* ------------------------------------- */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;
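  /* Each owned row i of Cmpi is the merge of row i of C_loc with the matching rows of the
     received C_oth pieces; buf_ri_k[k]/nextrow[k]/nextci[k] walk the k-th received i-structure
     so each remote contribution is consumed in row order exactly once. */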
  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  if (P->cmap->bs > 0) {
    ierr = PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);CHKERRQ(ierr);
  }
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;

  nout = 0;
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_oth->i[ptap->C_oth->rmap->n],c_oth->j,&nout,c_oth->j);CHKERRQ(ierr);
  if (c_oth->i[ptap->C_oth->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_oth->i[ptap->C_oth->rmap->n],nout);
  ierr = ISGlobalToLocalMappingApply(ptap->ltog,IS_GTOLM_DROP,c_loc->i[ptap->C_loc->rmap->n],c_loc->j,&nout,c_loc->j);CHKERRQ(ierr);
  if (c_loc->i[ptap->C_loc->rmap->n] != nout) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Incorrect mapping %D != %D\n",c_loc->i[ptap->C_loc->rmap->n],nout);

  PetscFunctionReturn(0);
}
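/*
  The two inline helpers below compute one row of AP = A*P for the all-at-once algorithms:
  the symbolic variant collects the column indices of AP[i,:] into hash sets (dht for the
  diagonal part, oht for the off-diagonal part), while the numeric variant accumulates
  index->value pairs in a hash map. The dof argument appears to handle the case where A has
  dof interleaved components per node while P is defined per node: row i of A maps to node
  ii = i/dof with component offset = i%dof, and a nonzero column pj of P contributes the
  global column pj*dof+offset. For example, with dof=2, A-row 5 is node 2, component 1, and
  P-column 3 contributes global column 7.
*/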
PETSC_STATIC_INLINE PetscErrorCode MatPtAPSymbolicComputeOneRowOfAP_private(Mat A,Mat P,Mat P_oth,const PetscInt *map,PetscInt dof,PetscInt i,PetscHSetI dht,PetscHSetI oht)
{
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_oth=(Mat_SeqAIJ*)P_oth->data,*pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  PetscInt       *ai,nzi,j,*aj,row,col,*pi,*pj,pnz,nzpi,*p_othcols,k;
  PetscInt       pcstart,pcend,column,offset;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  pcstart  = P->cmap->rstart;
  pcstart *= dof;
  pcend    = P->cmap->rend;
  pcend   *= dof;
  /* diagonal portion: Ad[i,:]*P */
  ai  = ad->i;
  nzi = ai[i+1] - ai[i];
  aj  = ad->j + ai[i];
  for (j=0; j<nzi; j++) {
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi = pd->i[row+1] - pd->i[row];
    pj   = pd->j + pd->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHSetIAdd(dht,pj[k]*dof+offset+pcstart);CHKERRQ(ierr);
    }
  }
  /* off diag P */
  for (j=0; j<nzi; j++) {
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi = po->i[row+1] - po->i[row];
    pj   = po->j + po->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHSetIAdd(oht,p->garray[pj[k]]*dof+offset);CHKERRQ(ierr);
    }
  }

  /* off diagonal part: Ao[i,:]*P_oth */
  if (ao) {
    ai  = ao->i;
    pi  = p_oth->i;
    nzi = ai[i+1] - ai[i];
    aj  = ao->j + ai[i];
    for (j=0; j<nzi; j++) {
      row    = aj[j];
      offset = a->garray[row]%dof;
      row    = map[row];
      pnz       = pi[row+1] - pi[row];
      p_othcols = p_oth->j + pi[row];
      for (col=0; col<pnz; col++) {
        column = p_othcols[col] * dof + offset;
        if (column>=pcstart && column<pcend) {
          ierr = PetscHSetIAdd(dht,column);CHKERRQ(ierr);
        } else {
          ierr = PetscHSetIAdd(oht,column);CHKERRQ(ierr);
        }
      }
    }
  } /* end if (ao) */
  PetscFunctionReturn(0);
}

PETSC_STATIC_INLINE PetscErrorCode MatPtAPNumericComputeOneRowOfAP_private(Mat A,Mat P,Mat P_oth,const PetscInt *map,PetscInt dof,PetscInt i,PetscHMapIV hmap)
{
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_oth=(Mat_SeqAIJ*)P_oth->data,*pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  PetscInt       *ai,nzi,j,*aj,row,col,*pi,pnz,*p_othcols,pcstart,*pj,k,nzpi,offset;
  PetscScalar    ra,*aa,*pa;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  pcstart  = P->cmap->rstart;
  pcstart *= dof;

  /* diagonal portion: Ad[i,:]*P */
  ai  = ad->i;
  nzi = ai[i+1] - ai[i];
  aj  = ad->j + ai[i];
  aa  = ad->a + ai[i];
  for (j=0; j<nzi; j++) {
    ra     = aa[j];
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi = pd->i[row+1] - pd->i[row];
    pj   = pd->j + pd->i[row];
    pa   = pd->a + pd->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHMapIVAddValue(hmap,pj[k]*dof+offset+pcstart,ra*pa[k]);CHKERRQ(ierr);
    }
    ierr = PetscLogFlops(2.0*nzpi);CHKERRQ(ierr);
  }
  /* off diag P */
  for (j=0; j<nzi; j++) {
    ra     = aa[j];
    row    = aj[j];
    offset = row%dof;
    row   /= dof;
    nzpi = po->i[row+1] - po->i[row];
    pj   = po->j + po->i[row];
    pa   = po->a + po->i[row];
    for (k=0; k<nzpi; k++) {
      ierr = PetscHMapIVAddValue(hmap,p->garray[pj[k]]*dof+offset,ra*pa[k]);CHKERRQ(ierr);
    }
    ierr = PetscLogFlops(2.0*nzpi);CHKERRQ(ierr);
  }

  /* off diagonal part: Ao[i,:]*P_oth */
  if (ao) {
    ai  = ao->i;
    pi  = p_oth->i;
    nzi = ai[i+1] - ai[i];
    aj  = ao->j + ai[i];
    aa  = ao->a + ai[i];
    for (j=0; j<nzi; j++) {
      row    = aj[j];
      offset = a->garray[row]%dof;
      row    = map[row];
      ra     = aa[j];
      pnz       = pi[row+1] - pi[row];
      p_othcols = p_oth->j + pi[row];
      pa        = p_oth->a + pi[row];
      for (col=0; col<pnz; col++) {
        ierr = PetscHMapIVAddValue(hmap,p_othcols[col]*dof+offset,ra*pa[col]);CHKERRQ(ierr);
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
    }
  } /* end if (ao) */

  PetscFunctionReturn(0);
}
PetscErrorCode MatGetBrowsOfAcols_MPIXAIJ(Mat,Mat,PetscInt dof,MatReuse,Mat*);

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce(Mat A,Mat P,PetscInt dof,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *cd,*co,*po=(Mat_SeqAIJ*)p->B->data,*pd=(Mat_SeqAIJ*)p->A->data;
  Mat_APMPI      *ptap = c->ap;
  PetscHMapIV    hmap;
  PetscInt       i,j,jj,kk,nzi,*c_rmtj,voff,*c_othj,pn,pon,pcstart,pcend,ccstart,ccend,row,am,*poj,*pdj,*apindices,cmaxr,*c_rmtc,*c_rmtjj,*dcc,*occ,loc;
  PetscScalar    *c_rmta,*c_otha,*poa,*pda,*apvalues,*apvaluestmp,*c_rmtaa;
  PetscInt       offset,ii,pocol;
  const PetscInt *mappingindices;
  IS             map;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (!ptap->P_oth) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* Get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_REUSE_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  }
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);

  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  ierr = PetscCalloc2(ptap->c_rmti[pon],&c_rmtj,ptap->c_rmti[pon],&c_rmta);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  cmaxr = 0;
  for (i=0; i<pon; i++) {
    cmaxr = PetscMax(cmaxr,ptap->c_rmti[i+1]-ptap->c_rmti[i]);
  }
  ierr = PetscCalloc4(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
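  /* Remote part first: for every row i of A whose P^T contribution lands on another rank
     (nonzeros of Po), one row of AP is formed in the hash map, scaled by the Po entries, and
     accumulated into the c_rmt buffers, one sorted segment per off-diagonal column of P; the
     segments are later pushed to their owning ranks with PetscSFReduce. */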
  for (i=0; i<am && pon; i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = po->i[ii+1] - po->i[ii];
    if (!nzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;

    /* Form C(ii, :) */
    poj = po->j + po->i[ii];
    poa = po->a + po->i[ii];
    for (j=0; j<nzi; j++) {
      pocol   = poj[j]*dof+offset;
      c_rmtjj = c_rmtj + ptap->c_rmti[pocol];
      c_rmtaa = c_rmta + ptap->c_rmti[pocol];
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*poa[j];
        /* if the row is empty */
        if (!c_rmtc[pocol]) {
          c_rmtjj[jj] = apindices[jj];
          c_rmtaa[jj] = apvaluestmp[jj];
          c_rmtc[pocol]++;
        } else {
          ierr = PetscFindInt(apindices[jj],c_rmtc[pocol],c_rmtjj,&loc);CHKERRQ(ierr);
          if (loc>=0) { /* hit */
            c_rmtaa[loc] += apvaluestmp[jj];
            ierr = PetscLogFlops(1.0);CHKERRQ(ierr);
          } else { /* new element */
            loc = -(loc+1);
            /* move data backward */
            for (kk=c_rmtc[pocol]; kk>loc; kk--) {
              c_rmtjj[kk] = c_rmtjj[kk-1];
              c_rmtaa[kk] = c_rmtaa[kk-1];
            } /* end kk */
            c_rmtjj[loc] = apindices[jj];
            c_rmtaa[loc] = apvaluestmp[jj];
            c_rmtc[pocol]++;
          }
        }
      } /* end jj */
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
    } /* end j */
  } /* end i */

  ierr = PetscFree4(apindices,apvalues,apvaluestmp,c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVDestroy(&hmap);CHKERRQ(ierr);

  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;
  ierr = PetscCalloc2(ptap->c_othi[pn],&c_othj,ptap->c_othi[pn],&c_otha);CHKERRQ(ierr);

  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart = pcstart*dof;
  pcend   = pcend*dof;
  cd = (Mat_SeqAIJ*)(c->A)->data;
  co = (Mat_SeqAIJ*)(c->B)->data;

  cmaxr = 0;
  for (i=0; i<pn; i++) {
    cmaxr = PetscMax(cmaxr,(cd->i[i+1]-cd->i[i])+(co->i[i+1]-co->i[i]));
  }
  ierr = PetscCalloc5(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pn,&dcc,pn,&occ);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  for (i=0; i<am && pn; i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;
    /* Form C(ii, :) */
    pdj = pd->j + pd->i[ii];
    pda = pd->a + pd->i[ii];
    for (j=0; j<nzi; j++) {
      row = pcstart + pdj[j] * dof + offset;
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*pda[j];
      }
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
      ierr = MatSetValues(C,1,&row,voff,apindices,apvaluestmp,ADD_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(C,&ccstart,&ccend);CHKERRQ(ierr);
  ierr = PetscFree5(apindices,apvalues,apvaluestmp,dcc,occ);CHKERRQ(ierr);
  ierr = PetscHMapIVDestroy(&hmap);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree2(c_rmtj,c_rmta);CHKERRQ(ierr);

  /* Add contributions from remote */
  for (i = 0; i < pn; i++) {
    row  = i + pcstart;
    ierr = MatSetValues(C,1,&row,ptap->c_othi[i+1]-ptap->c_othi[i],c_othj+ptap->c_othi[i],c_otha+ptap->c_othi[i],ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(c_othj,c_otha);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce(A,P,1,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
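/*
  The merged variant below does the same work as MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce(),
  but computes each row of AP only once and uses it for both the remote (Po-scaled) and the
  local (Pd-scaled) contributions in a single sweep over the rows of A, instead of two
  separate passes.
*/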
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce_merged(Mat A,Mat P,PetscInt dof,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ     *cd,*co,*po=(Mat_SeqAIJ*)p->B->data,*pd=(Mat_SeqAIJ*)p->A->data;
  Mat_APMPI      *ptap = c->ap;
  PetscHMapIV    hmap;
  PetscInt       i,j,jj,kk,nzi,dnzi,*c_rmtj,voff,*c_othj,pn,pon,pcstart,pcend,row,am,*poj,*pdj,*apindices,cmaxr,*c_rmtc,*c_rmtjj,loc;
  PetscScalar    *c_rmta,*c_otha,*poa,*pda,*apvalues,*apvaluestmp,*c_rmtaa;
  PetscInt       offset,ii,pocol;
  const PetscInt *mappingindices;
  IS             map;
  MPI_Comm       comm;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (!ptap->P_oth) SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* Get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_REUSE_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  }
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;

  ierr = PetscCalloc2(ptap->c_rmti[pon],&c_rmtj,ptap->c_rmti[pon],&c_rmta);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
  cmaxr = 0;
  for (i=0; i<pon; i++) {
    cmaxr = PetscMax(cmaxr,ptap->c_rmti[i+1]-ptap->c_rmti[i]);
  }
  cd = (Mat_SeqAIJ*)(c->A)->data;
  co = (Mat_SeqAIJ*)(c->B)->data;
  for (i=0; i<pn; i++) {
    cmaxr = PetscMax(cmaxr,(cd->i[i+1]-cd->i[i])+(co->i[i+1]-co->i[i]));
  }
  ierr = PetscCalloc4(cmaxr,&apindices,cmaxr,&apvalues,cmaxr,&apvaluestmp,pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVCreate(&hmap);CHKERRQ(ierr);
  ierr = PetscHMapIVResize(hmap,cmaxr);CHKERRQ(ierr);
  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  for (i=0; i<am && (pon || pn); i++) {
    ierr = PetscHMapIVClear(hmap);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi  = po->i[ii+1] - po->i[ii];
    dnzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi && !dnzi) continue;
    ierr = MatPtAPNumericComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,hmap);CHKERRQ(ierr);
    voff = 0;
    ierr = PetscHMapIVGetPairs(hmap,&voff,apindices,apvalues);CHKERRQ(ierr);
    if (!voff) continue;

    /* Form remote C(ii, :) */
    poj = po->j + po->i[ii];
    poa = po->a + po->i[ii];
    for (j=0; j<nzi; j++) {
      pocol   = poj[j]*dof+offset;
      c_rmtjj = c_rmtj + ptap->c_rmti[pocol];
      c_rmtaa = c_rmta + ptap->c_rmti[pocol];
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*poa[j];
        /* if the row is empty */
        if (!c_rmtc[pocol]) {
          c_rmtjj[jj] = apindices[jj];
          c_rmtaa[jj] = apvaluestmp[jj];
          c_rmtc[pocol]++;
        } else {
          ierr = PetscFindInt(apindices[jj],c_rmtc[pocol],c_rmtjj,&loc);CHKERRQ(ierr);
          if (loc>=0) { /* hit */
            c_rmtaa[loc] += apvaluestmp[jj];
            ierr = PetscLogFlops(1.0);CHKERRQ(ierr);
          } else { /* new element */
            loc = -(loc+1);
            /* move data backward */
            for (kk=c_rmtc[pocol]; kk>loc; kk--) {
              c_rmtjj[kk] = c_rmtjj[kk-1];
              c_rmtaa[kk] = c_rmtaa[kk-1];
            } /* end kk */
            c_rmtjj[loc] = apindices[jj];
            c_rmtaa[loc] = apvaluestmp[jj];
            c_rmtc[pocol]++;
          }
        }
      } /* end jj */
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
    } /* end j */

    /* Form local C(ii, :) */
    pdj = pd->j + pd->i[ii];
    pda = pd->a + pd->i[ii];
    for (j=0; j<dnzi; j++) {
      row = pcstart + pdj[j] * dof + offset;
      for (jj=0; jj<voff; jj++) {
        apvaluestmp[jj] = apvalues[jj]*pda[j];
      } /* end jj */
      ierr = PetscLogFlops(voff);CHKERRQ(ierr);
      ierr = MatSetValues(C,1,&row,voff,apindices,apvaluestmp,ADD_VALUES);CHKERRQ(ierr);
    } /* end j */
  } /* end i */

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);
  ierr = PetscFree4(apindices,apvalues,apvaluestmp,c_rmtc);CHKERRQ(ierr);
  ierr = PetscHMapIVDestroy(&hmap);CHKERRQ(ierr);
  ierr = PetscCalloc2(ptap->c_othi[pn],&c_othj,ptap->c_othi[pn],&c_otha);CHKERRQ(ierr);

  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_SCALAR,c_rmta,c_otha,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree2(c_rmtj,c_rmta);CHKERRQ(ierr);

  /* Add contributions from remote */
  for (i = 0; i < pn; i++) {
    row  = i + pcstart;
    ierr = MatSetValues(C,1,&row,ptap->c_othi[i+1]-ptap->c_othi[i],c_othj+ptap->c_othi[i],c_otha+ptap->c_othi[i],ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = PetscFree2(c_othj,c_otha);CHKERRQ(ierr);

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPNumeric_MPIAIJ_MPIXAIJ_allatonce_merged(A,P,1,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
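/*
  Symbolic phase for the all-at-once algorithms: instead of forming R = P^T explicitly,
  per-row hash sets collect the column patterns of C. Rows of C owned by other ranks
  (reached through Po) are counted in hta[] and their sizes and columns shipped to the
  owners with a PetscSF, while locally owned rows are split into diagonal and off-diagonal
  sets to produce exact dnz/onz preallocation for Cmpi.
*/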
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce(Mat A,Mat P,PetscInt dof,PetscReal fill,Mat *C)
{
  Mat_APMPI      *ptap;
  Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm       comm;
  Mat            Cmpi;
  Mat_SeqAIJ     *pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  MatType        mtype;
  PetscSF        sf;
  PetscSFNode    *iremote;
  PetscInt       rootspacesize,*rootspace,*rootspaceoffsets,nleaves;
  const PetscInt *rootdegrees;
  PetscHSetI     ht,oht,*hta,*hto;
  PetscInt       pn,pon,*c_rmtc,i,j,nzi,htsize,htosize,*c_rmtj,off,*c_othj,rcvncols,sendncols,*c_rmtoffsets;
  PetscInt       lidx,*rdj,col,pcstart,pcend,*dnz,*onz,am,arstart,arend,*poj,*pdj;
  PetscInt       nalg=2,alg=0,offset,ii;
  PetscMPIInt    owner;
  const PetscInt *mappingindices;
  PetscBool      flg;
  const char     *algTypes[2] = {"overlapping","merged"};
  IS             map;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);

  /* Create symbolic parallel matrix Cmpi */
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn *= dof;
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);

  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 2;

  /* Get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_INITIAL_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);
  /* This equals the number of off-diagonal columns of P */
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  /* offsets */
  ierr = PetscMalloc1(pon+1,&ptap->c_rmti);CHKERRQ(ierr);
  /* The number of columns we will send to remote ranks */
  ierr = PetscMalloc1(pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pon,&hta);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
  }
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&arstart,&arend);CHKERRQ(ierr);
  /* Create a hash table to merge all columns for C(i, :) */
  ierr = PetscHSetICreate(&ht);CHKERRQ(ierr);

  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  ptap->c_rmti[0] = 0;
  /* 2) Pass 1: calculate the size for C_rmt (a matrix that needs to be sent to other processors) */
  for (i=0; i<am && pon; i++) {
    /* Form one row of AP */
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    /* If the off-diagonal part is empty, we should not do any calculation */
    nzi = po->i[ii+1] - po->i[ii];
    if (!nzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,ht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    /* If AP is empty, just continue */
    if (!htsize) continue;
    /* Form C(ii, :) */
    poj = po->j + po->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],ht);CHKERRQ(ierr);
    }
  }

  for (i=0; i<pon; i++) {
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    ptap->c_rmti[i+1] = ptap->c_rmti[i] + htsize;
    c_rmtc[i] = htsize;
  }

  ierr = PetscMalloc1(ptap->c_rmti[pon],&c_rmtj);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    off = 0;
    ierr = PetscHSetIGetElems(hta[i],&off,c_rmtj+ptap->c_rmti[i]);CHKERRQ(ierr);
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(hta);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&iremote);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    owner = 0; lidx = 0;
    offset = i%dof;
    ii     = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,&lidx);CHKERRQ(ierr);
    iremote[i].index = lidx*dof + offset;
    iremote[i].rank  = owner;
  }

  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,pn,pon,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
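  /* In this star forest the roots are the pn locally owned rows of C and the leaves are the
     pon off-diagonal columns of P on this rank, each pointing at the owner row it contributes
     to; gathers/scatters on sf therefore move per-row counts and offsets between contributors
     and owners. */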
  /* Reorder ranks properly so that the data handled by gather and scatter have the same order */
  ierr = PetscSFSetRankOrder(sf,PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
  /* How many neighbors have contributions to my rows? */
  ierr = PetscSFComputeDegreeBegin(sf,&rootdegrees);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sf,&rootdegrees);CHKERRQ(ierr);
  rootspacesize = 0;
  for (i = 0; i < pn; i++) {
    rootspacesize += rootdegrees[i];
  }
  ierr = PetscMalloc1(rootspacesize,&rootspace);CHKERRQ(ierr);
  ierr = PetscMalloc1(rootspacesize+1,&rootspaceoffsets);CHKERRQ(ierr);
  /* Get information from the leaves:
     the number of columns other ranks contribute to my rows */
  ierr = PetscSFGatherBegin(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtc);CHKERRQ(ierr);
  ierr = PetscCalloc1(pn+1,&ptap->c_othi);CHKERRQ(ierr);
  /* The number of columns received for each row */
  ptap->c_othi[0]     = 0;
  rootspacesize       = 0;
  rootspaceoffsets[0] = 0;
  for (i = 0; i < pn; i++) {
    rcvncols = 0;
    for (j = 0; j<rootdegrees[i]; j++) {
      rcvncols += rootspace[rootspacesize];
      rootspaceoffsets[rootspacesize+1] = rootspaceoffsets[rootspacesize] + rootspace[rootspacesize];
      rootspacesize++;
    }
    ptap->c_othi[i+1] = ptap->c_othi[i] + rcvncols;
  }
  ierr = PetscFree(rootspace);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterBegin(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterEnd(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree(rootspaceoffsets);CHKERRQ(ierr);

  ierr = PetscCalloc1(ptap->c_rmti[pon],&iremote);CHKERRQ(ierr);
  nleaves = 0;
  for (i = 0; i<pon; i++) {
    owner = 0;
    ii = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,NULL);CHKERRQ(ierr);
    sendncols = ptap->c_rmti[i+1] - ptap->c_rmti[i];
    for (j=0; j<sendncols; j++) {
      iremote[nleaves].rank    = owner;
      iremote[nleaves++].index = c_rmtoffsets[i] + j;
    }
  }
  ierr = PetscFree(c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscCalloc1(ptap->c_othi[pn],&c_othj);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm,&ptap->sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(ptap->sf,ptap->c_othi[pn],nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(ptap->sf);CHKERRQ(ierr);
  /* One to one map */
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);

  ierr = PetscMalloc2(pn,&dnz,pn,&onz);CHKERRQ(ierr);
  ierr = PetscHSetICreate(&oht);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
  ierr = PetscMalloc2(pn,&hta,pn,&hto);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
    ierr = PetscHSetICreate(&hto[i]);CHKERRQ(ierr);
  }
  /* Work on the local part */
  /* 4) Pass 1: Estimate memory for C_loc */
  for (i=0; i<am && pn; i++) {
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    ierr = PetscHSetIClear(oht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    nzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,oht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(oht,&htosize);CHKERRQ(ierr);
    if (!(htsize+htosize)) continue;
    /* Form C(ii, :) */
    pdj = pd->j + pd->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[pdj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hto[pdj[j]*dof+offset],oht);CHKERRQ(ierr);
    }
  }

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);

  ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr);
  ierr = PetscHSetIDestroy(&oht);CHKERRQ(ierr);

  /* Get remote data */
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtj);CHKERRQ(ierr);

  for (i = 0; i < pn; i++) {
    nzi = ptap->c_othi[i+1] - ptap->c_othi[i];
    rdj = c_othj + ptap->c_othi[i];
    for (j = 0; j < nzi; j++) {
      col = rdj[j];
      /* diag part */
      if (col>=pcstart && col<pcend) {
        ierr = PetscHSetIAdd(hta[i],col);CHKERRQ(ierr);
      } else { /* off diag */
        ierr = PetscHSetIAdd(hto[i],col);CHKERRQ(ierr);
      }
    }
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    dnz[i] = htsize;
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(hto[i],&htsize);CHKERRQ(ierr);
    onz[i] = htsize;
    ierr = PetscHSetIDestroy(&hto[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree2(hta,hto);CHKERRQ(ierr);
  ierr = PetscFree(c_othj);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,dof>1? dof: P->cmap->bs,dof>1? dof: P->cmap->bs);CHKERRQ(ierr);
  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,dof>1? dof: P->cmap->bs,dof>1? dof: P->cmap->bs);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatSetUp(Cmpi);CHKERRQ(ierr);
  ierr = PetscFree2(dnz,onz);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled = PETSC_FALSE;
  /* pick an algorithm */
  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
  alg  = 0;
  ierr = PetscOptionsEList("-matptap_allatonce_via","PtAP allatonce numeric approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  switch (alg) {
  case 0:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce;
    break;
  case 1:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged;
    break;
  default:
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Unsupported allatonce numerical algorithm");
  }
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce(A,P,1,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce_merged(Mat A,Mat P,PetscInt dof,PetscReal fill,Mat *C)
{
  Mat_APMPI         *ptap;
  Mat_MPIAIJ        *p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm          comm;
  Mat               Cmpi;
  Mat_SeqAIJ        *pd=(Mat_SeqAIJ*)p->A->data,*po=(Mat_SeqAIJ*)p->B->data;
  MatType           mtype;
  PetscSF           sf;
  PetscSFNode       *iremote;
  PetscInt          rootspacesize,*rootspace,*rootspaceoffsets,nleaves;
  const PetscInt    *rootdegrees;
  PetscHSetI        ht,oht,*hta,*hto,*htd;
  PetscInt          pn,pon,*c_rmtc,i,j,nzi,dnzi,htsize,htosize,*c_rmtj,off,*c_othj,rcvncols,sendncols,*c_rmtoffsets;
  PetscInt          lidx,*rdj,col,pcstart,pcend,*dnz,*onz,am,arstart,arend,*poj,*pdj;
  PetscInt          nalg=2,alg=0,offset,ii;
  PetscMPIInt       owner;
  PetscBool         flg;
  const char        *algTypes[2] = {"merged","overlapping"};
  const PetscInt    *mappingindices;
  IS                map;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);

  /* Create symbolic parallel matrix Cmpi */
  ierr = MatGetLocalSize(P,NULL,&pn);CHKERRQ(ierr);
  pn  *= dof;
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);

  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 3;

  /* 0) Get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAcols_MPIXAIJ(A,P,dof,MAT_INITIAL_MATRIX,&ptap->P_oth);CHKERRQ(ierr);
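  /* MatGetBrowsOfAcols_MPIXAIJ() is assumed to compose an IS on P_oth under the key
     "aoffdiagtopothmapping" that maps off-diagonal columns of A to rows of P_oth;
     it is queried next so a row of AP can be assembled without searching. */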
  ierr = PetscObjectQuery((PetscObject)ptap->P_oth,"aoffdiagtopothmapping",(PetscObject*)&map);CHKERRQ(ierr);

  /* This equals the number of offdiag columns in P */
  ierr = MatGetLocalSize(p->B,NULL,&pon);CHKERRQ(ierr);
  pon *= dof;
  /* offsets */
  ierr = PetscMalloc1(pon+1,&ptap->c_rmti);CHKERRQ(ierr);
  /* The number of columns we will send to remote ranks */
  ierr = PetscMalloc1(pon,&c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pon,&hta);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    ierr = PetscHSetICreate(&hta[i]);CHKERRQ(ierr);
  }
  ierr = MatGetLocalSize(A,&am,NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&arstart,&arend);CHKERRQ(ierr);
  /* Create hash tables to merge all columns for C(i, :) */
  ierr = PetscHSetICreate(&ht);CHKERRQ(ierr);
  ierr = PetscHSetICreate(&oht);CHKERRQ(ierr);
  ierr = PetscMalloc2(pn,&htd,pn,&hto);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    ierr = PetscHSetICreate(&htd[i]);CHKERRQ(ierr);
    ierr = PetscHSetICreate(&hto[i]);CHKERRQ(ierr);
  }

  ierr = ISGetIndices(map,&mappingindices);CHKERRQ(ierr);
  ptap->c_rmti[0] = 0;
  /* 2) Pass 1: calculate the size of C_rmt (a matrix that needs to be sent to other processors) */
  for (i=0; i<am && (pon || pn); i++) {
    /* Form one row of AP */
    ierr = PetscHSetIClear(ht);CHKERRQ(ierr);
    ierr = PetscHSetIClear(oht);CHKERRQ(ierr);
    offset = i%dof;
    ii     = i/dof;
    /* If both the off-diag and diag parts are empty, we should not do any calculation */
    nzi  = po->i[ii+1] - po->i[ii];
    dnzi = pd->i[ii+1] - pd->i[ii];
    if (!nzi && !dnzi) continue;

    ierr = MatPtAPSymbolicComputeOneRowOfAP_private(A,P,ptap->P_oth,mappingindices,dof,i,ht,oht);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(ht,&htsize);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(oht,&htosize);CHKERRQ(ierr);
    /* If AP is empty, just continue */
    if (!(htsize+htosize)) continue;

    /* Form remote C(ii, :) */
    poj = po->j + po->i[ii];
    for (j=0; j<nzi; j++) {
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hta[poj[j]*dof+offset],oht);CHKERRQ(ierr);
    }

    /* Form local C(ii, :) */
    pdj = pd->j + pd->i[ii];
    for (j=0; j<dnzi; j++) {
      ierr = PetscHSetIUpdate(htd[pdj[j]*dof+offset],ht);CHKERRQ(ierr);
      ierr = PetscHSetIUpdate(hto[pdj[j]*dof+offset],oht);CHKERRQ(ierr);
    }
  }

  ierr = ISRestoreIndices(map,&mappingindices);CHKERRQ(ierr);

  ierr = PetscHSetIDestroy(&ht);CHKERRQ(ierr);
  ierr = PetscHSetIDestroy(&oht);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    ierr = PetscHSetIGetSize(hta[i],&htsize);CHKERRQ(ierr);
    ptap->c_rmti[i+1] = ptap->c_rmti[i] + htsize;
    c_rmtc[i] = htsize;
  }

  ierr = PetscMalloc1(ptap->c_rmti[pon],&c_rmtj);CHKERRQ(ierr);

  for (i=0; i<pon; i++) {
    off  = 0;
    ierr = PetscHSetIGetElems(hta[i],&off,c_rmtj+ptap->c_rmti[i]);CHKERRQ(ierr);
    ierr = PetscHSetIDestroy(&hta[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(hta);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&iremote);CHKERRQ(ierr);
  for (i=0; i<pon; i++) {
    owner = 0; lidx = 0;
    offset = i%dof;
    ii     = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,&lidx);CHKERRQ(ierr);
    iremote[i].index = lidx*dof+offset;
    iremote[i].rank  = owner;
  }

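  /* Star-forest layout assumed here: the roots are the pn locally owned rows of C and
     the leaves are the pon rows this rank computed on behalf of other owners; each
     leaf records the (rank, local index) pair of the row it contributes to. */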
  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,pn,pon,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* Reorder ranks properly so that the data handled by gather and scatter have the same order */
  ierr = PetscSFSetRankOrder(sf,PETSC_TRUE);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
  /* How many neighbors have contributions to my rows? */
  ierr = PetscSFComputeDegreeBegin(sf,&rootdegrees);CHKERRQ(ierr);
  ierr = PetscSFComputeDegreeEnd(sf,&rootdegrees);CHKERRQ(ierr);
  rootspacesize = 0;
  for (i = 0; i < pn; i++) {
    rootspacesize += rootdegrees[i];
  }
  ierr = PetscMalloc1(rootspacesize,&rootspace);CHKERRQ(ierr);
  ierr = PetscMalloc1(rootspacesize+1,&rootspaceoffsets);CHKERRQ(ierr);
  /* Gather from the leaves the number of columns other ranks contribute to my rows */
  ierr = PetscSFGatherBegin(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscSFGatherEnd(sf,MPIU_INT,c_rmtc,rootspace);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtc);CHKERRQ(ierr);
  ierr = PetscMalloc1(pn+1,&ptap->c_othi);CHKERRQ(ierr);
  /* Count the number of columns received for each row */
  ptap->c_othi[0]     = 0;
  rootspacesize       = 0;
  rootspaceoffsets[0] = 0;
  for (i = 0; i < pn; i++) {
    rcvncols = 0;
    for (j = 0; j<rootdegrees[i]; j++) {
      rcvncols += rootspace[rootspacesize];
      rootspaceoffsets[rootspacesize+1] = rootspaceoffsets[rootspacesize] + rootspace[rootspacesize];
      rootspacesize++;
    }
    ptap->c_othi[i+1] = ptap->c_othi[i] + rcvncols;
  }
  ierr = PetscFree(rootspace);CHKERRQ(ierr);

  ierr = PetscMalloc1(pon,&c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterBegin(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFScatterEnd(sf,MPIU_INT,rootspaceoffsets,c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree(rootspaceoffsets);CHKERRQ(ierr);

  ierr = PetscCalloc1(ptap->c_rmti[pon],&iremote);CHKERRQ(ierr);
  nleaves = 0;
  for (i = 0; i<pon; i++) {
    owner = 0;
    ii    = i/dof;
    ierr = PetscLayoutFindOwnerIndex(P->cmap,p->garray[ii],&owner,NULL);CHKERRQ(ierr);
    sendncols = ptap->c_rmti[i+1] - ptap->c_rmti[i];
    for (j=0; j<sendncols; j++) {
      iremote[nleaves].rank    = owner;
      iremote[nleaves++].index = c_rmtoffsets[i] + j;
    }
  }
  ierr = PetscFree(c_rmtoffsets);CHKERRQ(ierr);
  ierr = PetscCalloc1(ptap->c_othi[pn],&c_othj);CHKERRQ(ierr);

  ierr = PetscSFCreate(comm,&ptap->sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(ptap->sf,ptap->c_othi[pn],nleaves,NULL,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(ptap->sf);CHKERRQ(ierr);
  /* One to one map */
  ierr = PetscSFReduceBegin(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  /* Get remote data */
  ierr = PetscSFReduceEnd(ptap->sf,MPIU_INT,c_rmtj,c_othj,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscFree(c_rmtj);CHKERRQ(ierr);
  ierr = PetscMalloc2(pn,&dnz,pn,&onz);CHKERRQ(ierr);
  ierr = MatGetOwnershipRangeColumn(P,&pcstart,&pcend);CHKERRQ(ierr);
  pcstart *= dof;
  pcend   *= dof;
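  /* Merge the received remote column indices into the per-row hash sets, splitting at
     the owned column range [pcstart, pcend), and turn the resulting set sizes into the
     dnz/onz preallocation counts. */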
  for (i = 0; i < pn; i++) {
    nzi = ptap->c_othi[i+1] - ptap->c_othi[i];
    rdj = c_othj + ptap->c_othi[i];
    for (j = 0; j < nzi; j++) {
      col = rdj[j];
      /* diag part */
      if (col>=pcstart && col<pcend) {
        ierr = PetscHSetIAdd(htd[i],col);CHKERRQ(ierr);
      } else { /* off diag */
        ierr = PetscHSetIAdd(hto[i],col);CHKERRQ(ierr);
      }
    }
    ierr = PetscHSetIGetSize(htd[i],&htsize);CHKERRQ(ierr);
    dnz[i] = htsize;
    ierr = PetscHSetIDestroy(&htd[i]);CHKERRQ(ierr);
    ierr = PetscHSetIGetSize(hto[i],&htsize);CHKERRQ(ierr);
    onz[i] = htsize;
    ierr = PetscHSetIDestroy(&hto[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree2(htd,hto);CHKERRQ(ierr);
  ierr = PetscFree(c_othj);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,dof>1? dof: P->cmap->bs,dof>1? dof: P->cmap->bs);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = PetscFree2(dnz,onz);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled = PETSC_FALSE;
  /* pick an algorithm */
  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
  alg  = 0;
  ierr = PetscOptionsEList("-matptap_allatonce_via","PtAP allatonce numeric approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  switch (alg) {
  case 0:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce_merged;
    break;
  case 1:
    Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_allatonce;
    break;
  default:
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Unsupported allatonce numerical algorithm");
  }
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_allatonce_merged(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatPtAPSymbolic_MPIAIJ_MPIXAIJ_allatonce_merged(A,P,1,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

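/*
   Minimal usage sketch (illustrative only, not part of this file): the all-at-once
   variants are selected at runtime through options, e.g.

     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);

   run with
     -matptap_via allatonce            (or allatonce_merged)
     -matptap_allatonce_via merged     (numeric kernel, as registered above)

   The fill value 2.0 is an arbitrary illustrative choice.
*/
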
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_APMPI           *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;  /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;          /* arrays of length comm->size, store send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
  const char          *prefix;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_APMPI and attach it to C later */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse   = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
  /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
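  /* Symbolic pass over the local rows of A: for each row, the column patterns of the
     P rows selected by Ad (local) and Ao (off-process, through P_oth) are merged in a
     condensed sorted linked list; the free-space buffer seeded from the fill estimate
     is doubled whenever it runs out. */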
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai  = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai  = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* -------------------------------------- */
  ierr = MatGetOptionsPrefix(A,&prefix);CHKERRQ(ierr);
  ierr = MatSetOptionsPrefix(ptap->Ro,prefix);CHKERRQ(ierr);
  ierr = MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_");CHKERRQ(ierr);
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_s,size);CHKERRQ(ierr);
  ierr = PetscArrayzero(len_si,size);CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
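  /* Besides the column indices of Co, every nonempty message to [proc] carries an
     i-structure header of length 2*(nrows+1): the row count, the nrows global row
     indices, and nrows+1 row offsets; len accumulates the total header length. */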
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr  = MatSetOptionsPrefix(ptap->Rd,prefix);CHKERRQ(ierr);
  ierr  = MatAppendOptionsPrefix(ptap->Rd,"inner_diag_");CHKERRQ(ierr);
  ierr  = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) { /* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

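  /* All i-structure messages have arrived: buf_ri[k] holds the row header and
     buf_rj[k] the column indices of the k-th received piece of Co; both are
     consumed row by row in step (5) below. */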
  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding nonzeros in each row of Cmpi */
  ierr = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  if (P->cmap->bs > 0) {
    ierr = PetscLayoutSetBlockSize(Cmpi->rmap,P->cmap->bs);CHKERRQ(ierr);
    ierr = PetscLayoutSetBlockSize(Cmpi->cmap,P->cmap->bs);CHKERRQ(ierr);
  }
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c     = (Mat_MPIAIJ*)Cmpi->data;
  c->ap = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled    = PETSC_FALSE;
  Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view    = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C = Cmpi;
  PetscFunctionReturn(0);
}

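/*
   Numeric phase of the nonscalable algorithm: AP_loc = A_loc*P is recomputed row by
   row through a dense work array of global width pN (ptap->apa, the reason this
   variant does not scale in the number of columns of P); the cached symbolic
   products then give C_loc = Rd*AP_loc and C_oth = Ro*AP_loc, and both pieces are
   added into C.
*/
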
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;

  PetscFunctionBegin;
  if (!ptap->AP_loc) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get R = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*------------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa = ptap->apa;
  api = ap->i;
  apj = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
  }

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols  = c_seq->j;
  vals  = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and
     there are no off-processor parts.
     If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat
     no longer holds. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from
     a table, and other, more complex work has to be done. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row   = rstart + i;
      ierr  = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols  = c_seq->j;
  vals  = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  if (ptap->freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}