/*
   Defines projective product routines where A is a MPIAIJ matrix:
         C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

extern PetscErrorCode MatDestroy_MPIAIJ(Mat);

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a    = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap = a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge = ptap->merge;

    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
    if (ptap->api) {ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
    if (ptap->apj) {ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
    if (merge) {
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = merge->destroy(A);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI         *ptap  = a->ptap;
  Mat_Merge_SeqsToMPI *merge = ptap->merge;

  PetscFunctionBegin;
  ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy   = merge->destroy;
  (*M)->ops->duplicate = merge->duplicate;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
    ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
    ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
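
/*
   Typical user-level call sequence that reaches the routines in this file
   (an illustrative sketch, not code from this file; it assumes A and P are
   assembled MPIAIJ matrices with compatible layouts):

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   (symbolic + numeric)
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     (numeric only, after the values of A change)
     ierr = MatDestroy(&C);CHKERRQ(ierr);

   For MPIAIJ operands MatPtAP() dispatches to MatPtAP_MPIAIJ_MPIAIJ() above;
   the fill argument (2.0 here) is the caller's estimate of the ratio
   nnz(C)/(nnz(A)+nnz(P)), reported back via PetscInfo() in the symbolic phase.
*/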

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*pti,*ptj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rmax;
#if defined(PTAP_PROFILE)
  PetscLogDouble      t0,t1,t2,t3,t4;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t0);CHKERRQ(ierr);
#endif

  /* check that the local sizes of the matrices are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create the struct Mat_PtAPMPI; it is attached to C below for reuse */
  ierr = PetscNew(&ptap);CHKERRQ(ierr);
  ierr = PetscNew(&merge);CHKERRQ(ierr);

  ptap->merge = merge;
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= nonzero cols of local A) from other processes */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t1);CHKERRQ(ierr);
#endif

  /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
  /*-------------------------------------------------------------------*/
  ierr   = PetscMalloc1((am+1),&api);CHKERRQ(ierr);
  api[0] = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this caused out-of-memory for ex56 with np=8k on Intrepid! */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);

  current_space = free_space;

  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    aj  = ad->j + adi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add nonzero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    aj  = ao->j + aoi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* allocate space for apj, initialize apj, and
     destroy the list of free space and other temporary arrays */
  ierr = PetscMalloc1((api[am]+1),&apj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
  if (afill_tmp > afill) afill = afill_tmp;

#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t2);CHKERRQ(ierr);
#endif
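
  /*
     Outline of the row-merge pattern used above (and again for Co and Pt*AP
     below); this is a summary comment only:
       for each row i of A:
         for each nonzero a(i,j): merge the column indices of P[j,:] into the
           sorted condensed linked list lnk (PetscLLCondensedAddSorted);
         apnz = lnk[0] is then the number of distinct columns of AP[i,:];
         if the current free-space chunk cannot hold apnz more entries, get a
           new chunk of size apnz + total_array_size (geometric growth,
           counted in nspacedouble);
         PetscLLCondensedClean() drains lnk into the free space and resets it.
  */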

  /* determine symbolic Co = (p->B)^T*AP - rows to be sent to other processes */
  /*---------------------------------------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon = (p->B)->cmap->n; /* total number of rows to be sent to other processes
                            >= (number of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc1((pon+1),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
  nnz           = fill*(poti[pon] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add nonzero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, and zero out the dense rows */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }
  ierr = PetscMalloc1((coi[pon]+1),&coj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* send the j-array (coj) of Co to other processes */
  /*--------------------------------------------------*/
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send and their lengths */
  ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);

  len_s        = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc1((size+2),&owners_co);CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* number of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len          = 0; /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1((merge->nsend+1),&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* wait until the receives and sends of coj are complete */
  for (i=0; i<merge->nrecv; i++) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* send and recv coi */
  /*-------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1((len+1),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s; /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form the outgoing message for the i-structure:
         buf_si[0]:           nrows to be sent
         [1:nrows]:           row index (local to the receiving process)
         [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];

      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;   /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
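
  /*
     Worked example of one outgoing i-structure message assembled above
     (illustrative numbers only): sending nrows=2 rows that land on the
     receiver as local rows 3 and 7, with 4 and 2 nonzeros respectively:

       buf_si = [ 2, 3, 7, 0, 4, 6 ]
                  |  |--|  |-----|
              nrows  rows  running offsets into the accompanying coj message

     so len_si[proc] = 2*(nrows+1) = 6, matching the length computed earlier.
  */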
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t3);CHKERRQ(ierr);
#endif

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate the pti array and free space for accumulating nonzero column info */
  ierr   = PetscMalloc1((pn+1),&pti);CHKERRQ(ierr);
  pti[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = fill*(pi_loc[pm] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of the k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }
  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add nonzero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add the received column data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    pti[i+1] = pti[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr = PetscMalloc1((pti[pn]+1),&ptj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);

  afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
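
  /* Note: afill now holds the largest fill ratio observed over the three
     symbolic products (AP, Co, and Pt*AP); it is reported via PetscInfo()
     below so that callers can pass a tighter 'fill' estimate to MatPtAP()
     on subsequent runs. */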

  /* create the symbolic parallel matrix Cmpi */
  /*-------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,P->cmap->bs,P->cmap->bs);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  merge->bi        = pti; /* Cseq->i */
  merge->bj        = ptj; /* Cseq->j */
  merge->coi       = coi; /* Co->i   */
  merge->coj       = coj; /* Co->j   */
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c          = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap    = ptap;
  ptap->api  = api;
  ptap->apj  = apj;
  ptap->rmax = ap_rmax;
  *C         = Cmpi;

  /* The flag 'scalable' determines which implementation MatPtAPNumeric() uses:
       PETSC_FALSE: do dense axpy - fast, but requires storing a nonscalable dense array apa;
       PETSC_TRUE:  do sparse axpy - might be slower, but uses only a sparse array apa */
  /* set the default to scalable */
  ptap->scalable = PETSC_TRUE;

  ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
  if (!ptap->scalable) { /* do dense axpy */
    ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
  } else {
    ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
  }

#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t4);CHKERRQ(ierr);
  if (rank == 1) {ierr = PetscPrintf(MPI_COMM_SELF," [%d] PtAPSymbolic %g/P + %g/AP + %g/comm + %g/PtAP = %g\n",rank,t1-t0,t2-t1,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);}
#endif

#if defined(PETSC_USE_INFO)
  if (pti[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
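
/*
   The choice recorded in ptap->scalable at the end of the symbolic routine
   above is made at runtime, e.g. (illustrative command lines; prepend the
   matrix options prefix, if any, to the option name):

     mpiexec -n 8 ./app -matptap_scalable 1    sparse-axpy kernel (the default)
     mpiexec -n 8 ./app -matptap_scalable 0    dense-axpy kernel

   The dense kernel allocates a work array of length pN (the global number of
   columns of P); the sparse kernel needs only ap_rmax+1 entries.
*/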

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  Mat_PtAPMPI         *ptap;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
  PetscInt            am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local arrays of C (mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *api,*apj,*coi,*coj;
  PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
  PetscBool           scalable;
#if defined(PTAP_PROFILE)
  PetscLogDouble      t0,t1,t2,t3,t4,et2_AP=0.0,et2_PtAP=0.0,t2_0,t2_1,t2_2;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t0);CHKERRQ(ierr);
#endif
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap = c->ptap;
  if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
  merge    = ptap->merge;
  apa      = ptap->apa;
  scalable = ptap->scalable;

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*----------------------------------------------------*/
  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic(); skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else { /* update the numerical values of P_oth and P_loc */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t1);CHKERRQ(ierr);
#endif

  /* 2) compute numeric C_seq = P_loc^T*A_loc*P - the dominating part */
  /*------------------------------------------------------------------*/
  /* get data from the symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  pi_loc = p_loc->i; pj_loc = p_loc->j; pJ = pj_loc; pa_loc = p_loc->a;
  pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;

  coi  = merge->coi; coj = merge->coj;
  ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr); /* ba: Cseq->a */

  api = ptap->api; apj = ptap->apj;
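
  /*
     The two kernels below differ only in how a row of AP is stored and
     accumulated (an illustrative example): for AP[i,:] with column pattern
     apJ = {2,5,9},
       dense  kernel: apa has length pN and is indexed by global column, so
                      apa[2], apa[5], apa[9] are updated and later re-zeroed
                      by walking apJ;
       sparse kernel: apa has length ap_rmax and is indexed by position, so
                      apa[0], apa[1], apa[2] are updated, with apJ consulted
                      to match columns during the merge.
  */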
  if (!scalable) { /* do dense axpy on apa (of length pN, stores A[i,:]*P) - nonscalable, but fast */
    ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
    /*----------------------------------------------------------------------------------------------*/
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* 2-a) form the i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*----------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        valtmp = ada[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        valtmp = aoa[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) compute Cseq = P_loc[i,:]^T*AP[i,:] using the outer product */
      /*------------------------------------------------------------------*/
      apnz = api[i+1] - api[i];
      /* put the values into Co = (p->B)^T*AP (off-diagonal part, to be sent to others) */
      pnz = po->i[i+1] - po->i[i];
      poJ = po->j + po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++) {
        row = poJ[j];
        cnz = coi[row+1] - coi[row];
        cj  = coj + coi[row];
        ca  = coa + coi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* put the values into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pdJ = pd->j + pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++) {
        row = pdJ[j];
        cnz = bi[row+1] - bi[row];
        cj  = bj + bi[row];
        ca  = ba + bi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
#if defined(PTAP_PROFILE)
      ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  } else { /* do sparse axpy on apa (of length ap_rmax, stores A[i,:]*P) - scalable, but slower */
    ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
    /*------------------------------------------------------------------------------------------*/
    pA = pa_loc;
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* form the i-th sparse row of A*P */
      apnz = api[i+1] - api[i];
      apJ  = apj + api[i];
      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row    = adj[j];
        pnz    = pi_loc[row+1] - pi_loc[row];
        pj     = pj_loc + pi_loc[row];
        pa     = pa_loc + pi_loc[row];
        valtmp = ada[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row    = aoj[j];
        pnz    = pi_oth[row+1] - pi_oth[row];
        pj     = pj_oth + pi_oth[row];
        pa     = pa_oth + pi_oth[row];
        valtmp = aoa[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) compute Cseq = P_loc[i,:]^T*AP[i,:] using the outer product */
      /*------------------------------------------------------------------*/
      pnz = pi_loc[i+1] - pi_loc[i];
      pJ  = pj_loc + pi_loc[i];
      for (j=0; j<pnz; j++) {
        nextap = 0;
        row    = pJ[j]; /* global index */
        if (row < pcstart || row >= pcend) { /* put the value into Co */
          row = *poJ;
          cj  = coj + coi[row];
          ca  = coa + coi[row]; poJ++;
        } else {                             /* put the value into Cd */
          row = *pdJ;
          cj  = bj + bi[row];
          ca  = ba + bi[row]; pdJ++;
        }
        valtmp = pA[j];
        for (k=0; nextap<apnz; k++) {
          if (cj[k] == apJ[nextap]) ca[k] += valtmp*apa[nextap++];
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }
      pA += pnz;
      /* zero the current row info for A*P */
      ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
      ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
      et2_PtAP += t2_2 - t2_1;
#endif
    }
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t2);CHKERRQ(ierr);
#endif

  /* 3) send and recv the matrix values coa */
  /*----------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t3);CHKERRQ(ierr);
#endif

  /* 4) insert the local Cseq and received values into Cmpi */
  /*---------------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k];                 /* beginning of the k-th received i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;             /* next row number of the k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add the received values into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
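
  /* All contributions to row i (the local Cseq values in ba plus the received
     Co values merged in above) have already been summed into ba_i, so a single
     MatSetValues() with INSERT_VALUES per locally owned row suffices; no
     further ADD_VALUES communication is needed during assembly. */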
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t4);CHKERRQ(ierr);
  if (rank == 1) {ierr = PetscPrintf(MPI_COMM_SELF," [%d] PtAPNum %g/P + %g/PtAP( %g + %g ) + %g/comm + %g/Cloc = %g\n\n",rank,t1-t0,t2-t1,et2_AP,et2_PtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);}
#endif
  PetscFunctionReturn(0);
}