/*
  Defines projective product routines where A is a SeqAIJ matrix
          C = P^T * A * P
*/

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <petscbt.h>

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_SeqAIJ"
PetscErrorCode MatPtAPSymbolic_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!P->ops->ptapsymbolic_seqaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
  ierr = (*P->ops->ptapsymbolic_seqaij)(A,P,fill,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ"
PetscErrorCode MatPtAPNumeric_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (!P->ops->ptapnumeric_seqaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
  ierr = (*P->ops->ptapnumeric_seqaij)(A,P,C);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_SeqAIJ_PtAP"
PetscErrorCode MatDestroy_SeqAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *a    = (Mat_SeqAIJ*)A->data;
  Mat_PtAP       *ptap = a->ptap;

  PetscFunctionBegin;
  /* free ptap, then A */
  ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
  ierr = PetscFree(ptap->api);CHKERRQ(ierr);
  ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  ierr = (ptap->destroy)(A);CHKERRQ(ierr);
  ierr = PetscFree(ptap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2"
PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data,*p = (Mat_SeqAIJ*)P->data,*c;
  PetscInt           *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj;
  PetscInt           *ci,*cj,*ptadenserow,*ptasparserow,*ptaj,nspacedouble=0;
  PetscInt           an=A->cmap->N,am=A->rmap->N,pn=P->cmap->N;
  PetscInt           i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi,nlnk,*lnk;
  MatScalar          *ca;
  PetscBT            lnkbt;

  PetscFunctionBegin;
  /* Get ij structure of P^T */
  ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
  ptJ  = ptj;

  /* Allocate ci array, arrays for fill computation and
     free space for accumulating nonzero column info */
  ierr  = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
  ci[0] = 0;

  ierr = PetscMalloc((2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr);
  ierr = PetscMemzero(ptadenserow,(2*an+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ptasparserow = ptadenserow + an;

  /* create and initialize a linked list */
  nlnk = pn+1;
  ierr = PetscLLCreate(pn,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Set initial free space to be fill*nnz(A).
     This should be reasonable if sparsity of PtAP is similar to that of A. */
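  /* Note: ai[am] is nnz(A), and the caller-supplied "fill" is an estimate of the ratio
     nnz(C)/nnz(A), so fill*nnz(A) is the initial guess for the number of nonzeros of C.
     If the guess is too small, the free-space list below is enlarged on the fly and
     nspacedouble counts those reallocations (reported via PetscInfo at the end). */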
  ierr = PetscFreeSpaceGet((PetscInt)(fill*ai[am]),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* Determine symbolic info for each row of C: */
  for (i=0; i<pn; i++) {
    ptnzi  = pti[i+1] - pti[i];
    ptanzi = 0;
    /* Determine symbolic row of PtA: */
    for (j=0; j<ptnzi; j++) {
      arow = *ptJ++;
      anzj = ai[arow+1] - ai[arow];
      ajj  = aj + ai[arow];
      for (k=0; k<anzj; k++) {
        if (!ptadenserow[ajj[k]]) {
          ptadenserow[ajj[k]]    = -1;
          ptasparserow[ptanzi++] = ajj[k];
        }
      }
    }
    /* Using symbolic info for row of PtA, determine symbolic info for row of C: */
    ptaj = ptasparserow;
    cnzi = 0;
    for (j=0; j<ptanzi; j++) {
      prow = *ptaj++;
      pnzj = pi[prow+1] - pi[prow];
      pjj  = pj + pi[prow];
      /* add nonzero cols of P into the sorted linked list lnk */
      ierr  = PetscLLAddSorted(pnzj,pjj,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      cnzi += nlnk;
    }

    /* If free space is not available, make more free space
       by doubling the total amount of space in the list */
    if (current_space->local_remaining<cnzi) {
      ierr = PetscFreeSpaceGet(cnzi+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, and zero out dense rows */
    ierr = PetscLLClean(pn,pn,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);

    current_space->array           += cnzi;
    current_space->local_used      += cnzi;
    current_space->local_remaining -= cnzi;

    for (j=0; j<ptanzi; j++) {
      ptadenserow[ptasparserow[j]] = 0;
    }
    /* Aside: Perhaps we should save the pta info for the numerical factorization;
       for now, we recompute what is needed. */
    ci[i+1] = ci[i] + cnzi;
  }
  /* nnz is now stored in ci[pn], column indices are in the list of free space */
  /* Allocate space for cj, initialize cj, and
     destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,cj);CHKERRQ(ierr);
  ierr = PetscFree(ptadenserow);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Allocate space for ca */
  ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
  ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);

  /* put together the new matrix */
  ierr = MatCreateSeqAIJWithArrays(((PetscObject)A)->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr);

  (*C)->rmap->bs = P->cmap->bs;
  (*C)->cmap->bs = P->cmap->bs;
  PetscPrintf(PETSC_COMM_SELF,"************%s C.bs=%d,%d\n",__FUNCT__,(*C)->rmap->bs,(*C)->cmap->bs);

  /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays;
     since these are PETSc arrays, change the flags so they are freed as needed. */
  c          = (Mat_SeqAIJ*)((*C)->data);
  c->free_a  = PETSC_TRUE;
  c->free_ij = PETSC_TRUE;
  c->nonew   = 0;

  A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2; /* should use (*C)->ops until the PtAP interface is updated to double dispatch as in MatMatMult() */

  /* Clean up. */
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
#if defined(PETSC_USE_INFO)
  if (ci[pn] != 0) {
    PetscReal afill = ((PetscReal)ci[pn])/ai[am];
    if (afill < 1.0) afill = 1.0;
    ierr = PetscInfo3((*C),"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
    ierr = PetscInfo1((*C),"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo((*C),"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2"
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJ     *p = (Mat_SeqAIJ*)P->data;
  Mat_SeqAIJ     *c = (Mat_SeqAIJ*)C->data;
  PetscInt       *ai=a->i,*aj=a->j,*apj,*apjdense,*pi=p->i,*pj=p->j,*pJ=p->j,*pjj;
  PetscInt       *ci=c->i,*cj=c->j,*cjj;
  PetscInt       am=A->rmap->N,cn=C->cmap->N,cm=C->rmap->N;
  PetscInt       i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
  MatScalar      *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj;

  PetscFunctionBegin;
  /* Allocate temporary array for storage of one row of A*P */
  ierr = PetscMalloc(cn*(sizeof(MatScalar)+sizeof(PetscInt))+c->rmax*sizeof(PetscInt),&apa);CHKERRQ(ierr);

  apjdense = (PetscInt*)(apa + cn);
  apj      = apjdense + cn;
  ierr     = PetscMemzero(apa,cn*(sizeof(MatScalar)+sizeof(PetscInt)));CHKERRQ(ierr);

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0; i<am; i++) {
    /* Form sparse row of A*P */
    anzi  = ai[i+1] - ai[i];
    apnzj = 0;
    for (j=0; j<anzi; j++) {
      prow = *aj++;
      pnzj = pi[prow+1] - pi[prow];
      pjj  = pj + pi[prow];
      paj  = pa + pi[prow];
      for (k=0; k<pnzj; k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;
          apj[apnzj++]     = pjj[k];
        }
        apa[pjj[k]] += (*aa)*paj[k];
      }
      ierr = PetscLogFlops(2.0*pnzj);CHKERRQ(ierr);
      aa++;
    }

    /* Sort the j index array for quick sparse axpy.
       Note: the a array does not need sorting as it is in dense storage locations. */
    ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);

    /* Compute P^T*A*P using outer product (P^T)[:,j]*(A*P)[j,:]. */
    pnzi = pi[i+1] - pi[i];
    for (j=0; j<pnzi; j++) {
      nextap = 0;
      crow   = *pJ++;
      cjj    = cj + ci[crow];
      caj    = ca + ci[crow];
      /* Perform sparse axpy operation. Note cjj includes apj. */
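      /* Both cjj (column indices of row crow of C) and apj (column indices of AP[i,:]) are
         sorted, and apj is a subset of cjj, so a single pass over cjj suffices: nextap only
         advances when a column of apj is matched, and all apnzj entries end up added into
         this row of C. */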
      for (k=0; nextap<apnzj; k++) {
#if defined(PETSC_USE_DEBUG)
        if (k >= ci[crow+1] - ci[crow]) {
          SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"k too large k %d, crow %d",k,crow);
        }
#endif
        if (cjj[k] == apj[nextap]) {
          caj[k] += (*pA)*apa[apj[nextap++]];
        }
      }
      ierr = PetscLogFlops(2.0*apnzj);CHKERRQ(ierr);
      pA++;
    }

    /* Zero the current row info for A*P */
    for (j=0; j<apnzj; j++) {
      apa[apj[j]]      = 0.;
      apjdense[apj[j]] = 0;
    }
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscPrintf(PETSC_COMM_SELF,"************%s C.bs=%d,%d\n",__FUNCT__,C->rmap->bs,C->cmap->bs);
  ierr = PetscFree(apa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ"
PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *ap,*c;
  PetscInt       *api,*apj,*ci,pn=P->cmap->N,sparse_axpy=0;
  MatScalar      *ca;
  Mat_PtAP       *ptap;
  Mat            Pt,AP;

  PetscFunctionBegin;
  /* The flag 'sparse_axpy' determines which implementation is used:
       0: dense axpy in MatPtAPNumeric() - fastest, but requires storage of the structure of A*P (default);
       1: one sparse axpy - uses the same memory as sparse_axpy=0 and may execute fewer flops
          (apnz vs. cnz in the outer product), but is slower than case '0' when cnz is not much larger than apnz;
       2: two sparse axpys in MatPtAPNumeric() - slowest, but does not store the structure of A*P. */
  ierr = PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);CHKERRQ(ierr);
  if (sparse_axpy == 2) {
    ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(A,P,fill,C);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* Get symbolic Pt = P^T */
  ierr = MatTransposeSymbolic_SeqAIJ(P,&Pt);CHKERRQ(ierr);

  /* Get symbolic AP = A*P */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(A,P,fill,&AP);CHKERRQ(ierr);

  ap  = (Mat_SeqAIJ*)AP->data;
  api = ap->i;
  apj = ap->j;
  ap->free_ij = PETSC_FALSE; /* api and apj are kept in struct ptap, so they must not be destroyed with AP */

  /* Get C = Pt*AP */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(Pt,AP,fill,C);CHKERRQ(ierr);

  c  = (Mat_SeqAIJ*)(*C)->data;
  ci = c->i;
  ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
  ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);
  c->a      = ca;
  c->free_a = PETSC_TRUE;

  /* Create a supporting struct for reuse by MatPtAPNumeric() */
  ierr = PetscNew(Mat_PtAP,&ptap);CHKERRQ(ierr);
  c->ptap            = ptap;
  ptap->destroy      = (*C)->ops->destroy;
  (*C)->ops->destroy = MatDestroy_SeqAIJ_PtAP;

  /* Allocate temporary array for storage of one row of A*P */
  ierr = PetscMalloc((pn+1)*sizeof(PetscScalar),&ptap->apa);CHKERRQ(ierr);
  ierr = PetscMemzero(ptap->apa,(pn+1)*sizeof(PetscScalar));CHKERRQ(ierr);

  if (sparse_axpy == 1) {
    A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy;
  } else {
    A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ;
  }
  ptap->api = api;
  ptap->apj = apj;

  /* Clean up. */
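  /* Pt and AP were only needed to build the structure of C; api and apj were detached from
     AP above (free_ij set to PETSC_FALSE), so destroying AP here does not free the arrays
     now referenced by the ptap struct. */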
  ierr = MatDestroy(&Pt);CHKERRQ(ierr);
  ierr = MatDestroy(&AP);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* #define PROFILE_MatPtAPNumeric */
#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ"
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_SeqAIJ        *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJ        *p = (Mat_SeqAIJ*)P->data;
  Mat_SeqAIJ        *c = (Mat_SeqAIJ*)C->data;
  const PetscInt    *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
  const PetscScalar *aa=a->a,*pa=p->a,*pval;
  const PetscInt    *apj,*pcol,*cjj;
  const PetscInt    am=A->rmap->N,cm=C->rmap->N;
  PetscInt          i,j,k,anz,apnz,pnz,prow,crow,cnz;
  PetscScalar       *apa,*ca=c->a,*caj,pvalj;
  Mat_PtAP          *ptap = c->ptap;
#if defined(PROFILE_MatPtAPNumeric)
  PetscLogDouble    t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
  PetscInt          flops0=0,flops1=0;
#endif

  PetscFunctionBegin;
  /* Get temporary array for storage of one row of A*P */
  apa = ptap->apa;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0; i<am; i++) {
    /* Form sparse row of AP[i,:] = A[i,:]*P */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    anz  = ai[i+1] - ai[i];
    apnz = 0;
    for (j=0; j<anz; j++) {
      prow = aj[j];
      pnz  = pi[prow+1] - pi[prow];
      pcol = pj + pi[prow];
      pval = pa + pi[prow];
      for (k=0; k<pnz; k++) {
        apa[pcol[k]] += aa[j]*pval[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops0 += 2.0*pnz;
#endif
    }
    aj += anz; aa += anz;
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq0 += tf - t0;
#endif

    /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
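    /* At this point apa[] holds AP[i,:] as a dense work row. Each nonzero P[i,j] = pval[j]
       contributes pval[j]*AP[i,:] to row pcol[j] of C; since the column pattern of that row
       of C contains the pattern of AP[i,:], the loop below simply runs over the full row of
       C and reads apa[] at C's column indices (the "dense axpy" variant). */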
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    apj  = ptap->apj + ptap->api[i];
    apnz = ptap->api[i+1] - ptap->api[i];
    pnz  = pi[i+1] - pi[i];
    pcol = pj + pi[i];
    pval = pa + pi[i];

    /* Perform dense axpy */
    for (j=0; j<pnz; j++) {
      crow  = pcol[j];
      cjj   = cj + ci[crow];
      caj   = ca + ci[crow];
      pvalj = pval[j];
      cnz   = ci[crow+1] - ci[crow];
      for (k=0; k<cnz; k++) {
        caj[k] += pvalj*apa[cjj[k]];
      }
      ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops1 += 2.0*cnz;
#endif
    }
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq1 += tf - t0;
#endif

    /* Zero the current row info for A*P */
    for (j=0; j<apnz; j++) apa[apj[j]] = 0.0;
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
  printf("PtAPNumeric_SeqAIJ time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
#endif
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy"
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_SeqAIJ        *a = (Mat_SeqAIJ*)A->data;
  Mat_SeqAIJ        *p = (Mat_SeqAIJ*)P->data;
  Mat_SeqAIJ        *c = (Mat_SeqAIJ*)C->data;
  const PetscInt    *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
  const PetscScalar *aa=a->a,*pa=p->a,*pval;
  const PetscInt    *apj,*pcol,*cjj;
  const PetscInt    am=A->rmap->N,cm=C->rmap->N;
  PetscInt          i,j,k,anz,apnz,pnz,prow,crow,apcol,nextap;
  PetscScalar       *apa,*ca=c->a,*caj,pvalj;
  Mat_PtAP          *ptap = c->ptap;
#if defined(PROFILE_MatPtAPNumeric)
  PetscLogDouble    t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
  PetscInt          flops0=0,flops1=0;
#endif

  PetscFunctionBegin;
  /* Get temporary array for storage of one row of A*P */
  apa = ptap->apa;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0; i<am; i++) {
    /* Form sparse row of AP[i,:] = A[i,:]*P */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    anz  = ai[i+1] - ai[i];
    apnz = 0;
    for (j=0; j<anz; j++) {
      prow = aj[j];
      pnz  = pi[prow+1] - pi[prow];
      pcol = pj + pi[prow];
      pval = pa + pi[prow];
      for (k=0; k<pnz; k++) {
        apa[pcol[k]] += aa[j]*pval[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops0 += 2.0*pnz;
#endif
    }
    aj += anz; aa += anz;
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq0 += tf - t0;
#endif

    /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
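    /* Unlike the dense-axpy variant above, only the apnz stored column indices of AP[i,:]
       (apj, taken from the saved symbolic product in ptap) are visited; they are merged
       against the sorted column indices cjj of each target row of C, so each update costs
       O(apnz) rather than O(cnz) comparisons per nonzero of P[i,:]. */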
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    apj  = ptap->apj + ptap->api[i];
    apnz = ptap->api[i+1] - ptap->api[i];
    pnz  = pi[i+1] - pi[i];
    pcol = pj + pi[i];
    pval = pa + pi[i];

    /* Perform sparse axpy */
    for (j=0; j<pnz; j++) {
      crow   = pcol[j];
      cjj    = cj + ci[crow];
      caj    = ca + ci[crow];
      pvalj  = pval[j];
      nextap = 0;
      for (k=0; nextap<apnz; k++) {
        apcol = apj[nextap];
        if (cjj[k] == apcol) {
          caj[k] += pvalj*apa[apcol];
          nextap++;
        }
      }
      ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops1 += 2.0*apnz;
#endif
    }
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq1 += tf - t0;
#endif

    /* Zero the current row info for A*P */
    for (j=0; j<apnz; j++) {
      apcol      = apj[j];
      apa[apcol] = 0.;
    }
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
  printf("MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
#endif
  PetscFunctionReturn(0);
}
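/*
   Usage sketch (illustration only; not part of this file): these kernels are normally
   reached through the public MatPtAP() interface on MATSEQAIJ matrices, roughly

     Mat       A,P,C;
     PetscReal fill = 2.0;                                            estimated nnz(C)/nnz(A)
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);    symbolic + numeric product
     ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);      reuse structure of C, numeric only

   The runtime option -matptap_sparseaxpy read in MatPtAPSymbolic_SeqAIJ_SeqAIJ() selects
   among the numeric kernels defined above.
*/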