xref: /petsc/src/mat/impls/aij/seq/matptap.c (revision 5edf65166e22df6feda2ed6bd8a6e348419dee17)
1 
2 /*
3   Defines projective product routines where A is a SeqAIJ matrix
4           C = P^T * A * P
5 */
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <petscbt.h>
10 
11 #undef __FUNCT__
12 #define __FUNCT__ "MatPtAP_SeqAIJ_SeqAIJ"
13 PetscErrorCode MatPtAP_SeqAIJ_SeqAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
14 {
15   PetscErrorCode ierr;
16 
17   PetscFunctionBegin;
18   if (scall == MAT_INITIAL_MATRIX){
19     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
20     ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ(A,P,fill,C);CHKERRQ(ierr);
21     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
22   }
23   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
24   ierr = MatPtAPNumeric_SeqAIJ_SeqAIJ(A,P,*C);CHKERRQ(ierr);
25   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
26   PetscFunctionReturn(0);
27 }
28 
29 #undef __FUNCT__
30 #define __FUNCT__ "MatDestroy_SeqAIJ_PtAP"
31 PetscErrorCode MatDestroy_SeqAIJ_PtAP(Mat A)
32 {
33   PetscErrorCode ierr;
34   Mat_SeqAIJ     *a = (Mat_SeqAIJ *)A->data;
35   Mat_PtAP       *ptap = a->ptap;
36 
37   PetscFunctionBegin;
38   /* free ptap, then A */
39   ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
40   ierr = PetscFree(ptap->api);CHKERRQ(ierr);
41   ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
42   ierr = (ptap->destroy)(A);CHKERRQ(ierr);
43   ierr = PetscFree(ptap);CHKERRQ(ierr);
44   PetscFunctionReturn(0);
45 }
46 
47 #undef __FUNCT__
48 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2"
49 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,PetscReal fill,Mat *C)
50 {
51   PetscErrorCode     ierr;
52   PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
53   Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data,*p = (Mat_SeqAIJ*)P->data,*c;
54   PetscInt           *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj;
55   PetscInt           *ci,*cj,*ptadenserow,*ptasparserow,*ptaj,nspacedouble=0;
56   PetscInt           an=A->cmap->N,am=A->rmap->N,pn=P->cmap->N;
57   PetscInt           i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi,nlnk,*lnk;
58   MatScalar          *ca;
59   PetscBT            lnkbt;
60 
61   PetscFunctionBegin;
62   /* Get ij structure of P^T */
63   ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
64   ptJ=ptj;
65 
66   /* Allocate ci array, arrays for fill computation and */
67   /* free space for accumulating nonzero column info */
68   ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
69   ci[0] = 0;
70 
71   ierr = PetscMalloc((2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr);
72   ierr = PetscMemzero(ptadenserow,(2*an+1)*sizeof(PetscInt));CHKERRQ(ierr);
73   ptasparserow = ptadenserow  + an;
74 
75   /* create and initialize a linked list */
76   nlnk = pn+1;
77   ierr = PetscLLCreate(pn,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);
78 
79   /* Set initial free space to be fill*nnz(A). */
80   /* This should be reasonable if sparsity of PtAP is similar to that of A. */
81   ierr          = PetscFreeSpaceGet((PetscInt)(fill*ai[am]),&free_space);
82   current_space = free_space;
83 
84   /* Determine symbolic info for each row of C: */
85   for (i=0;i<pn;i++) {
86     ptnzi  = pti[i+1] - pti[i];
87     ptanzi = 0;
88     /* Determine symbolic row of PtA: */
89     for (j=0;j<ptnzi;j++) {
90       arow = *ptJ++;
91       anzj = ai[arow+1] - ai[arow];
92       ajj  = aj + ai[arow];
93       for (k=0;k<anzj;k++) {
94         if (!ptadenserow[ajj[k]]) {
95           ptadenserow[ajj[k]]    = -1;
96           ptasparserow[ptanzi++] = ajj[k];
97         }
98       }
99     }
100     /* Using symbolic info for row of PtA, determine symbolic info for row of C: */
101     ptaj = ptasparserow;
102     cnzi   = 0;
103     for (j=0;j<ptanzi;j++) {
104       prow = *ptaj++;
105       pnzj = pi[prow+1] - pi[prow];
106       pjj  = pj + pi[prow];
107       /* add non-zero cols of P into the sorted linked list lnk */
108       ierr = PetscLLAddSorted(pnzj,pjj,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);
109       cnzi += nlnk;
110     }
111 
112     /* If free space is not available, make more free space */
113     /* Double the amount of total space in the list */
114     if (current_space->local_remaining<cnzi) {
115       ierr = PetscFreeSpaceGet(cnzi+current_space->total_array_size,&current_space);CHKERRQ(ierr);
116       nspacedouble++;
117     }
118 
119     /* Copy data into free space, and zero out denserows */
120     ierr = PetscLLClean(pn,pn,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
121     current_space->array           += cnzi;
122     current_space->local_used      += cnzi;
123     current_space->local_remaining -= cnzi;
124 
125     for (j=0;j<ptanzi;j++) {
126       ptadenserow[ptasparserow[j]] = 0;
127     }
128     /* Aside: Perhaps we should save the pta info for the numerical factorization. */
129     /*        For now, we will recompute what is needed. */
130     ci[i+1] = ci[i] + cnzi;
131   }
132   /* nnz is now stored in ci[ptm], column indices are in the list of free space */
133   /* Allocate space for cj, initialize cj, and */
134   /* destroy list of free space and other temporary array(s) */
135   ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr);
136   ierr = PetscFreeSpaceContiguous(&free_space,cj);CHKERRQ(ierr);
137   ierr = PetscFree(ptadenserow);CHKERRQ(ierr);
138   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
139 
140   /* Allocate space for ca */
141   ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
142   ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);
143 
144   /* put together the new matrix */
145   ierr = MatCreateSeqAIJWithArrays(((PetscObject)A)->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr);
146   (*C)->rmap->bs = P->cmap->bs;
147   (*C)->cmap->bs = P->cmap->bs;
148 PetscPrintf(PETSC_COMM_SELF,"************%s C.bs=%d,%d\n",__FUNCT__,(*C)->rmap->bs,(*C)->cmap->bs);
149   /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
150   /* Since these are PETSc arrays, change flags to free them as necessary. */
151   c = (Mat_SeqAIJ *)((*C)->data);
152   c->free_a  = PETSC_TRUE;
153   c->free_ij = PETSC_TRUE;
154   c->nonew   = 0;
155   A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2; /* should use *C->ops until PtAP insterface is updated to double dispatch as MatMatMult() */
156 
157   /* Clean up. */
158   ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
159 #if defined(PETSC_USE_INFO)
160   if (ci[pn] != 0) {
161     PetscReal afill = ((PetscReal)ci[pn])/ai[am];
162     if (afill < 1.0) afill = 1.0;
163     ierr = PetscInfo3((*C),"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
164     ierr = PetscInfo1((*C),"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
165   } else {
166     ierr = PetscInfo((*C),"Empty matrix product\n");CHKERRQ(ierr);
167   }
168 #endif
169   PetscFunctionReturn(0);
170 }
171 
172 #undef __FUNCT__
173 #define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2"
174 PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,Mat C)
175 {
176   PetscErrorCode ierr;
177   Mat_SeqAIJ     *a  = (Mat_SeqAIJ *) A->data;
178   Mat_SeqAIJ     *p  = (Mat_SeqAIJ *) P->data;
179   Mat_SeqAIJ     *c  = (Mat_SeqAIJ *) C->data;
180   PetscInt       *ai=a->i,*aj=a->j,*apj,*apjdense,*pi=p->i,*pj=p->j,*pJ=p->j,*pjj;
181   PetscInt       *ci=c->i,*cj=c->j,*cjj;
182   PetscInt       am=A->rmap->N,cn=C->cmap->N,cm=C->rmap->N;
183   PetscInt       i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
184   MatScalar      *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj;
185 
186   PetscFunctionBegin;
187   /* Allocate temporary array for storage of one row of A*P */
188   ierr = PetscMalloc(cn*(sizeof(MatScalar)+sizeof(PetscInt))+c->rmax*sizeof(PetscInt),&apa);CHKERRQ(ierr);
189 
190   apjdense = (PetscInt *)(apa + cn);
191   apj      = apjdense + cn;
192   ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+sizeof(PetscInt)));CHKERRQ(ierr);
193 
194   /* Clear old values in C */
195   ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);
196 
197   for (i=0; i<am; i++) {
198     /* Form sparse row of A*P */
199     anzi  = ai[i+1] - ai[i];
200     apnzj = 0;
201     for (j=0; j<anzi; j++) {
202       prow = *aj++;
203       pnzj = pi[prow+1] - pi[prow];
204       pjj  = pj + pi[prow];
205       paj  = pa + pi[prow];
206       for (k=0;k<pnzj;k++) {
207         if (!apjdense[pjj[k]]) {
208           apjdense[pjj[k]] = -1;
209           apj[apnzj++]     = pjj[k];
210         }
211         apa[pjj[k]] += (*aa)*paj[k];
212       }
213       ierr = PetscLogFlops(2.0*pnzj);CHKERRQ(ierr);
214       aa++;
215     }
216 
217     /* Sort the j index array for quick sparse axpy. */
218     /* Note: a array does not need sorting as it is in dense storage locations. */
219     ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);
220 
221     /* Compute P^T*A*P using outer product (P^T)[:,j]*(A*P)[j,:]. */
222     pnzi = pi[i+1] - pi[i];
223     for (j=0; j<pnzi; j++) {
224       nextap = 0;
225       crow   = *pJ++;
226       cjj    = cj + ci[crow];
227       caj    = ca + ci[crow];
228       /* Perform sparse axpy operation.  Note cjj includes apj. */
229       for (k=0;nextap<apnzj;k++) {
230 #if defined(PETSC_USE_DEBUG)
231         if (k >= ci[crow+1] - ci[crow]) {
232           SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"k too large k %d, crow %d",k,crow);
233         }
234 #endif
235         if (cjj[k]==apj[nextap]) {
236           caj[k] += (*pA)*apa[apj[nextap++]];
237         }
238       }
239       ierr = PetscLogFlops(2.0*apnzj);CHKERRQ(ierr);
240       pA++;
241     }
242 
243     /* Zero the current row info for A*P */
244     for (j=0; j<apnzj; j++) {
245       apa[apj[j]]      = 0.;
246       apjdense[apj[j]] = 0;
247     }
248   }
249 
250   /* Assemble the final matrix and clean up */
251   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
252   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
253 PetscPrintf(PETSC_COMM_SELF,"************%s C.bs=%d,%d\n",__FUNCT__,C->rmap->bs,C->cmap->bs);
254   ierr = PetscFree(apa);CHKERRQ(ierr);
255   PetscFunctionReturn(0);
256 }
257 
#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ"
/*
   Symbolic phase of C = P^T*A*P: creates *C with the correct nonzero
   structure and sets up the Mat_PtAP support struct reused by the numeric
   phase.  Default path computes the structure as Pt*(A*P) via two symbolic
   MatMatMults, keeping the A*P structure (api/apj) in the support struct.
   Also installs the matching numeric kernel in A->ops->ptapnumeric.
*/
PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode     ierr;
  Mat_SeqAIJ         *ap,*c;
  PetscInt           *api,*apj,*ci,pn=P->cmap->N,sparse_axpy=0;
  MatScalar          *ca;
  Mat_PtAP           *ptap;
  Mat                Pt,AP;

  PetscFunctionBegin;
  /* flag 'sparse_axpy' determines which implementations to be used:
       0: do dense axpy in MatPtAPNumeric() - fastest, but requires storage of struct A*P; (default)
       1: do one sparse axpy - uses same memory as sparse_axpy=0 and might execute less flops
          (apnz vs. cnz in the outerproduct), slower than case '0' when cnz is not too large than apnz;
       2: do two sparse axpy in MatPtAPNumeric() - slowest, does not store structure of A*P. */
  ierr = PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);CHKERRQ(ierr);
  if (sparse_axpy == 2){
    /* delegate entirely: the SparseAxpy2 symbolic builds C itself and
       installs its own numeric kernel */
    ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(A,P,fill,C);CHKERRQ(ierr);

    PetscFunctionReturn(0);
  }

  /* Get symbolic Pt = P^T */
  ierr = MatTransposeSymbolic_SeqAIJ(P,&Pt);CHKERRQ(ierr);

  /* Get symbolic AP = A*P */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(A,P,fill,&AP);CHKERRQ(ierr);

  ap          = (Mat_SeqAIJ*)AP->data;
  api         = ap->i;
  apj         = ap->j;
  ap->free_ij = PETSC_FALSE; /* api and apj are kept in struct ptap, cannot be destroyed with AP */

  /* Get C = Pt*AP */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(Pt,AP,fill,C);CHKERRQ(ierr);

  /* symbolic product leaves C without a value array; attach a zeroed one */
  c  = (Mat_SeqAIJ*)(*C)->data;
  ci = c->i;
  ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
  ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);
  c->a       = ca;
  c->free_a  = PETSC_TRUE;

  /* Create a supporting struct for reuse by MatPtAPNumeric(); chain the
     destructor so the struct is freed when C is destroyed */
  ierr = PetscNew(Mat_PtAP,&ptap);CHKERRQ(ierr);
  c->ptap            = ptap;
  ptap->destroy      = (*C)->ops->destroy;
  (*C)->ops->destroy = MatDestroy_SeqAIJ_PtAP;

  /* Allocate temporary array for storage of one row of A*P */
  ierr = PetscMalloc((pn+1)*sizeof(PetscScalar),&ptap->apa);CHKERRQ(ierr);
  ierr = PetscMemzero(ptap->apa,(pn+1)*sizeof(PetscScalar));CHKERRQ(ierr);
  /* select the numeric kernel matching 'sparse_axpy' (0 = dense axpy, 1 = one sparse axpy) */
  if (sparse_axpy == 1){
    A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy;
  } else {
    A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ;
  }
  /* hand the A*P structure to the support struct (ownership moved above via free_ij) */
  ptap->api = api;
  ptap->apj = apj;

  /* Clean up. */
  ierr = MatDestroy(&Pt);CHKERRQ(ierr);
  ierr = MatDestroy(&AP);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
325 
/* #define PROFILE_MatPtAPNumeric */
#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ"
/*
   Numeric phase of C = P^T*A*P using a DENSE axpy in the outer product
   (sparse_axpy == 0, the default and fastest variant).  Uses the A*P
   structure (ptap->api/apj) and the dense scratch row ptap->apa created by
   MatPtAPSymbolic_SeqAIJ_SeqAIJ: for each row i, accumulates A[i,:]*P into
   apa, then for every nonzero P(i,crow) adds pval*apa[...] densely over the
   full row crow of C.
*/
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_SeqAIJ        *a  = (Mat_SeqAIJ *) A->data;
  Mat_SeqAIJ        *p  = (Mat_SeqAIJ *) P->data;
  Mat_SeqAIJ        *c  = (Mat_SeqAIJ *) C->data;
  const PetscInt    *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
  const PetscScalar *aa=a->a,*pa=p->a,*pval;
  const PetscInt    *apj,*pcol,*cjj;
  const PetscInt    am=A->rmap->N,cm=C->rmap->N;
  PetscInt          i,j,k,anz,apnz,pnz,prow,crow,cnz;
  PetscScalar       *apa,*ca=c->a,*caj,pvalj;
  Mat_PtAP          *ptap = c->ptap;
#if defined(PROFILE_MatPtAPNumeric)
  PetscLogDouble    t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
  PetscInt          flops0=0,flops1=0;
#endif

  PetscFunctionBegin;
  /* Get temporary array for storage of one row of A*P (zeroed by symbolic
     phase and re-zeroed at the end of each iteration below) */
  apa = ptap->apa;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0;i<am;i++) {
    /* Form sparse row of AP[i,:] = A[i,:]*P, accumulated densely in apa */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    anz  = ai[i+1] - ai[i];
    apnz = 0;
    for (j=0; j<anz; j++) {
      prow = aj[j];
      pnz  = pi[prow+1] - pi[prow];
      pcol = pj + pi[prow];
      pval = pa + pi[prow];
      for (k=0; k<pnz; k++) {
        apa[pcol[k]] += aa[j]*pval[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops0 += 2.0*pnz;
#endif
    }
    /* advance aj/aa to the start of row i+1 of A */
    aj += anz; aa += anz;
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq0 += tf - t0;
#endif

    /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    apj  = ptap->apj + ptap->api[i];
    apnz = ptap->api[i+1] - ptap->api[i];
    pnz  = pi[i+1] - pi[i];
    pcol = pj + pi[i];
    pval = pa + pi[i];

    /* Perform dense axpy: every column of row crow of C is updated, reading
       apa at the column indices of C (cjj) */
    for (j=0; j<pnz; j++) {
      crow  = pcol[j];
      cjj   = cj + ci[crow];
      caj   = ca + ci[crow];
      pvalj = pval[j];
      cnz   = ci[crow+1] - ci[crow];
      for (k=0; k<cnz; k++){
        caj[k] += pvalj*apa[cjj[k]];
      }
      ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops1 += 2.0*cnz;
#endif
    }
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq1 += tf - t0;
#endif

    /* Zero the current row info for A*P so apa is clean for row i+1 */
    for (j=0; j<apnz; j++) apa[apj[j]] = 0.0;
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
  printf("PtAPNumeric_SeqAIJ time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
#endif
  PetscFunctionReturn(0);
}
422 
423 #undef __FUNCT__
424 #define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy"
425 PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy(Mat A,Mat P,Mat C)
426 {
427   PetscErrorCode    ierr;
428   Mat_SeqAIJ        *a  = (Mat_SeqAIJ *) A->data;
429   Mat_SeqAIJ        *p  = (Mat_SeqAIJ *) P->data;
430   Mat_SeqAIJ        *c  = (Mat_SeqAIJ *) C->data;
431   const PetscInt    *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
432   const PetscScalar *aa=a->a,*pa=p->a,*pval;
433   const PetscInt    *apj,*pcol,*cjj;
434   const PetscInt    am=A->rmap->N,cm=C->rmap->N;
435   PetscInt          i,j,k,anz,apnz,pnz,prow,crow,apcol,nextap;
436   PetscScalar       *apa,*ca=c->a,*caj,pvalj;
437   Mat_PtAP          *ptap = c->ptap;
438 #if defined(PROFILE_MatPtAPNumeric)
439   PetscLogDouble    t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
440   PetscInt          flops0=0,flops1=0;
441 #endif
442 
443   PetscFunctionBegin;
444   /* Get temporary array for storage of one row of A*P */
445   apa = ptap->apa;
446 
447   /* Clear old values in C */
448   ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);
449 
450   for (i=0;i<am;i++) {
451     /* Form sparse row of AP[i,:] = A[i,:]*P */
452 #if defined(PROFILE_MatPtAPNumeric)
453     ierr = PetscGetTime(&t0);CHKERRQ(ierr);
454 #endif
455     anz  = ai[i+1] - ai[i];
456     apnz = 0;
457     for (j=0; j<anz; j++) {
458       prow = aj[j];
459       pnz  = pi[prow+1] - pi[prow];
460       pcol = pj + pi[prow];
461       pval = pa + pi[prow];
462       for (k=0; k<pnz; k++) {
463         apa[pcol[k]] += aa[j]*pval[k];
464       }
465       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
466 #if defined(PROFILE_MatPtAPNumeric)
467       flops0 += 2.0*pnz;
468 #endif
469     }
470     aj += anz; aa += anz;
471 #if defined(PROFILE_MatPtAPNumeric)
472     ierr = PetscGetTime(&tf);CHKERRQ(ierr);
473     time_Cseq0 += tf - t0;
474 #endif
475 
476     /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
477 #if defined(PROFILE_MatPtAPNumeric)
478     ierr = PetscGetTime(&t0);CHKERRQ(ierr);
479 #endif
480     apj  = ptap->apj + ptap->api[i];
481     apnz = ptap->api[i+1] - ptap->api[i];
482     pnz  = pi[i+1] - pi[i];
483     pcol = pj + pi[i];
484     pval = pa + pi[i];
485 
486     /* Perform sparse axpy */
487     for (j=0; j<pnz; j++) {
488       crow   = pcol[j];
489       cjj    = cj + ci[crow];
490       caj    = ca + ci[crow];
491       pvalj  = pval[j];
492       nextap = 1;
493       apcol  = apj[nextap];
494       for (k=0; nextap<apnz; k++) {
495         if (cjj[k] == apcol) {
496           caj[k] += pvalj*apa[apcol];
497           apcol   = apj[nextap++];
498         }
499       }
500       ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
501 #if defined(PROFILE_MatPtAPNumeric)
502       flops1 += 2.0*apnz;
503 #endif
504     }
505 #if defined(PROFILE_MatPtAPNumeric)
506     ierr = PetscGetTime(&tf);CHKERRQ(ierr);
507     time_Cseq1 += tf - t0;
508 #endif
509 
510     /* Zero the current row info for A*P */
511     for (j=0; j<apnz; j++) {
512       apcol      = apj[j];
513       apa[apcol] = 0.;
514     }
515   }
516 
517   /* Assemble the final matrix and clean up */
518   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
519   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
520 #if defined(PROFILE_MatPtAPNumeric)
521   printf("MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
522 #endif
523   PetscFunctionReturn(0);
524 }
525