xref: /petsc/src/mat/impls/aij/seq/matptap.c (revision 0d04baf89eca684847eb03c0d4ee6b8dfe3c620a)
1 
2 /*
3   Defines projective product routines where A is a SeqAIJ matrix
4           C = P^T * A * P
5 */
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <petscbt.h>
10 
11 #undef __FUNCT__
12 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ"
13 PetscErrorCode MatPtAPSymbolic_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
14 {
15   PetscErrorCode ierr;
16 
17   PetscFunctionBegin;
18   if (!P->ops->ptapsymbolic_seqaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
19   ierr = (*P->ops->ptapsymbolic_seqaij)(A,P,fill,C);CHKERRQ(ierr);
20   PetscFunctionReturn(0);
21 }
22 
23 #undef __FUNCT__
24 #define __FUNCT__ "MatPtAPNumeric_SeqAIJ"
25 PetscErrorCode MatPtAPNumeric_SeqAIJ(Mat A,Mat P,Mat C)
26 {
27   PetscErrorCode ierr;
28 
29   PetscFunctionBegin;
30   if (!P->ops->ptapnumeric_seqaij) SETERRQ2(((PetscObject)A)->comm,PETSC_ERR_SUP,"Not implemented for A=%s and P=%s",((PetscObject)A)->type_name,((PetscObject)P)->type_name);
31   ierr = (*P->ops->ptapnumeric_seqaij)(A,P,C);CHKERRQ(ierr);
32   PetscFunctionReturn(0);
33 }
34 
35 #undef __FUNCT__
36 #define __FUNCT__ "MatDestroy_SeqAIJ_PtAP"
37 PetscErrorCode MatDestroy_SeqAIJ_PtAP(Mat A)
38 {
39   PetscErrorCode ierr;
40   Mat_SeqAIJ     *a = (Mat_SeqAIJ *)A->data;
41   Mat_PtAP       *ptap = a->ptap;
42 
43   PetscFunctionBegin;
44   /* free ptap, then A */
45   ierr = PetscFree(ptap->apa);CHKERRQ(ierr);
46   ierr = PetscFree(ptap->api);CHKERRQ(ierr);
47   ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
48   ierr = (ptap->destroy)(A);CHKERRQ(ierr);
49   ierr = PetscFree(ptap);CHKERRQ(ierr);
50   PetscFunctionReturn(0);
51 }
52 
53 #undef __FUNCT__
54 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2"
55 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,PetscReal fill,Mat *C)
56 {
57   PetscErrorCode     ierr;
58   PetscFreeSpaceList free_space=PETSC_NULL,current_space=PETSC_NULL;
59   Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data,*p = (Mat_SeqAIJ*)P->data,*c;
60   PetscInt           *pti,*ptj,*ptJ,*ai=a->i,*aj=a->j,*ajj,*pi=p->i,*pj=p->j,*pjj;
61   PetscInt           *ci,*cj,*ptadenserow,*ptasparserow,*ptaj,nspacedouble=0;
62   PetscInt           an=A->cmap->N,am=A->rmap->N,pn=P->cmap->N;
63   PetscInt           i,j,k,ptnzi,arow,anzj,ptanzi,prow,pnzj,cnzi,nlnk,*lnk;
64   MatScalar          *ca;
65   PetscBT            lnkbt;
66 
67   PetscFunctionBegin;
68   /* Get ij structure of P^T */
69   ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
70   ptJ=ptj;
71 
72   /* Allocate ci array, arrays for fill computation and */
73   /* free space for accumulating nonzero column info */
74   ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
75   ci[0] = 0;
76 
77   ierr = PetscMalloc((2*an+1)*sizeof(PetscInt),&ptadenserow);CHKERRQ(ierr);
78   ierr = PetscMemzero(ptadenserow,(2*an+1)*sizeof(PetscInt));CHKERRQ(ierr);
79   ptasparserow = ptadenserow  + an;
80 
81   /* create and initialize a linked list */
82   nlnk = pn+1;
83   ierr = PetscLLCreate(pn,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);
84 
85   /* Set initial free space to be fill*nnz(A). */
86   /* This should be reasonable if sparsity of PtAP is similar to that of A. */
87   ierr          = PetscFreeSpaceGet((PetscInt)(fill*ai[am]),&free_space);
88   current_space = free_space;
89 
90   /* Determine symbolic info for each row of C: */
91   for (i=0;i<pn;i++) {
92     ptnzi  = pti[i+1] - pti[i];
93     ptanzi = 0;
94     /* Determine symbolic row of PtA: */
95     for (j=0;j<ptnzi;j++) {
96       arow = *ptJ++;
97       anzj = ai[arow+1] - ai[arow];
98       ajj  = aj + ai[arow];
99       for (k=0;k<anzj;k++) {
100         if (!ptadenserow[ajj[k]]) {
101           ptadenserow[ajj[k]]    = -1;
102           ptasparserow[ptanzi++] = ajj[k];
103         }
104       }
105     }
106     /* Using symbolic info for row of PtA, determine symbolic info for row of C: */
107     ptaj = ptasparserow;
108     cnzi   = 0;
109     for (j=0;j<ptanzi;j++) {
110       prow = *ptaj++;
111       pnzj = pi[prow+1] - pi[prow];
112       pjj  = pj + pi[prow];
113       /* add non-zero cols of P into the sorted linked list lnk */
114       ierr = PetscLLAdd(pnzj,pjj,pn,nlnk,lnk,lnkbt);CHKERRQ(ierr);
115       cnzi += nlnk;
116     }
117 
118     /* If free space is not available, make more free space */
119     /* Double the amount of total space in the list */
120     if (current_space->local_remaining<cnzi) {
121       ierr = PetscFreeSpaceGet(cnzi+current_space->total_array_size,&current_space);CHKERRQ(ierr);
122       nspacedouble++;
123     }
124 
125     /* Copy data into free space, and zero out denserows */
126     ierr = PetscLLClean(pn,pn,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
127     current_space->array           += cnzi;
128     current_space->local_used      += cnzi;
129     current_space->local_remaining -= cnzi;
130 
131     for (j=0;j<ptanzi;j++) {
132       ptadenserow[ptasparserow[j]] = 0;
133     }
134     /* Aside: Perhaps we should save the pta info for the numerical factorization. */
135     /*        For now, we will recompute what is needed. */
136     ci[i+1] = ci[i] + cnzi;
137   }
138   /* nnz is now stored in ci[ptm], column indices are in the list of free space */
139   /* Allocate space for cj, initialize cj, and */
140   /* destroy list of free space and other temporary array(s) */
141   ierr = PetscMalloc((ci[pn]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr);
142   ierr = PetscFreeSpaceContiguous(&free_space,cj);CHKERRQ(ierr);
143   ierr = PetscFree(ptadenserow);CHKERRQ(ierr);
144   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
145 
146   /* Allocate space for ca */
147   ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
148   ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);
149 
150   /* put together the new matrix */
151   ierr = MatCreateSeqAIJWithArrays(((PetscObject)A)->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr);
152 
153   /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
154   /* Since these are PETSc arrays, change flags to free them as necessary. */
155   c = (Mat_SeqAIJ *)((*C)->data);
156   c->free_a  = PETSC_TRUE;
157   c->free_ij = PETSC_TRUE;
158   c->nonew   = 0;
159   A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2; /* should use *C->ops until PtAP insterface is updated to double dispatch as MatMatMult() */
160 
161   /* Clean up. */
162   ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
163 #if defined(PETSC_USE_INFO)
164   if (ci[pn] != 0) {
165     PetscReal afill = ((PetscReal)ci[pn])/ai[am];
166     if (afill < 1.0) afill = 1.0;
167     ierr = PetscInfo3((*C),"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);CHKERRQ(ierr);
168     ierr = PetscInfo1((*C),"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);CHKERRQ(ierr);
169   } else {
170     ierr = PetscInfo((*C),"Empty matrix product\n");CHKERRQ(ierr);
171   }
172 #endif
173   PetscFunctionReturn(0);
174 }
175 
#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2"
/*
   Numeric phase of C = P^T*A*P using two sparse axpys; companion of
   MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2.  Recomputes the structure of
   each row of A*P on the fly (it is not stored), then scatters it into C
   via an outer product with the corresponding row of P.  C must have the
   structure produced by the symbolic phase.
*/
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy2(Mat A,Mat P,Mat C)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *a  = (Mat_SeqAIJ *) A->data;
  Mat_SeqAIJ     *p  = (Mat_SeqAIJ *) P->data;
  Mat_SeqAIJ     *c  = (Mat_SeqAIJ *) C->data;
  PetscInt       *ai=a->i,*aj=a->j,*apj,*apjdense,*pi=p->i,*pj=p->j,*pJ=p->j,*pjj;
  PetscInt       *ci=c->i,*cj=c->j,*cjj;
  PetscInt       am=A->rmap->N,cn=C->cmap->N,cm=C->rmap->N;
  PetscInt       i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
  MatScalar      *aa=a->a,*apa,*pa=p->a,*pA=p->a,*paj,*ca=c->a,*caj;

  PetscFunctionBegin;
  /* Allocate temporary array for storage of one row of A*P:
     apa (cn scalars, dense values) | apjdense (cn ints, occupancy markers)
     | apj (c->rmax ints, sparse column list) carved from one allocation */
  ierr = PetscMalloc(cn*(sizeof(MatScalar)+sizeof(PetscInt))+c->rmax*sizeof(PetscInt),&apa);CHKERRQ(ierr);

  apjdense = (PetscInt *)(apa + cn);
  apj      = apjdense + cn;
  ierr = PetscMemzero(apa,cn*(sizeof(MatScalar)+sizeof(PetscInt)));CHKERRQ(ierr);

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0; i<am; i++) {
    /* Form sparse row of A*P: accumulate A[i,:]*P into the dense row apa,
       recording the columns touched in apj/apjdense */
    anzi  = ai[i+1] - ai[i];
    apnzj = 0;
    for (j=0; j<anzi; j++) {
      prow = *aj++;               /* aj/aa advance across all of A row by row */
      pnzj = pi[prow+1] - pi[prow];
      pjj  = pj + pi[prow];
      paj  = pa + pi[prow];
      for (k=0;k<pnzj;k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;  /* mark column as present in this row of A*P */
          apj[apnzj++]     = pjj[k];
        }
        apa[pjj[k]] += (*aa)*paj[k];
      }
      ierr = PetscLogFlops(2.0*pnzj);CHKERRQ(ierr);
      aa++;
    }

    /* Sort the j index array for quick sparse axpy. */
    /* Note: a array does not need sorting as it is in dense storage locations. */
    ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);

    /* Compute P^T*A*P using outer product (P^T)[:,j]*(A*P)[j,:]. */
    pnzi = pi[i+1] - pi[i];
    for (j=0; j<pnzi; j++) {
      nextap = 0;
      crow   = *pJ++;             /* pJ/pA advance across all of P row by row */
      cjj    = cj + ci[crow];
      caj    = ca + ci[crow];
      /* Perform sparse axpy operation.  Note cjj includes apj:
         scan row crow of C once, consuming the sorted apj list in order. */
      for (k=0;nextap<apnzj;k++) {
#if defined(PETSC_USE_DEBUG)
        if (k >= ci[crow+1] - ci[crow]) {
          SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"k too large k %d, crow %d",k,crow);
        }
#endif
        if (cjj[k]==apj[nextap]) {
          caj[k] += (*pA)*apa[apj[nextap++]];
        }
      }
      ierr = PetscLogFlops(2.0*apnzj);CHKERRQ(ierr);
      pA++;
    }

    /* Zero the current row info for A*P so apa/apjdense are clean for row i+1 */
    for (j=0; j<apnzj; j++) {
      apa[apj[j]]      = 0.;
      apjdense[apj[j]] = 0;
    }
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(apa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
260 
261 #undef __FUNCT__
262 #define __FUNCT__ "MatPtAPSymbolic_SeqAIJ_SeqAIJ"
263 PetscErrorCode MatPtAPSymbolic_SeqAIJ_SeqAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
264 {
265   PetscErrorCode     ierr;
266   Mat_SeqAIJ         *a = (Mat_SeqAIJ*)A->data,*p = (Mat_SeqAIJ*)P->data,*c;
267   PetscInt           *pti,*ptj,*ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*api,*apj;
268   PetscInt           *ci,*cj,ndouble_ap,ndouble_ptap;
269   PetscInt           an=A->cmap->N,am=A->rmap->N,pn=P->cmap->N;
270   MatScalar          *ca;
271   Mat_PtAP           *ptap;
272   PetscInt           sparse_axpy=0;
273   //#if defined(PROFILE_MatPtAPSymbolic)
274   PetscLogDouble  t0,tf,time_Trans=0.0,time_GetSymbolic1=0.0,time_GetSymbolic2=0.0;
275   //#endif
276   PetscFunctionBegin;
277   /* flag 'sparse_axpy' determines which implementations to be used:
278        0: do dense axpy in MatPtAPNumeric() - fastest, but requires storage of struct A*P; (default)
279        1: do one sparse axpy - uses same memory as sparse_axpy=0 and might execute less flops
280           (apnz vs. cnz in the outerproduct), slower than case '0' when cnz is not too large than apnz;
281        2: do two sparse axpy in MatPtAPNumeric() - slowest, does not store structure of A*P. */
282   ierr = PetscOptionsGetInt(PETSC_NULL,"-matptap_sparseaxpy",&sparse_axpy,PETSC_NULL);CHKERRQ(ierr);
283   if (sparse_axpy == 2){
284     ierr = MatPtAPSymbolic_SeqAIJ_SeqAIJ_SparseAxpy2(A,P,fill,C);CHKERRQ(ierr);
285     PetscFunctionReturn(0);
286   }
287 
288   /* Get ij structure of Pt = P^T */
289   ierr = PetscGetTime(&t0);CHKERRQ(ierr);
290   ierr = MatGetSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
291   ierr = PetscGetTime(&tf);CHKERRQ(ierr);
292   time_Trans += tf - t0;
293 
294   /* Get structure of AP = A*P */
295   ierr = PetscGetTime(&t0);CHKERRQ(ierr);
296   ierr = MatGetSymbolicMatMatMult_SeqAIJ_SeqAIJ(am,ai,aj,an,pn,pi,pj,fill,&api,&apj,&ndouble_ap);CHKERRQ(ierr);
297   ierr = PetscGetTime(&tf);CHKERRQ(ierr);
298   time_GetSymbolic1 += tf - t0;
299 
300   /* Get structure of C = Pt*AP */
301   ierr = PetscGetTime(&t0);CHKERRQ(ierr);
302   ierr = MatGetSymbolicMatMatMult_SeqAIJ_SeqAIJ(pn,pti,ptj,am,pn,api,apj,fill,&ci,&cj,&ndouble_ptap);CHKERRQ(ierr);
303   ierr = PetscGetTime(&tf);CHKERRQ(ierr);
304   time_GetSymbolic2 += tf - t0;
305 
306   /* Allocate space for ca */
307   ierr = PetscMalloc((ci[pn]+1)*sizeof(MatScalar),&ca);CHKERRQ(ierr);
308   ierr = PetscMemzero(ca,(ci[pn]+1)*sizeof(MatScalar));CHKERRQ(ierr);
309 
310   /* put together the new matrix */
311   ierr = MatCreateSeqAIJWithArrays(((PetscObject)A)->comm,pn,pn,ci,cj,ca,C);CHKERRQ(ierr);
312 
313   /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
314   /* Since these are PETSc arrays, change flags to free them as necessary. */
315   c          = (Mat_SeqAIJ *)(*C)->data;
316   c->free_a  = PETSC_TRUE;
317   c->free_ij = PETSC_TRUE;
318   c->nonew   = 0;
319 
320   /* create a supporting struct for reuse by MatPtAPNumeric() */
321   ierr = PetscNew(Mat_PtAP,&ptap);CHKERRQ(ierr);
322   c->ptap            = ptap;
323   ptap->destroy      = (*C)->ops->destroy;
324   (*C)->ops->destroy = MatDestroy_SeqAIJ_PtAP;
325 
326   /* Allocate temporary array for storage of one row of A*P */
327   ierr = PetscMalloc((pn+1)*sizeof(PetscScalar),&ptap->apa);CHKERRQ(ierr);
328   ierr = PetscMemzero(ptap->apa,(pn+1)*sizeof(PetscScalar));CHKERRQ(ierr);
329   if (sparse_axpy == 1){
330     A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy;
331   } else {
332     A->ops->ptapnumeric = MatPtAPNumeric_SeqAIJ_SeqAIJ;
333   }
334   ptap->api = api;
335   ptap->apj = apj;
336 
337   /* Clean up. */
338   ierr = MatRestoreSymbolicTranspose_SeqAIJ(P,&pti,&ptj);CHKERRQ(ierr);
339 #if defined(PETSC_USE_INFO)
340   if (ci[pn] != 0) {
341     PetscReal apfill,ptapfill;
342     apfill = ((PetscReal)api[am])/(ai[am]+pi[an]);
343     if (apfill < 1.0) apfill = 1.0;
344     ierr = PetscInfo3((*C),"A*P: Reallocs %D; Fill ratio: given %G needed %G.\n",ndouble_ap,fill,apfill);CHKERRQ(ierr);
345     ptapfill = ((PetscReal)ci[pn])/(pi[an]+api[am]);
346     if (ptapfill < 1.0) ptapfill = 1.0;
347     ierr = PetscInfo3((*C),"Pt*AP: Reallocs %D; Fill ratio: given %G needed %G.\n",ndouble_ptap,fill,ptapfill);CHKERRQ(ierr);
348 
349     ierr = PetscInfo1((*C),"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",PetscMax(apfill,ptapfill));CHKERRQ(ierr);
350     ierr = PetscInfo4((*C),"nonzeros: A %D, P %D, A*P %D, C=PtAP %D\n",ai[am],pi[an],api[am],ci[pn]);CHKERRQ(ierr);
351   } else {
352     ierr = PetscInfo((*C),"Empty matrix product\n");CHKERRQ(ierr);
353   }
354 #endif
355 
356   printf("MatPtAPSymbolic_SeqAIJ_SeqAIJ time %g + %g + %g\n",time_Trans,time_GetSymbolic1,time_GetSymbolic2);
357   PetscFunctionReturn(0);
358 }
359 
/* #define PROFILE_MatPtAPNumeric */
#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ"
/*
   Numeric phase of C = P^T*A*P using a dense axpy over each row of C
   (default algorithm, sparse_axpy == 0).  Requires the Mat_PtAP container
   built by MatPtAPSymbolic_SeqAIJ_SeqAIJ: ptap->apa (dense scratch row)
   and ptap->api/apj (stored structure of A*P).
   Define PROFILE_MatPtAPNumeric above to print stage timings and flops.
*/
PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode  ierr;
  Mat_SeqAIJ      *a  = (Mat_SeqAIJ *) A->data;
  Mat_SeqAIJ      *p  = (Mat_SeqAIJ *) P->data;
  Mat_SeqAIJ      *c  = (Mat_SeqAIJ *) C->data;
  PetscInt        *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
  PetscScalar     *aa=a->a,*pa=p->a;
  PetscInt        *apj,*pcol,*cjj,cnz;
  PetscInt        am=A->rmap->N,cm=C->rmap->N;
  PetscInt        i,j,k,anz,apnz,pnz,prow,crow;
  PetscScalar     *apa,*pval,*ca=c->a,*caj,pvalj;
  Mat_PtAP        *ptap = c->ptap;
#if defined(PROFILE_MatPtAPNumeric)
  PetscLogDouble  t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
  PetscInt        flops0=0,flops1=0;
#endif

  PetscFunctionBegin;
  /* Get temporary array for storage of one row of A*P (allocated by the symbolic phase) */
  apa = ptap->apa;

  /* Clear old values in C */
  ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  for (i=0;i<am;i++) {
    /* Form sparse row of AP[i,:] = A[i,:]*P, accumulated into the dense row apa */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    anz  = ai[i+1] - ai[i];
    apnz = 0;
    for (j=0; j<anz; j++) {
      prow = aj[j];
      pnz  = pi[prow+1] - pi[prow];
      pcol = pj + pi[prow];
      pval = pa + pi[prow];
      for (k=0; k<pnz; k++) {
        apa[pcol[k]] += aa[j]*pval[k];
      }
      ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops0 += 2.0*pnz;
#endif
    }
    aj += anz; aa += anz;  /* advance to row i+1 of A */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq0 += tf - t0;
#endif

    /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&t0);CHKERRQ(ierr);
#endif
    apj  = ptap->apj + ptap->api[i];  /* stored column structure of row i of A*P */
    apnz = ptap->api[i+1] - ptap->api[i];
    pnz  = pi[i+1] - pi[i];
    pcol = pj + pi[i];
    pval = pa + pi[i];

    /* Perform dense axpy: every nonzero of row crow of C reads apa directly */
    for (j=0; j<pnz; j++) {
      crow  = pcol[j];
      cjj   = cj + ci[crow];
      caj   = ca + ci[crow];
      pvalj = pval[j];
      cnz   = ci[crow+1] - ci[crow];
      for (k=0; k<cnz; k++){
        caj[k] += pvalj*apa[cjj[k]];
      }
      ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
      flops1 += 2.0*cnz;
#endif
    }
#if defined(PROFILE_MatPtAPNumeric)
    ierr = PetscGetTime(&tf);CHKERRQ(ierr);
    time_Cseq1 += tf - t0;
#endif

    /* Zero the current row info for A*P so apa is clean for row i+1 */
    for (j=0; j<apnz; j++) apa[apj[j]] = 0.0;
  }

  /* Assemble the final matrix and clean up */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
#if defined(PROFILE_MatPtAPNumeric)
  printf("PtAPNumeric_SeqAIJ time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
#endif
  PetscFunctionReturn(0);
}
456 
457 #undef __FUNCT__
458 #define __FUNCT__ "MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy"
459 PetscErrorCode MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy(Mat A,Mat P,Mat C)
460 {
461   PetscErrorCode  ierr;
462   Mat_SeqAIJ      *a  = (Mat_SeqAIJ *) A->data;
463   Mat_SeqAIJ      *p  = (Mat_SeqAIJ *) P->data;
464   Mat_SeqAIJ      *c  = (Mat_SeqAIJ *) C->data;
465   PetscInt        *ai=a->i,*aj=a->j,*pi=p->i,*pj=p->j,*ci=c->i,*cj=c->j;
466   PetscScalar     *aa=a->a,*pa=p->a;
467   PetscInt        *apj,*pcol,*cjj;
468   PetscInt        am=A->rmap->N,cm=C->rmap->N;
469   PetscInt        i,j,k,anz,apnz,pnz,prow,crow,apcol,nextap;
470   PetscScalar     *apa,*pval,*ca=c->a,*caj,pvalj;
471   Mat_PtAP        *ptap = c->ptap;
472 #if defined(PROFILE_MatPtAPNumeric)
473   PetscLogDouble  t0,tf,time_Cseq0=0.0,time_Cseq1=0.0;
474   PetscInt        flops0=0,flops1=0;
475 #endif
476 
477   PetscFunctionBegin;
478   /* Get temporary array for storage of one row of A*P */
479   apa = ptap->apa;
480 
481   /* Clear old values in C */
482   ierr = PetscMemzero(ca,ci[cm]*sizeof(MatScalar));CHKERRQ(ierr);
483 
484   for (i=0;i<am;i++) {
485     /* Form sparse row of AP[i,:] = A[i,:]*P */
486 #if defined(PROFILE_MatPtAPNumeric)
487     ierr = PetscGetTime(&t0);CHKERRQ(ierr);
488 #endif
489     anz  = ai[i+1] - ai[i];
490     apnz = 0;
491     for (j=0; j<anz; j++) {
492       prow = aj[j];
493       pnz  = pi[prow+1] - pi[prow];
494       pcol = pj + pi[prow];
495       pval = pa + pi[prow];
496       for (k=0; k<pnz; k++) {
497         apa[pcol[k]] += aa[j]*pval[k];
498       }
499       ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
500 #if defined(PROFILE_MatPtAPNumeric)
501       flops0 += 2.0*pnz;
502 #endif
503     }
504     aj += anz; aa += anz;
505 #if defined(PROFILE_MatPtAPNumeric)
506     ierr = PetscGetTime(&tf);CHKERRQ(ierr);
507     time_Cseq0 += tf - t0;
508 #endif
509 
510     /* Compute P^T*A*P using outer product P[i,:]^T*AP[i,:]. */
511 #if defined(PROFILE_MatPtAPNumeric)
512     ierr = PetscGetTime(&t0);CHKERRQ(ierr);
513 #endif
514     apj  = ptap->apj + ptap->api[i];
515     apnz = ptap->api[i+1] - ptap->api[i];
516     pnz  = pi[i+1] - pi[i];
517     pcol = pj + pi[i];
518     pval = pa + pi[i];
519 
520     /* Perform sparse axpy */
521     for (j=0; j<pnz; j++) {
522       crow   = pcol[j];
523       cjj    = cj + ci[crow];
524       caj    = ca + ci[crow];
525       pvalj  = pval[j];
526       nextap = 1;
527       apcol  = apj[nextap];
528       for (k=0; nextap<apnz; k++) {
529         if (cjj[k] == apcol) {
530           caj[k] += pvalj*apa[apcol];
531           apcol   = apj[nextap++];
532         }
533       }
534       ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
535 #if defined(PROFILE_MatPtAPNumeric)
536       flops1 += 2.0*apnz;
537 #endif
538     }
539 #if defined(PROFILE_MatPtAPNumeric)
540     ierr = PetscGetTime(&tf);CHKERRQ(ierr);
541     time_Cseq1 += tf - t0;
542 #endif
543 
544     /* Zero the current row info for A*P */
545     for (j=0; j<apnz; j++) {
546       apcol      = apj[j];
547       apa[apcol] = 0.;
548     }
549   }
550 
551   /* Assemble the final matrix and clean up */
552   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
553   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
554 #if defined(PROFILE_MatPtAPNumeric)
555   printf("MatPtAPNumeric_SeqAIJ_SeqAIJ_SparseAxpy time %g + %g, flops %d %d\n",time_Cseq0,time_Cseq1,flops0,flops1);
556 #endif
557   PetscFunctionReturn(0);
558 }
559