xref: /petsc/src/mat/impls/aij/mpi/mpiaij.c (revision 7a4fe282d1b349e95b3be72d69d8dd3d3bcd7bc6)
1 #define PETSCMAT_DLL
2 
3 #include "../src/mat/impls/aij/mpi/mpiaij.h"   /*I "petscmat.h" I*/
4 #include "../src/inline/spops.h"
5 
6 #undef __FUNCT__
7 #define __FUNCT__ "MatDistribute_MPIAIJ"
8 /*
9     Distributes a SeqAIJ matrix across a set of processes. Code stolen from
10     MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.
11 
12     Only for square matrices
13 */
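/*
   A minimal usage sketch (not part of this file; the matrix name gA and the local
   row count m are hypothetical). The distributed matrix is created once with
   MAT_INITIAL_MATRIX; if the nonzero pattern of gA is unchanged, updated numerical
   values can be pushed out again with MAT_REUSE_MATRIX. gA only needs to be valid
   on process 0.

     Mat dmat;
     ierr = MatDistribute_MPIAIJ(PETSC_COMM_WORLD,gA,m,MAT_INITIAL_MATRIX,&dmat);CHKERRQ(ierr);
     ... change the numerical entries of gA on process 0 ...
     ierr = MatDistribute_MPIAIJ(PETSC_COMM_WORLD,gA,m,MAT_REUSE_MATRIX,&dmat);CHKERRQ(ierr);
*/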
14 PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
15 {
16   PetscMPIInt    rank,size;
17   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz,*gmataj,cnt,row,*ld;
18   PetscErrorCode ierr;
19   Mat            mat;
20   Mat_SeqAIJ     *gmata;
21   PetscMPIInt    tag;
22   MPI_Status     status;
23   PetscTruth     aij;
24   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;
25 
26   PetscFunctionBegin;
27   CHKMEMQ;
28   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
29   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
30   if (!rank) {
31     ierr = PetscTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);CHKERRQ(ierr);
32     if (!aij) SETERRQ1(PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
33   }
34   if (reuse == MAT_INITIAL_MATRIX) {
35     ierr = MatCreate(comm,&mat);CHKERRQ(ierr);
36     ierr = MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
37     ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
38     ierr = PetscMalloc((size+1)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
39     ierr = PetscMalloc2(m,PetscInt,&dlens,m,PetscInt,&olens);CHKERRQ(ierr);
40     ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
41     rowners[0] = 0;
42     for (i=2; i<=size; i++) {
43       rowners[i] += rowners[i-1];
44     }
45     rstart = rowners[rank];
46     rend   = rowners[rank+1];
47     ierr   = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
48     if (!rank) {
49       gmata = (Mat_SeqAIJ*) gmat->data;
50       /* send row lengths to all processors */
51       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
52       for (i=1; i<size; i++) {
53 	ierr = MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
54       }
55       /* determine the number of diagonal and off-diagonal entries in each row */
56       ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
57       ierr = PetscMalloc(m*sizeof(PetscInt),&ld);CHKERRQ(ierr);
58       ierr = PetscMemzero(ld,m*sizeof(PetscInt));CHKERRQ(ierr);
59       jj = 0;
60       for (i=0; i<m; i++) {
61 	for (j=0; j<dlens[i]; j++) {
62           if (gmata->j[jj] < rstart) ld[i]++;
63 	  if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
64 	  jj++;
65 	}
66       }
67       /* send column indices to other processes */
68       for (i=1; i<size; i++) {
69 	nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
70 	ierr = MPI_Send(&nz,1,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
71 	ierr = MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
72       }
73 
74       /* send numerical values to other processes */
75       for (i=1; i<size; i++) {
76         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
77         ierr = MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
78       }
79       gmataa = gmata->a;
80       gmataj = gmata->j;
81 
82     } else {
83       /* receive row lengths */
84       ierr = MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
85       /* receive column indices */
86       ierr = MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
87       ierr = PetscMalloc2(nz,PetscScalar,&gmataa,nz,PetscInt,&gmataj);CHKERRQ(ierr);
88       ierr = MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
89       /* determine the number of diagonal and off-diagonal entries in each row */
90       ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
91       ierr = PetscMalloc(m*sizeof(PetscInt),&ld);CHKERRQ(ierr);
92       ierr = PetscMemzero(ld,m*sizeof(PetscInt));CHKERRQ(ierr);
93       jj = 0;
94       for (i=0; i<m; i++) {
95 	for (j=0; j<dlens[i]; j++) {
96           if (gmataj[jj] < rstart) ld[i]++;
97 	  if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
98 	  jj++;
99 	}
100       }
101       /* receive numerical values */
102       ierr = PetscMemzero(gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
103       ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
104     }
105     /* set preallocation */
106     for (i=0; i<m; i++) {
107       dlens[i] -= olens[i];
108     }
109     ierr = MatSeqAIJSetPreallocation(mat,0,dlens);CHKERRQ(ierr);
110     ierr = MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);CHKERRQ(ierr);
111 
112     for (i=0; i<m; i++) {
113       dlens[i] += olens[i];
114     }
115     cnt  = 0;
116     for (i=0; i<m; i++) {
117       row  = rstart + i;
118       ierr = MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);CHKERRQ(ierr);
119       cnt += dlens[i];
120     }
121     if (rank) {
122       ierr = PetscFree2(gmataa,gmataj);CHKERRQ(ierr);
123     }
124     ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
125     ierr = PetscFree(rowners);CHKERRQ(ierr);
126     ((Mat_MPIAIJ*)(mat->data))->ld = ld;
127     *inmat = mat;
128   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
129     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
130     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
131     mat   = *inmat;
132     ierr  = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
133     if (!rank) {
134       /* send numerical values to other processes */
135       gmata = (Mat_SeqAIJ*) gmat->data;
136       ierr   = MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);CHKERRQ(ierr);
137       gmataa = gmata->a;
138       for (i=1; i<size; i++) {
139         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
140         ierr = MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
141       }
142       nz   = gmata->i[rowners[1]]-gmata->i[rowners[0]];
143     } else {
144       /* receive numerical values from process 0 */
145       nz   = Ad->nz + Ao->nz;
146       ierr = PetscMalloc(nz*sizeof(PetscScalar),&gmataa);CHKERRQ(ierr); gmataarestore = gmataa;
147       ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
148     }
149     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
150     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
151     ad = Ad->a;
152     ao = Ao->a;
153     if (mat->rmap->n) {
154       i  = 0;
155       nz = ld[i];                                   ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
156       nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
157     }
158     for (i=1; i<mat->rmap->n; i++) {
159       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
160       nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
161     }
162     i--;
163     if (mat->rmap->n) {
164       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
165     }
166     if (rank) {
167       ierr = PetscFree(gmataarestore);CHKERRQ(ierr);
168     }
169   }
170   ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
171   ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
172   CHKMEMQ;
173   PetscFunctionReturn(0);
174 }
175 
176 /*
177   Local utility routine that creates a mapping from the global column
178 number to the local number in the off-diagonal part of the local
179 storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
180 a slightly higher hash table cost; without it, it is not scalable (each processor
181 has an order N integer array, which is fast to access).
182 */
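/*
   Lookup sketch (mirrors how the colmap is consumed in MatSetValues_MPIAIJ() and
   MatGetValues_MPIAIJ() below; gcol and lcol are hypothetical names for a global
   column index and the corresponding local off-diagonal column index):

   #if defined (PETSC_USE_CTABLE)
     ierr = PetscTableFind(aij->colmap,gcol+1,&lcol);CHKERRQ(ierr);
     lcol--;                          -- keys and values are stored shifted by one, so 0 means "absent"
   #else
     lcol = aij->colmap[gcol] - 1;    -- dense array indexed by the global column number
   #endif

   A negative lcol indicates that gcol does not appear in the off-diagonal block.
*/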
183 #undef __FUNCT__
184 #define __FUNCT__ "CreateColmap_MPIAIJ_Private"
185 PetscErrorCode CreateColmap_MPIAIJ_Private(Mat mat)
186 {
187   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
188   PetscErrorCode ierr;
189   PetscInt       n = aij->B->cmap->n,i;
190 
191   PetscFunctionBegin;
192 #if defined (PETSC_USE_CTABLE)
193   ierr = PetscTableCreate(n,&aij->colmap);CHKERRQ(ierr);
194   for (i=0; i<n; i++){
195     ierr = PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1);CHKERRQ(ierr);
196   }
197 #else
198   ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscInt),&aij->colmap);CHKERRQ(ierr);
199   ierr = PetscLogObjectMemory(mat,mat->cmap->N*sizeof(PetscInt));CHKERRQ(ierr);
200   ierr = PetscMemzero(aij->colmap,mat->cmap->N*sizeof(PetscInt));CHKERRQ(ierr);
201   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
202 #endif
203   PetscFunctionReturn(0);
204 }
205 
206 
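/*
   The two macros below insert a single (row,col,value) entry into the local
   diagonal (A) or off-diagonal (B) SeqAIJ block: a bisection step followed by a
   linear scan locates the column within the (sorted) row; an existing entry is
   added to or overwritten according to addv; otherwise the later entries of the
   row are shifted up to make room for the new entry, reallocating the row with
   MatSeqXAIJReallocateAIJ() when it is full.
*/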
207 #define CHUNKSIZE   15
208 #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv) \
209 { \
210     if (col <= lastcol1) low1 = 0; else high1 = nrow1; \
211     lastcol1 = col;\
212     while (high1-low1 > 5) { \
213       t = (low1+high1)/2; \
214       if (rp1[t] > col) high1 = t; \
215       else             low1  = t; \
216     } \
217       for (_i=low1; _i<high1; _i++) { \
218         if (rp1[_i] > col) break; \
219         if (rp1[_i] == col) { \
220           if (addv == ADD_VALUES) ap1[_i] += value;   \
221           else                    ap1[_i] = value; \
222           goto a_noinsert; \
223         } \
224       }  \
225       if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
226       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}		\
227       if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
228       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
229       N = nrow1++ - 1; a->nz++; high1++; \
230       /* shift up all the later entries in this row */ \
231       for (ii=N; ii>=_i; ii--) { \
232         rp1[ii+1] = rp1[ii]; \
233         ap1[ii+1] = ap1[ii]; \
234       } \
235       rp1[_i] = col;  \
236       ap1[_i] = value;  \
237       a_noinsert: ; \
238       ailen[row] = nrow1; \
239 }
240 
241 
242 #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv) \
243 { \
244     if (col <= lastcol2) low2 = 0; else high2 = nrow2; \
245     lastcol2 = col;\
246     while (high2-low2 > 5) { \
247       t = (low2+high2)/2; \
248       if (rp2[t] > col) high2 = t; \
249       else             low2  = t; \
250     } \
251     for (_i=low2; _i<high2; _i++) {		\
252       if (rp2[_i] > col) break;			\
253       if (rp2[_i] == col) {			      \
254 	if (addv == ADD_VALUES) ap2[_i] += value;     \
255 	else                    ap2[_i] = value;      \
256 	goto b_noinsert;			      \
257       }						      \
258     }							      \
259     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
260     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}		\
261     if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
262     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
263     N = nrow2++ - 1; b->nz++; high2++;					\
264     /* shift up all the later entries in this row */			\
265     for (ii=N; ii>=_i; ii--) {						\
266       rp2[ii+1] = rp2[ii];						\
267       ap2[ii+1] = ap2[ii];						\
268     }									\
269     rp2[_i] = col;							\
270     ap2[_i] = value;							\
271     b_noinsert: ;								\
272     bilen[row] = nrow2;							\
273 }
274 
275 #undef __FUNCT__
276 #define __FUNCT__ "MatSetValuesRow_MPIAIJ"
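/*
   Assumed layout of v (inferred from the copies below): the entire local row with
   columns in increasing global order, i.e. the off-diagonal (B) entries that lie
   to the left of the diagonal block, then the diagonal (A) block entries, then the
   remaining off-diagonal (B) entries.
*/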
277 PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
278 {
279   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
280   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
281   PetscErrorCode ierr;
282   PetscInt       l,*garray = mat->garray,diag;
283 
284   PetscFunctionBegin;
285   /* code only works for square matrices A */
286 
287   /* find size of row to the left of the diagonal part */
288   ierr = MatGetOwnershipRange(A,&diag,0);CHKERRQ(ierr);
289   row  = row - diag;
290   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
291     if (garray[b->j[b->i[row]+l]] > diag) break;
292   }
293   ierr = PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));CHKERRQ(ierr);
294 
295   /* diagonal part */
296   ierr = PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));CHKERRQ(ierr);
297 
298   /* right of diagonal part */
299   ierr = PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));CHKERRQ(ierr);
300   PetscFunctionReturn(0);
301 }
302 
303 #undef __FUNCT__
304 #define __FUNCT__ "MatSetValues_MPIAIJ"
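/*
   Entries whose row is owned by this process go directly into the diagonal (A) or
   off-diagonal (B) block through the macros above (global columns are translated
   to local ones via the colmap once the matrix has been assembled); entries for
   rows owned by other processes are placed in the stash, to be communicated during
   MatAssemblyBegin/End(), unless the donotstash option is set.
*/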
305 PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
306 {
307   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
308   PetscScalar    value;
309   PetscErrorCode ierr;
310   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
311   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
312   PetscTruth     roworiented = aij->roworiented;
313 
314   /* Some variables required by the MatSetValues_SeqAIJ_A/B_Private() macros */
315   Mat            A = aij->A;
316   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
317   PetscInt       *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
318   MatScalar      *aa = a->a;
319   PetscTruth     ignorezeroentries = a->ignorezeroentries;
320   Mat            B = aij->B;
321   Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
322   PetscInt       *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
323   MatScalar      *ba = b->a;
324 
325   PetscInt       *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
326   PetscInt       nonew = a->nonew;
327   MatScalar      *ap1,*ap2;
328 
329   PetscFunctionBegin;
330   for (i=0; i<m; i++) {
331     if (im[i] < 0) continue;
332 #if defined(PETSC_USE_DEBUG)
333     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
334 #endif
335     if (im[i] >= rstart && im[i] < rend) {
336       row      = im[i] - rstart;
337       lastcol1 = -1;
338       rp1      = aj + ai[row];
339       ap1      = aa + ai[row];
340       rmax1    = aimax[row];
341       nrow1    = ailen[row];
342       low1     = 0;
343       high1    = nrow1;
344       lastcol2 = -1;
345       rp2      = bj + bi[row];
346       ap2      = ba + bi[row];
347       rmax2    = bimax[row];
348       nrow2    = bilen[row];
349       low2     = 0;
350       high2    = nrow2;
351 
352       for (j=0; j<n; j++) {
353         if (v) {if (roworiented) value = v[i*n+j]; else value = v[i+j*m];} else value = 0.0;
354         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
355         if (in[j] >= cstart && in[j] < cend){
356           col = in[j] - cstart;
357           MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
358         } else if (in[j] < 0) continue;
359 #if defined(PETSC_USE_DEBUG)
360         else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
361 #endif
362         else {
363           if (mat->was_assembled) {
364             if (!aij->colmap) {
365               ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
366             }
367 #if defined (PETSC_USE_CTABLE)
368             ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
369 	    col--;
370 #else
371             col = aij->colmap[in[j]] - 1;
372 #endif
373             if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
374               ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
375               col =  in[j];
376               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
377               B = aij->B;
378               b = (Mat_SeqAIJ*)B->data;
379               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
380               rp2      = bj + bi[row];
381               ap2      = ba + bi[row];
382               rmax2    = bimax[row];
383               nrow2    = bilen[row];
384               low2     = 0;
385               high2    = nrow2;
386               bm       = aij->B->rmap->n;
387               ba = b->a;
388             }
389           } else col = in[j];
390           MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
391         }
392       }
393     } else {
394       if (!aij->donotstash) {
395         if (roworiented) {
396           if (ignorezeroentries && v[i*n] == 0.0) continue;
397           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
398         } else {
399           if (ignorezeroentries && v[i] == 0.0) continue;
400           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
401         }
402       }
403     }
404   }
405   PetscFunctionReturn(0);
406 }
407 
408 #undef __FUNCT__
409 #define __FUNCT__ "MatGetValues_MPIAIJ"
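/*
   Only values in locally owned rows can be retrieved; columns in the diagonal block
   are read from aij->A, all other columns are translated through the colmap/garray
   and read from aij->B (absent off-diagonal entries are returned as 0.0).
*/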
410 PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
411 {
412   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
413   PetscErrorCode ierr;
414   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
415   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
416 
417   PetscFunctionBegin;
418   for (i=0; i<m; i++) {
419     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
420     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
421     if (idxm[i] >= rstart && idxm[i] < rend) {
422       row = idxm[i] - rstart;
423       for (j=0; j<n; j++) {
424         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
425         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
426         if (idxn[j] >= cstart && idxn[j] < cend){
427           col = idxn[j] - cstart;
428           ierr = MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
429         } else {
430           if (!aij->colmap) {
431             ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
432           }
433 #if defined (PETSC_USE_CTABLE)
434           ierr = PetscTableFind(aij->colmap,idxn[j]+1,&col);CHKERRQ(ierr);
435           col --;
436 #else
437           col = aij->colmap[idxn[j]] - 1;
438 #endif
439           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
440           else {
441             ierr = MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
442           }
443         }
444       }
445     } else {
446       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
447     }
448   }
449   PetscFunctionReturn(0);
450 }
451 
452 #undef __FUNCT__
453 #define __FUNCT__ "MatAssemblyBegin_MPIAIJ"
454 PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
455 {
456   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
457   PetscErrorCode ierr;
458   PetscInt       nstash,reallocs;
459   InsertMode     addv;
460 
461   PetscFunctionBegin;
462   if (aij->donotstash) {
463     PetscFunctionReturn(0);
464   }
465 
466   /* make sure all processors are using either INSERT_VALUES or ADD_VALUES */
467   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);CHKERRQ(ierr);
468   if (addv == (ADD_VALUES|INSERT_VALUES)) {
469     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
470   }
471   mat->insertmode = addv; /* in case this processor had no cache */
472 
473   ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
474   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
475   ierr = PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
476   PetscFunctionReturn(0);
477 }
478 
479 #undef __FUNCT__
480 #define __FUNCT__ "MatAssemblyEnd_MPIAIJ"
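/*
   Completes assembly: drains the stash of off-process entries received during
   MatAssemblyBegin() by feeding them back through MatSetValues_MPIAIJ(), assembles
   the diagonal block, collectively checks whether any process disassembled its
   off-diagonal block (and disassembles locally to match), sets up the matrix-vector
   scatter on the first final assembly, and finally assembles the off-diagonal block
   in compressed-row form.
*/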
481 PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
482 {
483   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
484   Mat_SeqAIJ     *a=(Mat_SeqAIJ *)aij->A->data;
485   PetscErrorCode ierr;
486   PetscMPIInt    n;
487   PetscInt       i,j,rstart,ncols,flg;
488   PetscInt       *row,*col;
489   PetscTruth     other_disassembled;
490   PetscScalar    *val;
491   InsertMode     addv = mat->insertmode;
492 
493   /* do not use 'b = (Mat_SeqAIJ *)aij->B->data' as B can be reset in disassembly */
494   PetscFunctionBegin;
495   if (!aij->donotstash) {
496     while (1) {
497       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
498       if (!flg) break;
499 
500       for (i=0; i<n;) {
501         /* Now identify the consecutive vals belonging to the same row */
502         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
503         if (j < n) ncols = j-i;
504         else       ncols = n-i;
505         /* Now assemble all these values with a single function call */
506         ierr = MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
507         i = j;
508       }
509     }
510     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
511   }
512   a->compressedrow.use     = PETSC_FALSE;
513   ierr = MatAssemblyBegin(aij->A,mode);CHKERRQ(ierr);
514   ierr = MatAssemblyEnd(aij->A,mode);CHKERRQ(ierr);
515 
516   /* determine if any processor has disassembled, if so we must
517      also disassemble ourselves, in order that we may reassemble. */
518   /*
519      if nonzero structure of submatrix B cannot change then we know that
520      no processor disassembled thus we can skip this stuff
521   */
522   if (!((Mat_SeqAIJ*)aij->B->data)->nonew)  {
523     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);CHKERRQ(ierr);
524     if (mat->was_assembled && !other_disassembled) {
525       ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
526     }
527   }
528   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
529     ierr = MatSetUpMultiply_MPIAIJ(mat);CHKERRQ(ierr);
530   }
531   ierr = MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);CHKERRQ(ierr);
532   ((Mat_SeqAIJ *)aij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
533   ierr = MatAssemblyBegin(aij->B,mode);CHKERRQ(ierr);
534   ierr = MatAssemblyEnd(aij->B,mode);CHKERRQ(ierr);
535 
536   ierr = PetscFree(aij->rowvalues);CHKERRQ(ierr);
537   aij->rowvalues = 0;
538 
539   /* used by MatAXPY() */
540   a->xtoy = 0; ((Mat_SeqAIJ *)aij->B->data)->xtoy = 0;  /* b->xtoy = 0 */
541   a->XtoY = 0; ((Mat_SeqAIJ *)aij->B->data)->XtoY = 0;  /* b->XtoY = 0 */
542 
543   PetscFunctionReturn(0);
544 }
545 
546 #undef __FUNCT__
547 #define __FUNCT__ "MatZeroEntries_MPIAIJ"
548 PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
549 {
550   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
551   PetscErrorCode ierr;
552 
553   PetscFunctionBegin;
554   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
555   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
556   PetscFunctionReturn(0);
557 }
558 
559 #undef __FUNCT__
560 #define __FUNCT__ "MatZeroRows_MPIAIJ"
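/*
   The global row indices in rows[] may refer to rows owned by any process, so the
   routine first routes each index to its owner (per-destination counts, PetscMaxSum()
   to learn how many messages to expect, nonblocking receives, then sends), converts
   the received indices to local numbering, and only then zeros the local rows of the
   off-diagonal and diagonal blocks, inserting the diagonal value if requested.
*/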
561 PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
562 {
563   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
564   PetscErrorCode ierr;
565   PetscMPIInt    size = l->size,imdex,n,rank = l->rank,tag = ((PetscObject)A)->tag,lastidx = -1;
566   PetscInt       i,*owners = A->rmap->range;
567   PetscInt       *nprocs,j,idx,nsends,row;
568   PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
569   PetscInt       *rvalues,count,base,slen,*source;
570   PetscInt       *lens,*lrows,*values,rstart=A->rmap->rstart;
571   MPI_Comm       comm = ((PetscObject)A)->comm;
572   MPI_Request    *send_waits,*recv_waits;
573   MPI_Status     recv_status,*send_status;
574 #if defined(PETSC_DEBUG)
575   PetscTruth     found = PETSC_FALSE;
576 #endif
577 
578   PetscFunctionBegin;
579   /*  first count number of contributors to each processor */
580   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
581   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
582   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/
583   j = 0;
584   for (i=0; i<N; i++) {
585     if (lastidx > (idx = rows[i])) j = 0;
586     lastidx = idx;
587     for (; j<size; j++) {
588       if (idx >= owners[j] && idx < owners[j+1]) {
589         nprocs[2*j]++;
590         nprocs[2*j+1] = 1;
591         owner[i] = j;
592 #if defined(PETSC_DEBUG)
593         found = PETSC_TRUE;
594 #endif
595         break;
596       }
597     }
598 #if defined(PETSC_DEBUG)
599     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
600     found = PETSC_FALSE;
601 #endif
602   }
603   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
604 
605   /* inform other processors of number of messages and max length*/
606   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
607 
608   /* post receives:   */
609   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
610   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
611   for (i=0; i<nrecvs; i++) {
612     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
613   }
614 
615   /* do sends:
616       1) starts[i] gives the starting index in svalues for stuff going to
617          the ith processor
618   */
619   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr);
620   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
621   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
622   starts[0] = 0;
623   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
624   for (i=0; i<N; i++) {
625     svalues[starts[owner[i]]++] = rows[i];
626   }
627 
628   starts[0] = 0;
629   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
630   count = 0;
631   for (i=0; i<size; i++) {
632     if (nprocs[2*i+1]) {
633       ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
634     }
635   }
636   ierr = PetscFree(starts);CHKERRQ(ierr);
637 
638   base = owners[rank];
639 
640   /*  wait on receives */
641   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
642   source = lens + nrecvs;
643   count  = nrecvs; slen = 0;
644   while (count) {
645     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
646     /* unpack receives into our local space */
647     ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
648     source[imdex]  = recv_status.MPI_SOURCE;
649     lens[imdex]    = n;
650     slen          += n;
651     count--;
652   }
653   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
654 
655   /* move the data into the send scatter */
656   ierr = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
657   count = 0;
658   for (i=0; i<nrecvs; i++) {
659     values = rvalues + i*nmax;
660     for (j=0; j<lens[i]; j++) {
661       lrows[count++] = values[j] - base;
662     }
663   }
664   ierr = PetscFree(rvalues);CHKERRQ(ierr);
665   ierr = PetscFree(lens);CHKERRQ(ierr);
666   ierr = PetscFree(owner);CHKERRQ(ierr);
667   ierr = PetscFree(nprocs);CHKERRQ(ierr);
668 
669   /* actually zap the local rows */
670   /*
671         Zero the required rows. If the "diagonal block" of the matrix
672      is square and the user wishes to set the diagonal we use separate
673      code so that MatSetValues() is not called for each diagonal entry,
674      which would allocate new memory (many mallocs) and slow things down.
675 
676        Contributed by: Matthew Knepley
677   */
678   /* must zero l->B before l->A because the (diag) case below may put values into l->B */
679   ierr = MatZeroRows(l->B,slen,lrows,0.0);CHKERRQ(ierr);
680   if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
681     ierr      = MatZeroRows(l->A,slen,lrows,diag);CHKERRQ(ierr);
682   } else if (diag != 0.0) {
683     ierr = MatZeroRows(l->A,slen,lrows,0.0);CHKERRQ(ierr);
684     if (((Mat_SeqAIJ*)l->A->data)->nonew) {
685       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options\n\
686 MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
687     }
688     for (i = 0; i < slen; i++) {
689       row  = lrows[i] + rstart;
690       ierr = MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);CHKERRQ(ierr);
691     }
692     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
693     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
694   } else {
695     ierr = MatZeroRows(l->A,slen,lrows,0.0);CHKERRQ(ierr);
696   }
697   ierr = PetscFree(lrows);CHKERRQ(ierr);
698 
699   /* wait on sends */
700   if (nsends) {
701     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
702     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
703     ierr = PetscFree(send_status);CHKERRQ(ierr);
704   }
705   ierr = PetscFree(send_waits);CHKERRQ(ierr);
706   ierr = PetscFree(svalues);CHKERRQ(ierr);
707 
708   PetscFunctionReturn(0);
709 }
710 
711 #undef __FUNCT__
712 #define __FUNCT__ "MatMult_MPIAIJ"
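/*
   yy = A*xx: the scatter of the needed off-process entries of xx into a->lvec is
   started, the diagonal-block product is computed so it overlaps the communication,
   the scatter is completed, and the off-diagonal block contribution is added with a
   multadd on a->lvec.
*/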
713 PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
714 {
715   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
716   PetscErrorCode ierr;
717   PetscInt       nt;
718 
719   PetscFunctionBegin;
720   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
721   if (nt != A->cmap->n) {
722     SETERRQ2(PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
723   }
724   ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
725   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
726   ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
727   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
728   PetscFunctionReturn(0);
729 }
730 
731 #undef __FUNCT__
732 #define __FUNCT__ "MatMultAdd_MPIAIJ"
733 PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
734 {
735   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
736   PetscErrorCode ierr;
737 
738   PetscFunctionBegin;
739   ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
740   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
741   ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
742   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
743   PetscFunctionReturn(0);
744 }
745 
746 #undef __FUNCT__
747 #define __FUNCT__ "MatMultTranspose_MPIAIJ"
748 PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
749 {
750   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
751   PetscErrorCode ierr;
752   PetscTruth     merged;
753 
754   PetscFunctionBegin;
755   ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
756   /* do nondiagonal part */
757   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
758   if (!merged) {
759     /* send it on its way */
760     ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
761     /* do local part */
762     ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
763     /* receive remote parts: note this assumes the values are not actually */
764     /* added in yy until the next line */
765     ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
766   } else {
767     /* do local part */
768     ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
769     /* send it on its way */
770     ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
771     /* values actually were received in the Begin() but we need to call this nop */
772     ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
773   }
774   PetscFunctionReturn(0);
775 }
776 
777 EXTERN_C_BEGIN
778 #undef __FUNCT__
779 #define __FUNCT__ "MatIsTranspose_MPIAIJ"
780 PetscErrorCode PETSCMAT_DLLEXPORT MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscTruth *f)
781 {
782   MPI_Comm       comm;
783   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *) Amat->data, *Bij;
784   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
785   IS             Me,Notme;
786   PetscErrorCode ierr;
787   PetscInt       M,N,first,last,*notme,i;
788   PetscMPIInt    size;
789 
790   PetscFunctionBegin;
791 
792   /* Easy test: symmetric diagonal block */
793   Bij = (Mat_MPIAIJ *) Bmat->data; Bdia = Bij->A;
794   ierr = MatIsTranspose(Adia,Bdia,tol,f);CHKERRQ(ierr);
795   if (!*f) PetscFunctionReturn(0);
796   ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
797   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
798   if (size == 1) PetscFunctionReturn(0);
799 
800   /* Hard test: off-diagonal block. This takes a MatGetSubMatrix. */
801   ierr = MatGetSize(Amat,&M,&N);CHKERRQ(ierr);
802   ierr = MatGetOwnershipRange(Amat,&first,&last);CHKERRQ(ierr);
803   ierr = PetscMalloc((N-last+first)*sizeof(PetscInt),&notme);CHKERRQ(ierr);
804   for (i=0; i<first; i++) notme[i] = i;
805   for (i=last; i<M; i++) notme[i-last+first] = i;
806   ierr = ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,&Notme);CHKERRQ(ierr);
807   ierr = ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);CHKERRQ(ierr);
808   ierr = MatGetSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);CHKERRQ(ierr);
809   Aoff = Aoffs[0];
810   ierr = MatGetSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);CHKERRQ(ierr);
811   Boff = Boffs[0];
812   ierr = MatIsTranspose(Aoff,Boff,tol,f);CHKERRQ(ierr);
813   ierr = MatDestroyMatrices(1,&Aoffs);CHKERRQ(ierr);
814   ierr = MatDestroyMatrices(1,&Boffs);CHKERRQ(ierr);
815   ierr = ISDestroy(Me);CHKERRQ(ierr);
816   ierr = ISDestroy(Notme);CHKERRQ(ierr);
817 
818   PetscFunctionReturn(0);
819 }
820 EXTERN_C_END
821 
822 #undef __FUNCT__
823 #define __FUNCT__ "MatMultTransposeAdd_MPIAIJ"
824 PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
825 {
826   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
827   PetscErrorCode ierr;
828 
829   PetscFunctionBegin;
830   /* do nondiagonal part */
831   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
832   /* send it on its way */
833   ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
834   /* do local part */
835   ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
836   /* receive remote parts */
837   ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
838   PetscFunctionReturn(0);
839 }
840 
841 /*
842   This only works correctly for square matrices where the subblock A->A is the
843    diagonal block
844 */
845 #undef __FUNCT__
846 #define __FUNCT__ "MatGetDiagonal_MPIAIJ"
847 PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
848 {
849   PetscErrorCode ierr;
850   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
851 
852   PetscFunctionBegin;
853   if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
854   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) {
855     SETERRQ(PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
856   }
857   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
858   PetscFunctionReturn(0);
859 }
860 
861 #undef __FUNCT__
862 #define __FUNCT__ "MatScale_MPIAIJ"
863 PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
864 {
865   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
866   PetscErrorCode ierr;
867 
868   PetscFunctionBegin;
869   ierr = MatScale(a->A,aa);CHKERRQ(ierr);
870   ierr = MatScale(a->B,aa);CHKERRQ(ierr);
871   PetscFunctionReturn(0);
872 }
873 
874 #undef __FUNCT__
875 #define __FUNCT__ "MatDestroy_MPIAIJ"
876 PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
877 {
878   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
879   PetscErrorCode ierr;
880 
881   PetscFunctionBegin;
882 #if defined(PETSC_USE_LOG)
883   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
884 #endif
885   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
886   ierr = MatDestroy(aij->A);CHKERRQ(ierr);
887   ierr = MatDestroy(aij->B);CHKERRQ(ierr);
888 #if defined (PETSC_USE_CTABLE)
889   if (aij->colmap) {ierr = PetscTableDestroy(aij->colmap);CHKERRQ(ierr);}
890 #else
891   ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
892 #endif
893   ierr = PetscFree(aij->garray);CHKERRQ(ierr);
894   if (aij->lvec)   {ierr = VecDestroy(aij->lvec);CHKERRQ(ierr);}
895   if (aij->Mvctx)  {ierr = VecScatterDestroy(aij->Mvctx);CHKERRQ(ierr);}
896   ierr = PetscFree(aij->rowvalues);CHKERRQ(ierr);
897   ierr = PetscFree(aij->ld);CHKERRQ(ierr);
898   ierr = PetscFree(aij);CHKERRQ(ierr);
899 
900   ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
901   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);CHKERRQ(ierr);
902   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);CHKERRQ(ierr);
903   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);CHKERRQ(ierr);
904   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C","",PETSC_NULL);CHKERRQ(ierr);
905   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C","",PETSC_NULL);CHKERRQ(ierr);
906   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C","",PETSC_NULL);CHKERRQ(ierr);
907   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);CHKERRQ(ierr);
908   PetscFunctionReturn(0);
909 }
910 
911 #undef __FUNCT__
912 #define __FUNCT__ "MatView_MPIAIJ_Binary"
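/*
   File layout written by this routine (all data is funneled through process 0): a
   4-entry header (MAT_FILE_COOKIE, global rows, global columns, total number of
   nonzeros), followed by the row lengths for every row, then the global column
   indices of all nonzeros row by row, then the corresponding numerical values.
*/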
913 PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
914 {
915   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
916   Mat_SeqAIJ*       A = (Mat_SeqAIJ*)aij->A->data;
917   Mat_SeqAIJ*       B = (Mat_SeqAIJ*)aij->B->data;
918   PetscErrorCode    ierr;
919   PetscMPIInt       rank,size,tag = ((PetscObject)viewer)->tag;
920   int               fd;
921   PetscInt          nz,header[4],*row_lengths,*range=0,rlen,i;
922   PetscInt          nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz;
923   PetscScalar       *column_values;
924 
925   PetscFunctionBegin;
926   ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
927   ierr = MPI_Comm_size(((PetscObject)mat)->comm,&size);CHKERRQ(ierr);
928   nz   = A->nz + B->nz;
929   if (!rank) {
930     header[0] = MAT_FILE_COOKIE;
931     header[1] = mat->rmap->N;
932     header[2] = mat->cmap->N;
933     ierr = MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
934     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
935     ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
936     /* get largest number of rows any processor has */
937     rlen = mat->rmap->n;
938     range = mat->rmap->range;
939     for (i=1; i<size; i++) {
940       rlen = PetscMax(rlen,range[i+1] - range[i]);
941     }
942   } else {
943     ierr = MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
944     rlen = mat->rmap->n;
945   }
946 
947   /* load up the local row counts */
948   ierr = PetscMalloc((rlen+1)*sizeof(PetscInt),&row_lengths);CHKERRQ(ierr);
949   for (i=0; i<mat->rmap->n; i++) {
950     row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
951   }
952 
953   /* store the row lengths to the file */
954   if (!rank) {
955     MPI_Status status;
956     ierr = PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
957     for (i=1; i<size; i++) {
958       rlen = range[i+1] - range[i];
959       ierr = MPI_Recv(row_lengths,rlen,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
960       ierr = PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
961     }
962   } else {
963     ierr = MPI_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
964   }
965   ierr = PetscFree(row_lengths);CHKERRQ(ierr);
966 
967   nzmax = nz; /* 0th processor needs as much space as the largest processor needs */
968   nzmax = nz; /* )th processor needs space a largest processor needs */
969   ierr = MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
970   ierr = PetscMalloc((nzmax+1)*sizeof(PetscInt),&column_indices);CHKERRQ(ierr);
971   cnt  = 0;
972   for (i=0; i<mat->rmap->n; i++) {
973     for (j=B->i[i]; j<B->i[i+1]; j++) {
974       if ( (col = garray[B->j[j]]) > cstart) break;
975       column_indices[cnt++] = col;
976     }
977     for (k=A->i[i]; k<A->i[i+1]; k++) {
978       column_indices[cnt++] = A->j[k] + cstart;
979     }
980     for (; j<B->i[i+1]; j++) {
981       column_indices[cnt++] = garray[B->j[j]];
982     }
983   }
984   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
985 
986   /* store the column indices to the file */
987   if (!rank) {
988     MPI_Status status;
989     ierr = PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
990     for (i=1; i<size; i++) {
991       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
992       if (rnz > nzmax) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
993       ierr = MPI_Recv(column_indices,rnz,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
994       ierr = PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
995     }
996   } else {
997     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
998     ierr = MPI_Send(column_indices,nz,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
999   }
1000   ierr = PetscFree(column_indices);CHKERRQ(ierr);
1001 
1002   /* load up the local column values */
1003   ierr = PetscMalloc((nzmax+1)*sizeof(PetscScalar),&column_values);CHKERRQ(ierr);
1004   cnt  = 0;
1005   for (i=0; i<mat->rmap->n; i++) {
1006     for (j=B->i[i]; j<B->i[i+1]; j++) {
1007       if ( garray[B->j[j]] > cstart) break;
1008       column_values[cnt++] = B->a[j];
1009     }
1010     for (k=A->i[i]; k<A->i[i+1]; k++) {
1011       column_values[cnt++] = A->a[k];
1012     }
1013     for (; j<B->i[i+1]; j++) {
1014       column_values[cnt++] = B->a[j];
1015     }
1016   }
1017   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1018 
1019   /* store the column values to the file */
1020   if (!rank) {
1021     MPI_Status status;
1022     ierr = PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1023     for (i=1; i<size; i++) {
1024       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
1025       if (rnz > nzmax) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1026       ierr = MPI_Recv(column_values,rnz,MPIU_SCALAR,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
1027       ierr = PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1028     }
1029   } else {
1030     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
1031     ierr = MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
1032   }
1033   ierr = PetscFree(column_values);CHKERRQ(ierr);
1034   PetscFunctionReturn(0);
1035 }
1036 
1037 #undef __FUNCT__
1038 #define __FUNCT__ "MatView_MPIAIJ_ASCIIorDraworSocket"
1039 PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1040 {
1041   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1042   PetscErrorCode    ierr;
1043   PetscMPIInt       rank = aij->rank,size = aij->size;
1044   PetscTruth        isdraw,iascii,isbinary;
1045   PetscViewer       sviewer;
1046   PetscViewerFormat format;
1047 
1048   PetscFunctionBegin;
1049   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1050   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
1051   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
1052   if (iascii) {
1053     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
1054     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1055       MatInfo    info;
1056       PetscTruth inodes;
1057 
1058       ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
1059       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
1060       ierr = MatInodeGetInodeSizes(aij->A,PETSC_NULL,(PetscInt **)&inodes,PETSC_NULL);CHKERRQ(ierr);
1061       if (!inodes) {
1062         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1063 					      rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1064       } else {
1065         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1066 		    rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1067       }
1068       ierr = MatGetInfo(aij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
1069       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1070       ierr = MatGetInfo(aij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
1071       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1072       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
1073       ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
1074       ierr = VecScatterView(aij->Mvctx,viewer);CHKERRQ(ierr);
1075       PetscFunctionReturn(0);
1076     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1077       PetscInt   inodecount,inodelimit,*inodes;
1078       ierr = MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);CHKERRQ(ierr);
1079       if (inodes) {
1080         ierr = PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);CHKERRQ(ierr);
1081       } else {
1082         ierr = PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");CHKERRQ(ierr);
1083       }
1084       PetscFunctionReturn(0);
1085     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1086       PetscFunctionReturn(0);
1087     }
1088   } else if (isbinary) {
1089     if (size == 1) {
1090       ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1091       ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
1092     } else {
1093       ierr = MatView_MPIAIJ_Binary(mat,viewer);CHKERRQ(ierr);
1094     }
1095     PetscFunctionReturn(0);
1096   } else if (isdraw) {
1097     PetscDraw  draw;
1098     PetscTruth isnull;
1099     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
1100     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
1101   }
1102 
1103   if (size == 1) {
1104     ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1105     ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
1106   } else {
1107     /* assemble the entire matrix onto first processor. */
1108     Mat         A;
1109     Mat_SeqAIJ  *Aloc;
1110     PetscInt    M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1111     MatScalar   *a;
1112 
1113     if (mat->rmap->N > 1024) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"ASCII matrix output not allowed for matrices with more than 1024 rows, use binary format instead");
1114 
1115     ierr = MatCreate(((PetscObject)mat)->comm,&A);CHKERRQ(ierr);
1116     if (!rank) {
1117       ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
1118     } else {
1119       ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
1120     }
1121     /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1122     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
1123     ierr = MatMPIAIJSetPreallocation(A,0,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);
1124     ierr = PetscLogObjectParent(mat,A);CHKERRQ(ierr);
1125 
1126     /* copy over the A part */
1127     Aloc = (Mat_SeqAIJ*)aij->A->data;
1128     m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1129     row = mat->rmap->rstart;
1130     for (i=0; i<ai[m]; i++) {aj[i] += mat->cmap->rstart ;}
1131     for (i=0; i<m; i++) {
1132       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);CHKERRQ(ierr);
1133       row++; a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1134     }
1135     aj = Aloc->j;
1136     for (i=0; i<ai[m]; i++) {aj[i] -= mat->cmap->rstart;}
1137 
1138     /* copy over the B part */
1139     Aloc = (Mat_SeqAIJ*)aij->B->data;
1140     m    = aij->B->rmap->n;  ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1141     row  = mat->rmap->rstart;
1142     ierr = PetscMalloc((ai[m]+1)*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1143     ct   = cols;
1144     for (i=0; i<ai[m]; i++) {cols[i] = aij->garray[aj[i]];}
1145     for (i=0; i<m; i++) {
1146       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);CHKERRQ(ierr);
1147       row++; a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1148     }
1149     ierr = PetscFree(ct);CHKERRQ(ierr);
1150     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1151     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1152     /*
1153        Everyone has to call to draw the matrix since the graphics waits are
1154        synchronized across all processors that share the PetscDraw object
1155     */
1156     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
1157     if (!rank) {
1158       ierr = PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1159       ierr = MatView(((Mat_MPIAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
1160     }
1161     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
1162     ierr = MatDestroy(A);CHKERRQ(ierr);
1163   }
1164   PetscFunctionReturn(0);
1165 }
1166 
1167 #undef __FUNCT__
1168 #define __FUNCT__ "MatView_MPIAIJ"
1169 PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1170 {
1171   PetscErrorCode ierr;
1172   PetscTruth     iascii,isdraw,issocket,isbinary;
1173 
1174   PetscFunctionBegin;
1175   ierr  = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
1176   ierr  = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1177   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
1178   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
1179   if (iascii || isdraw || isbinary || issocket) {
1180     ierr = MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
1181   } else {
1182     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIAIJ matrices",((PetscObject)viewer)->type_name);
1183   }
1184   PetscFunctionReturn(0);
1185 }
1186 
1187 #undef __FUNCT__
1188 #define __FUNCT__ "MatRelax_MPIAIJ"
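/*
   Only the "local" (block Jacobi style) SOR variants are supported: in each outer
   iteration the off-process part of xx is gathered into mat->lvec, the right-hand
   side is updated as bb1 = bb - B*x, and the requested forward/backward/symmetric
   sweep is applied to the diagonal block only.
*/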
1189 PetscErrorCode MatRelax_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1190 {
1191   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1192   PetscErrorCode ierr;
1193   Vec            bb1;
1194 
1195   PetscFunctionBegin;
1196   ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
1197 
1198   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
1199     if (flag & SOR_ZERO_INITIAL_GUESS) {
1200       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);CHKERRQ(ierr);
1201       its--;
1202     }
1203 
1204     while (its--) {
1205       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1206       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1207 
1208       /* update rhs: bb1 = bb - B*x */
1209       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1210       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1211 
1212       /* local sweep */
1213       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);CHKERRQ(ierr);
1214     }
1215   } else if (flag & SOR_LOCAL_FORWARD_SWEEP){
1216     if (flag & SOR_ZERO_INITIAL_GUESS) {
1217       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1218       its--;
1219     }
1220     while (its--) {
1221       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1222       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1223 
1224       /* update rhs: bb1 = bb - B*x */
1225       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1226       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1227 
1228       /* local sweep */
1229       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1230     }
1231   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP){
1232     if (flag & SOR_ZERO_INITIAL_GUESS) {
1233       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1234       its--;
1235     }
1236     while (its--) {
1237       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1238       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1239 
1240       /* update rhs: bb1 = bb - B*x */
1241       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1242       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1243 
1244       /* local sweep */
1245       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1246     }
1247   } else {
1248     SETERRQ(PETSC_ERR_SUP,"Parallel SOR not supported");
1249   }
1250 
1251   ierr = VecDestroy(bb1);CHKERRQ(ierr);
1252   PetscFunctionReturn(0);
1253 }
1254 
1255 #undef __FUNCT__
1256 #define __FUNCT__ "MatPermute_MPIAIJ"
1257 PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1258 {
1259   MPI_Comm       comm,pcomm;
1260   PetscInt       first,local_size,nrows;
1261   const PetscInt *rows;
1262   int            ntids;
1263   IS             crowp,growp,irowp,lrowp,lcolp,icolp;
1264   PetscErrorCode ierr;
1265 
1266   PetscFunctionBegin;
1267   ierr = PetscObjectGetComm((PetscObject)A,&comm); CHKERRQ(ierr);
1268   /* make a collective version of 'rowp' */
1269   ierr = PetscObjectGetComm((PetscObject)rowp,&pcomm); CHKERRQ(ierr);
1270   if (pcomm==comm) {
1271     crowp = rowp;
1272   } else {
1273     ierr = ISGetSize(rowp,&nrows); CHKERRQ(ierr);
1274     ierr = ISGetIndices(rowp,&rows); CHKERRQ(ierr);
1275     ierr = ISCreateGeneral(comm,nrows,rows,&crowp); CHKERRQ(ierr);
1276     ierr = ISRestoreIndices(rowp,&rows); CHKERRQ(ierr);
1277   }
1278   /* collect the global row permutation and invert it */
1279   ierr = ISAllGather(crowp,&growp); CHKERRQ(ierr);
1280   ierr = ISSetPermutation(growp); CHKERRQ(ierr);
1281   if (pcomm!=comm) {
1282     ierr = ISDestroy(crowp); CHKERRQ(ierr);
1283   }
1284   ierr = ISInvertPermutation(growp,PETSC_DECIDE,&irowp);CHKERRQ(ierr);
1285   /* get the local target indices */
1286   ierr = MatGetOwnershipRange(A,&first,PETSC_NULL); CHKERRQ(ierr);
1287   ierr = MatGetLocalSize(A,&local_size,PETSC_NULL); CHKERRQ(ierr);
1288   ierr = ISGetIndices(irowp,&rows); CHKERRQ(ierr);
1289   ierr = ISCreateGeneral(MPI_COMM_SELF,local_size,rows+first,&lrowp); CHKERRQ(ierr);
1290   ierr = ISRestoreIndices(irowp,&rows); CHKERRQ(ierr);
1291   ierr = ISDestroy(irowp); CHKERRQ(ierr);
1292   /* the column permutation is so much easier;
1293      make a local version of 'colp' and invert it */
1294   ierr = PetscObjectGetComm((PetscObject)colp,&pcomm); CHKERRQ(ierr);
1295   ierr = MPI_Comm_size(pcomm,&ntids); CHKERRQ(ierr);
1296   if (ntids==1) {
1297     lcolp = colp;
1298   } else {
1299     ierr = ISGetSize(colp,&nrows); CHKERRQ(ierr);
1300     ierr = ISGetIndices(colp,&rows); CHKERRQ(ierr);
1301     ierr = ISCreateGeneral(MPI_COMM_SELF,nrows,rows,&lcolp); CHKERRQ(ierr);
1302   }
1303   ierr = ISInvertPermutation(lcolp,PETSC_DECIDE,&icolp); CHKERRQ(ierr);
1304   ierr = ISSetPermutation(lcolp); CHKERRQ(ierr);
1305   if (ntids>1) {
1306     ierr = ISRestoreIndices(colp,&rows); CHKERRQ(ierr);
1307     ierr = ISDestroy(lcolp); CHKERRQ(ierr);
1308   }
1309   /* now we just get the submatrix */
1310   ierr = MatGetSubMatrix(A,lrowp,icolp,local_size,MAT_INITIAL_MATRIX,B); CHKERRQ(ierr);
1311   /* clean up */
1312   ierr = ISDestroy(lrowp); CHKERRQ(ierr);
1313   ierr = ISDestroy(icolp); CHKERRQ(ierr);
1314   PetscFunctionReturn(0);
1315 }
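
/*
   A minimal usage sketch for MatPermute_MPIAIJ() above (hypothetical caller): rowp
   and colp must be index sets marked as permutations; the result is obtained here
   through MatGetSubMatrix() with the inverted permutations.

       Mat Aperm;
       ierr = ISSetPermutation(rowp);CHKERRQ(ierr);
       ierr = ISSetPermutation(colp);CHKERRQ(ierr);
       ierr = MatPermute(A,rowp,colp,&Aperm);CHKERRQ(ierr);
       ierr = MatDestroy(Aperm);CHKERRQ(ierr);
*/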
1316 
1317 #undef __FUNCT__
1318 #define __FUNCT__ "MatGetInfo_MPIAIJ"
1319 PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1320 {
1321   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1322   Mat            A = mat->A,B = mat->B;
1323   PetscErrorCode ierr;
1324   PetscReal      isend[5],irecv[5];
1325 
1326   PetscFunctionBegin;
1327   info->block_size     = 1.0;
1328   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1329   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1330   isend[3] = info->memory;  isend[4] = info->mallocs;
1331   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1332   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1333   isend[3] += info->memory;  isend[4] += info->mallocs;
1334   if (flag == MAT_LOCAL) {
1335     info->nz_used      = isend[0];
1336     info->nz_allocated = isend[1];
1337     info->nz_unneeded  = isend[2];
1338     info->memory       = isend[3];
1339     info->mallocs      = isend[4];
1340   } else if (flag == MAT_GLOBAL_MAX) {
1341     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)matin)->comm);CHKERRQ(ierr);
1342     info->nz_used      = irecv[0];
1343     info->nz_allocated = irecv[1];
1344     info->nz_unneeded  = irecv[2];
1345     info->memory       = irecv[3];
1346     info->mallocs      = irecv[4];
1347   } else if (flag == MAT_GLOBAL_SUM) {
1348     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)matin)->comm);CHKERRQ(ierr);
1349     info->nz_used      = irecv[0];
1350     info->nz_allocated = irecv[1];
1351     info->nz_unneeded  = irecv[2];
1352     info->memory       = irecv[3];
1353     info->mallocs      = irecv[4];
1354   }
1355   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1356   info->fill_ratio_needed = 0;
1357   info->factor_mallocs    = 0;
1358 
1359   PetscFunctionReturn(0);
1360 }
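
/*
   A usage sketch for MatGetInfo_MPIAIJ() above (hypothetical caller): MAT_LOCAL
   reports the counters of this process only, while MAT_GLOBAL_MAX and MAT_GLOBAL_SUM
   reduce the five counters over the communicator as done above.

       MatInfo info;
       ierr = MatGetInfo(A,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr);
       ierr = PetscPrintf(PETSC_COMM_WORLD,"nz used %g allocated %g\n",info.nz_used,info.nz_allocated);CHKERRQ(ierr);
*/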
1361 
1362 #undef __FUNCT__
1363 #define __FUNCT__ "MatSetOption_MPIAIJ"
1364 PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscTruth flg)
1365 {
1366   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1367   PetscErrorCode ierr;
1368 
1369   PetscFunctionBegin;
1370   switch (op) {
1371   case MAT_NEW_NONZERO_LOCATIONS:
1372   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1373   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1374   case MAT_KEEP_ZEROED_ROWS:
1375   case MAT_NEW_NONZERO_LOCATION_ERR:
1376   case MAT_USE_INODES:
1377   case MAT_IGNORE_ZERO_ENTRIES:
1378     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1379     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1380     break;
1381   case MAT_ROW_ORIENTED:
1382     a->roworiented = flg;
1383     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1384     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1385     break;
1386   case MAT_NEW_DIAGONALS:
1387     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
1388     break;
1389   case MAT_IGNORE_OFF_PROC_ENTRIES:
1390     a->donotstash = PETSC_TRUE;
1391     break;
1392   case MAT_SYMMETRIC:
1393     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1394     break;
1395   case MAT_STRUCTURALLY_SYMMETRIC:
1396   case MAT_HERMITIAN:
1397   case MAT_SYMMETRY_ETERNAL:
1398     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1399     break;
1400   default:
1401     SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
1402   }
1403   PetscFunctionReturn(0);
1404 }
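
/*
   A usage sketch for MatSetOption_MPIAIJ() above (hypothetical caller): most options
   are simply forwarded to the two sequential blocks a->A and a->B.

       ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
       ierr = MatSetOption(A,MAT_IGNORE_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
*/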
1405 
1406 #undef __FUNCT__
1407 #define __FUNCT__ "MatGetRow_MPIAIJ"
1408 PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1409 {
1410   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1411   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1412   PetscErrorCode ierr;
1413   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1414   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1415   PetscInt       *cmap,*idx_p;
1416 
1417   PetscFunctionBegin;
1418   if (mat->getrowactive) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1419   mat->getrowactive = PETSC_TRUE;
1420 
1421   if (!mat->rowvalues && (idx || v)) {
1422     /*
1423         allocate enough space to hold information from the longest row.
1424     */
1425     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1426     PetscInt     max = 1,tmp;
1427     for (i=0; i<matin->rmap->n; i++) {
1428       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1429       if (max < tmp) { max = tmp; }
1430     }
1431     ierr = PetscMalloc(max*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
1432     mat->rowindices = (PetscInt*)(mat->rowvalues + max);
1433   }
1434 
1435   if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Only local rows are supported");
1436   lrow = row - rstart;
1437 
1438   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1439   if (!v)   {pvA = 0; pvB = 0;}
1440   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1441   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1442   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1443   nztot = nzA + nzB;
1444 
1445   cmap  = mat->garray;
1446   if (v  || idx) {
1447     if (nztot) {
1448       /* Sort by increasing column numbers, assuming A and B already sorted */
1449       PetscInt imark = -1;
1450       if (v) {
1451         *v = v_p = mat->rowvalues;
1452         for (i=0; i<nzB; i++) {
1453           if (cmap[cworkB[i]] < cstart)   v_p[i] = vworkB[i];
1454           else break;
1455         }
1456         imark = i;
1457         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1458         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1459       }
1460       if (idx) {
1461         *idx = idx_p = mat->rowindices;
1462         if (imark > -1) {
1463           for (i=0; i<imark; i++) {
1464             idx_p[i] = cmap[cworkB[i]];
1465           }
1466         } else {
1467           for (i=0; i<nzB; i++) {
1468             if (cmap[cworkB[i]] < cstart)   idx_p[i] = cmap[cworkB[i]];
1469             else break;
1470           }
1471           imark = i;
1472         }
1473         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1474         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1475       }
1476     } else {
1477       if (idx) *idx = 0;
1478       if (v)   *v   = 0;
1479     }
1480   }
1481   *nz = nztot;
1482   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1483   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1484   PetscFunctionReturn(0);
1485 }
1486 
1487 #undef __FUNCT__
1488 #define __FUNCT__ "MatRestoreRow_MPIAIJ"
1489 PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1490 {
1491   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1492 
1493   PetscFunctionBegin;
1494   if (!aij->getrowactive) {
1495     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1496   }
1497   aij->getrowactive = PETSC_FALSE;
1498   PetscFunctionReturn(0);
1499 }
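
/*
   A minimal sketch of the row-query interface implemented by the two routines above
   (hypothetical caller, assuming the public MatGetRow()/MatRestoreRow() calling
   sequence of this release); only locally owned rows may be queried.

       PetscInt          rstart,rend,row,ncols;
       const PetscInt    *cols;
       const PetscScalar *vals;
       ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
       for (row=rstart; row<rend; row++) {
         ierr = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
         ... read-only use of cols[0..ncols-1] and vals[0..ncols-1] ...
         ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
       }
*/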
1500 
1501 #undef __FUNCT__
1502 #define __FUNCT__ "MatNorm_MPIAIJ"
1503 PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1504 {
1505   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1506   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1507   PetscErrorCode ierr;
1508   PetscInt       i,j,cstart = mat->cmap->rstart;
1509   PetscReal      sum = 0.0;
1510   MatScalar      *v;
1511 
1512   PetscFunctionBegin;
1513   if (aij->size == 1) {
1514     ierr =  MatNorm(aij->A,type,norm);CHKERRQ(ierr);
1515   } else {
1516     if (type == NORM_FROBENIUS) {
1517       v = amat->a;
1518       for (i=0; i<amat->nz; i++) {
1519 #if defined(PETSC_USE_COMPLEX)
1520         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1521 #else
1522         sum += (*v)*(*v); v++;
1523 #endif
1524       }
1525       v = bmat->a;
1526       for (i=0; i<bmat->nz; i++) {
1527 #if defined(PETSC_USE_COMPLEX)
1528         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1529 #else
1530         sum += (*v)*(*v); v++;
1531 #endif
1532       }
1533       ierr = MPI_Allreduce(&sum,norm,1,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
1534       *norm = sqrt(*norm);
1535     } else if (type == NORM_1) { /* max column norm */
1536       PetscReal *tmp,*tmp2;
1537       PetscInt  *jj,*garray = aij->garray;
1538       ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp);CHKERRQ(ierr);
1539       ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp2);CHKERRQ(ierr);
1540       ierr = PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));CHKERRQ(ierr);
1541       *norm = 0.0;
1542       v = amat->a; jj = amat->j;
1543       for (j=0; j<amat->nz; j++) {
1544         tmp[cstart + *jj++ ] += PetscAbsScalar(*v);  v++;
1545       }
1546       v = bmat->a; jj = bmat->j;
1547       for (j=0; j<bmat->nz; j++) {
1548         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1549       }
1550       ierr = MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
1551       for (j=0; j<mat->cmap->N; j++) {
1552         if (tmp2[j] > *norm) *norm = tmp2[j];
1553       }
1554       ierr = PetscFree(tmp);CHKERRQ(ierr);
1555       ierr = PetscFree(tmp2);CHKERRQ(ierr);
1556     } else if (type == NORM_INFINITY) { /* max row norm */
1557       PetscReal ntemp = 0.0;
1558       for (j=0; j<aij->A->rmap->n; j++) {
1559         v = amat->a + amat->i[j];
1560         sum = 0.0;
1561         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1562           sum += PetscAbsScalar(*v); v++;
1563         }
1564         v = bmat->a + bmat->i[j];
1565         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1566           sum += PetscAbsScalar(*v); v++;
1567         }
1568         if (sum > ntemp) ntemp = sum;
1569       }
1570       ierr = MPI_Allreduce(&ntemp,norm,1,MPIU_REAL,MPI_MAX,((PetscObject)mat)->comm);CHKERRQ(ierr);
1571     } else {
1572       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
1573     }
1574   }
1575   PetscFunctionReturn(0);
1576 }
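
/*
   The three norms computed above, for reference:
     NORM_FROBENIUS: sqrt(sum_ij |a_ij|^2), accumulated over the A and B blocks and
                     combined with an MPI_SUM reduction
     NORM_1:         max_j sum_i |a_ij| (column sums, MPI_SUM reduction, then max)
     NORM_INFINITY:  max_i sum_j |a_ij| (row sums, MPI_MAX reduction)
   A hypothetical caller:
       PetscReal nrm;
       ierr = MatNorm(A,NORM_INFINITY,&nrm);CHKERRQ(ierr);
*/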
1577 
1578 #undef __FUNCT__
1579 #define __FUNCT__ "MatTranspose_MPIAIJ"
1580 PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1581 {
1582   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1583   Mat_SeqAIJ     *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1584   PetscErrorCode ierr;
1585   PetscInt       M = A->rmap->N,N = A->cmap->N,ma,na,mb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i,*d_nnz;
1586   PetscInt       cstart=A->cmap->rstart,ncol;
1587   Mat            B;
1588   MatScalar      *array;
1589 
1590   PetscFunctionBegin;
1591   if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"In-place transpose is only supported for square matrices");
1592 
1593   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n;
1594   ai = Aloc->i; aj = Aloc->j;
1595   bi = Bloc->i; bj = Bloc->j;
1596   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1597     /* compute d_nnz for preallocation; o_nnz is approximated by d_nnz to avoid communication */
1598     ierr = PetscMalloc((1+na)*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
1599     ierr = PetscMemzero(d_nnz,(1+na)*sizeof(PetscInt));CHKERRQ(ierr);
1600     for (i=0; i<ai[ma]; i++){
1601       d_nnz[aj[i]] ++;
1602       aj[i] += cstart; /* global col index to be used by MatSetValues() */
1603     }
1604 
1605     ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
1606     ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr);
1607     ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
1608     ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,d_nnz);CHKERRQ(ierr);
1609     ierr = PetscFree(d_nnz);CHKERRQ(ierr);
1610   } else {
1611     B = *matout;
1612   }
1613 
1614   /* copy over the A part */
1615   array = Aloc->a;
1616   row = A->rmap->rstart;
1617   for (i=0; i<ma; i++) {
1618     ncol = ai[i+1]-ai[i];
1619     ierr = MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1620     row++; array += ncol; aj += ncol;
1621   }
1622   aj = Aloc->j;
1623   for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore the local column indices */
1624 
1625   /* copy over the B part */
1626   ierr = PetscMalloc(bi[mb]*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1627   ierr = PetscMemzero(cols,bi[mb]*sizeof(PetscInt));CHKERRQ(ierr);
1628   array = Bloc->a;
1629   row = A->rmap->rstart;
1630   for (i=0; i<bi[mb]; i++) {cols[i] = a->garray[bj[i]];}
1631   cols_tmp = cols;
1632   for (i=0; i<mb; i++) {
1633     ncol = bi[i+1]-bi[i];
1634     ierr = MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1635     row++; array += ncol; cols_tmp += ncol;
1636   }
1637   ierr = PetscFree(cols);CHKERRQ(ierr);
1638 
1639   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1640   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1641   if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
1642     *matout = B;
1643   } else {
1644     ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
1645   }
1646   PetscFunctionReturn(0);
1647 }
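
/*
   A usage sketch for MatTranspose_MPIAIJ() above (hypothetical caller): out-of-place
   with MAT_INITIAL_MATRIX, or in-place for square matrices by passing the matrix
   itself, which is the A == *matout path handled above.

       Mat At;
       ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
       ierr = MatDestroy(At);CHKERRQ(ierr);
*/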
1648 
1649 #undef __FUNCT__
1650 #define __FUNCT__ "MatDiagonalScale_MPIAIJ"
1651 PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1652 {
1653   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1654   Mat            a = aij->A,b = aij->B;
1655   PetscErrorCode ierr;
1656   PetscInt       s1,s2,s3;
1657 
1658   PetscFunctionBegin;
1659   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1660   if (rr) {
1661     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1662     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1663     /* Overlap communication with computation. */
1664     ierr = VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1665   }
1666   if (ll) {
1667     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1668     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1669     ierr = (*b->ops->diagonalscale)(b,ll,0);CHKERRQ(ierr);
1670   }
1671   /* scale  the diagonal block */
1672   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1673 
1674   if (rr) {
1675     /* Do a scatter end and then right scale the off-diagonal block */
1676     ierr = VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1677     ierr = (*b->ops->diagonalscale)(b,0,aij->lvec);CHKERRQ(ierr);
1678   }
1679 
1680   PetscFunctionReturn(0);
1681 }
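
/*
   MatDiagonalScale_MPIAIJ() above computes A <- diag(ll) * A * diag(rr). The left
   scaling of the off-diagonal block can proceed immediately, while the right scaling
   needs the ghosted entries of rr, hence the scatter overlapped with the diagonal
   block scaling. A hypothetical caller (pass PETSC_NULL for ll or rr to scale one
   side only):

       ierr = MatDiagonalScale(A,l,r);CHKERRQ(ierr);
*/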
1682 
1683 #undef __FUNCT__
1684 #define __FUNCT__ "MatSetBlockSize_MPIAIJ"
1685 PetscErrorCode MatSetBlockSize_MPIAIJ(Mat A,PetscInt bs)
1686 {
1687   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
1688   PetscErrorCode ierr;
1689 
1690   PetscFunctionBegin;
1691   ierr = MatSetBlockSize(a->A,bs);CHKERRQ(ierr);
1692   ierr = MatSetBlockSize(a->B,bs);CHKERRQ(ierr);
1693   PetscFunctionReturn(0);
1694 }
1695 #undef __FUNCT__
1696 #define __FUNCT__ "MatSetUnfactored_MPIAIJ"
1697 PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
1698 {
1699   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
1700   PetscErrorCode ierr;
1701 
1702   PetscFunctionBegin;
1703   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1704   PetscFunctionReturn(0);
1705 }
1706 
1707 #undef __FUNCT__
1708 #define __FUNCT__ "MatEqual_MPIAIJ"
1709 PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscTruth *flag)
1710 {
1711   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
1712   Mat            a,b,c,d;
1713   PetscTruth     flg;
1714   PetscErrorCode ierr;
1715 
1716   PetscFunctionBegin;
1717   a = matA->A; b = matA->B;
1718   c = matB->A; d = matB->B;
1719 
1720   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1721   if (flg) {
1722     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1723   }
1724   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);CHKERRQ(ierr);
1725   PetscFunctionReturn(0);
1726 }
1727 
1728 #undef __FUNCT__
1729 #define __FUNCT__ "MatCopy_MPIAIJ"
1730 PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
1731 {
1732   PetscErrorCode ierr;
1733   Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
1734   Mat_MPIAIJ     *b = (Mat_MPIAIJ *)B->data;
1735 
1736   PetscFunctionBegin;
1737   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1738   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1739     /* Because of the column compression in the off-process part a->B, the number of
1740        columns in a->B and b->B may differ, so MatCopy() cannot be called directly on
1741        the two parts. If needed, a copy more efficient than MatCopy_Basic() could be
1742        provided by first uncompressing the a->B matrices and then copying the
1743        submatrices. */
1744     ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr);
1745   } else {
1746     ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr);
1747     ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr);
1748   }
1749   PetscFunctionReturn(0);
1750 }
1751 
1752 #undef __FUNCT__
1753 #define __FUNCT__ "MatSetUpPreallocation_MPIAIJ"
1754 PetscErrorCode MatSetUpPreallocation_MPIAIJ(Mat A)
1755 {
1756   PetscErrorCode ierr;
1757 
1758   PetscFunctionBegin;
1759   ierr =  MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1760   PetscFunctionReturn(0);
1761 }
1762 
1763 #include "petscblaslapack.h"
1764 #undef __FUNCT__
1765 #define __FUNCT__ "MatAXPY_MPIAIJ"
1766 PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1767 {
1768   PetscErrorCode ierr;
1769   PetscInt       i;
1770   Mat_MPIAIJ     *xx = (Mat_MPIAIJ *)X->data,*yy = (Mat_MPIAIJ *)Y->data;
1771   PetscBLASInt   bnz,one=1;
1772   Mat_SeqAIJ     *x,*y;
1773 
1774   PetscFunctionBegin;
1775   if (str == SAME_NONZERO_PATTERN) {
1776     PetscScalar alpha = a;
1777     x = (Mat_SeqAIJ *)xx->A->data;
1778     y = (Mat_SeqAIJ *)yy->A->data;
1779     bnz = PetscBLASIntCast(x->nz);
1780     BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1781     x = (Mat_SeqAIJ *)xx->B->data;
1782     y = (Mat_SeqAIJ *)yy->B->data;
1783     bnz = PetscBLASIntCast(x->nz);
1784     BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1785   } else if (str == SUBSET_NONZERO_PATTERN) {
1786     ierr = MatAXPY_SeqAIJ(yy->A,a,xx->A,str);CHKERRQ(ierr);
1787 
1788     x = (Mat_SeqAIJ *)xx->B->data;
1789     y = (Mat_SeqAIJ *)yy->B->data;
1790     if (y->xtoy && y->XtoY != xx->B) {
1791       ierr = PetscFree(y->xtoy);CHKERRQ(ierr);
1792       ierr = MatDestroy(y->XtoY);CHKERRQ(ierr);
1793     }
1794     if (!y->xtoy) { /* get xtoy */
1795       ierr = MatAXPYGetxtoy_Private(xx->B->rmap->n,x->i,x->j,xx->garray,y->i,y->j,yy->garray,&y->xtoy);CHKERRQ(ierr);
1796       y->XtoY = xx->B;
1797       ierr = PetscObjectReference((PetscObject)xx->B);CHKERRQ(ierr);
1798     }
1799     for (i=0; i<x->nz; i++) y->a[y->xtoy[i]] += a*(x->a[i]);
1800   } else {
1801     ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr);
1802   }
1803   PetscFunctionReturn(0);
1804 }
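
/*
   MatAXPY_MPIAIJ() above computes Y <- Y + a*X. With SAME_NONZERO_PATTERN the value
   arrays of the matching blocks are combined directly with BLAS axpy; with
   SUBSET_NONZERO_PATTERN a cached x-to-y index map is built for the off-diagonal
   block; any other structure falls back to MatAXPY_Basic(). A hypothetical caller:

       ierr = MatAXPY(Y,alpha,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
*/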
1805 
1806 EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatConjugate_SeqAIJ(Mat);
1807 
1808 #undef __FUNCT__
1809 #define __FUNCT__ "MatConjugate_MPIAIJ"
1810 PetscErrorCode PETSCMAT_DLLEXPORT MatConjugate_MPIAIJ(Mat mat)
1811 {
1812 #if defined(PETSC_USE_COMPLEX)
1813   PetscErrorCode ierr;
1814   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
1815 
1816   PetscFunctionBegin;
1817   ierr = MatConjugate_SeqAIJ(aij->A);CHKERRQ(ierr);
1818   ierr = MatConjugate_SeqAIJ(aij->B);CHKERRQ(ierr);
1819 #else
1820   PetscFunctionBegin;
1821 #endif
1822   PetscFunctionReturn(0);
1823 }
1824 
1825 #undef __FUNCT__
1826 #define __FUNCT__ "MatRealPart_MPIAIJ"
1827 PetscErrorCode MatRealPart_MPIAIJ(Mat A)
1828 {
1829   Mat_MPIAIJ   *a = (Mat_MPIAIJ*)A->data;
1830   PetscErrorCode ierr;
1831 
1832   PetscFunctionBegin;
1833   ierr = MatRealPart(a->A);CHKERRQ(ierr);
1834   ierr = MatRealPart(a->B);CHKERRQ(ierr);
1835   PetscFunctionReturn(0);
1836 }
1837 
1838 #undef __FUNCT__
1839 #define __FUNCT__ "MatImaginaryPart_MPIAIJ"
1840 PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
1841 {
1842   Mat_MPIAIJ   *a = (Mat_MPIAIJ*)A->data;
1843   PetscErrorCode ierr;
1844 
1845   PetscFunctionBegin;
1846   ierr = MatImaginaryPart(a->A);CHKERRQ(ierr);
1847   ierr = MatImaginaryPart(a->B);CHKERRQ(ierr);
1848   PetscFunctionReturn(0);
1849 }
1850 
1851 #ifdef PETSC_HAVE_PBGL
1852 
1853 #include <boost/parallel/mpi/bsp_process_group.hpp>
1854 #include <boost/graph/distributed/ilu_default_graph.hpp>
1855 #include <boost/graph/distributed/ilu_0_block.hpp>
1856 #include <boost/graph/distributed/ilu_preconditioner.hpp>
1857 #include <boost/graph/distributed/petsc/interface.hpp>
1858 #include <boost/multi_array.hpp>
1859 #include <boost/parallel/distributed_property_map.hpp>
1860 
1861 #undef __FUNCT__
1862 #define __FUNCT__ "MatILUFactorSymbolic_MPIAIJ"
1863 /*
1864   This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1865 */
1866 PetscErrorCode MatILUFactorSymbolic_MPIAIJ(Mat fact,Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
1867 {
1868   namespace petsc = boost::distributed::petsc;
1869 
1870   namespace graph_dist = boost::graph::distributed;
1871   using boost::graph::distributed::ilu_default::process_group_type;
1872   using boost::graph::ilu_permuted;
1873 
1874   PetscTruth      row_identity, col_identity;
1875   PetscContainer  c;
1876   PetscInt        m, n, M, N;
1877   PetscErrorCode  ierr;
1878 
1879   PetscFunctionBegin;
1880   if (info->levels != 0) SETERRQ(PETSC_ERR_SUP,"Only levels = 0 supported for parallel ilu");
1881   ierr = ISIdentity(isrow, &row_identity);CHKERRQ(ierr);
1882   ierr = ISIdentity(iscol, &col_identity);CHKERRQ(ierr);
1883   if (!row_identity || !col_identity) {
1884     SETERRQ(PETSC_ERR_ARG_WRONG,"Row and column permutations must be identity for parallel ILU");
1885   }
1886 
1887   process_group_type pg;
1888   typedef graph_dist::ilu_default::ilu_level_graph_type  lgraph_type;
1889   lgraph_type*   lgraph_p = new lgraph_type(petsc::num_global_vertices(A), pg, petsc::matrix_distribution(A, pg));
1890   lgraph_type&   level_graph = *lgraph_p;
1891   graph_dist::ilu_default::graph_type&            graph(level_graph.graph);
1892 
1893   petsc::read_matrix(A, graph, get(boost::edge_weight, graph));
1894   ilu_permuted(level_graph);
1895 
1896   /* put together the new matrix */
1897   ierr = MatCreate(((PetscObject)A)->comm, fact);CHKERRQ(ierr);
1898   ierr = MatGetLocalSize(A, &m, &n);CHKERRQ(ierr);
1899   ierr = MatGetSize(A, &M, &N);CHKERRQ(ierr);
1900   ierr = MatSetSizes(fact, m, n, M, N);CHKERRQ(ierr);
1901   ierr = MatSetType(fact, ((PetscObject)A)->type_name);CHKERRQ(ierr);
1902   ierr = MatAssemblyBegin(fact, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1903   ierr = MatAssemblyEnd(fact, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1904 
1905   ierr = PetscContainerCreate(((PetscObject)A)->comm, &c);CHKERRQ(ierr);
1906   ierr = PetscContainerSetPointer(c, lgraph_p);CHKERRQ(ierr);
1907   ierr = PetscObjectCompose((PetscObject) (fact), "graph", (PetscObject) c);CHKERRQ(ierr);
1908   PetscFunctionReturn(0);
1909 }
1910 
1911 #undef __FUNCT__
1912 #define __FUNCT__ "MatLUFactorNumeric_MPIAIJ"
1913 PetscErrorCode MatLUFactorNumeric_MPIAIJ(Mat B,Mat A, const MatFactorInfo *info)
1914 {
1915   PetscFunctionBegin;
1916   PetscFunctionReturn(0);
1917 }
1918 
1919 #undef __FUNCT__
1920 #define __FUNCT__ "MatSolve_MPIAIJ"
1921 /*
1922   This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1923 */
1924 PetscErrorCode MatSolve_MPIAIJ(Mat A, Vec b, Vec x)
1925 {
1926   namespace graph_dist = boost::graph::distributed;
1927 
1928   typedef graph_dist::ilu_default::ilu_level_graph_type  lgraph_type;
1929   lgraph_type*   lgraph_p;
1930   PetscContainer c;
1931   PetscErrorCode ierr;
1932 
1933   PetscFunctionBegin;
1934   ierr = PetscObjectQuery((PetscObject) A, "graph", (PetscObject *) &c);CHKERRQ(ierr);
1935   ierr = PetscContainerGetPointer(c, (void **) &lgraph_p);CHKERRQ(ierr);
1936   ierr = VecCopy(b, x); CHKERRQ(ierr);
1937 
1938   PetscScalar* array_x;
1939   ierr = VecGetArray(x, &array_x);CHKERRQ(ierr);
1940   PetscInt sx;
1941   ierr = VecGetSize(x, &sx);CHKERRQ(ierr);
1942 
1943   PetscScalar* array_b;
1944   ierr = VecGetArray(b, &array_b);CHKERRQ(ierr);
1945   PetscInt sb;
1946   ierr = VecGetSize(b, &sb);CHKERRQ(ierr);
1947 
1948   lgraph_type&   level_graph = *lgraph_p;
1949   graph_dist::ilu_default::graph_type&            graph(level_graph.graph);
1950 
1951   typedef boost::multi_array_ref<PetscScalar, 1> array_ref_type;
1952   array_ref_type                                 ref_b(array_b, boost::extents[num_vertices(graph)]),
1953                                                  ref_x(array_x, boost::extents[num_vertices(graph)]);
1954 
1955   typedef boost::iterator_property_map<array_ref_type::iterator,
1956                                 boost::property_map<graph_dist::ilu_default::graph_type, boost::vertex_index_t>::type>  gvector_type;
1957   gvector_type                                   vector_b(ref_b.begin(), get(boost::vertex_index, graph)),
1958                                                  vector_x(ref_x.begin(), get(boost::vertex_index, graph));
1959 
1960   ilu_set_solve(*lgraph_p, vector_b, vector_x);
1961 
1962   PetscFunctionReturn(0);
1963 }
1964 #endif
1965 
1966 typedef struct { /* used by MatGetRedundantMatrix() for reusing matredundant */
1967   PetscInt       nzlocal,nsends,nrecvs;
1968   PetscMPIInt    *send_rank;
1969   PetscInt       *sbuf_nz,*sbuf_j,**rbuf_j;
1970   PetscScalar    *sbuf_a,**rbuf_a;
1971   PetscErrorCode (*MatDestroy)(Mat);
1972 } Mat_Redundant;
1973 
1974 #undef __FUNCT__
1975 #define __FUNCT__ "PetscContainerDestroy_MatRedundant"
1976 PetscErrorCode PetscContainerDestroy_MatRedundant(void *ptr)
1977 {
1978   PetscErrorCode       ierr;
1979   Mat_Redundant        *redund=(Mat_Redundant*)ptr;
1980   PetscInt             i;
1981 
1982   PetscFunctionBegin;
1983   ierr = PetscFree(redund->send_rank);CHKERRQ(ierr);
1984   ierr = PetscFree(redund->sbuf_j);CHKERRQ(ierr);
1985   ierr = PetscFree(redund->sbuf_a);CHKERRQ(ierr);
1986   for (i=0; i<redund->nrecvs; i++){
1987     ierr = PetscFree(redund->rbuf_j[i]);CHKERRQ(ierr);
1988     ierr = PetscFree(redund->rbuf_a[i]);CHKERRQ(ierr);
1989   }
1990   ierr = PetscFree3(redund->sbuf_nz,redund->rbuf_j,redund->rbuf_a);CHKERRQ(ierr);
1991   ierr = PetscFree(redund);CHKERRQ(ierr);
1992   PetscFunctionReturn(0);
1993 }
1994 
1995 #undef __FUNCT__
1996 #define __FUNCT__ "MatDestroy_MatRedundant"
1997 PetscErrorCode MatDestroy_MatRedundant(Mat A)
1998 {
1999   PetscErrorCode  ierr;
2000   PetscContainer  container;
2001   Mat_Redundant   *redund=PETSC_NULL;
2002 
2003   PetscFunctionBegin;
2004   ierr = PetscObjectQuery((PetscObject)A,"Mat_Redundant",(PetscObject *)&container);CHKERRQ(ierr);
2005   if (container) {
2006     ierr = PetscContainerGetPointer(container,(void **)&redund);CHKERRQ(ierr);
2007   } else {
2008     SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2009   }
2010   A->ops->destroy = redund->MatDestroy;
2011   ierr = PetscObjectCompose((PetscObject)A,"Mat_Redundant",0);CHKERRQ(ierr);
2012   ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
2013   ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
2014   PetscFunctionReturn(0);
2015 }
2016 
2017 #undef __FUNCT__
2018 #define __FUNCT__ "MatGetRedundantMatrix_MPIAIJ"
2019 PetscErrorCode MatGetRedundantMatrix_MPIAIJ(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,PetscInt mlocal_sub,MatReuse reuse,Mat *matredundant)
2020 {
2021   PetscMPIInt    rank,size;
2022   MPI_Comm       comm=((PetscObject)mat)->comm;
2023   PetscErrorCode ierr;
2024   PetscInt       nsends=0,nrecvs=0,i,rownz_max=0;
2025   PetscMPIInt    *send_rank=PETSC_NULL,*recv_rank=PETSC_NULL;
2026   PetscInt       *rowrange=mat->rmap->range;
2027   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2028   Mat            A=aij->A,B=aij->B,C=*matredundant;
2029   Mat_SeqAIJ     *a=(Mat_SeqAIJ*)A->data,*b=(Mat_SeqAIJ*)B->data;
2030   PetscScalar    *sbuf_a;
2031   PetscInt       nzlocal=a->nz+b->nz;
2032   PetscInt       j,cstart=mat->cmap->rstart,cend=mat->cmap->rend,row,nzA,nzB,ncols,*cworkA,*cworkB;
2033   PetscInt       rstart=mat->rmap->rstart,rend=mat->rmap->rend,*bmap=aij->garray,M,N;
2034   PetscInt       *cols,ctmp,lwrite,*rptr,l,*sbuf_j;
2035   MatScalar      *aworkA,*aworkB;
2036   PetscScalar    *vals;
2037   PetscMPIInt    tag1,tag2,tag3,imdex;
2038   MPI_Request    *s_waits1=PETSC_NULL,*s_waits2=PETSC_NULL,*s_waits3=PETSC_NULL,
2039                  *r_waits1=PETSC_NULL,*r_waits2=PETSC_NULL,*r_waits3=PETSC_NULL;
2040   MPI_Status     recv_status,*send_status;
2041   PetscInt       *sbuf_nz=PETSC_NULL,*rbuf_nz=PETSC_NULL,count;
2042   PetscInt       **rbuf_j=PETSC_NULL;
2043   PetscScalar    **rbuf_a=PETSC_NULL;
2044   Mat_Redundant  *redund=PETSC_NULL;
2045   PetscContainer container;
2046 
2047   PetscFunctionBegin;
2048   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2049   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2050 
2051   if (reuse == MAT_REUSE_MATRIX) {
2052     ierr = MatGetSize(C,&M,&N);CHKERRQ(ierr);
2053     if (M != N || M != mat->rmap->N) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong global size");
2054     ierr = MatGetLocalSize(C,&M,&N);CHKERRQ(ierr);
2055     if (M != N || M != mlocal_sub) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong local size");
2056     ierr = PetscObjectQuery((PetscObject)C,"Mat_Redundant",(PetscObject *)&container);CHKERRQ(ierr);
2057     if (container) {
2058       ierr = PetscContainerGetPointer(container,(void **)&redund);CHKERRQ(ierr);
2059     } else {
2060       SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2061     }
2062     if (nzlocal != redund->nzlocal) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong nzlocal");
2063 
2064     nsends    = redund->nsends;
2065     nrecvs    = redund->nrecvs;
2066     send_rank = redund->send_rank; recv_rank = send_rank + size;
2067     sbuf_nz   = redund->sbuf_nz;     rbuf_nz = sbuf_nz + nsends;
2068     sbuf_j    = redund->sbuf_j;
2069     sbuf_a    = redund->sbuf_a;
2070     rbuf_j    = redund->rbuf_j;
2071     rbuf_a    = redund->rbuf_a;
2072   }
2073 
2074   if (reuse == MAT_INITIAL_MATRIX){
2075     PetscMPIInt  subrank,subsize;
2076     PetscInt     nleftover,np_subcomm;
2077     /* get the destination processors' id send_rank, nsends and nrecvs */
2078     ierr = MPI_Comm_rank(subcomm,&subrank);CHKERRQ(ierr);
2079     ierr = MPI_Comm_size(subcomm,&subsize);CHKERRQ(ierr);
2080     ierr = PetscMalloc((2*size+1)*sizeof(PetscMPIInt),&send_rank);CHKERRQ(ierr);
2081     recv_rank = send_rank + size;
2082     np_subcomm = size/nsubcomm;
2083     nleftover  = size - nsubcomm*np_subcomm;
2084     nsends = 0; nrecvs = 0;
2085     for (i=0; i<size; i++){ /* i = rank of a candidate process */
2086       if (subrank == i/nsubcomm && rank != i){ /* my_subrank == other's subrank */
2087         send_rank[nsends] = i; nsends++;
2088         recv_rank[nrecvs++] = i;
2089       }
2090     }
2091     if (rank >= size - nleftover){/* this proc is a leftover processor */
2092       i = size-nleftover-1;
2093       j = 0;
2094       while (j < nsubcomm - nleftover){
2095         send_rank[nsends++] = i;
2096         i--; j++;
2097       }
2098     }
2099 
2100     if (nleftover && subsize == size/nsubcomm && subrank==subsize-1){ /* this proc recvs from leftover processors */
2101       for (i=0; i<nleftover; i++){
2102         recv_rank[nrecvs++] = size-nleftover+i;
2103       }
2104     }
2105 
2106     /* allocate sbuf_j, sbuf_a */
2107     i = nzlocal + rowrange[rank+1] - rowrange[rank] + 2;
2108     ierr = PetscMalloc(i*sizeof(PetscInt),&sbuf_j);CHKERRQ(ierr);
2109     ierr = PetscMalloc((nzlocal+1)*sizeof(PetscScalar),&sbuf_a);CHKERRQ(ierr);
2110   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2111 
2112   /* copy mat's local entries into the buffers */
2113   if (reuse == MAT_INITIAL_MATRIX){
2114     rownz_max = 0;
2115     rptr = sbuf_j;
2116     cols = sbuf_j + rend-rstart + 1;
2117     vals = sbuf_a;
2118     rptr[0] = 0;
2119     for (i=0; i<rend-rstart; i++){
2120       row = i + rstart;
2121       nzA    = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2122       ncols  = nzA + nzB;
2123       cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2124       aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2125       /* load the column indices for this row into cols */
2126       lwrite = 0;
2127       for (l=0; l<nzB; l++) {
2128         if ((ctmp = bmap[cworkB[l]]) < cstart){
2129           vals[lwrite]   = aworkB[l];
2130           cols[lwrite++] = ctmp;
2131         }
2132       }
2133       for (l=0; l<nzA; l++){
2134         vals[lwrite]   = aworkA[l];
2135         cols[lwrite++] = cstart + cworkA[l];
2136       }
2137       for (l=0; l<nzB; l++) {
2138         if ((ctmp = bmap[cworkB[l]]) >= cend){
2139           vals[lwrite]   = aworkB[l];
2140           cols[lwrite++] = ctmp;
2141         }
2142       }
2143       vals += ncols;
2144       cols += ncols;
2145       rptr[i+1] = rptr[i] + ncols;
2146       if (rownz_max < ncols) rownz_max = ncols;
2147     }
2148     if (rptr[rend-rstart] != a->nz + b->nz) SETERRQ4(PETSC_ERR_PLIB, "rptr[%d] %d != %d + %d",rend-rstart,rptr[rend-rstart],a->nz,b->nz);
2149   } else { /* only copy matrix values into sbuf_a */
2150     rptr = sbuf_j;
2151     vals = sbuf_a;
2152     rptr[0] = 0;
2153     for (i=0; i<rend-rstart; i++){
2154       row = i + rstart;
2155       nzA    = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2156       ncols  = nzA + nzB;
2157       cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2158       aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2159       lwrite = 0;
2160       for (l=0; l<nzB; l++) {
2161         if ((ctmp = bmap[cworkB[l]]) < cstart) vals[lwrite++] = aworkB[l];
2162       }
2163       for (l=0; l<nzA; l++) vals[lwrite++] = aworkA[l];
2164       for (l=0; l<nzB; l++) {
2165         if ((ctmp = bmap[cworkB[l]]) >= cend) vals[lwrite++] = aworkB[l];
2166       }
2167       vals += ncols;
2168       rptr[i+1] = rptr[i] + ncols;
2169     }
2170   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2171 
2172   /* send nzlocal to others, and recv other's nzlocal */
2173   /*--------------------------------------------------*/
2174   if (reuse == MAT_INITIAL_MATRIX){
2175     ierr = PetscMalloc2(3*(nsends + nrecvs)+1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);CHKERRQ(ierr);
2176     s_waits2 = s_waits3 + nsends;
2177     s_waits1 = s_waits2 + nsends;
2178     r_waits1 = s_waits1 + nsends;
2179     r_waits2 = r_waits1 + nrecvs;
2180     r_waits3 = r_waits2 + nrecvs;
2181   } else {
2182     ierr = PetscMalloc2(nsends + nrecvs +1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);CHKERRQ(ierr);
2183     r_waits3 = s_waits3 + nsends;
2184   }
2185 
2186   ierr = PetscObjectGetNewTag((PetscObject)mat,&tag3);CHKERRQ(ierr);
2187   if (reuse == MAT_INITIAL_MATRIX){
2188     /* get new tags to keep the communication clean */
2189     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag1);CHKERRQ(ierr);
2190     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag2);CHKERRQ(ierr);
2191     ierr = PetscMalloc3(nsends+nrecvs+1,PetscInt,&sbuf_nz,nrecvs,PetscInt*,&rbuf_j,nrecvs,PetscScalar*,&rbuf_a);CHKERRQ(ierr);
2192     rbuf_nz = sbuf_nz + nsends;
2193 
2194     /* post receives of other's nzlocal */
2195     for (i=0; i<nrecvs; i++){
2196       ierr = MPI_Irecv(rbuf_nz+i,1,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,r_waits1+i);CHKERRQ(ierr);
2197     }
2198     /* send nzlocal to others */
2199     for (i=0; i<nsends; i++){
2200       sbuf_nz[i] = nzlocal;
2201       ierr = MPI_Isend(sbuf_nz+i,1,MPIU_INT,send_rank[i],tag1,comm,s_waits1+i);CHKERRQ(ierr);
2202     }
2203     /* wait on receives of nzlocal; allocate space for rbuf_j, rbuf_a */
2204     count = nrecvs;
2205     while (count) {
2206       ierr = MPI_Waitany(nrecvs,r_waits1,&imdex,&recv_status);CHKERRQ(ierr);
2207       recv_rank[imdex] = recv_status.MPI_SOURCE;
2208       /* allocate rbuf_a and rbuf_j; then post receives of rbuf_j */
2209       ierr = PetscMalloc((rbuf_nz[imdex]+1)*sizeof(PetscScalar),&rbuf_a[imdex]);CHKERRQ(ierr);
2210 
2211       i = rowrange[recv_status.MPI_SOURCE+1] - rowrange[recv_status.MPI_SOURCE]; /* number of expected mat->i */
2212       rbuf_nz[imdex] += i + 2;
2213       ierr = PetscMalloc(rbuf_nz[imdex]*sizeof(PetscInt),&rbuf_j[imdex]);CHKERRQ(ierr);
2214       ierr = MPI_Irecv(rbuf_j[imdex],rbuf_nz[imdex],MPIU_INT,recv_status.MPI_SOURCE,tag2,comm,r_waits2+imdex);CHKERRQ(ierr);
2215       count--;
2216     }
2217     /* wait on sends of nzlocal */
2218     if (nsends) {ierr = MPI_Waitall(nsends,s_waits1,send_status);CHKERRQ(ierr);}
2219     /* send mat->i,j to others, and receive theirs */
2220     /*------------------------------------------------*/
2221     for (i=0; i<nsends; i++){
2222       j = nzlocal + rowrange[rank+1] - rowrange[rank] + 1;
2223       ierr = MPI_Isend(sbuf_j,j,MPIU_INT,send_rank[i],tag2,comm,s_waits2+i);CHKERRQ(ierr);
2224     }
2225     /* wait on receives of mat->i,j */
2226     /*------------------------------*/
2227     count = nrecvs;
2228     while (count) {
2229       ierr = MPI_Waitany(nrecvs,r_waits2,&imdex,&recv_status);CHKERRQ(ierr);
2230       if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(PETSC_ERR_PLIB, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2231       count--;
2232     }
2233     /* wait on sends of mat->i,j */
2234     /*---------------------------*/
2235     if (nsends) {
2236       ierr = MPI_Waitall(nsends,s_waits2,send_status);CHKERRQ(ierr);
2237     }
2238   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2239 
2240   /* post receives, send and receive mat->a */
2241   /*----------------------------------------*/
2242   for (imdex=0; imdex<nrecvs; imdex++) {
2243     ierr = MPI_Irecv(rbuf_a[imdex],rbuf_nz[imdex],MPIU_SCALAR,recv_rank[imdex],tag3,comm,r_waits3+imdex);CHKERRQ(ierr);
2244   }
2245   for (i=0; i<nsends; i++){
2246     ierr = MPI_Isend(sbuf_a,nzlocal,MPIU_SCALAR,send_rank[i],tag3,comm,s_waits3+i);CHKERRQ(ierr);
2247   }
2248   count = nrecvs;
2249   while (count) {
2250     ierr = MPI_Waitany(nrecvs,r_waits3,&imdex,&recv_status);CHKERRQ(ierr);
2251     if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(PETSC_ERR_PLIB, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2252     count--;
2253   }
2254   if (nsends) {
2255     ierr = MPI_Waitall(nsends,s_waits3,send_status);CHKERRQ(ierr);
2256   }
2257 
2258   ierr = PetscFree2(s_waits3,send_status);CHKERRQ(ierr);
2259 
2260   /* create redundant matrix */
2261   /*-------------------------*/
2262   if (reuse == MAT_INITIAL_MATRIX){
2263     /* compute rownz_max for preallocation */
2264     for (imdex=0; imdex<nrecvs; imdex++){
2265       j = rowrange[recv_rank[imdex]+1] - rowrange[recv_rank[imdex]];
2266       rptr = rbuf_j[imdex];
2267       for (i=0; i<j; i++){
2268         ncols = rptr[i+1] - rptr[i];
2269         if (rownz_max < ncols) rownz_max = ncols;
2270       }
2271     }
2272 
2273     ierr = MatCreate(subcomm,&C);CHKERRQ(ierr);
2274     ierr = MatSetSizes(C,mlocal_sub,mlocal_sub,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
2275     ierr = MatSetFromOptions(C);CHKERRQ(ierr);
2276     ierr = MatSeqAIJSetPreallocation(C,rownz_max,PETSC_NULL);CHKERRQ(ierr);
2277     ierr = MatMPIAIJSetPreallocation(C,rownz_max,PETSC_NULL,rownz_max,PETSC_NULL);CHKERRQ(ierr);
2278   } else {
2279     C = *matredundant;
2280   }
2281 
2282   /* insert local matrix entries */
2283   rptr = sbuf_j;
2284   cols = sbuf_j + rend-rstart + 1;
2285   vals = sbuf_a;
2286   for (i=0; i<rend-rstart; i++){
2287     row   = i + rstart;
2288     ncols = rptr[i+1] - rptr[i];
2289     ierr = MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
2290     vals += ncols;
2291     cols += ncols;
2292   }
2293   /* insert received matrix entries */
2294   for (imdex=0; imdex<nrecvs; imdex++){
2295     rstart = rowrange[recv_rank[imdex]];
2296     rend   = rowrange[recv_rank[imdex]+1];
2297     rptr = rbuf_j[imdex];
2298     cols = rbuf_j[imdex] + rend-rstart + 1;
2299     vals = rbuf_a[imdex];
2300     for (i=0; i<rend-rstart; i++){
2301       row   = i + rstart;
2302       ncols = rptr[i+1] - rptr[i];
2303       ierr = MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
2304       vals += ncols;
2305       cols += ncols;
2306     }
2307   }
2308   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2309   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2310   ierr = MatGetSize(C,&M,&N);CHKERRQ(ierr);
2311   if (M != mat->rmap->N || N != mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_INCOMP,"redundant mat size %d != input mat size %d",M,mat->rmap->N);
2312   if (reuse == MAT_INITIAL_MATRIX){
2313     PetscContainer container;
2314     *matredundant = C;
2315     /* create a supporting struct and attach it to C for reuse */
2316     ierr = PetscNewLog(C,Mat_Redundant,&redund);CHKERRQ(ierr);
2317     ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
2318     ierr = PetscContainerSetPointer(container,redund);CHKERRQ(ierr);
2319     ierr = PetscObjectCompose((PetscObject)C,"Mat_Redundant",(PetscObject)container);CHKERRQ(ierr);
2320     ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_MatRedundant);CHKERRQ(ierr);
2321 
2322     redund->nzlocal = nzlocal;
2323     redund->nsends  = nsends;
2324     redund->nrecvs  = nrecvs;
2325     redund->send_rank = send_rank;
2326     redund->sbuf_nz = sbuf_nz;
2327     redund->sbuf_j  = sbuf_j;
2328     redund->sbuf_a  = sbuf_a;
2329     redund->rbuf_j  = rbuf_j;
2330     redund->rbuf_a  = rbuf_a;
2331 
2332     redund->MatDestroy = C->ops->destroy;
2333     C->ops->destroy    = MatDestroy_MatRedundant;
2334   }
2335   PetscFunctionReturn(0);
2336 }
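
/*
   A usage sketch for MatGetRedundantMatrix_MPIAIJ() above (hypothetical caller): each
   of the nsubcomm sub-communicators ends up with a complete (redundant) copy of the
   parallel matrix; MAT_REUSE_MATRIX only refreshes the numerical values using the
   buffers cached in the attached Mat_Redundant container.

       Mat Cred;
       ierr = MatGetRedundantMatrix(A,nsubcomm,subcomm,mlocal_sub,MAT_INITIAL_MATRIX,&Cred);CHKERRQ(ierr);
       ierr = MatGetRedundantMatrix(A,nsubcomm,subcomm,mlocal_sub,MAT_REUSE_MATRIX,&Cred);CHKERRQ(ierr);
*/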
2337 
2338 #undef __FUNCT__
2339 #define __FUNCT__ "MatGetRowMaxAbs_MPIAIJ"
2340 PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2341 {
2342   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2343   PetscErrorCode ierr;
2344   PetscInt       i,*idxb = 0;
2345   PetscScalar    *va,*vb;
2346   Vec            vtmp;
2347 
2348   PetscFunctionBegin;
2349   ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
2350   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2351   if (idx) {
2352     for (i=0; i<A->rmap->n; i++) { /* va[] and idx[] have one entry per local row */
2353       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2354     }
2355   }
2356 
2357   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2358   if (idx) {
2359     ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);
2360   }
2361   ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2362   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2363 
2364   for (i=0; i<A->rmap->n; i++){
2365     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2366       va[i] = vb[i];
2367       if (idx) idx[i] = a->garray[idxb[i]];
2368     }
2369   }
2370 
2371   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2372   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2373   if (idxb) {
2374     ierr = PetscFree(idxb);CHKERRQ(ierr);
2375   }
2376   ierr = VecDestroy(vtmp);CHKERRQ(ierr);
2377   PetscFunctionReturn(0);
2378 }
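
/*
   A usage sketch for MatGetRowMaxAbs_MPIAIJ() above (hypothetical caller, assuming A
   lives on PETSC_COMM_WORLD): v must share the row layout of A, and the optional
   idx[] receives the global column index at which the maximum of each local row is
   attained.

       Vec      v;
       PetscInt m,*idx;
       ierr = MatGetLocalSize(A,&m,PETSC_NULL);CHKERRQ(ierr);
       ierr = VecCreateMPI(PETSC_COMM_WORLD,m,PETSC_DETERMINE,&v);CHKERRQ(ierr);
       ierr = PetscMalloc(m*sizeof(PetscInt),&idx);CHKERRQ(ierr);
       ierr = MatGetRowMaxAbs(A,v,idx);CHKERRQ(ierr);
*/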
2379 
2380 #undef __FUNCT__
2381 #define __FUNCT__ "MatGetRowMinAbs_MPIAIJ"
2382 PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2383 {
2384   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2385   PetscErrorCode ierr;
2386   PetscInt       i,*idxb = 0;
2387   PetscScalar    *va,*vb;
2388   Vec            vtmp;
2389 
2390   PetscFunctionBegin;
2391   ierr = MatGetRowMinAbs(a->A,v,idx);CHKERRQ(ierr);
2392   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2393   if (idx) {
2394     for (i=0; i<A->cmap->n; i++) {
2395       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2396     }
2397   }
2398 
2399   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2400   if (idx) {
2401     ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);
2402   }
2403   ierr = MatGetRowMinAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2404   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2405 
2406   for (i=0; i<A->rmap->n; i++){
2407     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2408       va[i] = vb[i];
2409       if (idx) idx[i] = a->garray[idxb[i]];
2410     }
2411   }
2412 
2413   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2414   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2415   if (idxb) {
2416     ierr = PetscFree(idxb);CHKERRQ(ierr);
2417   }
2418   ierr = VecDestroy(vtmp);CHKERRQ(ierr);
2419   PetscFunctionReturn(0);
2420 }
2421 
2422 #undef __FUNCT__
2423 #define __FUNCT__ "MatGetRowMin_MPIAIJ"
2424 PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2425 {
2426   Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
2427   PetscInt       n      = A->rmap->n;
2428   PetscInt       cstart = A->cmap->rstart;
2429   PetscInt      *cmap   = mat->garray;
2430   PetscInt      *diagIdx, *offdiagIdx;
2431   Vec            diagV, offdiagV;
2432   PetscScalar   *a, *diagA, *offdiagA;
2433   PetscInt       r;
2434   PetscErrorCode ierr;
2435 
2436   PetscFunctionBegin;
2437   ierr = PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);CHKERRQ(ierr);
2438   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2439   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2440   ierr = MatGetRowMin(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2441   ierr = MatGetRowMin(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2442   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2443   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2444   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2445   for(r = 0; r < n; ++r) {
2446     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2447       a[r]   = diagA[r];
2448       idx[r] = cstart + diagIdx[r];
2449     } else {
2450       a[r]   = offdiagA[r];
2451       idx[r] = cmap[offdiagIdx[r]];
2452     }
2453   }
2454   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2455   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2456   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2457   ierr = VecDestroy(diagV);CHKERRQ(ierr);
2458   ierr = VecDestroy(offdiagV);CHKERRQ(ierr);
2459   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2460   PetscFunctionReturn(0);
2461 }
2462 
2463 #undef __FUNCT__
2464 #define __FUNCT__ "MatGetRowMax_MPIAIJ"
2465 PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2466 {
2467   Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
2468   PetscInt       n      = A->rmap->n;
2469   PetscInt       cstart = A->cmap->rstart;
2470   PetscInt      *cmap   = mat->garray;
2471   PetscInt      *diagIdx, *offdiagIdx;
2472   Vec            diagV, offdiagV;
2473   PetscScalar   *a, *diagA, *offdiagA;
2474   PetscInt       r;
2475   PetscErrorCode ierr;
2476 
2477   PetscFunctionBegin;
2478   ierr = PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);CHKERRQ(ierr);
2479   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2480   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2481   ierr = MatGetRowMax(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2482   ierr = MatGetRowMax(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2483   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2484   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2485   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2486   for(r = 0; r < n; ++r) {
2487     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2488       a[r]   = diagA[r];
2489       idx[r] = cstart + diagIdx[r];
2490     } else {
2491       a[r]   = offdiagA[r];
2492       idx[r] = cmap[offdiagIdx[r]];
2493     }
2494   }
2495   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2496   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2497   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2498   ierr = VecDestroy(diagV);CHKERRQ(ierr);
2499   ierr = VecDestroy(offdiagV);CHKERRQ(ierr);
2500   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2501   PetscFunctionReturn(0);
2502 }
2503 
2504 #undef __FUNCT__
2505 #define __FUNCT__ "MatGetSeqNonzerostructure_MPIAIJ"
2506 PetscErrorCode MatGetSeqNonzerostructure_MPIAIJ(Mat mat,Mat *newmat[])
2507 {
2508   PetscErrorCode ierr;
2509 
2510   PetscFunctionBegin;
2511   ierr = MatGetSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,newmat);CHKERRQ(ierr);
2512   PetscFunctionReturn(0);
2513 }
2514 
2515 /* -------------------------------------------------------------------*/
2516 static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2517        MatGetRow_MPIAIJ,
2518        MatRestoreRow_MPIAIJ,
2519        MatMult_MPIAIJ,
2520 /* 4*/ MatMultAdd_MPIAIJ,
2521        MatMultTranspose_MPIAIJ,
2522        MatMultTransposeAdd_MPIAIJ,
2523 #ifdef PETSC_HAVE_PBGL
2524        MatSolve_MPIAIJ,
2525 #else
2526        0,
2527 #endif
2528        0,
2529        0,
2530 /*10*/ 0,
2531        0,
2532        0,
2533        MatRelax_MPIAIJ,
2534        MatTranspose_MPIAIJ,
2535 /*15*/ MatGetInfo_MPIAIJ,
2536        MatEqual_MPIAIJ,
2537        MatGetDiagonal_MPIAIJ,
2538        MatDiagonalScale_MPIAIJ,
2539        MatNorm_MPIAIJ,
2540 /*20*/ MatAssemblyBegin_MPIAIJ,
2541        MatAssemblyEnd_MPIAIJ,
2542        0,
2543        MatSetOption_MPIAIJ,
2544        MatZeroEntries_MPIAIJ,
2545 /*25*/ MatZeroRows_MPIAIJ,
2546        0,
2547 #ifdef PETSC_HAVE_PBGL
2548        0,
2549 #else
2550        0,
2551 #endif
2552        0,
2553        0,
2554 /*30*/ MatSetUpPreallocation_MPIAIJ,
2555 #ifdef PETSC_HAVE_PBGL
2556        0,
2557 #else
2558        0,
2559 #endif
2560        0,
2561        0,
2562        0,
2563 /*35*/ MatDuplicate_MPIAIJ,
2564        0,
2565        0,
2566        0,
2567        0,
2568 /*40*/ MatAXPY_MPIAIJ,
2569        MatGetSubMatrices_MPIAIJ,
2570        MatIncreaseOverlap_MPIAIJ,
2571        MatGetValues_MPIAIJ,
2572        MatCopy_MPIAIJ,
2573 /*45*/ MatGetRowMax_MPIAIJ,
2574        MatScale_MPIAIJ,
2575        0,
2576        0,
2577        0,
2578 /*50*/ MatSetBlockSize_MPIAIJ,
2579        0,
2580        0,
2581        0,
2582        0,
2583 /*55*/ MatFDColoringCreate_MPIAIJ,
2584        0,
2585        MatSetUnfactored_MPIAIJ,
2586        MatPermute_MPIAIJ,
2587        0,
2588 /*60*/ MatGetSubMatrix_MPIAIJ,
2589        MatDestroy_MPIAIJ,
2590        MatView_MPIAIJ,
2591        0,
2592        0,
2593 /*65*/ 0,
2594        0,
2595        0,
2596        0,
2597        0,
2598 /*70*/ MatGetRowMaxAbs_MPIAIJ,
2599        MatGetRowMinAbs_MPIAIJ,
2600        0,
2601        MatSetColoring_MPIAIJ,
2602 #if defined(PETSC_HAVE_ADIC)
2603        MatSetValuesAdic_MPIAIJ,
2604 #else
2605        0,
2606 #endif
2607        MatSetValuesAdifor_MPIAIJ,
2608 /*75*/ 0,
2609        0,
2610        0,
2611        0,
2612        0,
2613 /*80*/ 0,
2614        0,
2615        0,
2616 /*84*/ MatLoad_MPIAIJ,
2617        0,
2618        0,
2619        0,
2620        0,
2621        0,
2622 /*90*/ MatMatMult_MPIAIJ_MPIAIJ,
2623        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2624        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2625        MatPtAP_Basic,
2626        MatPtAPSymbolic_MPIAIJ,
2627 /*95*/ MatPtAPNumeric_MPIAIJ,
2628        0,
2629        0,
2630        0,
2631        0,
2632 /*100*/0,
2633        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2634        MatPtAPNumeric_MPIAIJ_MPIAIJ,
2635        MatConjugate_MPIAIJ,
2636        0,
2637 /*105*/MatSetValuesRow_MPIAIJ,
2638        MatRealPart_MPIAIJ,
2639        MatImaginaryPart_MPIAIJ,
2640        0,
2641        0,
2642 /*110*/0,
2643        MatGetRedundantMatrix_MPIAIJ,
2644        MatGetRowMin_MPIAIJ,
2645        0,
2646        0,
2647 /*115*/MatGetSeqNonzerostructure_MPIAIJ};
2648 
2649 /* ----------------------------------------------------------------------------------------*/
2650 
2651 EXTERN_C_BEGIN
2652 #undef __FUNCT__
2653 #define __FUNCT__ "MatStoreValues_MPIAIJ"
2654 PetscErrorCode PETSCMAT_DLLEXPORT MatStoreValues_MPIAIJ(Mat mat)
2655 {
2656   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
2657   PetscErrorCode ierr;
2658 
2659   PetscFunctionBegin;
2660   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
2661   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
2662   PetscFunctionReturn(0);
2663 }
2664 EXTERN_C_END
2665 
2666 EXTERN_C_BEGIN
2667 #undef __FUNCT__
2668 #define __FUNCT__ "MatRetrieveValues_MPIAIJ"
2669 PetscErrorCode PETSCMAT_DLLEXPORT MatRetrieveValues_MPIAIJ(Mat mat)
2670 {
2671   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
2672   PetscErrorCode ierr;
2673 
2674   PetscFunctionBegin;
2675   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
2676   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
2677   PetscFunctionReturn(0);
2678 }
2679 EXTERN_C_END
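
/*
   A usage sketch for the two routines above (hypothetical caller): MatStoreValues()
   snapshots the numerical values of both sequential blocks so that, after the values
   have been changed while keeping the same nonzero pattern, MatRetrieveValues() can
   restore them (MatStoreValues() requires that new nonzero locations be disabled).

       ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);CHKERRQ(ierr);
       ierr = MatStoreValues(A);CHKERRQ(ierr);
       ierr = MatRetrieveValues(A);CHKERRQ(ierr);
*/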
2680 
2681 #include "petscpc.h"
2682 EXTERN_C_BEGIN
2683 #undef __FUNCT__
2684 #define __FUNCT__ "MatMPIAIJSetPreallocation_MPIAIJ"
2685 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2686 {
2687   Mat_MPIAIJ     *b;
2688   PetscErrorCode ierr;
2689   PetscInt       i;
2690 
2691   PetscFunctionBegin;
2692   B->preallocated = PETSC_TRUE;
2693   if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
2694   if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
2695   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
2696   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
2697 
2698   ierr = PetscMapSetBlockSize(B->rmap,1);CHKERRQ(ierr);
2699   ierr = PetscMapSetBlockSize(B->cmap,1);CHKERRQ(ierr);
2700   ierr = PetscMapSetUp(B->rmap);CHKERRQ(ierr);
2701   ierr = PetscMapSetUp(B->cmap);CHKERRQ(ierr);
2702   if (d_nnz) {
2703     for (i=0; i<B->rmap->n; i++) {
2704       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
2705     }
2706   }
2707   if (o_nnz) {
2708     for (i=0; i<B->rmap->n; i++) {
2709       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
2710     }
2711   }
2712   b = (Mat_MPIAIJ*)B->data;
2713 
2714   /* Explicitly create 2 MATSEQAIJ matrices. */
2715   ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
2716   ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
2717   ierr = MatSetType(b->A,MATSEQAIJ);CHKERRQ(ierr);
2718   ierr = PetscLogObjectParent(B,b->A);CHKERRQ(ierr);
2719   ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
2720   ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
2721   ierr = MatSetType(b->B,MATSEQAIJ);CHKERRQ(ierr);
2722   ierr = PetscLogObjectParent(B,b->B);CHKERRQ(ierr);
2723 
2724   ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
2725   ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
2726 
2727   PetscFunctionReturn(0);
2728 }
2729 EXTERN_C_END
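
/*
   A preallocation sketch for the routine above (hypothetical caller): d_nz/d_nnz
   describe the diagonal block (columns owned by this process) and o_nz/o_nnz the
   off-diagonal block, matching the two sequential matrices b->A and b->B created
   above.

       Mat A;
       ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
       ierr = MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
       ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
       ierr = MatMPIAIJSetPreallocation(A,5,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
*/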
2730 
2731 #undef __FUNCT__
2732 #define __FUNCT__ "MatDuplicate_MPIAIJ"
2733 PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2734 {
2735   Mat            mat;
2736   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2737   PetscErrorCode ierr;
2738 
2739   PetscFunctionBegin;
2740   *newmat       = 0;
2741   ierr = MatCreate(((PetscObject)matin)->comm,&mat);CHKERRQ(ierr);
2742   ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
2743   ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
2744   ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
2745   a    = (Mat_MPIAIJ*)mat->data;
2746 
2747   mat->factor       = matin->factor;
2748   mat->rmap->bs      = matin->rmap->bs;
2749   mat->assembled    = PETSC_TRUE;
2750   mat->insertmode   = NOT_SET_VALUES;
2751   mat->preallocated = PETSC_TRUE;
2752 
2753   a->size           = oldmat->size;
2754   a->rank           = oldmat->rank;
2755   a->donotstash     = oldmat->donotstash;
2756   a->roworiented    = oldmat->roworiented;
2757   a->rowindices     = 0;
2758   a->rowvalues      = 0;
2759   a->getrowactive   = PETSC_FALSE;
2760 
2761   ierr = PetscMapCopy(((PetscObject)mat)->comm,matin->rmap,mat->rmap);CHKERRQ(ierr);
2762   ierr = PetscMapCopy(((PetscObject)mat)->comm,matin->cmap,mat->cmap);CHKERRQ(ierr);
2763 
2764   ierr = MatStashCreate_Private(((PetscObject)matin)->comm,1,&mat->stash);CHKERRQ(ierr);
2765   if (oldmat->colmap) {
2766 #if defined (PETSC_USE_CTABLE)
2767     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
2768 #else
2769     ierr = PetscMalloc((mat->cmap->N)*sizeof(PetscInt),&a->colmap);CHKERRQ(ierr);
2770     ierr = PetscLogObjectMemory(mat,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2771     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2772 #endif
2773   } else a->colmap = 0;
2774   if (oldmat->garray) {
2775     PetscInt len;
2776     len  = oldmat->B->cmap->n;
2777     ierr = PetscMalloc((len+1)*sizeof(PetscInt),&a->garray);CHKERRQ(ierr);
2778     ierr = PetscLogObjectMemory(mat,len*sizeof(PetscInt));CHKERRQ(ierr);
2779     if (len) { ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr); }
2780   } else a->garray = 0;
2781 
2782   ierr = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
2783   ierr = PetscLogObjectParent(mat,a->lvec);CHKERRQ(ierr);
2784   ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
2785   ierr = PetscLogObjectParent(mat,a->Mvctx);CHKERRQ(ierr);
2786   ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
2787   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
2788   ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
2789   ierr = PetscLogObjectParent(mat,a->B);CHKERRQ(ierr);
2790   ierr = PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
2791   *newmat = mat;
2792   PetscFunctionReturn(0);
2793 }
2794 
2795 #include "petscsys.h"
2796 
2797 #undef __FUNCT__
2798 #define __FUNCT__ "MatLoad_MPIAIJ"
2799 PetscErrorCode MatLoad_MPIAIJ(PetscViewer viewer, const MatType type,Mat *newmat)
2800 {
2801   Mat            A;
2802   PetscScalar    *vals,*svals;
2803   MPI_Comm       comm = ((PetscObject)viewer)->comm;
2804   MPI_Status     status;
2805   PetscErrorCode ierr;
2806   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,maxnz;
2807   PetscInt       i,nz,j,rstart,rend,mmax;
2808   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2809   PetscInt       *ourlens = PETSC_NULL,*procsnz = PETSC_NULL,*offlens = PETSC_NULL,jj,*mycols,*smycols;
2810   PetscInt       cend,cstart,n,*rowners;
2811   int            fd;
2812 
2813   PetscFunctionBegin;
2814   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2815   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2816   if (!rank) {
2817     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2818     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
2819     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2820   }
2821 
2822   ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
2823   M = header[1]; N = header[2];
2824   /* determine ownership of all rows */
2825   m    = M/size + ((M % size) > rank);
2826   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
2827   ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
2828 
2829   /* First process needs enough room for process with most rows */
2830   if (!rank) {
2831     mmax       = rowners[1];
2832     for (i=2; i<=size; i++) {   /* include the last process's row count */
2833       mmax = PetscMax(mmax,rowners[i]);
2834     }
2835   } else mmax = m;
2836 
2837   rowners[0] = 0;
2838   for (i=2; i<=size; i++) {
2839     rowners[i] += rowners[i-1];
2840   }
2841   rstart = rowners[rank];
2842   rend   = rowners[rank+1];
2843 
2844   /* distribute row lengths to all processors */
2845   ierr    = PetscMalloc2(mmax,PetscInt,&ourlens,mmax,PetscInt,&offlens);CHKERRQ(ierr);
2846   if (!rank) {
2847     ierr = PetscBinaryRead(fd,ourlens,m,PETSC_INT);CHKERRQ(ierr);
2848     ierr = PetscMalloc(m*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
2849     ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
2850     ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
2851     for (j=0; j<m; j++) {
2852       procsnz[0] += ourlens[j];
2853     }
2854     for (i=1; i<size; i++) {
2855       ierr = PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);CHKERRQ(ierr);
2856       /* calculate the number of nonzeros on each processor */
2857       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2858         procsnz[i] += rowlengths[j];
2859       }
2860       ierr = MPI_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2861     }
2862     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2863   } else {
2864     ierr = MPI_Recv(ourlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
2865   }
2866 
2867   if (!rank) {
2868     /* determine max buffer needed and allocate it */
2869     maxnz = 0;
2870     for (i=0; i<size; i++) {
2871       maxnz = PetscMax(maxnz,procsnz[i]);
2872     }
2873     ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);
2874 
2875     /* read in my part of the matrix column indices  */
2876     nz   = procsnz[0];
2877     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
2878     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2879 
2880     /* read in everyone else's rows and ship them off */
2881     for (i=1; i<size; i++) {
2882       nz   = procsnz[i];
2883       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2884       ierr = MPI_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2885     }
2886     ierr = PetscFree(cols);CHKERRQ(ierr);
2887   } else {
2888     /* determine buffer space needed for message */
2889     nz = 0;
2890     for (i=0; i<m; i++) {
2891       nz += ourlens[i];
2892     }
2893     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
2894 
2895     /* receive message of column indices*/
2896     ierr = MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
2897     ierr = MPI_Get_count(&status,MPIU_INT,&maxnz);CHKERRQ(ierr);
2898     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2899   }
2900 
2901   /* determine column ownership if matrix is not square */
2902   if (N != M) {
2903     n      = N/size + ((N % size) > rank);
2904     ierr   = MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
2905     cstart = cend - n;
2906   } else {
2907     cstart = rstart;
2908     cend   = rend;
2909     n      = cend - cstart;
2910   }
2911 
2912   /* loop over local rows, determining number of off diagonal entries */
2913   ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
2914   jj = 0;
2915   for (i=0; i<m; i++) {
2916     for (j=0; j<ourlens[i]; j++) {
2917       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
2918       jj++;
2919     }
2920   }
2921 
2922   /* create our matrix */
2923   for (i=0; i<m; i++) {
2924     ourlens[i] -= offlens[i];
2925   }
2926   ierr = MatCreate(comm,&A);CHKERRQ(ierr);
2927   ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
2928   ierr = MatSetType(A,type);CHKERRQ(ierr);
2929   ierr = MatMPIAIJSetPreallocation(A,0,ourlens,0,offlens);CHKERRQ(ierr);
2930 
2931   for (i=0; i<m; i++) {
2932     ourlens[i] += offlens[i];
2933   }
2934 
2935   if (!rank) {
2936     ierr = PetscMalloc((maxnz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
2937 
2938     /* read in my part of the matrix numerical values  */
2939     nz   = procsnz[0];
2940     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2941 
2942     /* insert into matrix */
2943     jj      = rstart;
2944     smycols = mycols;
2945     svals   = vals;
2946     for (i=0; i<m; i++) {
2947       ierr = MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2948       smycols += ourlens[i];
2949       svals   += ourlens[i];
2950       jj++;
2951     }
2952 
2953     /* read in other processors and ship out */
2954     for (i=1; i<size; i++) {
2955       nz   = procsnz[i];
2956       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2957       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
2958     }
2959     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2960   } else {
2961     /* receive numeric values */
2962     ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
2963 
2964     /* receive message of values*/
2965     ierr = MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);CHKERRQ(ierr);
2966     ierr = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2967     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2968 
2969     /* insert into matrix */
2970     jj      = rstart;
2971     smycols = mycols;
2972     svals   = vals;
2973     for (i=0; i<m; i++) {
2974       ierr     = MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2975       smycols += ourlens[i];
2976       svals   += ourlens[i];
2977       jj++;
2978     }
2979   }
2980   ierr = PetscFree2(ourlens,offlens);CHKERRQ(ierr);
2981   ierr = PetscFree(vals);CHKERRQ(ierr);
2982   ierr = PetscFree(mycols);CHKERRQ(ierr);
2983   ierr = PetscFree(rowners);CHKERRQ(ierr);
2984 
2985   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2986   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2987   *newmat = A;
2988   PetscFunctionReturn(0);
2989 }
2990 
2991 #undef __FUNCT__
2992 #define __FUNCT__ "MatGetSubMatrix_MPIAIJ"
2993 /*
2994     Not great since it makes two copies of the submatrix: first a sequential (SeqAIJ)
2995   copy on each process, then the final result obtained by concatenating the local matrices.
2996   Writing it directly would look much like MatGetSubMatrices_MPIAIJ()
2997 */
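/*
    A minimal calling sketch of the reuse behaviour handled below, through the public
    MatGetSubMatrix() interface (the matrix mat and the index sets isrow/iscol are assumed
    to exist already; this is an illustration only, not one of the PETSc examples):

       PetscErrorCode ierr;
       Mat            sub;
       ierr = MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_INITIAL_MATRIX,&sub);CHKERRQ(ierr);
       ierr = MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_REUSE_MATRIX,&sub);CHKERRQ(ierr);
*/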
2998 PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
2999 {
3000   PetscErrorCode ierr;
3001   PetscMPIInt    rank,size;
3002   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j;
3003   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3004   Mat            *local,M,Mreuse;
3005   MatScalar      *vwork,*aa;
3006   MPI_Comm       comm = ((PetscObject)mat)->comm;
3007   Mat_SeqAIJ     *aij;
3008 
3009 
3010   PetscFunctionBegin;
3011   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3012   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3013 
3014   if (call ==  MAT_REUSE_MATRIX) {
3015     ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject *)&Mreuse);CHKERRQ(ierr);
3016     if (!Mreuse) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3017     local = &Mreuse;
3018     ierr  = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&local);CHKERRQ(ierr);
3019   } else {
3020     ierr   = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&local);CHKERRQ(ierr);
3021     Mreuse = *local;
3022     ierr   = PetscFree(local);CHKERRQ(ierr);
3023   }
3024 
3025   /*
3026       m - number of local rows
3027       n - number of columns (same on all processors)
3028       rstart - first row in new global matrix generated
3029   */
3030   ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr);
3031   if (call == MAT_INITIAL_MATRIX) {
3032     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3033     ii  = aij->i;
3034     jj  = aij->j;
3035 
3036     /*
3037         Determine the number of non-zeros in the diagonal and off-diagonal
3038         portions of the matrix in order to do correct preallocation
3039     */
3040 
3041     /* first get start and end of "diagonal" columns */
3042     if (csize == PETSC_DECIDE) {
3043       ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
3044       if (mglobal == n) { /* square matrix */
3045 	nlocal = m;
3046       } else {
3047         nlocal = n/size + ((n % size) > rank);
3048       }
3049     } else {
3050       nlocal = csize;
3051     }
3052     ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3053     rstart = rend - nlocal;
3054     if (rank == size - 1 && rend != n) {
3055       SETERRQ2(PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3056     }
3057 
3058     /* next, compute all the lengths */
3059     ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);CHKERRQ(ierr);
3060     olens = dlens + m;
3061     for (i=0; i<m; i++) {
3062       jend = ii[i+1] - ii[i];
3063       olen = 0;
3064       dlen = 0;
3065       for (j=0; j<jend; j++) {
3066         if (*jj < rstart || *jj >= rend) olen++;
3067         else dlen++;
3068         jj++;
3069       }
3070       olens[i] = olen;
3071       dlens[i] = dlen;
3072     }
3073     ierr = MatCreate(comm,&M);CHKERRQ(ierr);
3074     ierr = MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);CHKERRQ(ierr);
3075     ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
3076     ierr = MatMPIAIJSetPreallocation(M,0,dlens,0,olens);CHKERRQ(ierr);
3077     ierr = PetscFree(dlens);CHKERRQ(ierr);
3078   } else {
3079     PetscInt ml,nl;
3080 
3081     M = *newmat;
3082     ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr);
3083     if (ml != m) SETERRQ(PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3084     ierr = MatZeroEntries(M);CHKERRQ(ierr);
3085     /*
3086          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3087        rather than the slower MatSetValues().
3088     */
3089     M->was_assembled = PETSC_TRUE;
3090     M->assembled     = PETSC_FALSE;
3091   }
3092   ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
3093   aij = (Mat_SeqAIJ*)(Mreuse)->data;
3094   ii  = aij->i;
3095   jj  = aij->j;
3096   aa  = aij->a;
3097   for (i=0; i<m; i++) {
3098     row   = rstart + i;
3099     nz    = ii[i+1] - ii[i];
3100     cwork = jj;     jj += nz;
3101     vwork = aa;     aa += nz;
3102     ierr = MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
3103   }
3104 
3105   ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3106   ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3107   *newmat = M;
3108 
3109   /* save submatrix used in processor for next request */
3110   if (call ==  MAT_INITIAL_MATRIX) {
3111     ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
3112     ierr = PetscObjectDereference((PetscObject)Mreuse);CHKERRQ(ierr);
3113   }
3114 
3115   PetscFunctionReturn(0);
3116 }
3117 
3118 EXTERN_C_BEGIN
3119 #undef __FUNCT__
3120 #define __FUNCT__ "MatMPIAIJSetPreallocationCSR_MPIAIJ"
3121 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3122 {
3123   PetscInt       m,cstart, cend,j,nnz,i,d;
3124   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3125   const PetscInt *JJ;
3126   PetscScalar    *values;
3127   PetscErrorCode ierr;
3128 
3129   PetscFunctionBegin;
3130   if (Ii[0]) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0, it is %D",Ii[0]);
3131 
3132   ierr = PetscMapSetBlockSize(B->rmap,1);CHKERRQ(ierr);
3133   ierr = PetscMapSetBlockSize(B->cmap,1);CHKERRQ(ierr);
3134   ierr = PetscMapSetUp(B->rmap);CHKERRQ(ierr);
3135   ierr = PetscMapSetUp(B->cmap);CHKERRQ(ierr);
3136   m      = B->rmap->n;
3137   cstart = B->cmap->rstart;
3138   cend   = B->cmap->rend;
3139   rstart = B->rmap->rstart;
3140 
3141   ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
3142   o_nnz = d_nnz + m;
3143 
3144 #if defined(PETSC_USE_DEBUG)
3145   for (i=0; i<m; i++) {
3146     nnz     = Ii[i+1]- Ii[i];
3147     JJ      = J + Ii[i];
3148     if (nnz < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nnz);
3149     if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3150     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3151     for (j=1; j<nnz; j++) {
3152       if (JJ[j] <= JJ[j-1]) SETERRQ2(PETSC_ERR_ARG_WRONGSTATE,"Row %D has unsorted column index at location %D in its column indices",i,j);
3153     }
3154   }
3155 #endif
3156 
3157   for (i=0; i<m; i++) {
3158     nnz     = Ii[i+1]- Ii[i];
3159     JJ      = J + Ii[i];
3160     nnz_max = PetscMax(nnz_max,nnz);
3161     for (j=0; j<nnz; j++) {
3162       if (*JJ >= cstart) break;
3163       JJ++;
3164     }
3165     d = 0;
3166     for (; j<nnz; j++) {
3167       if (*JJ++ >= cend) break;
3168       d++;
3169     }
3170     d_nnz[i] = d;
3171     o_nnz[i] = nnz - d;
3172   }
3173   ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
3174   ierr = PetscFree(d_nnz);CHKERRQ(ierr);
3175 
3176   if (v) values = (PetscScalar*)v;
3177   else {
3178     ierr = PetscMalloc((nnz_max+1)*sizeof(PetscScalar),&values);CHKERRQ(ierr);
3179     ierr = PetscMemzero(values,nnz_max*sizeof(PetscScalar));CHKERRQ(ierr);
3180   }
3181 
3182   for (i=0; i<m; i++) {
3183     ii   = i + rstart;
3184     nnz  = Ii[i+1]- Ii[i];
3185     ierr = MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);CHKERRQ(ierr);
3186   }
3187   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3188   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3189 
3190   if (!v) {
3191     ierr = PetscFree(values);CHKERRQ(ierr);
3192   }
3193   PetscFunctionReturn(0);
3194 }
3195 EXTERN_C_END
3196 
3197 #undef __FUNCT__
3198 #define __FUNCT__ "MatMPIAIJSetPreallocationCSR"
3199 /*@
3200    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3201    (the default parallel PETSc format).
3202 
3203    Collective on MPI_Comm
3204 
3205    Input Parameters:
3206 +  B - the matrix
3207 .  i - the indices into j for the start of each local row (starts with zero)
3208 .  j - the column indices for each local row (starts with zero) these must be sorted for each row
3209 -  v - optional values in the matrix
3210 
3211    Level: developer
3212 
3213    Notes:
3214        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3215      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3216      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3217 
3218        The i and j indices are 0 based, and the i indices are offsets into the local j array.
3219 
3220        The format used for the sparse matrix input is equivalent to a
3221     row-major ordering, i.e. for the following matrix, the input data expected is
3222     as shown:
3223 
3224         1 0 0
3225         2 0 3     P0
3226        -------
3227         4 5 6     P1
3228 
3229      Process0 [P0]: rows_owned=[0,1]
3230         i =  {0,1,3}  [size = nrow+1  = 2+1]
3231         j =  {0,0,2}  [size = nz = 3]
3232         v =  {1,2,3}  [size = nz = 3]
3233 
3234      Process1 [P1]: rows_owned=[2]
3235         i =  {0,3}    [size = nrow+1  = 1+1]
3236         j =  {0,1,2}  [size = nz = 3]
3237         v =  {4,5,6}  [size = nz = 3]
3238 
3239       The column indices for each row MUST be sorted.
3240 
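   A minimal calling sketch, using the data for P0 above (the communicator and the
   variable names are illustrative assumptions, not taken from a PETSc example):

.vb
      PetscErrorCode ierr;
      Mat            B;
      PetscInt       i[] = {0,1,3};        /* row pointers for the 2 local rows  */
      PetscInt       j[] = {0,0,2};        /* sorted column indices              */
      PetscScalar    v[] = {1.0,2.0,3.0};  /* values                             */

      ierr = MatCreate(PETSC_COMM_WORLD,&B);CHKERRQ(ierr);
      ierr = MatSetSizes(B,2,PETSC_DECIDE,3,3);CHKERRQ(ierr);   /* 2 local rows on P0 */
      ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr);
      ierr = MatMPIAIJSetPreallocationCSR(B,i,j,v);CHKERRQ(ierr);
.ve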
3241 .keywords: matrix, aij, compressed row, sparse, parallel
3242 
3243 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateMPIAIJ(), MPIAIJ,
3244           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3245 @*/
3246 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3247 {
3248   PetscErrorCode ierr,(*f)(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
3249 
3250   PetscFunctionBegin;
3251   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",(void (**)(void))&f);CHKERRQ(ierr);
3252   if (f) {
3253     ierr = (*f)(B,i,j,v);CHKERRQ(ierr);
3254   }
3255   PetscFunctionReturn(0);
3256 }
3257 
3258 #undef __FUNCT__
3259 #define __FUNCT__ "MatMPIAIJSetPreallocation"
3260 /*@C
3261    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3262    (the default parallel PETSc format).  For good matrix assembly performance
3263    the user should preallocate the matrix storage by setting the parameters
3264    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3265    performance can be increased by more than a factor of 50.
3266 
3267    Collective on MPI_Comm
3268 
3269    Input Parameters:
3270 +  A - the matrix
3271 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3272            (same value is used for all local rows)
3273 .  d_nnz - array containing the number of nonzeros in the various rows of the
3274            DIAGONAL portion of the local submatrix (possibly different for each row)
3275            or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3276            The size of this array is equal to the number of local rows, i.e 'm'.
3277            You must leave room for the diagonal entry even if it is zero.
3278 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3279            submatrix (same value is used for all local rows).
3280 -  o_nnz - array containing the number of nonzeros in the various rows of the
3281            OFF-DIAGONAL portion of the local submatrix (possibly different for
3282            each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3283            structure. The size of this array is equal to the number
3284            of local rows, i.e 'm'.
3285 
3286    If the *_nnz parameter is given then the *_nz parameter is ignored
3287 
3288    The AIJ format (also called the Yale sparse matrix format or
3289    compressed row storage (CSR)), is fully compatible with standard Fortran 77
3290    storage.  The stored row and column indices begin with zero.  See the users manual for details.
3291 
3292    The parallel matrix is partitioned such that the first m0 rows belong to
3293    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3294    to process 2 etc., where m0,m1,m2,... are the values of the input parameter 'm' on each process.
3295 
3296    The DIAGONAL portion of the local submatrix of a processor can be defined
3297    as the submatrix which is obtained by extracting the part corresponding
3298    to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
3299    first row that belongs to the processor, and r2 is the last row belonging
3300    to this processor. This is a square mxm matrix. The remaining portion
3301    of the local submatrix (mxN) constitutes the OFF-DIAGONAL portion.
3302 
3303    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3304 
3305    You can call MatGetInfo() to get information on how effective the preallocation was;
3306    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3307    You can also run with the option -info and look for messages with the string
3308    malloc in them to see if additional memory allocation was needed.
3309 
3310    Example usage:
3311 
3312    Consider the following 8x8 matrix with 34 non-zero values, that is
3313    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3314    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3315    as follows:
3316 
3317 .vb
3318             1  2  0  |  0  3  0  |  0  4
3319     Proc0   0  5  6  |  7  0  0  |  8  0
3320             9  0 10  | 11  0  0  | 12  0
3321     -------------------------------------
3322            13  0 14  | 15 16 17  |  0  0
3323     Proc1   0 18  0  | 19 20 21  |  0  0
3324             0  0  0  | 22 23  0  | 24  0
3325     -------------------------------------
3326     Proc2  25 26 27  |  0  0 28  | 29  0
3327            30  0  0  | 31 32 33  |  0 34
3328 .ve
3329 
3330    This can be represented as a collection of submatrices as:
3331 
3332 .vb
3333       A B C
3334       D E F
3335       G H I
3336 .ve
3337 
3338    Where the submatrices A,B,C are owned by proc0, D,E,F are
3339    owned by proc1, G,H,I are owned by proc2.
3340 
3341    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3342    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3343    The 'M','N' parameters are 8,8, and have the same values on all procs.
3344 
3345    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3346    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3347    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3348    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3349    part as SeqAIJ matrices; e.g., proc1 will store [E] as a SeqAIJ
3350    matrix, and [DF] as another SeqAIJ matrix.
3351 
3352    When d_nz, o_nz parameters are specified, d_nz storage elements are
3353    allocated for every row of the local diagonal submatrix, and o_nz
3354    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3355    One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
3356    the local rows of the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3357    In this case, the values of d_nz,o_nz are:
3358 .vb
3359      proc0 : dnz = 2, o_nz = 2
3360      proc1 : dnz = 3, o_nz = 2
3361      proc2 : dnz = 1, o_nz = 4
3362 .ve
3363    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3364    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3365    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3366    34 values.
3367 
3368    When d_nnz, o_nnz parameters are specified, the storage is specified
3369    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3370    In the above case the values for d_nnz,o_nnz are:
3371 .vb
3372      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3373      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3374      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3375 .ve
3376    Here the space allocated is the sum of all the above values, i.e. 34, and
3377    hence the preallocation is perfect.
3378 
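   A minimal calling sketch on proc0 of the example above (the matrix A is assumed to
   have already been created with MatCreate()/MatSetSizes()/MatSetType(); the variable
   names are illustrative only):

.vb
      PetscErrorCode ierr;
      PetscInt       d_nnz[] = {2,2,2};   /* proc0 diagonal-block row lengths     */
      PetscInt       o_nnz[] = {2,2,2};   /* proc0 off-diagonal-block row lengths */

      ierr = MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
.ve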
3379    Level: intermediate
3380 
3381 .keywords: matrix, aij, compressed row, sparse, parallel
3382 
3383 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateMPIAIJ(), MatMPIAIJSetPreallocationCSR(),
3384           MPIAIJ, MatGetInfo()
3385 @*/
3386 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3387 {
3388   PetscErrorCode ierr,(*f)(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
3389 
3390   PetscFunctionBegin;
3391   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
3392   if (f) {
3393     ierr = (*f)(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
3394   }
3395   PetscFunctionReturn(0);
3396 }
3397 
3398 #undef __FUNCT__
3399 #define __FUNCT__ "MatCreateMPIAIJWithArrays"
3400 /*@
3401      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
3402          rows in standard CSR format.
3403 
3404    Collective on MPI_Comm
3405 
3406    Input Parameters:
3407 +  comm - MPI communicator
3408 .  m - number of local rows (Cannot be PETSC_DECIDE)
3409 .  n - This value should be the same as the local size used in creating the
3410        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3411        calculated if N is given) For square matrices n is almost always m.
3412 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3413 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3414 .   i - row indices
3415 .   j - column indices
3416 -   a - matrix values
3417 
3418    Output Parameter:
3419 .   mat - the matrix
3420 
3421    Level: intermediate
3422 
3423    Notes:
3424        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3425      thus you CANNOT change the matrix entries by changing the values of a[] after you have
3426      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3427 
3428        The i and j indices are 0 based, and the i indices are offsets into the local j array.
3429 
3430        The format used for the sparse matrix input is equivalent to a
3431     row-major ordering, i.e. for the following matrix, the input data expected is
3432     as shown:
3433 
3434         1 0 0
3435         2 0 3     P0
3436        -------
3437         4 5 6     P1
3438 
3439      Process0 [P0]: rows_owned=[0,1]
3440         i =  {0,1,3}  [size = nrow+1  = 2+1]
3441         j =  {0,0,2}  [size = nz = 3]
3442         v =  {1,2,3}  [size = nz = 3]
3443 
3444      Process1 [P1]: rows_owned=[2]
3445         i =  {0,3}    [size = nrow+1  = 1+1]
3446         j =  {0,1,2}  [size = nz = 3]
3447         v =  {4,5,6}  [size = nz = 3]
3448 
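   A minimal calling sketch on process 0 of the example above (the communicator and the
   variable names are illustrative assumptions):

.vb
      PetscErrorCode ierr;
      Mat            A;
      PetscInt       i[] = {0,1,3};
      PetscInt       j[] = {0,0,2};
      PetscScalar    v[] = {1.0,2.0,3.0};

      ierr = MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i,j,v,&A);CHKERRQ(ierr);
.ve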
3449 .keywords: matrix, aij, compressed row, sparse, parallel
3450 
3451 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3452           MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithSplitArrays()
3453 @*/
3454 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3455 {
3456   PetscErrorCode ierr;
3457 
3458   PetscFunctionBegin;
3459   if (i[0]) {
3460     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3461   }
3462   if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3463   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
3464   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
3465   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
3466   ierr = MatMPIAIJSetPreallocationCSR(*mat,i,j,a);CHKERRQ(ierr);
3467   PetscFunctionReturn(0);
3468 }
3469 
3470 #undef __FUNCT__
3471 #define __FUNCT__ "MatCreateMPIAIJ"
3472 /*@C
3473    MatCreateMPIAIJ - Creates a sparse parallel matrix in AIJ format
3474    (the default parallel PETSc format).  For good matrix assembly performance
3475    the user should preallocate the matrix storage by setting the parameters
3476    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3477    performance can be increased by more than a factor of 50.
3478 
3479    Collective on MPI_Comm
3480 
3481    Input Parameters:
3482 +  comm - MPI communicator
3483 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3484            This value should be the same as the local size used in creating the
3485            y vector for the matrix-vector product y = Ax.
3486 .  n - This value should be the same as the local size used in creating the
3487        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3488        calculated if N is given) For square matrices n is almost always m.
3489 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3490 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3491 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3492            (same value is used for all local rows)
3493 .  d_nnz - array containing the number of nonzeros in the various rows of the
3494            DIAGONAL portion of the local submatrix (possibly different for each row)
3495            or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3496            The size of this array is equal to the number of local rows, i.e 'm'.
3497            You must leave room for the diagonal entry even if it is zero.
3498 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3499            submatrix (same value is used for all local rows).
3500 -  o_nnz - array containing the number of nonzeros in the various rows of the
3501            OFF-DIAGONAL portion of the local submatrix (possibly different for
3502            each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3503            structure. The size of this array is equal to the number
3504            of local rows, i.e 'm'.
3505 
3506    Output Parameter:
3507 .  A - the matrix
3508 
3509    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3510    MatXXXXSetPreallocation() paradigm instead of this routine directly. This is definitely
3511    true if you plan to use the external direct solvers such as SuperLU, MUMPS or Spooles.
3512    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
3513 
3514    Notes:
3515    If the *_nnz parameter is given then the *_nz parameter is ignored
3516 
3517    m,n,M,N parameters specify the size of the matrix, and its partitioning across
3518    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3519    storage requirements for this matrix.
3520 
3521    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
3522    processor then it must be used on all processors that share the object for
3523    that argument.
3524 
3525    The user MUST specify either the local or global matrix dimensions
3526    (possibly both).
3527 
3528    The parallel matrix is partitioned across processors such that the
3529    first m0 rows belong to process 0, the next m1 rows belong to
3530    process 1, the next m2 rows belong to process 2 etc., where
3531    m0,m1,m2,... are the values of the input parameter 'm' on each process, i.e. each
3532    processor stores values corresponding to an [m x N] submatrix.
3533 
3534    The columns are logically partitioned with the n0 columns belonging
3535    to the 0th partition, the next n1 columns belonging to the next
3536    partition etc., where n0,n1,n2,... are the values of the input parameter 'n' on each process.
3537 
3538    The DIAGONAL portion of the local submatrix on any given processor
3539    is the submatrix corresponding to the rows and columns m,n
3540    owned by the given processor, i.e. the diagonal matrix on
3541    process 0 is [m0 x n0], diagonal matrix on process 1 is [m1 x n1]
3542    etc. The remaining portion of the local submatrix [m x (N-n)]
3543    constitute the OFF-DIAGONAL portion. The example below better
3544    illustrates this concept.
3545 
3546    For a square global matrix we define each processor's diagonal portion
3547    to be its local rows and the corresponding columns (a square submatrix);
3548    each processor's off-diagonal portion encompasses the remainder of the
3549    local matrix (a rectangular submatrix).
3550 
3551    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3552 
3553    When calling this routine with a single process communicator, a matrix of
3554    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
3555    type of communicator, use the construction mechanism:
3556      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatMPIAIJSetPreallocation(A,...);
3557 
3558    By default, this format uses inodes (identical nodes) when possible.
3559    We search for consecutive rows with the same nonzero structure, thereby
3560    reusing matrix information to achieve increased efficiency.
3561 
3562    Options Database Keys:
3563 +  -mat_no_inode  - Do not use inodes
3564 .  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3565 -  -mat_aij_oneindex - Internally use indexing starting at 1
3566         rather than 0.  Note that when calling MatSetValues(),
3567         the user still MUST index entries starting at 0!
3568 
3569 
3570    Example usage:
3571 
3572    Consider the following 8x8 matrix with 34 non-zero values, that is
3573    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3574    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3575    as follows:
3576 
3577 .vb
3578             1  2  0  |  0  3  0  |  0  4
3579     Proc0   0  5  6  |  7  0  0  |  8  0
3580             9  0 10  | 11  0  0  | 12  0
3581     -------------------------------------
3582            13  0 14  | 15 16 17  |  0  0
3583     Proc1   0 18  0  | 19 20 21  |  0  0
3584             0  0  0  | 22 23  0  | 24  0
3585     -------------------------------------
3586     Proc2  25 26 27  |  0  0 28  | 29  0
3587            30  0  0  | 31 32 33  |  0 34
3588 .ve
3589 
3590    This can be represented as a collection of submatrices as:
3591 
3592 .vb
3593       A B C
3594       D E F
3595       G H I
3596 .ve
3597 
3598    Where the submatrices A,B,C are owned by proc0, D,E,F are
3599    owned by proc1, G,H,I are owned by proc2.
3600 
3601    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3602    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3603    The 'M','N' parameters are 8,8, and have the same values on all procs.
3604 
3605    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3606    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3607    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3608    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3609    part as SeqAIJ matrices; e.g., proc1 will store [E] as a SeqAIJ
3610    matrix, and [DF] as another SeqAIJ matrix.
3611 
3612    When d_nz, o_nz parameters are specified, d_nz storage elements are
3613    allocated for every row of the local diagonal submatrix, and o_nz
3614    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3615    One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
3616    the local rows of the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3617    In this case, the values of d_nz,o_nz are:
3618 .vb
3619      proc0 : dnz = 2, o_nz = 2
3620      proc1 : dnz = 3, o_nz = 2
3621      proc2 : dnz = 1, o_nz = 4
3622 .ve
3623    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3624    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3625    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3626    34 values.
3627 
3628    When d_nnz, o_nnz parameters are specified, the storage is specified
3629    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3630    In the above case the values for d_nnz,o_nnz are:
3631 .vb
3632      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3633      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3634      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3635 .ve
3636    Here the space allocated is the sum of all the above values, i.e. 34, and
3637    hence the preallocation is perfect.
3638 
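   A minimal calling sketch on proc0 of the example above (the communicator and the
   variable names are illustrative assumptions only):

.vb
      PetscErrorCode ierr;
      Mat            A;
      PetscInt       d_nnz[] = {2,2,2};
      PetscInt       o_nnz[] = {2,2,2};

      ierr = MatCreateMPIAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);CHKERRQ(ierr);
.ve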
3639    Level: intermediate
3640 
3641 .keywords: matrix, aij, compressed row, sparse, parallel
3642 
3643 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3644           MPIAIJ, MatCreateMPIAIJWithArrays()
3645 @*/
3646 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3647 {
3648   PetscErrorCode ierr;
3649   PetscMPIInt    size;
3650 
3651   PetscFunctionBegin;
3652   ierr = MatCreate(comm,A);CHKERRQ(ierr);
3653   ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
3654   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3655   if (size > 1) {
3656     ierr = MatSetType(*A,MATMPIAIJ);CHKERRQ(ierr);
3657     ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
3658   } else {
3659     ierr = MatSetType(*A,MATSEQAIJ);CHKERRQ(ierr);
3660     ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
3661   }
3662   PetscFunctionReturn(0);
3663 }
3664 
3665 #undef __FUNCT__
3666 #define __FUNCT__ "MatMPIAIJGetSeqAIJ"
3667 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
3668 {
3669   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
3670 
3671   PetscFunctionBegin;
3672   *Ad     = a->A;
3673   *Ao     = a->B;
3674   *colmap = a->garray;
3675   PetscFunctionReturn(0);
3676 }
3677 
3678 #undef __FUNCT__
3679 #define __FUNCT__ "MatSetColoring_MPIAIJ"
3680 PetscErrorCode MatSetColoring_MPIAIJ(Mat A,ISColoring coloring)
3681 {
3682   PetscErrorCode ierr;
3683   PetscInt       i;
3684   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3685 
3686   PetscFunctionBegin;
3687   if (coloring->ctype == IS_COLORING_GLOBAL) {
3688     ISColoringValue *allcolors,*colors;
3689     ISColoring      ocoloring;
3690 
3691     /* set coloring for diagonal portion */
3692     ierr = MatSetColoring_SeqAIJ(a->A,coloring);CHKERRQ(ierr);
3693 
3694     /* set coloring for off-diagonal portion */
3695     ierr = ISAllGatherColors(((PetscObject)A)->comm,coloring->n,coloring->colors,PETSC_NULL,&allcolors);CHKERRQ(ierr);
3696     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3697     for (i=0; i<a->B->cmap->n; i++) {
3698       colors[i] = allcolors[a->garray[i]];
3699     }
3700     ierr = PetscFree(allcolors);CHKERRQ(ierr);
3701     ierr = ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3702     ierr = MatSetColoring_SeqAIJ(a->B,ocoloring);CHKERRQ(ierr);
3703     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3704   } else if (coloring->ctype == IS_COLORING_GHOSTED) {
3705     ISColoringValue *colors;
3706     PetscInt        *larray;
3707     ISColoring      ocoloring;
3708 
3709     /* set coloring for diagonal portion */
3710     ierr = PetscMalloc((a->A->cmap->n+1)*sizeof(PetscInt),&larray);CHKERRQ(ierr);
3711     for (i=0; i<a->A->cmap->n; i++) {
3712       larray[i] = i + A->cmap->rstart;
3713     }
3714     ierr = ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->A->cmap->n,larray,PETSC_NULL,larray);CHKERRQ(ierr);
3715     ierr = PetscMalloc((a->A->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3716     for (i=0; i<a->A->cmap->n; i++) {
3717       colors[i] = coloring->colors[larray[i]];
3718     }
3719     ierr = PetscFree(larray);CHKERRQ(ierr);
3720     ierr = ISColoringCreate(PETSC_COMM_SELF,coloring->n,a->A->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3721     ierr = MatSetColoring_SeqAIJ(a->A,ocoloring);CHKERRQ(ierr);
3722     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3723 
3724     /* set coloring for off-diagonal portion */
3725     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(PetscInt),&larray);CHKERRQ(ierr);
3726     ierr = ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->B->cmap->n,a->garray,PETSC_NULL,larray);CHKERRQ(ierr);
3727     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3728     for (i=0; i<a->B->cmap->n; i++) {
3729       colors[i] = coloring->colors[larray[i]];
3730     }
3731     ierr = PetscFree(larray);CHKERRQ(ierr);
3732     ierr = ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3733     ierr = MatSetColoring_SeqAIJ(a->B,ocoloring);CHKERRQ(ierr);
3734     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3735   } else {
3736     SETERRQ1(PETSC_ERR_SUP,"No support ISColoringType %d",(int)coloring->ctype);
3737   }
3738 
3739   PetscFunctionReturn(0);
3740 }
3741 
3742 #if defined(PETSC_HAVE_ADIC)
3743 #undef __FUNCT__
3744 #define __FUNCT__ "MatSetValuesAdic_MPIAIJ"
3745 PetscErrorCode MatSetValuesAdic_MPIAIJ(Mat A,void *advalues)
3746 {
3747   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3748   PetscErrorCode ierr;
3749 
3750   PetscFunctionBegin;
3751   ierr = MatSetValuesAdic_SeqAIJ(a->A,advalues);CHKERRQ(ierr);
3752   ierr = MatSetValuesAdic_SeqAIJ(a->B,advalues);CHKERRQ(ierr);
3753   PetscFunctionReturn(0);
3754 }
3755 #endif
3756 
3757 #undef __FUNCT__
3758 #define __FUNCT__ "MatSetValuesAdifor_MPIAIJ"
3759 PetscErrorCode MatSetValuesAdifor_MPIAIJ(Mat A,PetscInt nl,void *advalues)
3760 {
3761   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3762   PetscErrorCode ierr;
3763 
3764   PetscFunctionBegin;
3765   ierr = MatSetValuesAdifor_SeqAIJ(a->A,nl,advalues);CHKERRQ(ierr);
3766   ierr = MatSetValuesAdifor_SeqAIJ(a->B,nl,advalues);CHKERRQ(ierr);
3767   PetscFunctionReturn(0);
3768 }
3769 
3770 #undef __FUNCT__
3771 #define __FUNCT__ "MatMerge"
3772 /*@
3773       MatMerge - Creates a single large PETSc matrix by concatenating sequential
3774                  matrices from each processor
3775 
3776     Collective on MPI_Comm
3777 
3778    Input Parameters:
3779 +    comm - the communicator the parallel matrix will live on
3780 .    inmat - the input sequential matrix (one per process)
3781 .    n - number of local columns (or PETSC_DECIDE)
3782 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3783 
3784    Output Parameter:
3785 .    outmat - the parallel matrix generated
3786 
3787     Level: advanced
3788 
3789    Notes: The number of columns of the matrix on EACH processor MUST be the same.
3789          The input matrix inmat is destroyed by this routine.
3790 
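   A minimal calling sketch (the per-process sequential matrix seq is an illustrative
   assumption; note that the input matrix is consumed by the call):

.vb
      PetscErrorCode ierr;
      Mat            merged;

      ierr = MatMerge(PETSC_COMM_WORLD,seq,PETSC_DECIDE,MAT_INITIAL_MATRIX,&merged);CHKERRQ(ierr);
.ve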
3791 @*/
3792 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
3793 {
3794   PetscErrorCode ierr;
3795   PetscInt       m,N,i,rstart,nnz,Ii,*dnz,*onz;
3796   PetscInt       *indx;
3797   PetscScalar    *values;
3798 
3799   PetscFunctionBegin;
3800   ierr = MatGetSize(inmat,&m,&N);CHKERRQ(ierr);
3801   if (scall == MAT_INITIAL_MATRIX){
3802     /* count nonzeros in each row, for diagonal and off diagonal portion of matrix */
3803     if (n == PETSC_DECIDE){
3804       ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
3805     }
3806     ierr = MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3807     rstart -= m;
3808 
3809     ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
3810     for (i=0;i<m;i++) {
3811       ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);CHKERRQ(ierr);
3812       ierr = MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);CHKERRQ(ierr);
3813       ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);CHKERRQ(ierr);
3814     }
3815     /* This routine will ONLY return MPIAIJ type matrix */
3816     ierr = MatCreate(comm,outmat);CHKERRQ(ierr);
3817     ierr = MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
3818     ierr = MatSetType(*outmat,MATMPIAIJ);CHKERRQ(ierr);
3819     ierr = MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);CHKERRQ(ierr);
3820     ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
3821 
3822   } else if (scall == MAT_REUSE_MATRIX){
3823     ierr = MatGetOwnershipRange(*outmat,&rstart,PETSC_NULL);CHKERRQ(ierr);
3824   } else {
3825     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
3826   }
3827 
3828   for (i=0;i<m;i++) {
3829     ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
3830     Ii    = i + rstart;
3831     ierr = MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
3832     ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
3833   }
3834   ierr = MatDestroy(inmat);CHKERRQ(ierr);
3835   ierr = MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3836   ierr = MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3837 
3838   PetscFunctionReturn(0);
3839 }
3840 
3841 #undef __FUNCT__
3842 #define __FUNCT__ "MatFileSplit"
3843 PetscErrorCode MatFileSplit(Mat A,char *outfile)
3844 {
3845   PetscErrorCode    ierr;
3846   PetscMPIInt       rank;
3847   PetscInt          m,N,i,rstart,nnz;
3848   size_t            len;
3849   const PetscInt    *indx;
3850   PetscViewer       out;
3851   char              *name;
3852   Mat               B;
3853   const PetscScalar *values;
3854 
3855   PetscFunctionBegin;
3856   ierr = MatGetLocalSize(A,&m,0);CHKERRQ(ierr);
3857   ierr = MatGetSize(A,0,&N);CHKERRQ(ierr);
3858   /* Should this be the type of the diagonal block of A? */
3859   ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
3860   ierr = MatSetSizes(B,m,N,m,N);CHKERRQ(ierr);
3861   ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr);
3862   ierr = MatSeqAIJSetPreallocation(B,0,PETSC_NULL);CHKERRQ(ierr);
3863   ierr = MatGetOwnershipRange(A,&rstart,0);CHKERRQ(ierr);
3864   for (i=0;i<m;i++) {
3865     ierr = MatGetRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
3866     ierr = MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
3867     ierr = MatRestoreRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
3868   }
3869   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3870   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3871 
3872   ierr = MPI_Comm_rank(((PetscObject)A)->comm,&rank);CHKERRQ(ierr);
3873   ierr = PetscStrlen(outfile,&len);CHKERRQ(ierr);
3874   ierr = PetscMalloc((len+5)*sizeof(char),&name);CHKERRQ(ierr);
3875   sprintf(name,"%s.%d",outfile,rank);
3876   ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);CHKERRQ(ierr);
3877   ierr = PetscFree(name);CHKERRQ(ierr);
3878   ierr = MatView(B,out);CHKERRQ(ierr);
3879   ierr = PetscViewerDestroy(out);CHKERRQ(ierr);
3880   ierr = MatDestroy(B);CHKERRQ(ierr);
3881   PetscFunctionReturn(0);
3882 }
3883 
3884 EXTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
3885 #undef __FUNCT__
3886 #define __FUNCT__ "MatDestroy_MPIAIJ_SeqsToMPI"
3887 PetscErrorCode PETSCMAT_DLLEXPORT MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
3888 {
3889   PetscErrorCode       ierr;
3890   Mat_Merge_SeqsToMPI  *merge = PETSC_NULL; /* initialize so PetscFree() below is safe when no container is attached */
3891   PetscContainer       container;
3892 
3893   PetscFunctionBegin;
3894   ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
3895   if (container) {
3896     ierr = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
3897     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
3898     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
3899     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
3900     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
3901     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
3902     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
3903     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
3904     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
3905     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
3906     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
3907     ierr = PetscFree(merge->rowmap.range);CHKERRQ(ierr);
3908 
3909     ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
3910     ierr = PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);CHKERRQ(ierr);
3911   }
3912   ierr = PetscFree(merge);CHKERRQ(ierr);
3913 
3914   ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
3915   PetscFunctionReturn(0);
3916 }
3917 
3918 #include "../src/mat/utils/freespace.h"
3919 #include "petscbt.h"
3920 
3921 #undef __FUNCT__
3922 #define __FUNCT__ "MatMerge_SeqsToMPINumeric"
3923 /*@C
3924       MatMerge_SeqsToMPI - Creates an MPIAIJ matrix by adding together sequential
3925                  matrices from each processor (split into a Symbolic and a Numeric phase)
3926 
3927     Collective on MPI_Comm
3928 
3929    Input Parameters:
3930 +    comm - the communicators the parallel matrix will live on
3931 .    seqmat - the input sequential matrices
3932 .    m - number of local rows (or PETSC_DECIDE)
3933 .    n - number of local columns (or PETSC_DECIDE)
3934 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3935 
3936    Output Parameter:
3937 .    mpimat - the parallel matrix generated
3938 
3939     Level: advanced
3940 
3941    Notes:
3942      The dimensions of the sequential matrix in each processor MUST be the same.
3943      The input seqmat is kept in the container "Mat_Merge_SeqsToMPI", and will be
3944      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
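   A minimal calling sketch of the symbolic/numeric pair (comm, seqmat, m and n are
   assumed to be set up by the caller; illustration only):

.vb
      PetscErrorCode ierr;
      Mat            C;

      ierr = MatMerge_SeqsToMPISymbolic(comm,seqmat,m,n,&C);CHKERRQ(ierr);
      ierr = MatMerge_SeqsToMPINumeric(seqmat,C);CHKERRQ(ierr);
.ve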
3945 @*/
3946 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPINumeric(Mat seqmat,Mat mpimat)
3947 {
3948   PetscErrorCode       ierr;
3949   MPI_Comm             comm=((PetscObject)mpimat)->comm;
3950   Mat_SeqAIJ           *a=(Mat_SeqAIJ*)seqmat->data;
3951   PetscMPIInt          size,rank,taga,*len_s;
3952   PetscInt             N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj=a->j;
3953   PetscInt             proc,m;
3954   PetscInt             **buf_ri,**buf_rj;
3955   PetscInt             k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
3956   PetscInt             nrows,**buf_ri_k,**nextrow,**nextai;
3957   MPI_Request          *s_waits,*r_waits;
3958   MPI_Status           *status;
3959   MatScalar            *aa=a->a;
3960   MatScalar            **abuf_r,*ba_i;
3961   Mat_Merge_SeqsToMPI  *merge;
3962   PetscContainer       container;
3963 
3964   PetscFunctionBegin;
3965   ierr = PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
3966 
3967   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3968   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3969 
3970   ierr = PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
3971   if (container) {
3972     ierr  = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
3973   } else SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix mpimat was not created with MatMerge_SeqsToMPISymbolic()");
3974   bi     = merge->bi;
3975   bj     = merge->bj;
3976   buf_ri = merge->buf_ri;
3977   buf_rj = merge->buf_rj;
3978 
3979   ierr   = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
3980   owners = merge->rowmap.range;
3981   len_s  = merge->len_s;
3982 
3983   /* send and recv matrix values */
3984   /*-----------------------------*/
3985   ierr = PetscObjectGetNewTag((PetscObject)mpimat,&taga);CHKERRQ(ierr);
3986   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
3987 
3988   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);CHKERRQ(ierr);
3989   for (proc=0,k=0; proc<size; proc++){
3990     if (!len_s[proc]) continue;
3991     i = owners[proc];
3992     ierr = MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
3993     k++;
3994   }
3995 
3996   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
3997   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
3998   ierr = PetscFree(status);CHKERRQ(ierr);
3999 
4000   ierr = PetscFree(s_waits);CHKERRQ(ierr);
4001   ierr = PetscFree(r_waits);CHKERRQ(ierr);
4002 
4003   /* insert mat values of mpimat */
4004   /*----------------------------*/
4005   ierr = PetscMalloc(N*sizeof(PetscScalar),&ba_i);CHKERRQ(ierr);
4006   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
4007   nextrow = buf_ri_k + merge->nrecv;
4008   nextai  = nextrow + merge->nrecv;
4009 
4010   for (k=0; k<merge->nrecv; k++){
4011     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
4012     nrows = *(buf_ri_k[k]);
4013     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
4014     nextai[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of the k-th received i-structure */
4015   }
4016 
4017   /* set values of ba */
4018   m = merge->rowmap.n;
4019   for (i=0; i<m; i++) {
4020     arow = owners[rank] + i;
4021     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4022     bnzi = bi[i+1] - bi[i];
4023     ierr = PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));CHKERRQ(ierr);
4024 
4025     /* add local non-zero vals of this proc's seqmat into ba */
4026     anzi = ai[arow+1] - ai[arow];
4027     aj   = a->j + ai[arow];
4028     aa   = a->a + ai[arow];
4029     nextaj = 0;
4030     for (j=0; nextaj<anzi; j++){
4031       if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4032         ba_i[j] += aa[nextaj++];
4033       }
4034     }
4035 
4036     /* add received vals into ba */
4037     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4038       /* i-th row */
4039       if (i == *nextrow[k]) {
4040         anzi = *(nextai[k]+1) - *nextai[k];
4041         aj   = buf_rj[k] + *(nextai[k]);
4042         aa   = abuf_r[k] + *(nextai[k]);
4043         nextaj = 0;
4044         for (j=0; nextaj<anzi; j++){
4045           if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4046             ba_i[j] += aa[nextaj++];
4047           }
4048         }
4049         nextrow[k]++; nextai[k]++;
4050       }
4051     }
4052     ierr = MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
4053   }
4054   ierr = MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4055   ierr = MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4056 
4057   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
4058   ierr = PetscFree(ba_i);CHKERRQ(ierr);
4059   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
4060   ierr = PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
4061   PetscFunctionReturn(0);
4062 }
4063 
4064 #undef __FUNCT__
4065 #define __FUNCT__ "MatMerge_SeqsToMPISymbolic"
4066 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPISymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4067 {
4068   PetscErrorCode       ierr;
4069   Mat                  B_mpi;
4070   Mat_SeqAIJ           *a=(Mat_SeqAIJ*)seqmat->data;
4071   PetscMPIInt          size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4072   PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
4073   PetscInt             M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4074   PetscInt             len,proc,*dnz,*onz;
4075   PetscInt             k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4076   PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4077   MPI_Request          *si_waits,*sj_waits,*ri_waits,*rj_waits;
4078   MPI_Status           *status;
4079   PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
4080   PetscBT              lnkbt;
4081   Mat_Merge_SeqsToMPI  *merge;
4082   PetscContainer       container;
4083 
4084   PetscFunctionBegin;
4085   ierr = PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4086 
4087   /* make sure it is a PETSc comm */
4088   ierr = PetscCommDuplicate(comm,&comm,PETSC_NULL);CHKERRQ(ierr);
4089   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4090   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4091 
4092   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
4093   ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
4094 
4095   /* determine row ownership */
4096   /*---------------------------------------------------------*/
4097   ierr = PetscMapInitialize(comm,&merge->rowmap);CHKERRQ(ierr);
4098   merge->rowmap.n = m;
4099   merge->rowmap.N = M;
4100   merge->rowmap.bs = 1;
4101   ierr = PetscMapSetUp(&merge->rowmap);CHKERRQ(ierr);
4102   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
4103   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
4104 
4105   m      = merge->rowmap.n;
4106   M      = merge->rowmap.N;
4107   owners = merge->rowmap.range;
4108 
4109   /* determine the number of messages to send, their lengths */
4110   /*---------------------------------------------------------*/
4111   len_s  = merge->len_s;
4112 
4113   len = 0;  /* length of buf_si[] */
4114   merge->nsend = 0;
4115   for (proc=0; proc<size; proc++){
4116     len_si[proc] = 0;
4117     if (proc == rank){
4118       len_s[proc] = 0;
4119     } else {
4120       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4121       len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of nonzeros (column indices) to be sent to [proc] */
4122     }
4123     if (len_s[proc]) {
4124       merge->nsend++;
4125       nrows = 0;
4126       for (i=owners[proc]; i<owners[proc+1]; i++){
4127         if (ai[i+1] > ai[i]) nrows++;
4128       }
4129       len_si[proc] = 2*(nrows+1);
4130       len += len_si[proc];
4131     }
4132   }
4133 
4134   /* determine the number and length of messages to receive for ij-structure */
4135   /*-------------------------------------------------------------------------*/
4136   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
4137   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
4138 
4139   /* post the Irecv of j-structure */
4140   /*-------------------------------*/
4141   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
4142   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr);
4143 
4144   /* post the Isend of j-structure */
4145   /*--------------------------------*/
4146   ierr = PetscMalloc((2*merge->nsend+1)*sizeof(MPI_Request),&si_waits);CHKERRQ(ierr);
4147   sj_waits = si_waits + merge->nsend;
4148 
4149   for (proc=0, k=0; proc<size; proc++){
4150     if (!len_s[proc]) continue;
4151     i = owners[proc];
4152     ierr = MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr);
4153     k++;
4154   }
4155 
4156   /* receives and sends of j-structure are complete */
4157   /*------------------------------------------------*/
4158   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);}
4159   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);}
4160 
4161   /* send and recv i-structure */
4162   /*---------------------------*/
4163   ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
4164   ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr);
4165 
4166   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
4167   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4168   for (proc=0,k=0; proc<size; proc++){
4169     if (!len_s[proc]) continue;
4170     /* form outgoing message for i-structure:
4171          buf_si[0]:                 nrows to be sent
4172                [1:nrows]:           row index (local to the receiving process)
4173                [nrows+1:2*nrows+1]: i-structure index
4174     */
4175     /*-------------------------------------------*/
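    /* Hypothetical example: if the rows with destination-local indices 3 and 7
       (having 2 and 5 nonzeros respectively) are sent to [proc], then nrows = 2,
       len_si[proc] = 6 and buf_si = {2, 3, 7, 0, 2, 7}. */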
4176     nrows = len_si[proc]/2 - 1;
4177     buf_si_i    = buf_si + nrows+1;
4178     buf_si[0]   = nrows;
4179     buf_si_i[0] = 0;
4180     nrows = 0;
4181     for (i=owners[proc]; i<owners[proc+1]; i++){
4182       anzi = ai[i+1] - ai[i];
4183       if (anzi) {
4184         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4185         buf_si[nrows+1] = i-owners[proc]; /* local row index */
4186         nrows++;
4187       }
4188     }
4189     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr);
4190     k++;
4191     buf_si += len_si[proc];
4192   }
4193 
4194   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);}
4195   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);}
4196 
4197   ierr = PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
4198   for (i=0; i<merge->nrecv; i++){
4199     ierr = PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
4200   }
4201 
4202   ierr = PetscFree(len_si);CHKERRQ(ierr);
4203   ierr = PetscFree(len_ri);CHKERRQ(ierr);
4204   ierr = PetscFree(rj_waits);CHKERRQ(ierr);
4205   ierr = PetscFree(si_waits);CHKERRQ(ierr);
4206   ierr = PetscFree(ri_waits);CHKERRQ(ierr);
4207   ierr = PetscFree(buf_s);CHKERRQ(ierr);
4208   ierr = PetscFree(status);CHKERRQ(ierr);
4209 
4210   /* compute a local seq matrix in each processor */
4211   /*----------------------------------------------*/
4212   /* allocate bi array and free space for accumulating nonzero column info */
4213   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
4214   bi[0] = 0;
4215 
4216   /* create and initialize a linked list */
4217   nlnk = N+1;
4218   ierr = PetscLLCreate(N,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4219 
4220   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4221   len = 0;
4222   len  = ai[owners[rank+1]] - ai[owners[rank]];
4223   ierr = PetscFreeSpaceGet((PetscInt)(2*len+1),&free_space);CHKERRQ(ierr);
4224   current_space = free_space;
4225 
4226   /* determine symbolic info for each local row */
4227   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
4228   nextrow = buf_ri_k + merge->nrecv;
4229   nextai  = nextrow + merge->nrecv;
4230   for (k=0; k<merge->nrecv; k++){
4231     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
4232     nrows = *buf_ri_k[k];
4233     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
4234     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the nonzero-count offsets of the k-th received i-structure */
4235   }
4236 
4237   ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
4238   len = 0;
4239   for (i=0;i<m;i++) {
4240     bnzi   = 0;
4241     /* add local non-zero cols of this proc's seqmat into lnk */
4242     arow   = owners[rank] + i;
4243     anzi   = ai[arow+1] - ai[arow];
4244     aj     = a->j + ai[arow];
4245     ierr = PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4246     bnzi += nlnk;
4247     /* add received col data into lnk */
4248     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4249       if (i == *nextrow[k]) { /* i-th row */
4250         anzi = *(nextai[k]+1) - *nextai[k];
4251         aj   = buf_rj[k] + *nextai[k];
4252         ierr = PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4253         bnzi += nlnk;
4254         nextrow[k]++; nextai[k]++;
4255       }
4256     }
4257     if (len < bnzi) len = bnzi;  /* =max(bnzi) */
4258 
4259     /* if free space is not available, make more free space */
4260     if (current_space->local_remaining<bnzi) {
4261       ierr = PetscFreeSpaceGet(bnzi+current_space->total_array_size,&current_space);CHKERRQ(ierr);
4262       nspacedouble++;
4263     }
4264     /* copy data into free space, then initialize lnk */
4265     ierr = PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
4266     ierr = MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);CHKERRQ(ierr);
4267 
4268     current_space->array           += bnzi;
4269     current_space->local_used      += bnzi;
4270     current_space->local_remaining -= bnzi;
4271 
4272     bi[i+1] = bi[i] + bnzi;
4273   }
4274 
4275   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
4276 
4277   ierr = PetscMalloc((bi[m]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
4278   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
4279   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
4280 
4281   /* create symbolic parallel matrix B_mpi */
4282   /*---------------------------------------*/
4283   ierr = MatCreate(comm,&B_mpi);CHKERRQ(ierr);
4284   if (n==PETSC_DECIDE) {
4285     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);CHKERRQ(ierr);
4286   } else {
4287     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4288   }
4289   ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
4290   ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
4291   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
4292 
4293   /* B_mpi is not ready for use - assembly will be done by MatMerge_SeqsToMPINumeric() */
4294   B_mpi->assembled     = PETSC_FALSE;
4295   B_mpi->ops->destroy  = MatDestroy_MPIAIJ_SeqsToMPI;
4296   merge->bi            = bi;
4297   merge->bj            = bj;
4298   merge->buf_ri        = buf_ri;
4299   merge->buf_rj        = buf_rj;
4300   merge->coi           = PETSC_NULL;
4301   merge->coj           = PETSC_NULL;
4302   merge->owners_co     = PETSC_NULL;
4303 
4304   /* attach the supporting struct to B_mpi for reuse */
4305   ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
4306   ierr = PetscContainerSetPointer(container,merge);CHKERRQ(ierr);
4307   ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
4308   *mpimat = B_mpi;
4309 
4310   ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
4311   ierr = PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4312   PetscFunctionReturn(0);
4313 }
4314 
4315 #undef __FUNCT__
4316 #define __FUNCT__ "MatMerge_SeqsToMPI"
4317 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPI(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4318 {
4319   PetscErrorCode   ierr;
4320 
4321   PetscFunctionBegin;
4322   ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4323   if (scall == MAT_INITIAL_MATRIX){
4324     ierr = MatMerge_SeqsToMPISymbolic(comm,seqmat,m,n,mpimat);CHKERRQ(ierr);
4325   }
4326   ierr = MatMerge_SeqsToMPINumeric(seqmat,*mpimat);CHKERRQ(ierr);
4327   ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4328   PetscFunctionReturn(0);
4329 }
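
/*
   A minimal usage sketch (assumes each process has built a SeqAIJ matrix Aloc of
   global size M x N containing its contributions, and mloc/nloc are the desired
   local row/column sizes of the merged parallel matrix; entries contributed by
   several processes to the same location are summed):

     Mat C;
     ierr = MatMerge_SeqsToMPI(PETSC_COMM_WORLD,Aloc,mloc,nloc,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     ...
     ierr = MatMerge_SeqsToMPI(PETSC_COMM_WORLD,Aloc,mloc,nloc,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
*/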
4330 
4331 #undef __FUNCT__
4332 #define __FUNCT__ "MatGetLocalMat"
4333 /*@
4334      MatGetLocalMat - Creates a SeqAIJ matrix that contains all the local rows of the parallel (MPIAIJ) matrix A
4335 
4336     Not Collective
4337 
4338    Input Parameters:
4339 +    A - the matrix
4340 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4341 
4342    Output Parameter:
4343 .    A_loc - the local sequential matrix generated
4344 
4345     Level: developer
4346 
4347 @*/
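/*
   Sample usage (a sketch; A is assumed to be an assembled MPIAIJ matrix):

     Mat A_loc;
     ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
     ...                                                    use A_loc as a SeqAIJ matrix
     ierr = MatGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);    refresh values after A changes
     ierr = MatDestroy(A_loc);CHKERRQ(ierr);
*/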
4348 PetscErrorCode PETSCMAT_DLLEXPORT MatGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4349 {
4350   PetscErrorCode  ierr;
4351   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
4352   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
4353   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,*cmap=mpimat->garray;
4354   MatScalar       *aa=a->a,*ba=b->a,*cam;
4355   PetscScalar     *ca;
4356   PetscInt        am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4357   PetscInt        *ci,*cj,col,ncols_d,ncols_o,jo;
4358 
4359   PetscFunctionBegin;
4360   ierr = PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4361   if (scall == MAT_INITIAL_MATRIX){
4362     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
4363     ci[0] = 0;
4364     for (i=0; i<am; i++){
4365       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4366     }
4367     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
4368     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
4369     k = 0;
4370     for (i=0; i<am; i++) {
4371       ncols_o = bi[i+1] - bi[i];
4372       ncols_d = ai[i+1] - ai[i];
4373       /* off-diagonal portion of A */
4374       for (jo=0; jo<ncols_o; jo++) {
4375         col = cmap[*bj];
4376         if (col >= cstart) break;
4377         cj[k]   = col; bj++;
4378         ca[k++] = *ba++;
4379       }
4380       /* diagonal portion of A */
4381       for (j=0; j<ncols_d; j++) {
4382         cj[k]   = cstart + *aj++;
4383         ca[k++] = *aa++;
4384       }
4385       /* off-diagonal portion of A */
4386       for (j=jo; j<ncols_o; j++) {
4387         cj[k]   = cmap[*bj++];
4388         ca[k++] = *ba++;
4389       }
4390     }
4391     /* put together the new matrix */
4392     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);CHKERRQ(ierr);
4393     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4394     /* Since these are PETSc arrays, change flags to free them as necessary. */
4395     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
4396     mat->free_a  = PETSC_TRUE;
4397     mat->free_ij = PETSC_TRUE;
4398     mat->nonew   = 0;
4399   } else if (scall == MAT_REUSE_MATRIX){
4400     mat=(Mat_SeqAIJ*)(*A_loc)->data;
4401     ci = mat->i; cj = mat->j; cam = mat->a;
4402     for (i=0; i<am; i++) {
4403       /* off-diagonal portion of A */
4404       ncols_o = bi[i+1] - bi[i];
4405       for (jo=0; jo<ncols_o; jo++) {
4406         col = cmap[*bj];
4407         if (col >= cstart) break;
4408         *cam++ = *ba++; bj++;
4409       }
4410       /* diagonal portion of A */
4411       ncols_d = ai[i+1] - ai[i];
4412       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4413       /* off-diagonal portion of A */
4414       for (j=jo; j<ncols_o; j++) {
4415         *cam++ = *ba++; bj++;
4416       }
4417     }
4418   } else {
4419     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4420   }
4421 
4422   ierr = PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4423   PetscFunctionReturn(0);
4424 }
4425 
4426 #undef __FUNCT__
4427 #define __FUNCT__ "MatGetLocalMatCondensed"
4428 /*@C
4429      MatGetLocalMatCondensed - Creates a SeqAIJ matrix from all the local rows of the parallel (MPIAIJ) matrix A, keeping only its NON-ZERO columns
4430 
4431     Not Collective
4432 
4433    Input Parameters:
4434 +    A - the matrix
4435 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4436 -    row, col - index sets of rows and columns to extract (or PETSC_NULL)
4437 
4438    Output Parameter:
4439 .    A_loc - the local sequential matrix generated
4440 
4441     Level: developer
4442 
4443 @*/
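/*
   Sample usage (a sketch; A is assumed to be an assembled MPIAIJ matrix; passing
   PETSC_NULL for row and col selects all local rows and all nonzero columns):

     Mat A_cond;
     ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,PETSC_NULL,PETSC_NULL,&A_cond);CHKERRQ(ierr);
*/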
4444 PetscErrorCode PETSCMAT_DLLEXPORT MatGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4445 {
4446   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
4447   PetscErrorCode    ierr;
4448   PetscInt          i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4449   IS                isrowa,iscola;
4450   Mat               *aloc;
4451 
4452   PetscFunctionBegin;
4453   ierr = PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4454   if (!row){
4455     start = A->rmap->rstart; end = A->rmap->rend;
4456     ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);CHKERRQ(ierr);
4457   } else {
4458     isrowa = *row;
4459   }
4460   if (!col){
4461     start = A->cmap->rstart;
4462     cmap  = a->garray;
4463     nzA   = a->A->cmap->n;
4464     nzB   = a->B->cmap->n;
4465     ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);CHKERRQ(ierr);
4466     ncols = 0;
4467     for (i=0; i<nzB; i++) {
4468       if (cmap[i] < start) idx[ncols++] = cmap[i];
4469       else break;
4470     }
4471     imark = i;
4472     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4473     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4474     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&iscola);CHKERRQ(ierr);
4475     ierr = PetscFree(idx);CHKERRQ(ierr);
4476   } else {
4477     iscola = *col;
4478   }
4479   if (scall != MAT_INITIAL_MATRIX){
4480     ierr = PetscMalloc(sizeof(Mat),&aloc);CHKERRQ(ierr);
4481     aloc[0] = *A_loc;
4482   }
4483   ierr = MatGetSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);CHKERRQ(ierr);
4484   *A_loc = aloc[0];
4485   ierr = PetscFree(aloc);CHKERRQ(ierr);
4486   if (!row){
4487     ierr = ISDestroy(isrowa);CHKERRQ(ierr);
4488   }
4489   if (!col){
4490     ierr = ISDestroy(iscola);CHKERRQ(ierr);
4491   }
4492   ierr = PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4493   PetscFunctionReturn(0);
4494 }
4495 
4496 #undef __FUNCT__
4497 #define __FUNCT__ "MatGetBrowsOfAcols"
4498 /*@C
4499     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A
4500 
4501     Collective on Mat
4502 
4503    Input Parameters:
4504 +    A,B - the matrices in mpiaij format
4505 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4506 -    rowb, colb - index sets of rows and columns of B to extract (or PETSC_NULL)
4507 
4508    Output Parameters:
4509 +    rowb, colb - index sets of rows and columns of B to extract
4510 .    brstart - row index in B_seq at which B's own local rows begin (the next B->rmap->n rows of B_seq are taken from B's local rows)
4511 -    B_seq - the sequential matrix generated
4512 
4513     Level: developer
4514 
4515 @*/
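/*
   Sample usage (a sketch; A and B are assumed to be assembled MPIAIJ matrices whose
   layouts satisfy the compatibility check below; rowb and colb are kept so they can
   be reused in a later MAT_REUSE_MATRIX call):

     Mat      B_seq;
     IS       rowb = PETSC_NULL,colb = PETSC_NULL;
     PetscInt brstart;
     ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&brstart,&B_seq);CHKERRQ(ierr);
     ...
     ierr = MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&brstart,&B_seq);CHKERRQ(ierr);
*/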
4516 PetscErrorCode PETSCMAT_DLLEXPORT MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,PetscInt *brstart,Mat *B_seq)
4517 {
4518   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
4519   PetscErrorCode    ierr;
4520   PetscInt          *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4521   IS                isrowb,iscolb;
4522   Mat               *bseq;
4523 
4524   PetscFunctionBegin;
4525   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4526     SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4527   }
4528   ierr = PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4529 
4530   if (scall == MAT_INITIAL_MATRIX){
4531     start = A->cmap->rstart;
4532     cmap  = a->garray;
4533     nzA   = a->A->cmap->n;
4534     nzB   = a->B->cmap->n;
4535     ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);CHKERRQ(ierr);
4536     ncols = 0;
4537     for (i=0; i<nzB; i++) {  /* row < local row index */
4538       if (cmap[i] < start) idx[ncols++] = cmap[i];
4539       else break;
4540     }
4541     imark = i;
4542     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
4543     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4544     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&isrowb);CHKERRQ(ierr);
4545     ierr = PetscFree(idx);CHKERRQ(ierr);
4546     *brstart = imark;
4547     ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);CHKERRQ(ierr);
4548   } else {
4549     if (!rowb || !colb) SETERRQ(PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4550     isrowb = *rowb; iscolb = *colb;
4551     ierr = PetscMalloc(sizeof(Mat),&bseq);CHKERRQ(ierr);
4552     bseq[0] = *B_seq;
4553   }
4554   ierr = MatGetSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);CHKERRQ(ierr);
4555   *B_seq = bseq[0];
4556   ierr = PetscFree(bseq);CHKERRQ(ierr);
4557   if (!rowb){
4558     ierr = ISDestroy(isrowb);CHKERRQ(ierr);
4559   } else {
4560     *rowb = isrowb;
4561   }
4562   if (!colb){
4563     ierr = ISDestroy(iscolb);CHKERRQ(ierr);
4564   } else {
4565     *colb = iscolb;
4566   }
4567   ierr = PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4568   PetscFunctionReturn(0);
4569 }
4570 
4571 #undef __FUNCT__
4572 #define __FUNCT__ "MatGetBrowsOfAoCols"
4573 /*@C
4574     MatGetBrowsOfAoCols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns
4575     of the OFF-DIAGONAL portion of local A
4576 
4577     Collective on Mat
4578 
4579    Input Parameters:
4580 +    A,B - the matrices in mpiaij format
4581 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4582 .    startsj - starting point in B's sending and receiving j-arrays, saved for MAT_REUSE (or PETSC_NULL)
4583 -    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or PETSC_NULL)
4584 
4585    Output Parameter:
4586 .    B_oth - the sequential matrix generated
4587 
4588     Level: developer
4589 
4590 @*/
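/*
   Sample usage (a sketch; A and B are assumed to be assembled MPIAIJ matrices whose
   layouts satisfy the compatibility check below; startsj and bufa are kept so that a
   later MAT_REUSE_MATRIX call only re-communicates the numerical values):

     Mat       B_oth;
     PetscInt  *startsj = PETSC_NULL;
     MatScalar *bufa    = PETSC_NULL;
     ierr = MatGetBrowsOfAoCols(A,B,MAT_INITIAL_MATRIX,&startsj,&bufa,&B_oth);CHKERRQ(ierr);
     ...
     ierr = MatGetBrowsOfAoCols(A,B,MAT_REUSE_MATRIX,&startsj,&bufa,&B_oth);CHKERRQ(ierr);
*/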
4591 PetscErrorCode PETSCMAT_DLLEXPORT MatGetBrowsOfAoCols(Mat A,Mat B,MatReuse scall,PetscInt **startsj,MatScalar **bufa_ptr,Mat *B_oth)
4592 {
4593   VecScatter_MPI_General *gen_to,*gen_from;
4594   PetscErrorCode         ierr;
4595   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
4596   Mat_SeqAIJ             *b_oth;
4597   VecScatter             ctx=a->Mvctx;
4598   MPI_Comm               comm=((PetscObject)ctx)->comm;
4599   PetscMPIInt            *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
4600   PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
4601   PetscScalar            *rvalues,*svalues;
4602   MatScalar              *b_otha,*bufa,*bufA;
4603   PetscInt               i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
4604   MPI_Request            *rwaits = PETSC_NULL,*swaits = PETSC_NULL;
4605   MPI_Status             *sstatus,rstatus;
4606   PetscMPIInt            jj;
4607   PetscInt               *cols,sbs,rbs;
4608   PetscScalar            *vals;
4609 
4610   PetscFunctionBegin;
4611   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4612     SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%d, %d) != (%d,%d)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4613   }
4614   ierr = PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
4615   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4616 
4617   gen_to   = (VecScatter_MPI_General*)ctx->todata;
4618   gen_from = (VecScatter_MPI_General*)ctx->fromdata;
4619   rvalues  = gen_from->values; /* reused below to hold the lengths of the rows to be received */
4620   svalues  = gen_to->values;   /* reused below to hold the lengths of the rows to be sent */
4621   nrecvs   = gen_from->n;
4622   nsends   = gen_to->n;
4623 
4624   ierr = PetscMalloc2(nrecvs,MPI_Request,&rwaits,nsends,MPI_Request,&swaits);CHKERRQ(ierr);
4625   srow     = gen_to->indices;   /* local row index to be sent */
4626   sstarts  = gen_to->starts;
4627   sprocs   = gen_to->procs;
4628   sstatus  = gen_to->sstatus;
4629   sbs      = gen_to->bs;
4630   rstarts  = gen_from->starts;
4631   rprocs   = gen_from->procs;
4632   rbs      = gen_from->bs;
4633 
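  /*
     The exchange below runs in three phases over the communication pattern of
     a->Mvctx: row lengths (i-array), then column indices (j-array), then numerical
     values (a-array). For MAT_REUSE_MATRIX only the a-array phase is repeated.
  */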
4634   if (!startsj || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
4635   if (scall == MAT_INITIAL_MATRIX){
4636     /* i-array */
4637     /*---------*/
4638     /*  post receives */
4639     for (i=0; i<nrecvs; i++){
4640       rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4641       nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
4642       ierr = MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4643     }
4644 
4645     /* pack the outgoing message */
4646     ierr = PetscMalloc((nsends+nrecvs+3)*sizeof(PetscInt),&sstartsj);CHKERRQ(ierr);
4647     rstartsj = sstartsj + nsends +1;
4648     sstartsj[0] = 0;  rstartsj[0] = 0;
4649     len = 0; /* total length of j or a array to be sent */
4650     k = 0;
4651     for (i=0; i<nsends; i++){
4652       rowlen = (PetscInt*)svalues + sstarts[i]*sbs;
4653       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4654       for (j=0; j<nrows; j++) {
4655         row = srow[k] + B->rmap->range[rank]; /* global row idx */
4656         for (l=0; l<sbs; l++){
4657           ierr = MatGetRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr); /* rowlength */
4658           rowlen[j*sbs+l] = ncols;
4659           len += ncols;
4660           ierr = MatRestoreRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
4661         }
4662         k++;
4663       }
4664       ierr = MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4665       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
4666     }
4667     /* recvs and sends of i-array are completed */
4668     i = nrecvs;
4669     while (i--) {
4670       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4671     }
4672     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4673 
4674     /* allocate buffers for sending j and a arrays */
4675     ierr = PetscMalloc((len+1)*sizeof(PetscInt),&bufj);CHKERRQ(ierr);
4676     ierr = PetscMalloc((len+1)*sizeof(PetscScalar),&bufa);CHKERRQ(ierr);
4677 
4678     /* create i-array of B_oth */
4679     ierr = PetscMalloc((aBn+2)*sizeof(PetscInt),&b_othi);CHKERRQ(ierr);
4680     b_othi[0] = 0;
4681     len = 0; /* total length of j or a array to be received */
4682     k = 0;
4683     for (i=0; i<nrecvs; i++){
4684       rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4685       nrows = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
4686       for (j=0; j<nrows; j++) {
4687         b_othi[k+1] = b_othi[k] + rowlen[j];
4688         len += rowlen[j]; k++;
4689       }
4690       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
4691     }
4692 
4693     /* allocate space for j and a arrays of B_oth */
4694     ierr = PetscMalloc((b_othi[aBn]+1)*sizeof(PetscInt),&b_othj);CHKERRQ(ierr);
4695     ierr = PetscMalloc((b_othi[aBn]+1)*sizeof(MatScalar),&b_otha);CHKERRQ(ierr);
4696 
4697     /* j-array */
4698     /*---------*/
4699     /*  post receives of j-array */
4700     for (i=0; i<nrecvs; i++){
4701       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4702       ierr = MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4703     }
4704 
4705     /* pack the outgoing message j-array */
4706     k = 0;
4707     for (i=0; i<nsends; i++){
4708       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4709       bufJ = bufj+sstartsj[i];
4710       for (j=0; j<nrows; j++) {
4711         row  = srow[k++] + B->rmap->range[rank]; /* global row idx */
4712         for (ll=0; ll<sbs; ll++){
4713           ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);CHKERRQ(ierr);
4714           for (l=0; l<ncols; l++){
4715             *bufJ++ = cols[l];
4716           }
4717           ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);CHKERRQ(ierr);
4718         }
4719       }
4720       ierr = MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4721     }
4722 
4723     /* recvs and sends of j-array are completed */
4724     i = nrecvs;
4725     while (i--) {
4726       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4727     }
4728     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4729   } else if (scall == MAT_REUSE_MATRIX){
4730     sstartsj = *startsj;
4731     rstartsj = sstartsj + nsends +1;
4732     bufa     = *bufa_ptr;
4733     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
4734     b_otha   = b_oth->a;
4735   } else {
4736     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4737   }
4738 
4739   /* a-array */
4740   /*---------*/
4741   /*  post receives of a-array */
4742   for (i=0; i<nrecvs; i++){
4743     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4744     ierr = MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4745   }
4746 
4747   /* pack the outgoing message a-array */
4748   k = 0;
4749   for (i=0; i<nsends; i++){
4750     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4751     bufA = bufa+sstartsj[i];
4752     for (j=0; j<nrows; j++) {
4753       row  = srow[k++] + B->rmap->range[rank]; /* global row idx */
4754       for (ll=0; ll<sbs; ll++){
4755         ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);CHKERRQ(ierr);
4756         for (l=0; l<ncols; l++){
4757           *bufA++ = vals[l];
4758         }
4759         ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);CHKERRQ(ierr);
4760       }
4761     }
4762     ierr = MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4763   }
4764   /* recvs and sends of a-array are completed */
4765   i = nrecvs;
4766   while (i--) {
4767     ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4768   }
4769   if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4770   ierr = PetscFree2(rwaits,swaits);CHKERRQ(ierr);
4771 
4772   if (scall == MAT_INITIAL_MATRIX){
4773     /* put together the new matrix */
4774     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);CHKERRQ(ierr);
4775 
4776     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4777     /* Since these are PETSc arrays, change flags to free them as necessary. */
4778     b_oth          = (Mat_SeqAIJ *)(*B_oth)->data;
4779     b_oth->free_a  = PETSC_TRUE;
4780     b_oth->free_ij = PETSC_TRUE;
4781     b_oth->nonew   = 0;
4782 
4783     ierr = PetscFree(bufj);CHKERRQ(ierr);
4784     if (!startsj || !bufa_ptr){
4785       ierr = PetscFree(sstartsj);CHKERRQ(ierr);
4786       ierr = PetscFree(bufa_ptr);CHKERRQ(ierr);
4787     } else {
4788       *startsj  = sstartsj;
4789       *bufa_ptr = bufa;
4790     }
4791   }
4792   ierr = PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
4793   PetscFunctionReturn(0);
4794 }
4795 
4796 #undef __FUNCT__
4797 #define __FUNCT__ "MatGetCommunicationStructs"
4798 /*@C
4799   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
4800 
4801   Not Collective
4802 
4803   Input Parameters:
4804 . A - The matrix in mpiaij format
4805 
4806   Output Parameter:
4807 + lvec - The local vector holding off-process values from the argument to a matrix-vector product
4808 . colmap - A map from global column index to local index into lvec
4809 - multScatter - A scatter from the argument of a matrix-vector product to lvec
4810 
4811   Level: developer
4812 
4813 @*/
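/*
   Sample usage (a sketch; A is assumed to be an assembled MPIAIJ matrix; the type of
   colmap depends on whether PETSc was configured with PETSC_USE_CTABLE):

     Vec        lvec;
     VecScatter Mvctx;
   #if defined(PETSC_USE_CTABLE)
     PetscTable colmap;
   #else
     PetscInt   *colmap;
   #endif
     ierr = MatGetCommunicationStructs(A,&lvec,&colmap,&Mvctx);CHKERRQ(ierr);
*/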
4814 #if defined (PETSC_USE_CTABLE)
4815 PetscErrorCode PETSCMAT_DLLEXPORT MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
4816 #else
4817 PetscErrorCode PETSCMAT_DLLEXPORT MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
4818 #endif
4819 {
4820   Mat_MPIAIJ *a;
4821 
4822   PetscFunctionBegin;
4823   PetscValidHeaderSpecific(A, MAT_COOKIE, 1);
4824   PetscValidPointer(lvec, 2);
4825   PetscValidPointer(colmap, 3);
4826   PetscValidPointer(multScatter, 4);
4827   a = (Mat_MPIAIJ *) A->data;
4828   if (lvec) *lvec = a->lvec;
4829   if (colmap) *colmap = a->colmap;
4830   if (multScatter) *multScatter = a->Mvctx;
4831   PetscFunctionReturn(0);
4832 }
4833 
4834 EXTERN_C_BEGIN
4835 extern PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIAIJ_MPICRL(Mat,const MatType,MatReuse,Mat*);
4836 extern PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIAIJ_MPICSRPERM(Mat,const MatType,MatReuse,Mat*);
4837 EXTERN_C_END
4838 
4839 #include "../src/mat/impls/dense/mpi/mpidense.h"
4840 
4841 #undef __FUNCT__
4842 #define __FUNCT__ "MatMatMultNumeric_MPIDense_MPIAIJ"
4843 /*
4844     Computes C = A*B as (B'*A')', since computing the MPIDense*MPIAIJ product A*B directly is untenable
4845 
4846                n                       p                          p
4847         (              )       (              )         (                  )
4848       m (      A       )  *  n (       B      )   =   m (         C        )
4849         (              )       (              )         (                  )
4850 
4851 */
4852 PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
4853 {
4854   PetscErrorCode     ierr;
4855   Mat                At,Bt,Ct;
4856 
4857   PetscFunctionBegin;
4858   ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
4859   ierr = MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);CHKERRQ(ierr);
4860   ierr = MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);CHKERRQ(ierr);
4861   ierr = MatDestroy(At);CHKERRQ(ierr);
4862   ierr = MatDestroy(Bt);CHKERRQ(ierr);
4863   ierr = MatTranspose(Ct,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
4864   ierr = MatDestroy(Ct);CHKERRQ(ierr);
4865   PetscFunctionReturn(0);
4866 }
4867 
4868 #undef __FUNCT__
4869 #define __FUNCT__ "MatMatMultSymbolic_MPIDense_MPIAIJ"
4870 PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
4871 {
4872   PetscErrorCode ierr;
4873   PetscInt       m=A->rmap->n,n=B->cmap->n;
4874   Mat            Cmat;
4875 
4876   PetscFunctionBegin;
4877   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
4878   ierr = MatCreate(((PetscObject)A)->comm,&Cmat);CHKERRQ(ierr);
4879   ierr = MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4880   ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
4881   ierr = MatMPIDenseSetPreallocation(Cmat,PETSC_NULL);CHKERRQ(ierr);
4882   ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4883   ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4884   *C   = Cmat;
4885   PetscFunctionReturn(0);
4886 }
4887 
4888 /* ----------------------------------------------------------------*/
4889 #undef __FUNCT__
4890 #define __FUNCT__ "MatMatMult_MPIDense_MPIAIJ"
4891 PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
4892 {
4893   PetscErrorCode ierr;
4894 
4895   PetscFunctionBegin;
4896   if (scall == MAT_INITIAL_MATRIX){
4897     ierr = MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
4898   }
4899   ierr = MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);CHKERRQ(ierr);
4900   PetscFunctionReturn(0);
4901 }
4902 
4903 EXTERN_C_BEGIN
4904 #if defined(PETSC_HAVE_MUMPS)
4905 extern PetscErrorCode MatGetFactor_mpiaij_mumps(Mat,MatFactorType,Mat*);
4906 #endif
4907 #if defined(PETSC_HAVE_PASTIX)
4908 extern PetscErrorCode MatGetFactor_mpiaij_pastix(Mat,MatFactorType,Mat*);
4909 #endif
4910 #if defined(PETSC_HAVE_SUPERLU_DIST)
4911 extern PetscErrorCode MatGetFactor_mpiaij_superlu_dist(Mat,MatFactorType,Mat*);
4912 #endif
4913 #if defined(PETSC_HAVE_SPOOLES)
4914 extern PetscErrorCode MatGetFactor_mpiaij_spooles(Mat,MatFactorType,Mat*);
4915 #endif
4916 EXTERN_C_END
4917 
4918 /*MC
4919    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
4920 
4921    Options Database Keys:
4922 . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
4923 
4924   Level: beginner
4925 
4926 .seealso: MatCreateMPIAIJ()
4927 M*/
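/*
   A minimal creation sketch for this matrix type (comm, m, n, M, N and the
   preallocation estimates d_nz/o_nz are assumed to be supplied by the caller):

     Mat A;
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,d_nz,PETSC_NULL,o_nz,PETSC_NULL);CHKERRQ(ierr);
*/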
4928 
4929 EXTERN_C_BEGIN
4930 #undef __FUNCT__
4931 #define __FUNCT__ "MatCreate_MPIAIJ"
4932 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIAIJ(Mat B)
4933 {
4934   Mat_MPIAIJ     *b;
4935   PetscErrorCode ierr;
4936   PetscMPIInt    size;
4937 
4938   PetscFunctionBegin;
4939   ierr = MPI_Comm_size(((PetscObject)B)->comm,&size);CHKERRQ(ierr);
4940 
4941   ierr            = PetscNewLog(B,Mat_MPIAIJ,&b);CHKERRQ(ierr);
4942   B->data         = (void*)b;
4943   ierr            = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
4944   B->rmap->bs      = 1;
4945   B->assembled    = PETSC_FALSE;
4946   B->mapping      = 0;
4947 
4948   B->insertmode      = NOT_SET_VALUES;
4949   b->size            = size;
4950   ierr = MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);CHKERRQ(ierr);
4951 
4952   /* build cache for off array entries formed */
4953   ierr = MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);CHKERRQ(ierr);
4954   b->donotstash  = PETSC_FALSE;
4955   b->colmap      = 0;
4956   b->garray      = 0;
4957   b->roworiented = PETSC_TRUE;
4958 
4959   /* stuff used for matrix vector multiply */
4960   b->lvec      = PETSC_NULL;
4961   b->Mvctx     = PETSC_NULL;
4962 
4963   /* stuff for MatGetRow() */
4964   b->rowindices   = 0;
4965   b->rowvalues    = 0;
4966   b->getrowactive = PETSC_FALSE;
4967 
4968 #if defined(PETSC_HAVE_SPOOLES)
4969   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_spooles_C",
4970                                      "MatGetFactor_mpiaij_spooles",
4971                                      MatGetFactor_mpiaij_spooles);CHKERRQ(ierr);
4972 #endif
4973 #if defined(PETSC_HAVE_MUMPS)
4974   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_mumps_C",
4975                                      "MatGetFactor_mpiaij_mumps",
4976                                      MatGetFactor_mpiaij_mumps);CHKERRQ(ierr);
4977 #endif
4978 #if defined(PETSC_HAVE_PASTIX)
4979   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_pastix_C",
4980 					   "MatGetFactor_mpiaij_pastix",
4981 					   MatGetFactor_mpiaij_pastix);CHKERRQ(ierr);
4982 #endif
4983 #if defined(PETSC_HAVE_SUPERLU_DIST)
4984   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_superlu_dist_C",
4985                                      "MatGetFactor_mpiaij_superlu_dist",
4986                                      MatGetFactor_mpiaij_superlu_dist);CHKERRQ(ierr);
4987 #endif
4988   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
4989                                      "MatStoreValues_MPIAIJ",
4990                                      MatStoreValues_MPIAIJ);CHKERRQ(ierr);
4991   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
4992                                      "MatRetrieveValues_MPIAIJ",
4993                                      MatRetrieveValues_MPIAIJ);CHKERRQ(ierr);
4994   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
4995 				     "MatGetDiagonalBlock_MPIAIJ",
4996                                      MatGetDiagonalBlock_MPIAIJ);CHKERRQ(ierr);
4997   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatIsTranspose_C",
4998 				     "MatIsTranspose_MPIAIJ",
4999 				     MatIsTranspose_MPIAIJ);CHKERRQ(ierr);
5000   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocation_C",
5001 				     "MatMPIAIJSetPreallocation_MPIAIJ",
5002 				     MatMPIAIJSetPreallocation_MPIAIJ);CHKERRQ(ierr);
5003   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",
5004 				     "MatMPIAIJSetPreallocationCSR_MPIAIJ",
5005 				     MatMPIAIJSetPreallocationCSR_MPIAIJ);CHKERRQ(ierr);
5006   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
5007 				     "MatDiagonalScaleLocal_MPIAIJ",
5008 				     MatDiagonalScaleLocal_MPIAIJ);CHKERRQ(ierr);
5009   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicsrperm_C",
5010                                      "MatConvert_MPIAIJ_MPICSRPERM",
5011                                       MatConvert_MPIAIJ_MPICSRPERM);CHKERRQ(ierr);
5012   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicrl_C",
5013                                      "MatConvert_MPIAIJ_MPICRL",
5014                                       MatConvert_MPIAIJ_MPICRL);CHKERRQ(ierr);
5015   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",
5016                                      "MatMatMult_MPIDense_MPIAIJ",
5017                                       MatMatMult_MPIDense_MPIAIJ);CHKERRQ(ierr);
5018   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",
5019                                      "MatMatMultSymbolic_MPIDense_MPIAIJ",
5020                                       MatMatMultSymbolic_MPIDense_MPIAIJ);CHKERRQ(ierr);
5021   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",
5022                                      "MatMatMultNumeric_MPIDense_MPIAIJ",
5023                                       MatMatMultNumeric_MPIDense_MPIAIJ);CHKERRQ(ierr);
5024   ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);CHKERRQ(ierr);
5025   PetscFunctionReturn(0);
5026 }
5027 EXTERN_C_END
5028 
5029 #undef __FUNCT__
5030 #define __FUNCT__ "MatCreateMPIAIJWithSplitArrays"
5031 /*@
5032      MatCreateMPIAIJWithSplitArrays - creates an MPIAIJ matrix using arrays that contain the "diagonal"
5033          and "off-diagonal" part of the matrix in CSR format.
5034 
5035    Collective on MPI_Comm
5036 
5037    Input Parameters:
5038 +  comm - MPI communicator
5039 .  m - number of local rows (Cannot be PETSC_DECIDE)
5040 .  n - This value should be the same as the local size used in creating the
5041        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
5042        it calculated if N is given). For square matrices n is almost always m.
5043 .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
5044 .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
5045 .   i - row indices for "diagonal" portion of matrix
5046 .   j - column indices
5047 .   a - matrix values
5048 .   oi - row indices for "off-diagonal" portion of matrix
5049 .   oj - column indices
5050 -   oa - matrix values
5051 
5052    Output Parameter:
5053 .   mat - the matrix
5054 
5055    Level: advanced
5056 
5057    Notes:
5058        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc.
5059 
5060        The i and j indices are 0 based
5061 
5062        See MatCreateMPIAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5063 
5064 
5065 .keywords: matrix, aij, compressed row, sparse, parallel
5066 
5067 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5068           MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithArrays()
5069 @*/
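/*
   A hypothetical sketch on 2 processes, each owning one row of a 2x2 matrix (j holds
   column indices local to the diagonal block, oj holds global column indices, and the
   arrays must outlive the matrix since they are not copied):

     Mat         A;
     PetscInt    i[2]  = {0,1}, j[1]  = {0};
     PetscInt    oi[2] = {0,1}, oj[1] = {1};       process 1 would use oj[0] = 0
     PetscScalar a[1]  = {2.0}, oa[1] = {-1.0};
     ierr = MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,1,1,2,2,i,j,a,oi,oj,oa,&A);CHKERRQ(ierr);
*/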
5070 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],
5071 								PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5072 {
5073   PetscErrorCode ierr;
5074   Mat_MPIAIJ     *maij;
5075 
5076  PetscFunctionBegin;
5077   if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5078   if (i[0]) {
5079     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5080   }
5081   if (oi[0]) {
5082     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5083   }
5084   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
5085   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
5086   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
5087   maij = (Mat_MPIAIJ*) (*mat)->data;
5088   maij->donotstash     = PETSC_TRUE;
5089   (*mat)->preallocated = PETSC_TRUE;
5090 
5091   ierr = PetscMapSetBlockSize((*mat)->rmap,1);CHKERRQ(ierr);
5092   ierr = PetscMapSetBlockSize((*mat)->cmap,1);CHKERRQ(ierr);
5093   ierr = PetscMapSetUp((*mat)->rmap);CHKERRQ(ierr);
5094   ierr = PetscMapSetUp((*mat)->cmap);CHKERRQ(ierr);
5095 
5096   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);CHKERRQ(ierr);
5097   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);CHKERRQ(ierr);
5098 
5099   ierr = MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5100   ierr = MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5101   ierr = MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5102   ierr = MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5103 
5104   ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5105   ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5106   PetscFunctionReturn(0);
5107 }
5108 
5109 /*
5110     Special version for direct calls from Fortran
5111 */
5112 #if defined(PETSC_HAVE_FORTRAN_CAPS)
5113 #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5114 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5115 #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5116 #endif
5117 
5118 /* Change these macros so they can be used in a void function */
5119 #undef CHKERRQ
5120 #define CHKERRQ(ierr) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5121 #undef SETERRQ2
5122 #define SETERRQ2(ierr,b,c,d) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5123 #undef SETERRQ
5124 #define SETERRQ(ierr,b) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5125 
5126 EXTERN_C_BEGIN
5127 #undef __FUNCT__
5128 #define __FUNCT__ "matsetvaluesmpiaij_"
5129 void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5130 {
5131   Mat             mat = *mmat;
5132   PetscInt        m = *mm, n = *mn;
5133   InsertMode      addv = *maddv;
5134   Mat_MPIAIJ      *aij = (Mat_MPIAIJ*)mat->data;
5135   PetscScalar     value;
5136   PetscErrorCode  ierr;
5137 
5138   MatPreallocated(mat);
5139   if (mat->insertmode == NOT_SET_VALUES) {
5140     mat->insertmode = addv;
5141   }
5142 #if defined(PETSC_USE_DEBUG)
5143   else if (mat->insertmode != addv) {
5144     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5145   }
5146 #endif
5147   {
5148   PetscInt        i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5149   PetscInt        cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5150   PetscTruth      roworiented = aij->roworiented;
5151 
5152   /* Some Variables required in the macro */
5153   Mat             A = aij->A;
5154   Mat_SeqAIJ      *a = (Mat_SeqAIJ*)A->data;
5155   PetscInt        *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5156   MatScalar       *aa = a->a;
5157   PetscTruth      ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES))?PETSC_TRUE:PETSC_FALSE);
5158   Mat             B = aij->B;
5159   Mat_SeqAIJ      *b = (Mat_SeqAIJ*)B->data;
5160   PetscInt        *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5161   MatScalar       *ba = b->a;
5162 
5163   PetscInt        *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5164   PetscInt        nonew = a->nonew;
5165   MatScalar       *ap1,*ap2;
5166 
5167   PetscFunctionBegin;
5168   for (i=0; i<m; i++) {
5169     if (im[i] < 0) continue;
5170 #if defined(PETSC_USE_DEBUG)
5171     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5172 #endif
5173     if (im[i] >= rstart && im[i] < rend) {
5174       row      = im[i] - rstart;
5175       lastcol1 = -1;
5176       rp1      = aj + ai[row];
5177       ap1      = aa + ai[row];
5178       rmax1    = aimax[row];
5179       nrow1    = ailen[row];
5180       low1     = 0;
5181       high1    = nrow1;
5182       lastcol2 = -1;
5183       rp2      = bj + bi[row];
5184       ap2      = ba + bi[row];
5185       rmax2    = bimax[row];
5186       nrow2    = bilen[row];
5187       low2     = 0;
5188       high2    = nrow2;
5189 
5190       for (j=0; j<n; j++) {
5191         if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
5192         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
5193         if (in[j] >= cstart && in[j] < cend){
5194           col = in[j] - cstart;
5195           MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
5196         } else if (in[j] < 0) continue;
5197 #if defined(PETSC_USE_DEBUG)
5198         else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
5199 #endif
5200         else {
5201           if (mat->was_assembled) {
5202             if (!aij->colmap) {
5203               ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
5204             }
5205 #if defined (PETSC_USE_CTABLE)
5206             ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
5207 	    col--;
5208 #else
5209             col = aij->colmap[in[j]] - 1;
5210 #endif
5211             if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5212               ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
5213               col =  in[j];
5214               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5215               B = aij->B;
5216               b = (Mat_SeqAIJ*)B->data;
5217               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5218               rp2      = bj + bi[row];
5219               ap2      = ba + bi[row];
5220               rmax2    = bimax[row];
5221               nrow2    = bilen[row];
5222               low2     = 0;
5223               high2    = nrow2;
5224               bm       = aij->B->rmap->n;
5225               ba = b->a;
5226             }
5227           } else col = in[j];
5228           MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
5229         }
5230       }
5231     } else {
5232       if (!aij->donotstash) {
5233         if (roworiented) {
5234           if (ignorezeroentries && v[i*n] == 0.0) continue;
5235           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
5236         } else {
5237           if (ignorezeroentries && v[i] == 0.0) continue;
5238           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
5239         }
5240       }
5241     }
5242   }}
5243   PetscFunctionReturnVoid();
5244 }
5245 EXTERN_C_END
5246 
5247