xref: /petsc/src/mat/impls/dense/mpi/mpidense.c (revision d32f9abdbc052d6e1fd06679b17a55415c3aae30)
1 #define PETSCMAT_DLL
2 
3 /*
4    Basic functions for basic parallel dense matrices.
5 */
6 
7 
8 #include "src/mat/impls/dense/mpi/mpidense.h"    /*I   "petscmat.h"  I*/
9 #if defined(PETSC_HAVE_PLAPACK)
10 static PetscMPIInt Plapack_nprows,Plapack_npcols,Plapack_ierror,Plapack_nb_alg;
11 static MPI_Comm Plapack_comm_2d;
12 #endif
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatDenseGetLocalMatrix"
16 /*@
17 
18       MatDenseGetLocalMatrix - For a MATMPIDENSE or MATSEQDENSE matrix, returns the sequential
19               matrix that stores the local rows of the operator; for a MATSEQDENSE matrix it returns the matrix itself.
20 
21     Input Parameter:
22 .      A - the Seq or MPI dense matrix
23 
24     Output Parameter:
25 .      B - the inner matrix
26 
27     Level: intermediate
28 
29 @*/
30 PetscErrorCode MatDenseGetLocalMatrix(Mat A,Mat *B)
31 {
32   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
33   PetscErrorCode ierr;
34   PetscTruth     flg;
35 
36   PetscFunctionBegin;
37   ierr = PetscTypeCompare((PetscObject)A,MATMPIDENSE,&flg);CHKERRQ(ierr);
38   if (flg) {
39     *B = mat->A;
40   } else {
41     *B = A;
42   }
43   PetscFunctionReturn(0);
44 }
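
/*
   A minimal usage sketch for MatDenseGetLocalMatrix(), assuming A is an already
   assembled MATSEQDENSE or MATMPIDENSE matrix created by the caller:

     Mat Aloc;
     ierr = MatDenseGetLocalMatrix(A,&Aloc);CHKERRQ(ierr);

   Aloc is the on-process MATSEQDENSE block (all columns of the locally owned rows
   in the MPI case); it is owned by A and should not be destroyed separately.
*/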
45 
46 #undef __FUNCT__
47 #define __FUNCT__ "MatGetRow_MPIDense"
48 PetscErrorCode MatGetRow_MPIDense(Mat A,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
49 {
50   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
51   PetscErrorCode ierr;
52   PetscInt       lrow,rstart = A->rmap.rstart,rend = A->rmap.rend;
53 
54   PetscFunctionBegin;
55   if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_SUP,"only local rows");
56   lrow = row - rstart;
57   ierr = MatGetRow(mat->A,lrow,nz,(const PetscInt **)idx,(const PetscScalar **)v);CHKERRQ(ierr);
58   PetscFunctionReturn(0);
59 }
60 
61 #undef __FUNCT__
62 #define __FUNCT__ "MatRestoreRow_MPIDense"
63 PetscErrorCode MatRestoreRow_MPIDense(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
64 {
65   PetscErrorCode ierr;
66 
67   PetscFunctionBegin;
68   if (idx) {ierr = PetscFree(*idx);CHKERRQ(ierr);}
69   if (v) {ierr = PetscFree(*v);CHKERRQ(ierr);}
70   PetscFunctionReturn(0);
71 }
72 
73 EXTERN_C_BEGIN
74 #undef __FUNCT__
75 #define __FUNCT__ "MatGetDiagonalBlock_MPIDense"
76 PetscErrorCode PETSCMAT_DLLEXPORT MatGetDiagonalBlock_MPIDense(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *B)
77 {
78   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
79   PetscErrorCode ierr;
80   PetscInt       m = A->rmap.n,rstart = A->rmap.rstart;
81   PetscScalar    *array;
82   MPI_Comm       comm;
83 
84   PetscFunctionBegin;
85   if (A->rmap.N != A->cmap.N) SETERRQ(PETSC_ERR_SUP,"Only square matrices supported.");
86 
87   /* The reuse aspect is not implemented efficiently */
88   if (reuse) { ierr = MatDestroy(*B);CHKERRQ(ierr);}
89 
90   ierr = PetscObjectGetComm((PetscObject)(mdn->A),&comm);CHKERRQ(ierr);
91   ierr = MatGetArray(mdn->A,&array);CHKERRQ(ierr);
92   ierr = MatCreate(comm,B);CHKERRQ(ierr);
93   ierr = MatSetSizes(*B,m,m,m,m);CHKERRQ(ierr);
94   ierr = MatSetType(*B,((PetscObject)mdn->A)->type_name);CHKERRQ(ierr);
95   ierr = MatSeqDenseSetPreallocation(*B,array+m*rstart);CHKERRQ(ierr);
96   ierr = MatRestoreArray(mdn->A,&array);CHKERRQ(ierr);
97   ierr = MatAssemblyBegin(*B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
98   ierr = MatAssemblyEnd(*B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
99 
100   *iscopy = PETSC_TRUE;
101   PetscFunctionReturn(0);
102 }
103 EXTERN_C_END
104 
105 #undef __FUNCT__
106 #define __FUNCT__ "MatSetValues_MPIDense"
107 PetscErrorCode MatSetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],const PetscScalar v[],InsertMode addv)
108 {
109   Mat_MPIDense   *A = (Mat_MPIDense*)mat->data;
110   PetscErrorCode ierr;
111   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;
112   PetscTruth     roworiented = A->roworiented;
113 
114   PetscFunctionBegin;
115   for (i=0; i<m; i++) {
116     if (idxm[i] < 0) continue;
117     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
118     if (idxm[i] >= rstart && idxm[i] < rend) {
119       row = idxm[i] - rstart;
120       if (roworiented) {
121         ierr = MatSetValues(A->A,1,&row,n,idxn,v+i*n,addv);CHKERRQ(ierr);
122       } else {
123         for (j=0; j<n; j++) {
124           if (idxn[j] < 0) continue;
125           if (idxn[j] >= mat->cmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
126           ierr = MatSetValues(A->A,1,&row,1,&idxn[j],v+i+j*m,addv);CHKERRQ(ierr);
127         }
128       }
129     } else {
130       if (!A->donotstash) {
131         if (roworiented) {
132           ierr = MatStashValuesRow_Private(&mat->stash,idxm[i],n,idxn,v+i*n);CHKERRQ(ierr);
133         } else {
134           ierr = MatStashValuesCol_Private(&mat->stash,idxm[i],n,idxn,v+i,m);CHKERRQ(ierr);
135         }
136       }
137     }
138   }
139   PetscFunctionReturn(0);
140 }
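
/*
   A usage sketch of the insertion path implemented above, assuming mat is an
   MPIDENSE matrix with at least two global columns and at least one locally
   owned row on this process:

     PetscInt    rstart,rend,row,cols[2] = {0,1};
     PetscScalar vals[2] = {1.0,2.0};

     ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
     row  = rstart;
     ierr = MatSetValues(mat,1,&row,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   Rows owned by this process are stored immediately in the local MATSEQDENSE
   block; rows owned elsewhere are placed in the stash and communicated during
   assembly, unless MAT_IGNORE_OFF_PROC_ENTRIES has been set.
*/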
141 
142 #undef __FUNCT__
143 #define __FUNCT__ "MatGetValues_MPIDense"
144 PetscErrorCode MatGetValues_MPIDense(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
145 {
146   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
147   PetscErrorCode ierr;
148   PetscInt       i,j,rstart = mat->rmap.rstart,rend = mat->rmap.rend,row;
149 
150   PetscFunctionBegin;
151   for (i=0; i<m; i++) {
152     if (idxm[i] < 0) continue; /* SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row"); */
153     if (idxm[i] >= mat->rmap.N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
154     if (idxm[i] >= rstart && idxm[i] < rend) {
155       row = idxm[i] - rstart;
156       for (j=0; j<n; j++) {
157         if (idxn[j] < 0) continue; /* SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column"); */
158         if (idxn[j] >= mat->cmap.N) {
159           SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
160         }
161         ierr = MatGetValues(mdn->A,1,&row,1,&idxn[j],v+i*n+j);CHKERRQ(ierr);
162       }
163     } else {
164       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
165     }
166   }
167   PetscFunctionReturn(0);
168 }
169 
170 #undef __FUNCT__
171 #define __FUNCT__ "MatGetArray_MPIDense"
172 PetscErrorCode MatGetArray_MPIDense(Mat A,PetscScalar *array[])
173 {
174   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
175   PetscErrorCode ierr;
176 
177   PetscFunctionBegin;
178   ierr = MatGetArray(a->A,array);CHKERRQ(ierr);
179   PetscFunctionReturn(0);
180 }
181 
182 #undef __FUNCT__
183 #define __FUNCT__ "MatGetSubMatrix_MPIDense"
184 static PetscErrorCode MatGetSubMatrix_MPIDense(Mat A,IS isrow,IS iscol,PetscInt cs,MatReuse scall,Mat *B)
185 {
186   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data,*newmatd;
187   Mat_SeqDense   *lmat = (Mat_SeqDense*)mat->A->data;
188   PetscErrorCode ierr;
189   PetscInt       i,j,*irow,*icol,rstart,rend,nrows,ncols,nlrows,nlcols;
190   PetscScalar    *av,*bv,*v = lmat->v;
191   Mat            newmat;
192 
193   PetscFunctionBegin;
194   ierr = ISGetIndices(isrow,&irow);CHKERRQ(ierr);
195   ierr = ISGetIndices(iscol,&icol);CHKERRQ(ierr);
196   ierr = ISGetLocalSize(isrow,&nrows);CHKERRQ(ierr);
197   ierr = ISGetLocalSize(iscol,&ncols);CHKERRQ(ierr);
198 
199   /* No parallel redistribution currently supported! Should really check each index set
200      to confirm that it is OK.  Currently this supports only submatrices with the same row
201      partitioning as the original matrix! */
202 
203   ierr = MatGetLocalSize(A,&nlrows,&nlcols);CHKERRQ(ierr);
204   ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
205 
206   /* Check submatrix call */
207   if (scall == MAT_REUSE_MATRIX) {
208     /* SETERRQ(PETSC_ERR_ARG_SIZ,"Reused submatrix wrong size"); */
209     /* Really need to test rows and column sizes! */
210     newmat = *B;
211   } else {
212     /* Create and fill new matrix */
213     ierr = MatCreate(((PetscObject)A)->comm,&newmat);CHKERRQ(ierr);
214     ierr = MatSetSizes(newmat,nrows,cs,PETSC_DECIDE,ncols);CHKERRQ(ierr);
215     ierr = MatSetType(newmat,((PetscObject)A)->type_name);CHKERRQ(ierr);
216     ierr = MatMPIDenseSetPreallocation(newmat,PETSC_NULL);CHKERRQ(ierr);
217   }
218 
219   /* Now extract the data pointers and do the copy, column at a time */
220   newmatd = (Mat_MPIDense*)newmat->data;
221   bv      = ((Mat_SeqDense *)newmatd->A->data)->v;
222 
223   for (i=0; i<ncols; i++) {
224     av = v + nlrows*icol[i];
225     for (j=0; j<nrows; j++) {
226       *bv++ = av[irow[j] - rstart];
227     }
228   }
229 
230   /* Assemble the matrices so that the correct flags are set */
231   ierr = MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
232   ierr = MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
233 
234   /* Free work space */
235   ierr = ISRestoreIndices(isrow,&irow);CHKERRQ(ierr);
236   ierr = ISRestoreIndices(iscol,&icol);CHKERRQ(ierr);
237   *B = newmat;
238   PetscFunctionReturn(0);
239 }
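
/*
   A usage sketch for the extraction above, assuming A is an assembled MPIDENSE
   matrix; as the comment in the code notes, the row index set must follow the
   row partitioning of A since no redistribution is performed:

     IS       isrow,iscol;
     Mat      B;
     PetscInt rstart,rend,N;

     ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
     ierr = MatGetSize(A,PETSC_NULL,&N);CHKERRQ(ierr);
     ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&isrow);CHKERRQ(ierr);
     ierr = ISCreateStride(PETSC_COMM_SELF,N,0,1,&iscol);CHKERRQ(ierr);
     ierr = MatGetSubMatrix(A,isrow,iscol,PETSC_DECIDE,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
     ierr = ISDestroy(isrow);CHKERRQ(ierr);
     ierr = ISDestroy(iscol);CHKERRQ(ierr);
*/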
240 
241 #undef __FUNCT__
242 #define __FUNCT__ "MatRestoreArray_MPIDense"
243 PetscErrorCode MatRestoreArray_MPIDense(Mat A,PetscScalar *array[])
244 {
245   PetscFunctionBegin;
246   PetscFunctionReturn(0);
247 }
248 
249 #undef __FUNCT__
250 #define __FUNCT__ "MatAssemblyBegin_MPIDense"
251 PetscErrorCode MatAssemblyBegin_MPIDense(Mat mat,MatAssemblyType mode)
252 {
253   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
254   MPI_Comm       comm = ((PetscObject)mat)->comm;
255   PetscErrorCode ierr;
256   PetscInt       nstash,reallocs;
257   InsertMode     addv;
258 
259   PetscFunctionBegin;
260   /* make sure all processors are in either INSERT_VALUES or ADD_VALUES mode */
261   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,comm);CHKERRQ(ierr);
262   if (addv == (ADD_VALUES|INSERT_VALUES)) {
263     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix adds/inserts on different procs");
264   }
265   mat->insertmode = addv; /* in case this processor had no cache */
266 
267   ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap.range);CHKERRQ(ierr);
268   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
269   ierr = PetscInfo2(mdn->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
270   PetscFunctionReturn(0);
271 }
272 
273 #undef __FUNCT__
274 #define __FUNCT__ "MatAssemblyEnd_MPIDense"
275 PetscErrorCode MatAssemblyEnd_MPIDense(Mat mat,MatAssemblyType mode)
276 {
277   Mat_MPIDense    *mdn=(Mat_MPIDense*)mat->data;
278   PetscErrorCode  ierr;
279   PetscInt        i,*row,*col,flg,j,rstart,ncols;
280   PetscMPIInt     n;
281   PetscScalar     *val;
282   InsertMode      addv=mat->insertmode;
283 
284   PetscFunctionBegin;
285   /*  wait on receives */
286   while (1) {
287     ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
288     if (!flg) break;
289 
290     for (i=0; i<n;) {
291       /* Now identify the consecutive vals belonging to the same row */
292       for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
293       if (j < n) ncols = j-i;
294       else       ncols = n-i;
295       /* Now assemble all these values with a single function call */
296       ierr = MatSetValues_MPIDense(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
297       i = j;
298     }
299   }
300   ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
301 
302   ierr = MatAssemblyBegin(mdn->A,mode);CHKERRQ(ierr);
303   ierr = MatAssemblyEnd(mdn->A,mode);CHKERRQ(ierr);
304 
305   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
306     ierr = MatSetUpMultiply_MPIDense(mat);CHKERRQ(ierr);
307   }
308   PetscFunctionReturn(0);
309 }
310 
311 #undef __FUNCT__
312 #define __FUNCT__ "MatZeroEntries_MPIDense"
313 PetscErrorCode MatZeroEntries_MPIDense(Mat A)
314 {
315   PetscErrorCode ierr;
316   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;
317 
318   PetscFunctionBegin;
319   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
320   PetscFunctionReturn(0);
321 }
322 
323 /* The code does not handle the diagonal entries correctly unless the
324    matrix is square and the column and row ownerships are identical.
325    This is a BUG. The only way to fix it seems to be to access
326    mdn->A and mdn->B directly and not through the MatZeroRows()
327    routine.
328 */
329 #undef __FUNCT__
330 #define __FUNCT__ "MatZeroRows_MPIDense"
331 PetscErrorCode MatZeroRows_MPIDense(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
332 {
333   Mat_MPIDense   *l = (Mat_MPIDense*)A->data;
334   PetscErrorCode ierr;
335   PetscInt       i,*owners = A->rmap.range;
336   PetscInt       *nprocs,j,idx,nsends;
337   PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
338   PetscInt       *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source;
339   PetscInt       *lens,*lrows,*values;
340   PetscMPIInt    n,imdex,rank = l->rank,size = l->size;
341   MPI_Comm       comm = ((PetscObject)A)->comm;
342   MPI_Request    *send_waits,*recv_waits;
343   MPI_Status     recv_status,*send_status;
344   PetscTruth     found;
345 
346   PetscFunctionBegin;
347   /*  first count number of contributors to each processor */
348   ierr  = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
349   ierr  = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
350   ierr  = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/
351   for (i=0; i<N; i++) {
352     idx = rows[i];
353     found = PETSC_FALSE;
354     for (j=0; j<size; j++) {
355       if (idx >= owners[j] && idx < owners[j+1]) {
356         nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
357       }
358     }
359     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
360   }
361   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
362 
363   /* inform other processors of number of messages and max length*/
364   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
365 
366   /* post receives:   */
367   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
368   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
369   for (i=0; i<nrecvs; i++) {
370     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
371   }
372 
373   /* do sends:
374       1) starts[i] gives the starting index in svalues for stuff going to
375          the ith processor
376   */
377   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr);
378   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
379   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
380   starts[0]  = 0;
381   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
382   for (i=0; i<N; i++) {
383     svalues[starts[owner[i]]++] = rows[i];
384   }
385 
386   starts[0] = 0;
387   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
388   count = 0;
389   for (i=0; i<size; i++) {
390     if (nprocs[2*i+1]) {
391       ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
392     }
393   }
394   ierr = PetscFree(starts);CHKERRQ(ierr);
395 
396   base = owners[rank];
397 
398   /*  wait on receives */
399   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
400   source = lens + nrecvs;
401   count  = nrecvs; slen = 0;
402   while (count) {
403     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
404     /* unpack receives into our local space */
405     ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
406     source[imdex]  = recv_status.MPI_SOURCE;
407     lens[imdex]    = n;
408     slen += n;
409     count--;
410   }
411   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
412 
413   /* move the data into the send scatter */
414   ierr = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
415   count = 0;
416   for (i=0; i<nrecvs; i++) {
417     values = rvalues + i*nmax;
418     for (j=0; j<lens[i]; j++) {
419       lrows[count++] = values[j] - base;
420     }
421   }
422   ierr = PetscFree(rvalues);CHKERRQ(ierr);
423   ierr = PetscFree(lens);CHKERRQ(ierr);
424   ierr = PetscFree(owner);CHKERRQ(ierr);
425   ierr = PetscFree(nprocs);CHKERRQ(ierr);
426 
427   /* actually zap the local rows */
428   ierr = MatZeroRows(l->A,slen,lrows,diag);CHKERRQ(ierr);
429   ierr = PetscFree(lrows);CHKERRQ(ierr);
430 
431   /* wait on sends */
432   if (nsends) {
433     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
434     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
435     ierr = PetscFree(send_status);CHKERRQ(ierr);
436   }
437   ierr = PetscFree(send_waits);CHKERRQ(ierr);
438   ierr = PetscFree(svalues);CHKERRQ(ierr);
439 
440   PetscFunctionReturn(0);
441 }
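
/*
   A usage sketch for the row-zeroing path above, assuming A is an assembled
   MPIDENSE matrix and every process owns at least one row; rows owned by other
   processes may also be listed, since they are routed to their owners by the
   communication set up above:

     PetscInt    rstart,rend,row;
     PetscScalar diag = 1.0;

     ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
     row  = rstart;
     ierr = MatZeroRows(A,1,&row,diag);CHKERRQ(ierr);

   Each listed row is overwritten with zeros and its diagonal entry set to diag,
   subject to the square-matrix caveat in the comment above MatZeroRows_MPIDense.
*/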
442 
443 #undef __FUNCT__
444 #define __FUNCT__ "MatMult_MPIDense"
445 PetscErrorCode MatMult_MPIDense(Mat mat,Vec xx,Vec yy)
446 {
447   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
448   PetscErrorCode ierr;
449 
450   PetscFunctionBegin;
451   ierr = VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
452   ierr = VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
453   ierr = MatMult_SeqDense(mdn->A,mdn->lvec,yy);CHKERRQ(ierr);
454   PetscFunctionReturn(0);
455 }
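
/*
   A usage sketch of the multiply above, assuming A is an assembled MPIDENSE
   matrix; x must be distributed like the columns of A and y like its rows:

     Vec      x,y;
     PetscInt m,n,M,N;

     ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);
     ierr = MatGetSize(A,&M,&N);CHKERRQ(ierr);
     ierr = VecCreateMPI(((PetscObject)A)->comm,n,N,&x);CHKERRQ(ierr);
     ierr = VecCreateMPI(((PetscObject)A)->comm,m,M,&y);CHKERRQ(ierr);
     ierr = VecSet(x,1.0);CHKERRQ(ierr);
     ierr = MatMult(A,x,y);CHKERRQ(ierr);
     ierr = VecDestroy(x);CHKERRQ(ierr);
     ierr = VecDestroy(y);CHKERRQ(ierr);

   Internally all of x is gathered into the work vector lvec and a sequential
   dense multiply is applied to the local row block, as coded above.
*/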
456 
457 #undef __FUNCT__
458 #define __FUNCT__ "MatMultAdd_MPIDense"
459 PetscErrorCode MatMultAdd_MPIDense(Mat mat,Vec xx,Vec yy,Vec zz)
460 {
461   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
462   PetscErrorCode ierr;
463 
464   PetscFunctionBegin;
465   ierr = VecScatterBegin(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
466   ierr = VecScatterEnd(mdn->Mvctx,xx,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
467   ierr = MatMultAdd_SeqDense(mdn->A,mdn->lvec,yy,zz);CHKERRQ(ierr);
468   PetscFunctionReturn(0);
469 }
470 
471 #undef __FUNCT__
472 #define __FUNCT__ "MatMultTranspose_MPIDense"
473 PetscErrorCode MatMultTranspose_MPIDense(Mat A,Vec xx,Vec yy)
474 {
475   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
476   PetscErrorCode ierr;
477   PetscScalar    zero = 0.0;
478 
479   PetscFunctionBegin;
480   ierr = VecSet(yy,zero);CHKERRQ(ierr);
481   ierr = MatMultTranspose_SeqDense(a->A,xx,a->lvec);CHKERRQ(ierr);
482   ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
483   ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
484   PetscFunctionReturn(0);
485 }
486 
487 #undef __FUNCT__
488 #define __FUNCT__ "MatMultTransposeAdd_MPIDense"
489 PetscErrorCode MatMultTransposeAdd_MPIDense(Mat A,Vec xx,Vec yy,Vec zz)
490 {
491   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
492   PetscErrorCode ierr;
493 
494   PetscFunctionBegin;
495   ierr = VecCopy(yy,zz);CHKERRQ(ierr);
496   ierr = MatMultTranspose_SeqDense(a->A,xx,a->lvec);CHKERRQ(ierr);
497   ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
498   ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
499   PetscFunctionReturn(0);
500 }
501 
502 #undef __FUNCT__
503 #define __FUNCT__ "MatGetDiagonal_MPIDense"
504 PetscErrorCode MatGetDiagonal_MPIDense(Mat A,Vec v)
505 {
506   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
507   Mat_SeqDense   *aloc = (Mat_SeqDense*)a->A->data;
508   PetscErrorCode ierr;
509   PetscInt       len,i,n,m = A->rmap.n,radd;
510   PetscScalar    *x,zero = 0.0;
511 
512   PetscFunctionBegin;
513   ierr = VecSet(v,zero);CHKERRQ(ierr);
514   ierr = VecGetArray(v,&x);CHKERRQ(ierr);
515   ierr = VecGetSize(v,&n);CHKERRQ(ierr);
516   if (n != A->rmap.N) SETERRQ(PETSC_ERR_ARG_SIZ,"Nonconforming mat and vec");
517   len  = PetscMin(a->A->rmap.n,a->A->cmap.n);
518   radd = A->rmap.rstart*m;
519   for (i=0; i<len; i++) {
520     x[i] = aloc->v[radd + i*m + i];
521   }
522   ierr = VecRestoreArray(v,&x);CHKERRQ(ierr);
523   PetscFunctionReturn(0);
524 }
525 
526 #undef __FUNCT__
527 #define __FUNCT__ "MatDestroy_MPIDense"
528 PetscErrorCode MatDestroy_MPIDense(Mat mat)
529 {
530   Mat_MPIDense   *mdn = (Mat_MPIDense*)mat->data;
531   PetscErrorCode ierr;
532 #if defined(PETSC_HAVE_PLAPACK)
533   Mat_Plapack   *lu=(Mat_Plapack*)(mat->spptr);
534 #endif
535 
536   PetscFunctionBegin;
537 
538 #if defined(PETSC_USE_LOG)
539   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap.N,mat->cmap.N);
540 #endif
541   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
542   ierr = MatDestroy(mdn->A);CHKERRQ(ierr);
543   if (mdn->lvec)   {ierr = VecDestroy(mdn->lvec);CHKERRQ(ierr);}
544   if (mdn->Mvctx)  {ierr = VecScatterDestroy(mdn->Mvctx);CHKERRQ(ierr);}
545 #if defined(PETSC_HAVE_PLAPACK)
546   if (lu) {
547     ierr = PLA_Obj_free(&lu->A);CHKERRQ(ierr);
548     ierr = PLA_Obj_free (&lu->pivots);CHKERRQ(ierr);
549     ierr = PLA_Temp_free(&lu->templ);CHKERRQ(ierr);
550 
551     if (lu->is_pla) {
552       ierr = ISDestroy(lu->is_pla);CHKERRQ(ierr);
553       ierr = ISDestroy(lu->is_petsc);CHKERRQ(ierr);
554       ierr = VecScatterDestroy(lu->ctx);CHKERRQ(ierr);
555     }
556   }
557 #endif
558 
559   ierr = PetscFree(mdn);CHKERRQ(ierr);
560   ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
561   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);CHKERRQ(ierr);
562   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C","",PETSC_NULL);CHKERRQ(ierr);
563   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
564   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
565   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C","",PETSC_NULL);CHKERRQ(ierr);
566   PetscFunctionReturn(0);
567 }
568 
569 #undef __FUNCT__
570 #define __FUNCT__ "MatView_MPIDense_Binary"
571 static PetscErrorCode MatView_MPIDense_Binary(Mat mat,PetscViewer viewer)
572 {
573   Mat_MPIDense      *mdn = (Mat_MPIDense*)mat->data;
574   PetscErrorCode    ierr;
575   PetscViewerFormat format;
576   int               fd;
577   PetscInt          header[4],mmax,N = mat->cmap.N,i,j,m,k;
578   PetscMPIInt       rank,tag  = ((PetscObject)viewer)->tag,size;
579   PetscScalar       *work,*v,*vv;
580   Mat_SeqDense      *a = (Mat_SeqDense*)mdn->A->data;
581   MPI_Status        status;
582 
583   PetscFunctionBegin;
584   if (mdn->size == 1) {
585     ierr = MatView(mdn->A,viewer);CHKERRQ(ierr);
586   } else {
587     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
588     ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
589     ierr = MPI_Comm_size(((PetscObject)mat)->comm,&size);CHKERRQ(ierr);
590 
591     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
592     if (format == PETSC_VIEWER_BINARY_NATIVE) {
593 
594       if (!rank) {
595         /* store the matrix as a dense matrix */
596         header[0] = MAT_FILE_COOKIE;
597         header[1] = mat->rmap.N;
598         header[2] = N;
599         header[3] = MATRIX_BINARY_FORMAT_DENSE;
600         ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
601 
602         /* get largest work array needed for transposing array */
603         mmax = mat->rmap.n;
604         for (i=1; i<size; i++) {
605           mmax = PetscMax(mmax,mat->rmap.range[i+1] - mat->rmap.range[i]);
606         }
607         ierr = PetscMalloc(mmax*N*sizeof(PetscScalar),&work);CHKERRQ(ierr);
608 
609         /* write out local array, by rows */
610         m    = mat->rmap.n;
611         v    = a->v;
612         for (j=0; j<N; j++) {
613           for (i=0; i<m; i++) {
614             work[j + i*N] = *v++;
615           }
616         }
617         ierr = PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);CHKERRQ(ierr);
618         /* get largest work array to receive messages from other processes, excludes process zero */
619         mmax = 0;
620         for (i=1; i<size; i++) {
621           mmax = PetscMax(mmax,mat->rmap.range[i+1] - mat->rmap.range[i]);
622         }
623         ierr = PetscMalloc(mmax*N*sizeof(PetscScalar),&vv);CHKERRQ(ierr);
624         for(k = 1; k < size; k++) {
625           v    = vv;
626           m    = mat->rmap.range[k+1] - mat->rmap.range[k];
627           ierr = MPI_Recv(v,m*N,MPIU_SCALAR,k,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
628 
629           for(j = 0; j < N; j++) {
630             for(i = 0; i < m; i++) {
631               work[j + i*N] = *v++;
632             }
633           }
634           ierr = PetscBinaryWrite(fd,work,m*N,PETSC_SCALAR,PETSC_FALSE);CHKERRQ(ierr);
635         }
636         ierr = PetscFree(work);CHKERRQ(ierr);
637         ierr = PetscFree(vv);CHKERRQ(ierr);
638       } else {
639         ierr = MPI_Send(a->v,mat->rmap.n*mat->cmap.N,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
640       }
641     } else {
642       SETERRQ(PETSC_ERR_SUP,"To store a parallel dense matrix you must first call PetscViewerSetFormat(viewer,PETSC_VIEWER_BINARY_NATIVE)");
643     }
644   }
645   PetscFunctionReturn(0);
646 }
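
/*
   A usage sketch for the binary dump above; as the error message notes, the
   viewer must be put in native format before MatView() is called (the file
   name is illustrative):

     PetscViewer viewer;

     ierr = PetscViewerBinaryOpen(((PetscObject)mat)->comm,"dense.dat",FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
     ierr = PetscViewerSetFormat(viewer,PETSC_VIEWER_BINARY_NATIVE);CHKERRQ(ierr);
     ierr = MatView(mat,viewer);CHKERRQ(ierr);
     ierr = PetscViewerDestroy(viewer);CHKERRQ(ierr);

   Process 0 writes the header and its own rows, then receives and writes the
   rows of each remaining process in rank order.
*/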
647 
648 #undef __FUNCT__
649 #define __FUNCT__ "MatView_MPIDense_ASCIIorDraworSocket"
650 static PetscErrorCode MatView_MPIDense_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
651 {
652   Mat_MPIDense          *mdn = (Mat_MPIDense*)mat->data;
653   PetscErrorCode        ierr;
654   PetscMPIInt           size = mdn->size,rank = mdn->rank;
655   const PetscViewerType vtype;
656   PetscTruth            iascii,isdraw;
657   PetscViewer           sviewer;
658   PetscViewerFormat     format;
659 #if defined(PETSC_HAVE_PLAPACK)
660   Mat_Plapack           *lu=(Mat_Plapack*)(mat->spptr);
661 #endif
662 
663   PetscFunctionBegin;
664   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
665   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
666   if (iascii) {
667     ierr = PetscViewerGetType(viewer,&vtype);CHKERRQ(ierr);
668     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
669     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
670       MatInfo info;
671       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
672       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"  [%d] local rows %D nz %D nz alloced %D mem %D \n",rank,mat->rmap.n,
673                    (PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
674       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
675       ierr = VecScatterView(mdn->Mvctx,viewer);CHKERRQ(ierr);
676 #if defined(PETSC_HAVE_PLAPACK)
677       ierr = PetscViewerASCIIPrintf(viewer,"PLAPACK run parameters:\n");CHKERRQ(ierr);
678       ierr = PetscViewerASCIIPrintf(viewer,"  Processor mesh: nprows %d, npcols %d\n",Plapack_nprows, Plapack_npcols);CHKERRQ(ierr);
679       ierr = PetscViewerASCIIPrintf(viewer,"  Distr. block size nb: %d \n",lu->nb);CHKERRQ(ierr);
680       ierr = PetscViewerASCIIPrintf(viewer,"  Error checking: %d\n",Plapack_ierror);CHKERRQ(ierr);
681       ierr = PetscViewerASCIIPrintf(viewer,"  Algorithmic block size: %d\n",Plapack_nb_alg);CHKERRQ(ierr);
682 #endif
683       PetscFunctionReturn(0);
684     } else if (format == PETSC_VIEWER_ASCII_INFO) {
685       PetscFunctionReturn(0);
686     }
687   } else if (isdraw) {
688     PetscDraw  draw;
689     PetscTruth isnull;
690 
691     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
692     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
693     if (isnull) PetscFunctionReturn(0);
694   }
695 
696   if (size == 1) {
697     ierr = MatView(mdn->A,viewer);CHKERRQ(ierr);
698   } else {
699     /* assemble the entire matrix onto first processor. */
700     Mat         A;
701     PetscInt    M = mat->rmap.N,N = mat->cmap.N,m,row,i,nz;
702     PetscInt    *cols;
703     PetscScalar *vals;
704 
705     ierr = MatCreate(((PetscObject)mat)->comm,&A);CHKERRQ(ierr);
706     if (!rank) {
707       ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
708     } else {
709       ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
710     }
711     /* Since this is a temporary matrix, MATMPIDENSE instead of ((PetscObject)A)->type_name here is probably acceptable. */
712     ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
713     ierr = MatMPIDenseSetPreallocation(A,PETSC_NULL);CHKERRQ(ierr);
714     ierr = PetscLogObjectParent(mat,A);CHKERRQ(ierr);
715 
716     /* Copy the matrix ... This isn't the most efficient means,
717        but it's quick for now */
718     A->insertmode = INSERT_VALUES;
719     row = mat->rmap.rstart; m = mdn->A->rmap.n;
720     for (i=0; i<m; i++) {
721       ierr = MatGetRow_MPIDense(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
722       ierr = MatSetValues_MPIDense(A,1,&row,nz,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
723       ierr = MatRestoreRow_MPIDense(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
724       row++;
725     }
726 
727     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
728     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
729     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
730     if (!rank) {
731       ierr = MatView(((Mat_MPIDense*)(A->data))->A,sviewer);CHKERRQ(ierr);
732     }
733     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
734     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
735     ierr = MatDestroy(A);CHKERRQ(ierr);
736   }
737   PetscFunctionReturn(0);
738 }
739 
740 #undef __FUNCT__
741 #define __FUNCT__ "MatView_MPIDense"
742 PetscErrorCode MatView_MPIDense(Mat mat,PetscViewer viewer)
743 {
744   PetscErrorCode ierr;
745   PetscTruth     iascii,isbinary,isdraw,issocket;
746 
747   PetscFunctionBegin;
748 
749   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
750   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
751   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
752   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
753 
754   if (iascii || issocket || isdraw) {
755     ierr = MatView_MPIDense_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
756   } else if (isbinary) {
757     ierr = MatView_MPIDense_Binary(mat,viewer);CHKERRQ(ierr);
758   } else {
759     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPI dense matrix",((PetscObject)viewer)->type_name);
760   }
761   PetscFunctionReturn(0);
762 }
763 
764 #undef __FUNCT__
765 #define __FUNCT__ "MatGetInfo_MPIDense"
766 PetscErrorCode MatGetInfo_MPIDense(Mat A,MatInfoType flag,MatInfo *info)
767 {
768   Mat_MPIDense   *mat = (Mat_MPIDense*)A->data;
769   Mat            mdn = mat->A;
770   PetscErrorCode ierr;
771   PetscReal      isend[5],irecv[5];
772 
773   PetscFunctionBegin;
774   info->rows_global    = (double)A->rmap.N;
775   info->columns_global = (double)A->cmap.N;
776   info->rows_local     = (double)A->rmap.n;
777   info->columns_local  = (double)A->cmap.N;
778   info->block_size     = 1.0;
779   ierr = MatGetInfo(mdn,MAT_LOCAL,info);CHKERRQ(ierr);
780   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
781   isend[3] = info->memory;  isend[4] = info->mallocs;
782   if (flag == MAT_LOCAL) {
783     info->nz_used      = isend[0];
784     info->nz_allocated = isend[1];
785     info->nz_unneeded  = isend[2];
786     info->memory       = isend[3];
787     info->mallocs      = isend[4];
788   } else if (flag == MAT_GLOBAL_MAX) {
789     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)A)->comm);CHKERRQ(ierr);
790     info->nz_used      = irecv[0];
791     info->nz_allocated = irecv[1];
792     info->nz_unneeded  = irecv[2];
793     info->memory       = irecv[3];
794     info->mallocs      = irecv[4];
795   } else if (flag == MAT_GLOBAL_SUM) {
796     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
797     info->nz_used      = irecv[0];
798     info->nz_allocated = irecv[1];
799     info->nz_unneeded  = irecv[2];
800     info->memory       = irecv[3];
801     info->mallocs      = irecv[4];
802   }
803   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
804   info->fill_ratio_needed = 0;
805   info->factor_mallocs    = 0;
806   PetscFunctionReturn(0);
807 }
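
/*
   A usage sketch for the statistics gathered above; MAT_LOCAL reports this
   process only, while MAT_GLOBAL_SUM and MAT_GLOBAL_MAX reduce over the
   communicator of A:

     MatInfo info;

     ierr = MatGetInfo(A,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr);
     ierr = PetscPrintf(((PetscObject)A)->comm,"nonzeros used %g, memory %g\n",info.nz_used,info.memory);CHKERRQ(ierr);
*/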
808 
809 #undef __FUNCT__
810 #define __FUNCT__ "MatSetOption_MPIDense"
811 PetscErrorCode MatSetOption_MPIDense(Mat A,MatOption op,PetscTruth flg)
812 {
813   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
814   PetscErrorCode ierr;
815 
816   PetscFunctionBegin;
817   switch (op) {
818   case MAT_NEW_NONZERO_LOCATIONS:
819   case MAT_NEW_NONZERO_LOCATION_ERR:
820   case MAT_NEW_NONZERO_ALLOCATION_ERR:
821     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
822     break;
823   case MAT_ROW_ORIENTED:
824     a->roworiented = flg;
825     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
826     break;
827   case MAT_NEW_DIAGONALS:
828   case MAT_USE_HASH_TABLE:
829     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
830     break;
831   case MAT_IGNORE_OFF_PROC_ENTRIES:
832     a->donotstash = flg;
833     break;
834   case MAT_SYMMETRIC:
835   case MAT_STRUCTURALLY_SYMMETRIC:
836   case MAT_HERMITIAN:
837   case MAT_SYMMETRY_ETERNAL:
838   case MAT_IGNORE_LOWER_TRIANGULAR:
839     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
840     break;
841   default:
842     SETERRQ1(PETSC_ERR_SUP,"unknown option %s",MatOptions[op]);
843   }
844   PetscFunctionReturn(0);
845 }
846 
847 
848 #undef __FUNCT__
849 #define __FUNCT__ "MatDiagonalScale_MPIDense"
850 PetscErrorCode MatDiagonalScale_MPIDense(Mat A,Vec ll,Vec rr)
851 {
852   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
853   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
854   PetscScalar    *l,*r,x,*v;
855   PetscErrorCode ierr;
856   PetscInt       i,j,s2a,s3a,s2,s3,m=mdn->A->rmap.n,n=mdn->A->cmap.n;
857 
858   PetscFunctionBegin;
859   ierr = MatGetLocalSize(A,&s2,&s3);CHKERRQ(ierr);
860   if (ll) {
861     ierr = VecGetLocalSize(ll,&s2a);CHKERRQ(ierr);
862     if (s2a != s2) SETERRQ2(PETSC_ERR_ARG_SIZ,"Left scaling vector non-conforming local size, %d != %d.", s2a, s2);
863     ierr = VecGetArray(ll,&l);CHKERRQ(ierr);
864     for (i=0; i<m; i++) {
865       x = l[i];
866       v = mat->v + i;
867       for (j=0; j<n; j++) { (*v) *= x; v+= m;}
868     }
869     ierr = VecRestoreArray(ll,&l);CHKERRQ(ierr);
870     ierr = PetscLogFlops(n*m);CHKERRQ(ierr);
871   }
872   if (rr) {
873     ierr = VecGetLocalSize(rr,&s3a);CHKERRQ(ierr);
874     if (s3a != s3) SETERRQ2(PETSC_ERR_ARG_SIZ,"Right scaling vec non-conforming local size, %d != %d.", s3a, s3);
875     ierr = VecScatterBegin(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
876     ierr = VecScatterEnd(mdn->Mvctx,rr,mdn->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
877     ierr = VecGetArray(mdn->lvec,&r);CHKERRQ(ierr);
878     for (i=0; i<n; i++) {
879       x = r[i];
880       v = mat->v + i*m;
881       for (j=0; j<m; j++) { (*v++) *= x;}
882     }
883     ierr = VecRestoreArray(mdn->lvec,&r);CHKERRQ(ierr);
884     ierr = PetscLogFlops(n*m);CHKERRQ(ierr);
885   }
886   PetscFunctionReturn(0);
887 }
888 
889 #undef __FUNCT__
890 #define __FUNCT__ "MatNorm_MPIDense"
891 PetscErrorCode MatNorm_MPIDense(Mat A,NormType type,PetscReal *nrm)
892 {
893   Mat_MPIDense   *mdn = (Mat_MPIDense*)A->data;
894   Mat_SeqDense   *mat = (Mat_SeqDense*)mdn->A->data;
895   PetscErrorCode ierr;
896   PetscInt       i,j;
897   PetscReal      sum = 0.0;
898   PetscScalar    *v = mat->v;
899 
900   PetscFunctionBegin;
901   if (mdn->size == 1) {
902     ierr =  MatNorm(mdn->A,type,nrm);CHKERRQ(ierr);
903   } else {
904     if (type == NORM_FROBENIUS) {
905       for (i=0; i<mdn->A->cmap.n*mdn->A->rmap.n; i++) {
906 #if defined(PETSC_USE_COMPLEX)
907         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
908 #else
909         sum += (*v)*(*v); v++;
910 #endif
911       }
912       ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
913       *nrm = sqrt(*nrm);
914       ierr = PetscLogFlops(2*mdn->A->cmap.n*mdn->A->rmap.n);CHKERRQ(ierr);
915     } else if (type == NORM_1) {
916       PetscReal *tmp,*tmp2;
917       ierr = PetscMalloc(2*A->cmap.N*sizeof(PetscReal),&tmp);CHKERRQ(ierr);
918       tmp2 = tmp + A->cmap.N;
919       ierr = PetscMemzero(tmp,2*A->cmap.N*sizeof(PetscReal));CHKERRQ(ierr);
920       *nrm = 0.0;
921       v = mat->v;
922       for (j=0; j<mdn->A->cmap.n; j++) {
923         for (i=0; i<mdn->A->rmap.n; i++) {
924           tmp[j] += PetscAbsScalar(*v);  v++;
925         }
926       }
927       ierr = MPI_Allreduce(tmp,tmp2,A->cmap.N,MPIU_REAL,MPI_SUM,((PetscObject)A)->comm);CHKERRQ(ierr);
928       for (j=0; j<A->cmap.N; j++) {
929         if (tmp2[j] > *nrm) *nrm = tmp2[j];
930       }
931       ierr = PetscFree(tmp);CHKERRQ(ierr);
932       ierr = PetscLogFlops(A->cmap.n*A->rmap.n);CHKERRQ(ierr);
933     } else if (type == NORM_INFINITY) { /* max row norm */
934       PetscReal ntemp;
935       ierr = MatNorm(mdn->A,type,&ntemp);CHKERRQ(ierr);
936       ierr = MPI_Allreduce(&ntemp,nrm,1,MPIU_REAL,MPI_MAX,((PetscObject)A)->comm);CHKERRQ(ierr);
937     } else {
938       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
939     }
940   }
941   PetscFunctionReturn(0);
942 }
943 
944 #undef __FUNCT__
945 #define __FUNCT__ "MatTranspose_MPIDense"
946 PetscErrorCode MatTranspose_MPIDense(Mat A,MatReuse reuse,Mat *matout)
947 {
948   Mat_MPIDense   *a = (Mat_MPIDense*)A->data;
949   Mat_SeqDense   *Aloc = (Mat_SeqDense*)a->A->data;
950   Mat            B;
951   PetscInt       M = A->rmap.N,N = A->cmap.N,m,n,*rwork,rstart = A->rmap.rstart;
952   PetscErrorCode ierr;
953   PetscInt       j,i;
954   PetscScalar    *v;
955 
956   PetscFunctionBegin;
957   if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_SUP,"Supports square matrix only in-place");
958   if (reuse == MAT_INITIAL_MATRIX || A == *matout) {
959     ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
960     ierr = MatSetSizes(B,PETSC_DECIDE,PETSC_DECIDE,N,M);CHKERRQ(ierr);
961     ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
962     ierr = MatMPIDenseSetPreallocation(B,PETSC_NULL);CHKERRQ(ierr);
963   } else {
964     B = *matout;
965   }
966 
967   m = a->A->rmap.n; n = a->A->cmap.n; v = Aloc->v;
968   ierr = PetscMalloc(m*sizeof(PetscInt),&rwork);CHKERRQ(ierr);
969   for (i=0; i<m; i++) rwork[i] = rstart + i;
970   for (j=0; j<n; j++) {
971     ierr = MatSetValues(B,1,&j,m,rwork,v,INSERT_VALUES);CHKERRQ(ierr);
972     v   += m;
973   }
974   ierr = PetscFree(rwork);CHKERRQ(ierr);
975   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
976   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
977   if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
978     *matout = B;
979   } else {
980     ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
981   }
982   PetscFunctionReturn(0);
983 }
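
/*
   A usage sketch for the transpose above, assuming A is an assembled MPIDENSE
   matrix; MAT_INITIAL_MATRIX creates a new matrix, while in-place reuse
   (passing &A back) is only supported when A is square:

     Mat At;

     ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
     ierr = MatDestroy(At);CHKERRQ(ierr);
*/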
984 
985 #include "petscblaslapack.h"
986 #undef __FUNCT__
987 #define __FUNCT__ "MatScale_MPIDense"
988 PetscErrorCode MatScale_MPIDense(Mat inA,PetscScalar alpha)
989 {
990   Mat_MPIDense   *A = (Mat_MPIDense*)inA->data;
991   Mat_SeqDense   *a = (Mat_SeqDense*)A->A->data;
992   PetscScalar    oalpha = alpha;
993   PetscErrorCode ierr;
994   PetscBLASInt   one = 1,nz = PetscBLASIntCast(inA->rmap.n*inA->cmap.N);
995 
996   PetscFunctionBegin;
997   BLASscal_(&nz,&oalpha,a->v,&one);
998   ierr = PetscLogFlops(nz);CHKERRQ(ierr);
999   PetscFunctionReturn(0);
1000 }
1001 
1002 static PetscErrorCode MatDuplicate_MPIDense(Mat,MatDuplicateOption,Mat *);
1003 
1004 #undef __FUNCT__
1005 #define __FUNCT__ "MatSetUpPreallocation_MPIDense"
1006 PetscErrorCode MatSetUpPreallocation_MPIDense(Mat A)
1007 {
1008   PetscErrorCode ierr;
1009 
1010   PetscFunctionBegin;
1011   ierr =  MatMPIDenseSetPreallocation(A,0);CHKERRQ(ierr);
1012   PetscFunctionReturn(0);
1013 }
1014 
1015 #if defined(PETSC_HAVE_PLAPACK)
1016 
1017 #undef __FUNCT__
1018 #define __FUNCT__ "MatMPIDenseCopyToPlapack"
1019 PetscErrorCode MatMPIDenseCopyToPlapack(Mat A,Mat F)
1020 {
1021   Mat_Plapack    *lu = (Mat_Plapack*)(F)->spptr;
1022   PetscErrorCode ierr;
1023   PetscInt       M=A->cmap.N,m=A->rmap.n,rstart;
1024   PetscScalar    *array;
1025   PetscReal      one = 1.0;
1026 
1027   PetscFunctionBegin;
1028   /* Copy A into F->lu->A */
1029   ierr = PLA_Obj_set_to_zero(lu->A);CHKERRQ(ierr);
1030   ierr = PLA_API_begin();CHKERRQ(ierr);
1031   ierr = PLA_Obj_API_open(lu->A);CHKERRQ(ierr);
1032   ierr = MatGetOwnershipRange(A,&rstart,PETSC_NULL);CHKERRQ(ierr);
1033   ierr = MatGetArray(A,&array);CHKERRQ(ierr);
1034   ierr = PLA_API_axpy_matrix_to_global(m,M, &one,(void *)array,m,lu->A,rstart,0);CHKERRQ(ierr);
1035   ierr = MatRestoreArray(A,&array);CHKERRQ(ierr);
1036   ierr = PLA_Obj_API_close(lu->A);CHKERRQ(ierr);
1037   ierr = PLA_API_end();CHKERRQ(ierr);
1038   lu->rstart = rstart;
1039   PetscFunctionReturn(0);
1040 }
1041 
1042 #undef __FUNCT__
1043 #define __FUNCT__ "MatMPIDenseCopyFromPlapack"
1044 PetscErrorCode MatMPIDenseCopyFromPlapack(Mat F,Mat A)
1045 {
1046   Mat_Plapack    *lu = (Mat_Plapack*)(F)->spptr;
1047   PetscErrorCode ierr;
1048   PetscInt       M=A->cmap.N,m=A->rmap.n,rstart;
1049   PetscScalar    *array;
1050   PetscReal      one = 1.0;
1051 
1052   PetscFunctionBegin;
1053   /* Copy F into A->lu->A */
1054   ierr = MatZeroEntries(A);CHKERRQ(ierr);
1055   ierr = PLA_API_begin();CHKERRQ(ierr);
1056   ierr = PLA_Obj_API_open(lu->A);CHKERRQ(ierr);
1057   ierr = MatGetOwnershipRange(A,&rstart,PETSC_NULL);CHKERRQ(ierr);
1058   ierr = MatGetArray(A,&array);CHKERRQ(ierr);
1059   ierr = PLA_API_axpy_global_to_matrix(m,M, &one,lu->A,rstart,0,(void *)array,m);CHKERRQ(ierr);
1060   ierr = MatRestoreArray(A,&array);CHKERRQ(ierr);
1061   ierr = PLA_Obj_API_close(lu->A);CHKERRQ(ierr);
1062   ierr = PLA_API_end();CHKERRQ(ierr);
1063   lu->rstart = rstart;
1064   PetscFunctionReturn(0);
1065 }
1066 
1067 #undef __FUNCT__
1068 #define __FUNCT__ "MatMatMultNumeric_MPIDense_MPIDense"
1069 PetscErrorCode MatMatMultNumeric_MPIDense_MPIDense(Mat A,Mat B,Mat C)
1070 {
1071   PetscErrorCode ierr;
1072   Mat_Plapack    *luA = (Mat_Plapack*)A->spptr;
1073   Mat_Plapack    *luB = (Mat_Plapack*)B->spptr;
1074   Mat_Plapack    *luC = (Mat_Plapack*)C->spptr;
1075   PLA_Obj        alpha = NULL,beta = NULL;
1076 
1077   PetscFunctionBegin;
1078   ierr = MatMPIDenseCopyToPlapack(A,A);CHKERRQ(ierr);
1079   ierr = MatMPIDenseCopyToPlapack(B,B);CHKERRQ(ierr);
1080 
1081   /*
1082   ierr = PLA_Global_show("A = ",luA->A,"%g ","");CHKERRQ(ierr);
1083   ierr = PLA_Global_show("B = ",luB->A,"%g ","");CHKERRQ(ierr);
1084   */
1085 
1086   /* do the multiply in PLA  */
1087   ierr = PLA_Create_constants_conf_to(luA->A,NULL,NULL,&alpha);CHKERRQ(ierr);
1088   ierr = PLA_Create_constants_conf_to(luC->A,NULL,&beta,NULL);CHKERRQ(ierr);
1089   CHKMEMQ;
1090 
1091   ierr = PLA_Gemm(PLA_NO_TRANSPOSE,PLA_NO_TRANSPOSE,alpha,luA->A,luB->A,beta,luC->A); /* CHKERRQ(ierr); */
1092   CHKMEMQ;
1093   ierr = PLA_Obj_free(&alpha);CHKERRQ(ierr);
1094   ierr = PLA_Obj_free(&beta);CHKERRQ(ierr);
1095 
1096   /*
1097   ierr = PLA_Global_show("C = ",luC->A,"%g ","");CHKERRQ(ierr);
1098   */
1099   ierr = MatMPIDenseCopyFromPlapack(C,C);CHKERRQ(ierr);
1100   PetscFunctionReturn(0);
1101 }
1102 
1103 #undef __FUNCT__
1104 #define __FUNCT__ "MatMatMultSymbolic_MPIDense_MPIDense"
1105 PetscErrorCode MatMatMultSymbolic_MPIDense_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
1106 {
1107   PetscErrorCode ierr;
1108   PetscInt       m=A->rmap.n,n=B->cmap.n;
1109   Mat            Cmat;
1110 
1111   PetscFunctionBegin;
1112   if (A->cmap.n != B->rmap.n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap.n %d != B->rmap.n %d\n",A->cmap.n,B->rmap.n);
1113   SETERRQ(PETSC_ERR_LIB,"Due to apparent bugs in PLAPACK, this is not currently supported");
1114   ierr = MatCreate(((PetscObject)B)->comm,&Cmat);CHKERRQ(ierr);
1115   ierr = MatSetSizes(Cmat,m,n,A->rmap.N,B->cmap.N);CHKERRQ(ierr);
1116   ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
1117   ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1118   ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1119   /* ierr = MatMPIDenseCreatePlapack(A);CHKERRQ(ierr); */
1120   /* ierr = MatMPIDenseCreatePlapack(B);CHKERRQ(ierr); */
1121   /* ierr = MatMPIDenseCreatePlapack(Cmat);CHKERRQ(ierr); */
1122   *C = Cmat;
1123   PetscFunctionReturn(0);
1124 }
1125 
1126 #undef __FUNCT__
1127 #define __FUNCT__ "MatMatMult_MPIDense_MPIDense"
1128 PetscErrorCode MatMatMult_MPIDense_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
1129 {
1130   PetscErrorCode ierr;
1131 
1132   PetscFunctionBegin;
1133   if (scall == MAT_INITIAL_MATRIX){
1134     ierr = MatMatMultSymbolic_MPIDense_MPIDense(A,B,fill,C);CHKERRQ(ierr);
1135   }
1136   ierr = MatMatMultNumeric_MPIDense_MPIDense(A,B,*C);CHKERRQ(ierr);
1137   PetscFunctionReturn(0);
1138 }
1139 
1140 #undef __FUNCT__
1141 #define __FUNCT__ "MatFactorSymbolic_Plapack_Private"
1142 PetscErrorCode MatFactorSymbolic_Plapack_Private(Mat A,MatFactorInfo *info,Mat *F)
1143 {
1144   Mat            B = *F;
1145   Mat_Plapack    *lu;
1146   PetscErrorCode ierr;
1147   PetscInt       M=A->rmap.N,N=A->cmap.N;
1148   MPI_Comm       comm=((PetscObject)A)->comm,comm_2d;
1149   PetscMPIInt    size;
1150   PetscInt       ierror;
1151 
1152   PetscFunctionBegin;
1153   lu = (Mat_Plapack*)(B->spptr);
1154 
1155   /* Set default Plapack parameters */
1156   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1157   lu->nprows = 1; lu->npcols = size;
1158   ierror = 0;
1159   lu->nb     = M/size;
1160   if (M - lu->nb*size) lu->nb++; /* without cyclic distribution */
1161 
1162   /* Set runtime options */
1163   ierr = PetscOptionsBegin(((PetscObject)A)->comm,((PetscObject)A)->prefix,"PLAPACK Options","Mat");CHKERRQ(ierr);
1164   ierr = PetscOptionsInt("-mat_plapack_nprows","row dimension of 2D processor mesh","None",lu->nprows,&lu->nprows,PETSC_NULL);CHKERRQ(ierr);
1165   ierr = PetscOptionsInt("-mat_plapack_npcols","column dimension of 2D processor mesh","None",lu->npcols,&lu->npcols,PETSC_NULL);CHKERRQ(ierr);
1166 
1167   ierr = PetscOptionsInt("-mat_plapack_nb","block size of template vector","None",lu->nb,&lu->nb,PETSC_NULL);CHKERRQ(ierr);
1168   ierr = PetscOptionsInt("-mat_plapack_ckerror","error checking flag","None",ierror,&ierror,PETSC_NULL);CHKERRQ(ierr);
1169   if (ierror){
1170     PLA_Set_error_checking(ierror,PETSC_TRUE,PETSC_TRUE,PETSC_FALSE );
1171   } else {
1172     PLA_Set_error_checking(ierror,PETSC_FALSE,PETSC_FALSE,PETSC_FALSE );
1173   }
1174   lu->ierror = ierror;
1175 
1176   lu->nb_alg = 0;
1177   ierr = PetscOptionsInt("-mat_plapack_nb_alg","algorithmic block size","None",lu->nb_alg,&lu->nb_alg,PETSC_NULL);CHKERRQ(ierr);
1178   if (lu->nb_alg){
1179     pla_Environ_set_nb_alg (PLA_OP_ALL_ALG,lu->nb_alg);
1180   }
1181   PetscOptionsEnd();
1182 
1183 
1184   /* Create a 2D communicator */
1185   PLA_Comm_1D_to_2D(comm,lu->nprows,lu->npcols,&comm_2d);
1186   lu->comm_2d = comm_2d;
1187 
1188   /* Initialize PLAPACK */
1189   PLA_Init(comm_2d);
1190 
1191   /* Create object distribution template */
1192   lu->templ = NULL;
1193   PLA_Temp_create(lu->nb, 0, &lu->templ);
1194 
1195   /* Use suggested nb_alg if it is not provided by user */
1196   if (lu->nb_alg == 0){
1197     PLA_Environ_nb_alg(PLA_OP_PAN_PAN,lu->templ,&lu->nb_alg);
1198     pla_Environ_set_nb_alg(PLA_OP_ALL_ALG,lu->nb_alg);
1199   }
1200 
1201   /* Set the datatype */
1202 #if defined(PETSC_USE_COMPLEX)
1203   lu->datatype = MPI_DOUBLE_COMPLEX;
1204 #else
1205   lu->datatype = MPI_DOUBLE;
1206 #endif
1207 
1208   lu->pla_solved     = PETSC_FALSE; /* MatSolve_Plapack() has not been called yet */
1209   lu->mstruct        = DIFFERENT_NONZERO_PATTERN;
1210   lu->CleanUpPlapack = PETSC_TRUE;
1211   *F                 = B;
1212   PetscFunctionReturn(0);
1213 }
1214 
1215 /* Note the Petsc perm permutation is ignored */
1216 #undef __FUNCT__
1217 #define __FUNCT__ "MatCholeskyFactorSymbolic_Plapack"
1218 PetscErrorCode MatCholeskyFactorSymbolic_Plapack(Mat A,IS perm,MatFactorInfo *info,Mat *F)
1219 {
1220   PetscErrorCode ierr;
1221   PetscTruth     issymmetric,set;
1222 
1223   PetscFunctionBegin;
1224   ierr = MatIsSymmetricKnown(A,&set,&issymmetric); CHKERRQ(ierr);
1225   if (!set || !issymmetric) SETERRQ(PETSC_ERR_USER,"Matrix must be set as MAT_SYMMETRIC for CholeskyFactor()");
1226   ierr = MatFactorSymbolic_Plapack_Private(A,info,F);CHKERRQ(ierr);
1227   (*F)->factor = MAT_FACTOR_CHOLESKY;
1228   PetscFunctionReturn(0);
1229 }
1230 
1231 /* Note the Petsc r and c permutations are ignored */
1232 #undef __FUNCT__
1233 #define __FUNCT__ "MatLUFactorSymbolic_Plapack"
1234 PetscErrorCode MatLUFactorSymbolic_Plapack(Mat A,IS r,IS c,MatFactorInfo *info,Mat *F)
1235 {
1236   PetscErrorCode ierr;
1237   PetscInt       M = A->rmap.N;
1238   Mat_Plapack    *lu;
1239 
1240   PetscFunctionBegin;
1241   ierr = MatFactorSymbolic_Plapack_Private(A,info,F);CHKERRQ(ierr);
1242   lu = (Mat_Plapack*)(*F)->spptr;
1243   ierr = PLA_Mvector_create(MPI_INT,M,1,lu->templ,PLA_ALIGN_FIRST,&lu->pivots);CHKERRQ(ierr);
1244   (*F)->factor = MAT_FACTOR_LU;
1245   PetscFunctionReturn(0);
1246 }
1247 
1248 #undef __FUNCT__
1249 #define __FUNCT__ "MatGetFactor_mpidense_plapack"
1250 PetscErrorCode MatGetFactor_mpidense_plapack(Mat A,MatFactorType ftype,Mat *F)
1251 {
1252   PetscErrorCode ierr;
       Mat_Plapack    *lu;
       MPI_Comm       comm = ((PetscObject)A)->comm;
       PetscMPIInt    size;
       PetscInt       M = A->rmap.N;
1253 
1254   PetscFunctionBegin;
1255   /* Create the factorization matrix */
1256   ierr = MatCreate(((PetscObject)A)->comm,F);CHKERRQ(ierr);
1257   ierr = MatSetSizes(*F,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);CHKERRQ(ierr);
1258   ierr = MatSetType(*F,((PetscObject)A)->type_name);CHKERRQ(ierr);
1259   ierr = PetscNewLog(*F,Mat_Plapack,&lu);CHKERRQ(ierr);
1260   (*F)->spptr = (void*)lu;
1261 
1262   lu = (Mat_Plapack*)((*F)->spptr);
1263 
1264   /* Set default Plapack parameters */
1265   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1266   lu->nb     = M/size;
1267   if (M - lu->nb*size) lu->nb++; /* without cyclic distribution */
1268 
1269   /* Set runtime options */
1270   ierr = PetscOptionsBegin(((PetscObject)A)->comm,((PetscObject)A)->prefix,"PLAPACK Options","Mat");CHKERRQ(ierr);
1271     ierr = PetscOptionsInt("-mat_plapack_nb","block size of template vector","None",lu->nb,&lu->nb,PETSC_NULL);CHKERRQ(ierr);
1272   PetscOptionsEnd();
1273 
1274   /* Create object distribution template */
1275   lu->templ = NULL;
1276   ierr = PLA_Temp_create(lu->nb, 0, &lu->templ);CHKERRQ(ierr);
1277 
1278   /* Set the datatype */
1279 #if defined(PETSC_USE_COMPLEX)
1280   lu->datatype = MPI_DOUBLE_COMPLEX;
1281 #else
1282   lu->datatype = MPI_DOUBLE;
1283 #endif
1284 
1285   ierr = PLA_Matrix_create(lu->datatype,M,A->cmap.N,lu->templ,PLA_ALIGN_FIRST,PLA_ALIGN_FIRST,&lu->A);CHKERRQ(ierr);
1286 
1287 
1288   lu->pla_solved     = PETSC_FALSE; /* MatSolve_Plapack() has not been called yet */
1289 
1290   if (ftype == MAT_FACTOR_LU) {
1291     (*F)->ops->lufactorsymbolic = MatLUFactorSymbolic_MPIDense;
1292     (*F)->ops->lufactornumeric  = MatLUFactorNumeric_MPIDense;
1293     (*F)->ops->solve            = MatSolve_MPIDense;
1294   } else if (ftype == MAT_FACTOR_CHOLESKY) {
1295     (*F)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPIDense;
1296     (*F)->ops->choleskyfactornumeric  = MatCholeskyFactorNumeric_MPIDense;
1297     (*F)->ops->solve                  = MatSolve_MPIDense;
1298   } else SETERRQ(PETSC_ERR_SUP,"No incomplete factorizations for dense matrices");
1299 
1300   PetscFunctionReturn(0);
1301 }
1302 #endif
1303 
1304 /* -------------------------------------------------------------------*/
1305 static struct _MatOps MatOps_Values = {MatSetValues_MPIDense,
1306        MatGetRow_MPIDense,
1307        MatRestoreRow_MPIDense,
1308        MatMult_MPIDense,
1309 /* 4*/ MatMultAdd_MPIDense,
1310        MatMultTranspose_MPIDense,
1311        MatMultTransposeAdd_MPIDense,
1312 #if defined(PETSC_HAVE_PLAPACK)
1313        MatSolve_MPIDense,
1314 #else
1315        0,
1316 #endif
1317        0,
1318        0,
1319 /*10*/ 0,
1320        0,
1321        0,
1322        0,
1323        MatTranspose_MPIDense,
1324 /*15*/ MatGetInfo_MPIDense,
1325        MatEqual_MPIDense,
1326        MatGetDiagonal_MPIDense,
1327        MatDiagonalScale_MPIDense,
1328        MatNorm_MPIDense,
1329 /*20*/ MatAssemblyBegin_MPIDense,
1330        MatAssemblyEnd_MPIDense,
1331        0,
1332        MatSetOption_MPIDense,
1333        MatZeroEntries_MPIDense,
1334 /*25*/ MatZeroRows_MPIDense,
1335 #if defined(PETSC_HAVE_PLAPACK)
1336        MatLUFactorSymbolic_MPIDense,
1337        MatLUFactorNumeric_MPIDense,
1338        MatCholeskyFactorSymbolic_MPIDense,
1339        MatCholeskyFactorNumeric_MPIDense,
1340 #else
1341        0,
1342        0,
1343        0,
1344        0,
1345 #endif
1346 /*30*/ MatSetUpPreallocation_MPIDense,
1347        0,
1348        0,
1349        MatGetArray_MPIDense,
1350        MatRestoreArray_MPIDense,
1351 /*35*/ MatDuplicate_MPIDense,
1352        0,
1353        0,
1354        0,
1355        0,
1356 /*40*/ 0,
1357        MatGetSubMatrices_MPIDense,
1358        0,
1359        MatGetValues_MPIDense,
1360        0,
1361 /*45*/ 0,
1362        MatScale_MPIDense,
1363        0,
1364        0,
1365        0,
1366 /*50*/ 0,
1367        0,
1368        0,
1369        0,
1370        0,
1371 /*55*/ 0,
1372        0,
1373        0,
1374        0,
1375        0,
1376 /*60*/ MatGetSubMatrix_MPIDense,
1377        MatDestroy_MPIDense,
1378        MatView_MPIDense,
1379        0,
1380        0,
1381 /*65*/ 0,
1382        0,
1383        0,
1384        0,
1385        0,
1386 /*70*/ 0,
1387        0,
1388        0,
1389        0,
1390        0,
1391 /*75*/ 0,
1392        0,
1393        0,
1394        0,
1395        0,
1396 /*80*/ 0,
1397        0,
1398        0,
1399        0,
1400 /*84*/ MatLoad_MPIDense,
1401        0,
1402        0,
1403        0,
1404        0,
1405        0,
1406 /*90*/
1407 #if defined(PETSC_HAVE_PLAPACK)
1408        MatMatMult_MPIDense_MPIDense,
1409        MatMatMultSymbolic_MPIDense_MPIDense,
1410        MatMatMultNumeric_MPIDense_MPIDense,
1411 #else
1412        0,
1413        0,
1414        0,
1415 #endif
1416        0,
1417 /*95*/ 0,
1418        0,
1419        0,
1420        0};
1421 
1422 EXTERN_C_BEGIN
1423 #undef __FUNCT__
1424 #define __FUNCT__ "MatMPIDenseSetPreallocation_MPIDense"
1425 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIDenseSetPreallocation_MPIDense(Mat mat,PetscScalar *data)
1426 {
1427   Mat_MPIDense   *a;
1428   PetscErrorCode ierr;
1429 
1430   PetscFunctionBegin;
1431   mat->preallocated = PETSC_TRUE;
1432   /* Note:  For now, when data is provided, this assumes the user has correctly
1433      allocated the local dense storage space.  We should add error checking. */
1434 
1435   a    = (Mat_MPIDense*)mat->data;
1436   ierr = MatCreate(PETSC_COMM_SELF,&a->A);CHKERRQ(ierr);
1437   ierr = MatSetSizes(a->A,mat->rmap.n,mat->cmap.N,mat->rmap.n,mat->cmap.N);CHKERRQ(ierr);
1438   ierr = MatSetType(a->A,MATSEQDENSE);CHKERRQ(ierr);
1439   ierr = MatSeqDenseSetPreallocation(a->A,data);CHKERRQ(ierr);
1440   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
1441   PetscFunctionReturn(0);
1442 }
1443 EXTERN_C_END
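
/*
   A minimal creation sketch using the preallocation routine registered above;
   passing PETSC_NULL lets PETSc allocate the local storage, while a caller-supplied
   array of size (local rows) x (global columns), stored column major, may be
   passed instead. M and N below stand for global dimensions chosen by the caller:

     Mat      A;
     PetscInt M = 100,N = 50;

     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
     ierr = MatMPIDenseSetPreallocation(A,PETSC_NULL);CHKERRQ(ierr);
*/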
1444 
1445 /*MC
1446    MATMPIDENSE - MATMPIDENSE = "mpidense" - A matrix type to be used for distributed dense matrices.
1447 
1448    Options Database Keys:
1449 . -mat_type mpidense - sets the matrix type to "mpidense" during a call to MatSetFromOptions()
1450 
1451   Level: beginner
1452 
1453   MATMPIDENSE matrices may use direct solvers (LU and Cholesky) for parallel
1454   dense matrices via the external package PLAPACK, if PLAPACK is installed
1455   (run config/configure.py with the option --download-plapack)
1456 
1457 
1458   Options Database Keys:
1459 + -mat_plapack_nprows <n> - number of rows in processor partition
1460 . -mat_plapack_npcols <n> - number of columns in processor partition
1461 . -mat_plapack_nb <n> - block size of template vector
1462 . -mat_plapack_nb_alg <n> - algorithmic block size
1463 - -mat_plapack_ckerror <n> - error checking flag
1464 
1465 .seealso: MatCreateMPIDense(), MATDENSE, MATSEQDENSE
1466 M*/
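
/*
   A minimal sketch of selecting this type through the options database, to go
   with the keys documented above; the executable is then run with
   -mat_type mpidense plus any of the PLAPACK options. M and N stand for
   global dimensions chosen by the caller:

     Mat      A;
     PetscInt M = 100,N = 100;

     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
     ierr = MatSetFromOptions(A);CHKERRQ(ierr);
*/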
1467 
1468 EXTERN_C_BEGIN
1469 #undef __FUNCT__
1470 #define __FUNCT__ "MatCreate_MPIDense"
1471 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIDense(Mat mat)
1472 {
1473   Mat_MPIDense   *a;
1474   PetscErrorCode ierr;
1475 
1476   PetscFunctionBegin;
1477   ierr              = PetscNewLog(mat,Mat_MPIDense,&a);CHKERRQ(ierr);
1478   mat->data         = (void*)a;
1479   ierr              = PetscMemcpy(mat->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1480   mat->mapping      = 0;
1481 
1482   mat->insertmode = NOT_SET_VALUES;
1483   ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&a->rank);CHKERRQ(ierr);
1484   ierr = MPI_Comm_size(((PetscObject)mat)->comm,&a->size);CHKERRQ(ierr);
1485 
1486   mat->rmap.bs = mat->cmap.bs = 1;
1487   ierr = PetscMapSetUp(&mat->rmap);CHKERRQ(ierr);
1488   ierr = PetscMapSetUp(&mat->cmap);CHKERRQ(ierr);
1489   a->nvec = mat->cmap.n;
1490 
1491   /* build cache for off array entries formed */
1492   a->donotstash = PETSC_FALSE;
1493   ierr = MatStashCreate_Private(((PetscObject)mat)->comm,1,&mat->stash);CHKERRQ(ierr);
1494 
1495   /* stuff used for matrix vector multiply */
1496   a->lvec        = 0;
1497   a->Mvctx       = 0;
1498   a->roworiented = PETSC_TRUE;
1499 
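  /* register the type-specific routines by name so that generic interfaces such as
     MatGetDiagonalBlock(), MatMPIDenseSetPreallocation() and MatMatMult() with MPIAIJ
     matrices can locate them at runtime through PetscObjectQueryFunction() */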
1500   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetDiagonalBlock_C",
1501                                      "MatGetDiagonalBlock_MPIDense",
1502                                      MatGetDiagonalBlock_MPIDense);CHKERRQ(ierr);
1503   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMPIDenseSetPreallocation_C",
1504                                      "MatMPIDenseSetPreallocation_MPIDense",
1505                                      MatMPIDenseSetPreallocation_MPIDense);CHKERRQ(ierr);
1506   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMult_mpiaij_mpidense_C",
1507                                      "MatMatMult_MPIAIJ_MPIDense",
1508                                       MatMatMult_MPIAIJ_MPIDense);CHKERRQ(ierr);
1509   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultSymbolic_mpiaij_mpidense_C",
1510                                      "MatMatMultSymbolic_MPIAIJ_MPIDense",
1511                                       MatMatMultSymbolic_MPIAIJ_MPIDense);CHKERRQ(ierr);
1512   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatMatMultNumeric_mpiaij_mpidense_C",
1513                                      "MatMatMultNumeric_MPIAIJ_MPIDense",
1514                                       MatMatMultNumeric_MPIAIJ_MPIDense);CHKERRQ(ierr);
1515 #if defined(PETSC_HAVE_PLAPACK)
1516   ierr = PetscObjectComposeFunctionDynamic((PetscObject)mat,"MatGetFactor_mpidense_plapack_C",
1517                                      "MatGetFactor_mpidense_plapack",
1518                                       MatGetFactor_mpidense_plapack);CHKERRQ(ierr);
1519 #endif
1520   ierr = PetscObjectChangeTypeName((PetscObject)mat,MATMPIDENSE);CHKERRQ(ierr);
1521 
1522   PetscFunctionReturn(0);
1523 }
1524 EXTERN_C_END
1525 
1526 /*MC
1527    MATDENSE - MATDENSE = "dense" - A matrix type to be used for dense matrices.
1528 
1529    This matrix type is identical to MATSEQDENSE when constructed with a single process communicator,
1530    and MATMPIDENSE otherwise.
1531 
1532    Options Database Keys:
1533 . -mat_type dense - sets the matrix type to "dense" during a call to MatSetFromOptions()
1534 
1535   Level: beginner
1536 
1537 
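   Example usage (a minimal sketch; comm and the global sizes M, N are supplied by the caller):
.vb
      Mat A;
      ierr = MatCreate(comm,&A);CHKERRQ(ierr);
      ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
      ierr = MatSetType(A,MATDENSE);CHKERRQ(ierr);  /* becomes MATSEQDENSE on one process, MATMPIDENSE otherwise */
.ve
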
1538 .seealso: MatCreateMPIDense(), MATSEQDENSE, MATMPIDENSE
1539 M*/
1540 
1541 EXTERN_C_BEGIN
1542 #undef __FUNCT__
1543 #define __FUNCT__ "MatCreate_Dense"
1544 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_Dense(Mat A)
1545 {
1546   PetscErrorCode ierr;
1547   PetscMPIInt    size;
1548 
1549   PetscFunctionBegin;
1550   ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
1551   if (size == 1) {
1552     ierr = MatSetType(A,MATSEQDENSE);CHKERRQ(ierr);
1553   } else {
1554     ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
1555   }
1556   PetscFunctionReturn(0);
1557 }
1558 EXTERN_C_END
1559 
1560 #undef __FUNCT__
1561 #define __FUNCT__ "MatMPIDenseSetPreallocation"
1562 /*@C
1563    MatMPIDenseSetPreallocation - Sets the array used to store the matrix entries
1564 
1565    Not collective
1566 
1567    Input Parameters:
1568 +  A - the matrix
1569 -  data - optional location of matrix data.  Set data=PETSC_NULL for PETSc
1570    to control all matrix memory allocation.
1571 
1572    Notes:
1573    The dense format is fully compatible with standard Fortran 77
1574    storage by columns.
1575 
1576    The data input variable is intended primarily for Fortran programmers
1577    who wish to allocate their own matrix memory space.  Most users should
1578    set data=PETSC_NULL.
1579 
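   Example usage (a minimal sketch; the array mydata is hypothetical and must hold
   (local rows) x (global columns) entries stored by columns):
.vb
      Mat         A;
      PetscScalar *mydata;
      ierr = MatCreate(comm,&A);CHKERRQ(ierr);
      ierr = MatSetSizes(A,m,PETSC_DECIDE,M,N);CHKERRQ(ierr);
      ierr = MatSetType(A,MATMPIDENSE);CHKERRQ(ierr);
      ierr = PetscMalloc(m*N*sizeof(PetscScalar),&mydata);CHKERRQ(ierr);
      ierr = MatMPIDenseSetPreallocation(A,mydata);CHKERRQ(ierr);
.ve
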
1580    Level: intermediate
1581 
1582 .keywords: matrix,dense, parallel
1583 
1584 .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1585 @*/
1586 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIDenseSetPreallocation(Mat mat,PetscScalar *data)
1587 {
1588   PetscErrorCode ierr,(*f)(Mat,PetscScalar *);
1589 
1590   PetscFunctionBegin;
1591   ierr = PetscObjectQueryFunction((PetscObject)mat,"MatMPIDenseSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
1592   if (f) {
1593     ierr = (*f)(mat,data);CHKERRQ(ierr);
1594   }
1595   PetscFunctionReturn(0);
1596 }
1597 
1598 #undef __FUNCT__
1599 #define __FUNCT__ "MatCreateMPIDense"
1600 /*@C
1601    MatCreateMPIDense - Creates a parallel matrix in dense (full) storage format.
1602 
1603    Collective on MPI_Comm
1604 
1605    Input Parameters:
1606 +  comm - MPI communicator
1607 .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1608 .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1609 .  M - number of global rows (or PETSC_DECIDE to have it calculated if m is given)
1610 .  N - number of global columns (or PETSC_DECIDE to have it calculated if n is given)
1611 -  data - optional location of matrix data.  Set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users) for PETSc
1612    to control all matrix memory allocation.
1613 
1614    Output Parameter:
1615 .  A - the matrix
1616 
1617    Notes:
1618    The dense format is fully compatible with standard Fortran 77
1619    storage by columns.
1620 
1621    The data input variable is intended primarily for Fortran programmers
1622    who wish to allocate their own matrix memory space.  Most users should
1623    set data=PETSC_NULL (PETSC_NULL_SCALAR for Fortran users).
1624 
1625    The user MUST specify either the local or global matrix dimensions
1626    (possibly both).
1627 
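   Example usage (a minimal sketch; comm and the global sizes M, N are supplied by the caller):
.vb
      Mat A;
      ierr = MatCreateMPIDense(comm,PETSC_DECIDE,PETSC_DECIDE,M,N,PETSC_NULL,&A);CHKERRQ(ierr);
      /* ... set entries with MatSetValues(), then MatAssemblyBegin()/MatAssemblyEnd() ... */
      ierr = MatDestroy(A);CHKERRQ(ierr);
.ve
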
1628    Level: intermediate
1629 
1630 .keywords: matrix,dense, parallel
1631 
1632 .seealso: MatCreate(), MatCreateSeqDense(), MatSetValues()
1633 @*/
1634 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIDense(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscScalar *data,Mat *A)
1635 {
1636   PetscErrorCode ierr;
1637   PetscMPIInt    size;
1638 
1639   PetscFunctionBegin;
1640   ierr = MatCreate(comm,A);CHKERRQ(ierr);
1641   ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
1642   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1643   if (size > 1) {
1644     ierr = MatSetType(*A,MATMPIDENSE);CHKERRQ(ierr);
1645     ierr = MatMPIDenseSetPreallocation(*A,data);CHKERRQ(ierr);
1646   } else {
1647     ierr = MatSetType(*A,MATSEQDENSE);CHKERRQ(ierr);
1648     ierr = MatSeqDenseSetPreallocation(*A,data);CHKERRQ(ierr);
1649   }
1650   PetscFunctionReturn(0);
1651 }
1652 
1653 #undef __FUNCT__
1654 #define __FUNCT__ "MatDuplicate_MPIDense"
1655 static PetscErrorCode MatDuplicate_MPIDense(Mat A,MatDuplicateOption cpvalues,Mat *newmat)
1656 {
1657   Mat            mat;
1658   Mat_MPIDense   *a,*oldmat = (Mat_MPIDense*)A->data;
1659   PetscErrorCode ierr;
1660 
1661   PetscFunctionBegin;
1662   *newmat       = 0;
1663   ierr = MatCreate(((PetscObject)A)->comm,&mat);CHKERRQ(ierr);
1664   ierr = MatSetSizes(mat,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);CHKERRQ(ierr);
1665   ierr = MatSetType(mat,((PetscObject)A)->type_name);CHKERRQ(ierr);
1666   a                 = (Mat_MPIDense*)mat->data;
1667   ierr              = PetscMemcpy(mat->ops,A->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
1668   mat->factor       = A->factor;
1669   mat->assembled    = PETSC_TRUE;
1670   mat->preallocated = PETSC_TRUE;
1671 
1672   mat->rmap.rstart     = A->rmap.rstart;
1673   mat->rmap.rend       = A->rmap.rend;
1674   a->size              = oldmat->size;
1675   a->rank              = oldmat->rank;
1676   mat->insertmode      = NOT_SET_VALUES;
1677   a->nvec              = oldmat->nvec;
1678   a->donotstash        = oldmat->donotstash;
1679 
1680   ierr = PetscMemcpy(mat->rmap.range,A->rmap.range,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
1681   ierr = PetscMemcpy(mat->cmap.range,A->cmap.range,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
1682   ierr = MatStashCreate_Private(((PetscObject)A)->comm,1,&mat->stash);CHKERRQ(ierr);
1683 
1684   ierr = MatSetUpMultiply_MPIDense(mat);CHKERRQ(ierr);
1685   ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1686   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
1687 
1688 #if defined(PETSC_HAVE_PLAPACK)
1689   ierr = PetscMemcpy(mat->spptr,A->spptr,sizeof(Mat_Plapack));CHKERRQ(ierr);
1690 #endif
1691   *newmat = mat;
1692   PetscFunctionReturn(0);
1693 }
1694 
1695 #include "petscsys.h"
1696 
1697 #undef __FUNCT__
1698 #define __FUNCT__ "MatLoad_MPIDense_DenseInFile"
1699 PetscErrorCode MatLoad_MPIDense_DenseInFile(MPI_Comm comm,PetscInt fd,PetscInt M,PetscInt N, const MatType type,Mat *newmat)
1700 {
1701   PetscErrorCode ierr;
1702   PetscMPIInt    rank,size;
1703   PetscInt       *rowners,i,m,nz,j;
1704   PetscScalar    *array,*vals,*vals_ptr;
1705   MPI_Status     status;
1706 
1707   PetscFunctionBegin;
1708   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1709   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1710 
1711   /* determine ownership of all rows */
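  /* each process gets M/size rows, with the first M%size ranks taking one extra;
     the prefix sum below converts the gathered counts in rowners[] into each process's first row */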
1712   m          = M/size + ((M % size) > rank);
1713   ierr       = PetscMalloc((size+2)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
1714   ierr       = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
1715   rowners[0] = 0;
1716   for (i=2; i<=size; i++) {
1717     rowners[i] += rowners[i-1];
1718   }
1719 
1720   ierr = MatCreate(comm,newmat);CHKERRQ(ierr);
1721   ierr = MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);CHKERRQ(ierr);
1722   ierr = MatSetType(*newmat,type);CHKERRQ(ierr);
1723   ierr = MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);CHKERRQ(ierr);
1724   ierr = MatGetArray(*newmat,&array);CHKERRQ(ierr);
1725 
1726   if (!rank) {
1727     ierr = PetscMalloc(m*N*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1728 
1729     /* read in my part of the matrix numerical values  */
1730     ierr = PetscBinaryRead(fd,vals,m*N,PETSC_SCALAR);CHKERRQ(ierr);
1731 
1732     /* insert into the matrix row by row (the file is stored by rows but the dense array by columns, so we cannot read directly into the array) */
1733     vals_ptr = vals;
1734     for (i=0; i<m; i++) {
1735       for (j=0; j<N; j++) {
1736         array[i + j*m] = *vals_ptr++;
1737       }
1738     }
1739 
1740     /* read in the other processes' parts and ship them out */
1741     for (i=1; i<size; i++) {
1742       nz   = (rowners[i+1] - rowners[i])*N;
1743       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1744       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)(*newmat))->tag,comm);CHKERRQ(ierr);
1745     }
1746   } else {
1747     /* receive numeric values */
1748     ierr = PetscMalloc(m*N*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1749 
1750     /* receive message of values */
1751     ierr = MPI_Recv(vals,m*N,MPIU_SCALAR,0,((PetscObject)(*newmat))->tag,comm,&status);CHKERRQ(ierr);
1752 
1753     /* insert into the matrix row by row (the file is stored by rows but the dense array by columns, so we cannot read directly into the array) */
1754     vals_ptr = vals;
1755     for (i=0; i<m; i++) {
1756       for (j=0; j<N; j++) {
1757         array[i + j*m] = *vals_ptr++;
1758       }
1759     }
1760   }
1761   ierr = PetscFree(rowners);CHKERRQ(ierr);
1762   ierr = PetscFree(vals);CHKERRQ(ierr);
  ierr = MatRestoreArray(*newmat,&array);CHKERRQ(ierr);
1763   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1764   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1765   PetscFunctionReturn(0);
1766 }
1767 
1768 #undef __FUNCT__
1769 #define __FUNCT__ "MatLoad_MPIDense"
1770 PetscErrorCode MatLoad_MPIDense(PetscViewer viewer,const MatType type,Mat *newmat)
1771 {
1772   Mat            A;
1773   PetscScalar    *vals,*svals;
1774   MPI_Comm       comm = ((PetscObject)viewer)->comm;
1775   MPI_Status     status;
1776   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*rowners,*sndcounts,m,maxnz;
1777   PetscInt       header[4],*rowlengths = 0,M,N,*cols;
1778   PetscInt       *ourlens,*procsnz = 0,*offlens,jj,*mycols,*smycols;
1779   PetscInt       i,nz,j,rstart,rend;
1780   int            fd;
1781   PetscErrorCode ierr;
1782 
1783   PetscFunctionBegin;
1784   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1785   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1786   if (!rank) {
1787     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1788     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1789     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1790   }
1791 
1792   ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
1793   M = header[1]; N = header[2]; nz = header[3];
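  /* the binary header is [cookie, M, N, nz]; nz == MATRIX_BINARY_FORMAT_DENSE flags values stored as a dense matrix */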
1794 
1795   /*
1796        Handle case where matrix is stored on disk as a dense matrix
1797   */
1798   if (nz == MATRIX_BINARY_FORMAT_DENSE) {
1799     ierr = MatLoad_MPIDense_DenseInFile(comm,fd,M,N,type,newmat);CHKERRQ(ierr);
1800     PetscFunctionReturn(0);
1801   }
1802 
1803   /* determine ownership of all rows */
1804   m          = PetscMPIIntCast(M/size + ((M % size) > rank));
1805   ierr       = PetscMalloc((size+2)*sizeof(PetscMPIInt),&rowners);CHKERRQ(ierr);
1806   ierr       = MPI_Allgather(&m,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
1807   rowners[0] = 0;
1808   for (i=2; i<=size; i++) {
1809     rowners[i] += rowners[i-1];
1810   }
1811   rstart = rowners[rank];
1812   rend   = rowners[rank+1];
1813 
1814   /* distribute row lengths to all processors */
1815   ierr    = PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&ourlens);CHKERRQ(ierr);
1816   offlens = ourlens + (rend-rstart);
1817   if (!rank) {
1818     ierr = PetscMalloc(M*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
1819     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
1820     ierr = PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);CHKERRQ(ierr);
1821     for (i=0; i<size; i++) sndcounts[i] = rowners[i+1] - rowners[i];
1822     ierr = MPI_Scatterv(rowlengths,sndcounts,rowners,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);CHKERRQ(ierr);
1823     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
1824   } else {
1825     ierr = MPI_Scatterv(0,0,0,MPIU_INT,ourlens,rend-rstart,MPIU_INT,0,comm);CHKERRQ(ierr);
1826   }
1827 
1828   if (!rank) {
1829     /* calculate the number of nonzeros on each processor */
1830     ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
1831     ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
1832     for (i=0; i<size; i++) {
1833       for (j=rowners[i]; j< rowners[i+1]; j++) {
1834         procsnz[i] += rowlengths[j];
1835       }
1836     }
1837     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
1838 
1839     /* determine max buffer needed and allocate it */
1840     maxnz = 0;
1841     for (i=0; i<size; i++) {
1842       maxnz = PetscMax(maxnz,procsnz[i]);
1843     }
1844     ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1845 
1846     /* read in my part of the matrix column indices  */
1847     nz = procsnz[0];
1848     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
1849     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
1850 
1851     /* read in everyone else's parts and ship them off */
1852     for (i=1; i<size; i++) {
1853       nz   = procsnz[i];
1854       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
1855       ierr = MPI_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
1856     }
1857     ierr = PetscFree(cols);CHKERRQ(ierr);
1858   } else {
1859     /* determine buffer space needed for message */
1860     nz = 0;
1861     for (i=0; i<m; i++) {
1862       nz += ourlens[i];
1863     }
1864     ierr = PetscMalloc((nz+1)*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
1865 
1866     /* receive message of column indices */
1867     ierr = MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
1868     ierr = MPI_Get_count(&status,MPIU_INT,&maxnz);CHKERRQ(ierr);
1869     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"wrong number of column indices read from file");
1870   }
1871 
1872   /* loop over local rows, determining number of off diagonal entries */
1873   ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
1874   jj = 0;
1875   for (i=0; i<m; i++) {
1876     for (j=0; j<ourlens[i]; j++) {
1877       if (mycols[jj] < rstart || mycols[jj] >= rend) offlens[i]++;
1878       jj++;
1879     }
1880   }
1881 
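  /* ourlens[] is temporarily reduced to the on-diagonal counts while the matrix is created,
     then restored so it again holds the full row lengths used when inserting values below */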
1882   /* create our matrix */
1883   for (i=0; i<m; i++) {
1884     ourlens[i] -= offlens[i];
1885   }
1886   ierr = MatCreate(comm,newmat);CHKERRQ(ierr);
1887   ierr = MatSetSizes(*newmat,m,PETSC_DECIDE,M,N);CHKERRQ(ierr);
1888   ierr = MatSetType(*newmat,type);CHKERRQ(ierr);
1889   ierr = MatMPIDenseSetPreallocation(*newmat,PETSC_NULL);CHKERRQ(ierr);
1890   A = *newmat;
1891   for (i=0; i<m; i++) {
1892     ourlens[i] += offlens[i];
1893   }
1894 
1895   if (!rank) {
1896     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1897 
1898     /* read in my part of the matrix numerical values  */
1899     nz = procsnz[0];
1900     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1901 
1902     /* insert into matrix */
1903     jj      = rstart;
1904     smycols = mycols;
1905     svals   = vals;
1906     for (i=0; i<m; i++) {
1907       ierr = MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
1908       smycols += ourlens[i];
1909       svals   += ourlens[i];
1910       jj++;
1911     }
1912 
1913     /* read in other processors and ship out */
1914     for (i=1; i<size; i++) {
1915       nz   = procsnz[i];
1916       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
1917       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
1918     }
1919     ierr = PetscFree(procsnz);CHKERRQ(ierr);
1920   } else {
1921     /* receive numeric values */
1922     ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
1923 
1924     /* receive message of values */
1925     ierr = MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);CHKERRQ(ierr);
1926     ierr = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
1927     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"wrong number of values read from file");
1928 
1929     /* insert into matrix */
1930     jj      = rstart;
1931     smycols = mycols;
1932     svals   = vals;
1933     for (i=0; i<m; i++) {
1934       ierr = MatSetValues(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
1935       smycols += ourlens[i];
1936       svals   += ourlens[i];
1937       jj++;
1938     }
1939   }
1940   ierr = PetscFree(ourlens);CHKERRQ(ierr);
1941   ierr = PetscFree(vals);CHKERRQ(ierr);
1942   ierr = PetscFree(mycols);CHKERRQ(ierr);
1943   ierr = PetscFree(rowners);CHKERRQ(ierr);
1944 
1945   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1946   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1947   PetscFunctionReturn(0);
1948 }
1949 
1950 #undef __FUNCT__
1951 #define __FUNCT__ "MatEqual_MPIDense"
1952 PetscErrorCode MatEqual_MPIDense(Mat A,Mat B,PetscTruth *flag)
1953 {
1954   Mat_MPIDense   *matB = (Mat_MPIDense*)B->data,*matA = (Mat_MPIDense*)A->data;
1955   Mat            a,b;
1956   PetscTruth     flg;
1957   PetscErrorCode ierr;
1958 
1959   PetscFunctionBegin;
1960   a = matA->A;
1961   b = matB->A;
1962   ierr = MatEqual(a,b,&flg);CHKERRQ(ierr);
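  /* combine the per-process comparison results with a logical AND across the communicator */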
1963   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);CHKERRQ(ierr);
1964   PetscFunctionReturn(0);
1965 }
1966 
1967 #if defined(PETSC_HAVE_PLAPACK)
1968 
1969 #undef __FUNCT__
1970 #define __FUNCT__ "PetscPLAPACKFinalizePackage"
1971 /*@C
1972   PetscPLAPACKFinalizePackage - This function destroys everything in the Petsc interface to PLAPACK.

1973   Level: developer
1974 
1975 .keywords: Petsc, destroy, package, PLAPACK
1976 .seealso: PetscFinalize()
1977 @*/
1978 PetscErrorCode PETSC_DLLEXPORT PetscPLAPACKFinalizePackage(void)
1979 {
1980   PetscErrorCode ierr;
1981 
1982   PetscFunctionBegin;
1983   ierr = PLA_Finalize();CHKERRQ(ierr);
1984   PetscFunctionReturn(0);
1985 }
1986 
1987 #undef __FUNCT__
1988 #define __FUNCT__ "PetscPLAPACKInitializePackage"
1989 /*@C
1990   PetscPLAPACKInitializePackage - This function initializes everything in the Petsc interface to PLAPACK. It is
1991   called from PetscDLLibraryRegister() when using dynamic libraries, and on the call to PetscInitialize()
1992   when using static libraries.
1993 
1994   Input Parameter:
1995 . path - The dynamic library path, or PETSC_NULL
1996 
1997   Level: developer
1998 
1999 .keywords: Petsc, initialize, package, PLAPACK
2000 .seealso: PetscInitializePackage(), PetscInitialize()
2001 @*/
2002 PetscErrorCode PETSC_DLLEXPORT PetscPLAPACKInitializePackage(const char path[])
2003 {
2004   MPI_Comm       comm = PETSC_COMM_WORLD;
2005   PetscMPIInt    size;
2006   PetscErrorCode ierr;
2007 
2008   PetscFunctionBegin;
2009   if (!PLA_Initialized(PETSC_NULL)) {
2010 
2011     ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2012     Plapack_nprows = 1;
2013     Plapack_npcols = size;
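    /* default process mesh is 1 x (number of processes); -plapack_nprows and -plapack_npcols below override it */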
2014 
2015     ierr = PetscOptionsBegin(comm,PETSC_NULL,"PLAPACK Options","Mat");CHKERRQ(ierr);
2016       ierr = PetscOptionsInt("-plapack_nprows","row dimension of 2D processor mesh","None",Plapack_nprows,&Plapack_nprows,PETSC_NULL);CHKERRQ(ierr);
2017       ierr = PetscOptionsInt("-plapack_npcols","column dimension of 2D processor mesh","None",Plapack_npcols,&Plapack_npcols,PETSC_NULL);CHKERRQ(ierr);
2018 #if defined(PETSC_USE_DEBUG)
2019       Plapack_ierror = 3;
2020 #else
2021       Plapack_ierror = 0;
2022 #endif
2023       ierr = PetscOptionsInt("-plapack_ckerror","error checking flag","None",Plapack_ierror,&Plapack_ierror,PETSC_NULL);CHKERRQ(ierr);
2024       if (Plapack_ierror){
2025 	ierr = PLA_Set_error_checking(Plapack_ierror,PETSC_TRUE,PETSC_TRUE,PETSC_FALSE );CHKERRQ(ierr);
2026       } else {
2027 	ierr = PLA_Set_error_checking(Plapack_ierror,PETSC_FALSE,PETSC_FALSE,PETSC_FALSE );CHKERRQ(ierr);
2028       }
2029 
2030       Plapack_nb_alg = 0;
2031       ierr = PetscOptionsInt("-plapack_nb_alg","algorithmic block size","None",Plapack_nb_alg,&Plapack_nb_alg,PETSC_NULL);CHKERRQ(ierr);
2032       if (Plapack_nb_alg) {
2033         ierr = pla_Environ_set_nb_alg (PLA_OP_ALL_ALG,Plapack_nb_alg);CHKERRQ(ierr);
2034       }
2035     PetscOptionsEnd();
2036 
2037     ierr = PLA_Comm_1D_to_2D(comm,Plapack_nprows,Plapack_npcols,&Plapack_comm_2d);CHKERRQ(ierr);
2038     ierr = PLA_Init(Plapack_comm_2d);CHKERRQ(ierr);
2039     ierr = PetscRegisterFinalize(PetscPLAPACKFinalizePackage);CHKERRQ(ierr);
2040   }
2041   PetscFunctionReturn(0);
2042 }
2043 
2044 
2045 #endif
2046