#define PETSCMAT_DLL

/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that are shared across processors.
*/
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat,PetscInt,IS *);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat,PetscInt,char **,PetscInt*,PetscInt**);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat,PetscInt,PetscInt **,PetscInt**,PetscInt*);
EXTERN PetscErrorCode MatGetRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
EXTERN PetscErrorCode MatRestoreRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ"
PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIAIJ_Once(C,imax,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If a processor A wants processor B to process some elements corresponding
  to index sets is[1],is[5]
  mesg [0] = 2    (number of index sets in the mesg)
  -----------
  mesg [1] = 1 => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5 => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
  ...
  mesg [n]   data(is[1])
  -----------
  mesg [n+1]
  ...
  mesg [m]   data(is[5])
  -----------

  Notes:
  nrqs - number of requests sent (or to be sent out)
  nrqr - number of requests received (which have to be, or which have been, processed)
*/
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat C,PetscInt imax,IS is[])
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  PetscMPIInt    *w1,*w2,nrqr,*w3,*w4,*onodes1,*olengths1,*onodes2,*olengths2;
  PetscInt       **idx,*n,*rtable,**data,len,*idx_i;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2;
  PetscInt       m,i,j,k,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
  PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  m    = C->rmap.N;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  len  = (imax+1)*sizeof(PetscInt*)+ (imax + m)*sizeof(PetscInt);
  ierr = PetscMalloc(len,&idx);CHKERRQ(ierr);
  n      = (PetscInt*)(idx + imax);
  rtable = n + imax;

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    len = C->rmap.range[i+1];
    for (; j<len; j++) {
      rtable[j] = i;
    }
  }
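  /* Illustrative sketch (hypothetical numbers, not from the original source): with
     three processes owning rows [0,4), [4,7) and [7,10), C->rmap.range = {0,4,7,10}
     and the loop above fills rtable = {0,0,0,0,1,1,1,2,2,2}, so rtable[row] gives
     the owning process of any global row in O(1). */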
  /* evaluate communication - mesg to who, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc(size*4*sizeof(PetscMPIInt),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then a message to proc i */
  w3   = w2 + size;  /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector */
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
      }
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to self */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  /* pa - is the list of processors to communicate with */
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate Memory for outgoing messages */
  len  = 2*size*sizeof(PetscInt*) + (size+msz)*sizeof(PetscInt);
  ierr = PetscMalloc(len,&outdat);CHKERRQ(ierr);
  ptr  = outdat + size;  /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(outdat,2*size*sizeof(PetscInt*));CHKERRQ(ierr);
  tmp  = (PetscInt*)(outdat + 2*size);
  ctr  = tmp + msz;

  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }

  /* Memory for doing local proc's work */
  {
    PetscInt *d_p;
    char     *t_p;

    len  = (imax)*(sizeof(PetscBT) + sizeof(PetscInt*)+ sizeof(PetscInt)) +
           (m)*imax*sizeof(PetscInt) + (m/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char) + 1;
    ierr = PetscMalloc(len,&table);CHKERRQ(ierr);
    ierr = PetscMemzero(table,len);CHKERRQ(ierr);
    data = (PetscInt **)(table + imax);
    isz  = (PetscInt  *)(data  + imax);
    d_p  = (PetscInt  *)(isz   + imax);
    t_p  = (char *)(d_p + m*imax);
    for (i=0; i<imax; i++) {
      table[i] = t_p + (m/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + (m)*i;
    }
  }
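  /* Layout of the single allocation above (descriptive note added for clarity):
       table[0..imax-1] - one PetscBT bit table per index set (stored in t_p)
       data[0..imax-1]  - the rows collected so far for each index set (stored in d_p)
       isz[0..imax-1]   - current number of entries in each data[i]
     Everything is carved out of one PetscMalloc so that the single
     PetscFree(table) at the end of this routine releases it all. */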
  /* Parse the IS and update local tables and the outgoing buf with the data */
  {
    PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
    PetscBT  table_i;

    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
      data_i  = data[i];
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) {  /* parse the indices of each IS */
        row  = idx_i[j];
        proc = rtable[row];
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* Update the local table */
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree(idx);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }

  /* Do local work */
  ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}

  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree(outdat);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    PetscMPIInt *rw1;

    ierr = PetscMalloc(size*sizeof(PetscMPIInt),&rw1);CHKERRQ(ierr);
    ierr = PetscMemzero(rw1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;
      if (proc != onodes1[i]) SETERRQ(PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and their from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j    = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }
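  /* Reply message layout (descriptive note; mirrors the request format documented
     above): each xdata[i] starts with the same header - the number of index sets,
     followed by (IS id, count) pairs, with the counts rewritten by _Receive to the
     length of each overlapped row list - and then the row indices themselves.
     The loop below unpacks these replies into the local tables. */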
  /* receive work done on other processors */
  {
    PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
    PetscMPIInt idex;
    PetscBT     table_i;
    MPI_Status  *status2;

    ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0]+1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        data_i  = data[is_no];
        table_i = table[is_no];
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
        isz[is_no] = isz_i;
      }
    }

    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

  for (i=0; i<imax; ++i) {
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Local"
/*
   MatIncreaseOverlap_MPIAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

    Inputs:
     C     - the MPIAIJ matrix;
     imax  - total number of index sets processed at a time;
     table - an array of char - size = m bits.

    Output:
     isz  - array containing the count of the solution elements corresponding
            to each index set;
     data - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
{
  Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
  Mat        A = c->A,B = c->B;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  PetscInt   start,end,val,max,rstart,cstart,*ai,*aj;
  PetscInt   *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
  PetscBT    table_i;

  PetscFunctionBegin;
  rstart = C->rmap.rstart;
  cstart = C->cmap.rstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
    data_i  = data[i];
    table_i = table[i];
    isz_i   = isz[i];
    for (j=0,max=isz[i]; j<max; j++) {
      row   = data_i[j] - rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(0);
}
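/*
   A small illustrative example of what the routine above computes (hypothetical
   numbers, not taken from the original source): suppose the locally owned part
   of C has

       global row 3:  nonzeros in global columns 3, 5 and 9

   and data[i] currently contains row 3. One pass of the Amat/Bmat loops adds the
   column indices 3, 5 and 9 (converted to global numbering via cstart and garray)
   to data[i], skipping anything already marked in the bit table. The index set
   therefore grows by the sparsity pattern of its rows, i.e. one level of overlap
   per call from MatIncreaseOverlap_MPIAIJ.
*/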
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Receive"
/*
   MatIncreaseOverlap_MPIAIJ_Receive - Process the received messages,
   and return the output

   Input:
     C    - the matrix
     nrqr - number of messages being processed.
     rbuf - an array of pointers to the received requests

   Output:
     xdata - array of messages to be sent back
     isz1  - size of each message

   For better efficiency perhaps we should malloc separately each xdata[i];
   then if a realloc is required we need only copy the data for that one row
   rather than all previous rows, as it is now where a single large chunk of
   memory is used.
*/
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  Mat            A = c->A,B = c->B;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  PetscInt       val,max1,max2,m,no_malloc =0,*tmp,new_estimate,ctr;
  PetscInt       *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  rank   = c->rank;
  m      = C->rmap.N;
  rstart = C->rmap.rstart;
  cstart = C->cmap.rstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i  = rbuf[i];
    rbuf_0  = rbuf_i[0];
    ct     += rbuf_0;
    for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
  }

  if (C->rmap.n) max1 = ct*(a->nz + b->nz)/C->rmap.n;
  else max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
  ierr = PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(m,xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each message from proc i */
    rbuf_i  = rbuf[i];
    rbuf_0  = rbuf_i[0];
    ct1     = 2*rbuf_0+1;
    ct2     = ct1;
    ct3    += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(m,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (PetscInt)(1.5*mem_estimate)+1;
            ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
            ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
            ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0]     = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) {
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) {
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
  ierr = PetscInfo4(C,"[%d] Allocated %D bytes, required %D bytes, no of mallocs = %D\n",rank,mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
EXTERN PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
EXTERN PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
/*
    Every processor gets the entire matrix
*/
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrix_MPIAIJ_All"
PetscErrorCode MatGetSubMatrix_MPIAIJ_All(Mat A,MatGetSubMatrixOption flag,MatReuse scall,Mat *Bin[])
{
  Mat            B;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ     *b,*ad = (Mat_SeqAIJ*)a->A->data,*bd = (Mat_SeqAIJ*)a->B->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,*recvcounts = 0,*displs = 0;
  PetscInt       sendcount,i,*rstarts = A->rmap.range,n,cnt,j;
  PetscInt       m,*b_sendj,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;
  PetscScalar    *sendbuf,*recvbuf,*a_sendbuf,*b_sendbuf;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(((PetscObject)A)->comm,&rank);CHKERRQ(ierr);

  if (scall == MAT_INITIAL_MATRIX) {
    /* ----------------------------------------------------------------
         Tell every processor the number of nonzeros per row
    */
    ierr = PetscMalloc(A->rmap.N*sizeof(PetscInt),&lens);CHKERRQ(ierr);
    for (i=A->rmap.rstart; i<A->rmap.rend; i++) {
      lens[i] = ad->i[i-A->rmap.rstart+1] - ad->i[i-A->rmap.rstart] + bd->i[i-A->rmap.rstart+1] - bd->i[i-A->rmap.rstart];
    }
    sendcount = A->rmap.rend - A->rmap.rstart;
    ierr = PetscMalloc(2*size*sizeof(PetscMPIInt),&recvcounts);CHKERRQ(ierr);
    displs    = recvcounts + size;
    for (i=0; i<size; i++) {
      recvcounts[i] = A->rmap.range[i+1] - A->rmap.range[i];
      displs[i]     = A->rmap.range[i];
    }
    ierr = MPI_Allgatherv(lens+A->rmap.rstart,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);

    /* ---------------------------------------------------------------
         Create the sequential matrix of the same type as the local block diagonal
    */
    ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
    ierr = MatSetSizes(B,A->rmap.N,A->cmap.N,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatSetType(B,((PetscObject)a->A)->type_name);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr);
    ierr = PetscMalloc(sizeof(Mat),Bin);CHKERRQ(ierr);
    **Bin = B;
    b = (Mat_SeqAIJ *)B->data;
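    /* Note added for clarity: the loop that follows writes each local row's column
       indices by merging three pieces relative to the diagonal block - off-diagonal
       (B) entries whose global column, via garray[], falls at or before the
       diagonal position, then the diagonal-block (A) entries shifted to global
       numbering with rstart, then the remaining off-diagonal entries. For one
       hypothetical row with global index 40 this might give (global columns):

           B left: 1 2   A: 40 41   B right: 90

       so jsendbuf receives 1 2 40 41 90 for that row. */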
    /*--------------------------------------------------------------------
       Copy my part of matrix column indices over
    */
    sendcount  = ad->nz + bd->nz;
    jsendbuf   = b->j + b->i[rstarts[rank]];
    a_jsendbuf = ad->j;
    b_jsendbuf = bd->j;
    n          = A->rmap.rend - A->rmap.rstart;
    cnt        = 0;
    for (i=0; i<n; i++) {

      /* put in lower diagonal portion */
      m = bd->i[i+1] - bd->i[i];
      while (m > 0) {
        /* is it above diagonal (in bd (compressed) numbering) */
        if (garray[*b_jsendbuf] > A->rmap.rstart + i) break;
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
        m--;
      }

      /* put in diagonal portion */
      for (j=ad->i[i]; j<ad->i[i+1]; j++) {
        jsendbuf[cnt++] = A->rmap.rstart + *a_jsendbuf++;
      }

      /* put in upper diagonal portion */
      while (m-- > 0) {
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
      }
    }
    if (cnt != sendcount) SETERRQ2(PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);

    /*--------------------------------------------------------------------
       Gather all column indices to all processors
    */
    for (i=0; i<size; i++) {
      recvcounts[i] = 0;
      for (j=A->rmap.range[i]; j<A->rmap.range[i+1]; j++) {
        recvcounts[i] += lens[j];
      }
    }
    displs[0] = 0;
    for (i=1; i<size; i++) {
      displs[i] = displs[i-1] + recvcounts[i-1];
    }
    ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);

    /*--------------------------------------------------------------------
       Assemble the matrix into usable form (note numerical values not yet set)
    */
    /* set the b->ilen (length of each row) values */
    ierr = PetscMemcpy(b->ilen,lens,A->rmap.N*sizeof(PetscInt));CHKERRQ(ierr);
    /* set the b->i indices */
    b->i[0] = 0;
    for (i=1; i<=A->rmap.N; i++) {
      b->i[i] = b->i[i-1] + lens[i-1];
    }
    ierr = PetscFree(lens);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  } else {
    B = **Bin;
    b = (Mat_SeqAIJ *)B->data;
  }

  /*--------------------------------------------------------------------
       Copy my part of matrix numerical values into the values location
  */
  if (flag == MAT_GET_VALUES){
    sendcount = ad->nz + bd->nz;
    sendbuf   = b->a + b->i[rstarts[rank]];
    a_sendbuf = ad->a;
    b_sendbuf = bd->a;
    b_sendj   = bd->j;
    n         = A->rmap.rend - A->rmap.rstart;
    cnt       = 0;
    for (i=0; i<n; i++) {

      /* put in lower diagonal portion */
      m = bd->i[i+1] - bd->i[i];
      while (m > 0) {
        /* is it above diagonal (in bd (compressed) numbering) */
        if (garray[*b_sendj] > A->rmap.rstart + i) break;
        sendbuf[cnt++] = *b_sendbuf++;
        m--;
        b_sendj++;
      }

      /* put in diagonal portion */
      for (j=ad->i[i]; j<ad->i[i+1]; j++) {
        sendbuf[cnt++] = *a_sendbuf++;
      }

      /* put in upper diagonal portion */
      while (m-- > 0) {
        sendbuf[cnt++] = *b_sendbuf++;
        b_sendj++;
      }
    }
    if (cnt != sendcount) SETERRQ2(PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);
    /* -----------------------------------------------------------------
       Gather all numerical values to all processors
    */
    if (!recvcounts) {
      ierr = PetscMalloc(2*size*sizeof(PetscInt),&recvcounts);CHKERRQ(ierr);
      displs = recvcounts + size;
    }
    for (i=0; i<size; i++) {
      recvcounts[i] = b->i[rstarts[i+1]] - b->i[rstarts[i]];
    }
    displs[0] = 0;
    for (i=1; i<size; i++) {
      displs[i] = displs[i-1] + recvcounts[i-1];
    }
    recvbuf = b->a;
    ierr = MPI_Allgatherv(sendbuf,sendcount,MPIU_SCALAR,recvbuf,recvcounts,displs,MPIU_SCALAR,((PetscObject)A)->comm);CHKERRQ(ierr);
  }  /* end of (flag == MAT_GET_VALUES) */
  ierr = PetscFree(recvcounts);CHKERRQ(ierr);

  if (A->symmetric){
    ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->hermitian) {
    ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->structurally_symmetric) {
    ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIAIJ"
PetscErrorCode MatGetSubMatrices_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no,nrow,ncol;
  PetscTruth     rowflag,colflag,wantallmatrix = PETSC_FALSE,twantallmatrix;

  PetscFunctionBegin;
  /*
       Check for special case: each processor gets entire matrix
  */
  if (ismax == 1 && C->rmap.N == C->cmap.N) {
    ierr = ISIdentity(*isrow,&rowflag);CHKERRQ(ierr);
    ierr = ISIdentity(*iscol,&colflag);CHKERRQ(ierr);
    ierr = ISGetLocalSize(*isrow,&nrow);CHKERRQ(ierr);
    ierr = ISGetLocalSize(*iscol,&ncol);CHKERRQ(ierr);
    if (rowflag && colflag && nrow == C->rmap.N && ncol == C->cmap.N) {
      wantallmatrix = PETSC_TRUE;
      ierr = PetscOptionsGetTruth(((PetscObject)C)->prefix,"-use_fast_submatrix",&wantallmatrix,PETSC_NULL);CHKERRQ(ierr);
    }
  }
  ierr = MPI_Allreduce(&wantallmatrix,&twantallmatrix,1,MPI_INT,MPI_MIN,((PetscObject)C)->comm);CHKERRQ(ierr);
  if (twantallmatrix) {
    ierr = MatGetSubMatrix_MPIAIJ_All(C,MAT_GET_VALUES,scall,submat);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (C->cmap.N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIAIJ_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
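/*
   Caller-side sketch (illustrative only, not part of this file): extracting one
   sequential submatrix consisting of rows/columns 0..9 of a parallel AIJ matrix C.
   MatGetSubMatrices() is the public entry point that, for MPIAIJ matrices,
   reaches MatGetSubMatrices_MPIAIJ() above.

     IS  is;
     Mat *submat;
     ierr = ISCreateStride(PETSC_COMM_SELF,10,0,1,&is);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(C,1,&is,&is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     ... use submat[0] ...
     ierr = MatDestroyMatrices(1,&submat);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/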
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIAIJ_Local"
PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  Mat            A = c->A;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)c->B->data,*mat;
  PetscInt       **irow,**icol,*nrow,*ncol,start;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag0,tag1,tag2,tag3,*w1,*w2,*w3,*w4,nrqr;
  PetscInt       **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,**rbuf1,row,proc;
  PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
  PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2,**rmap;
  PetscInt       **cmap,**lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
  PetscInt       len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*cmap_i,*lens_i;
  PetscInt       *rmap_i;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
  MPI_Request    *r_waits4,*s_waits3,*s_waits4;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
  MPI_Status     *r_status3,*r_status4,*s_status4;
  MPI_Comm       comm;
  PetscScalar    **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i;
  PetscTruth     sorted;
  PetscMPIInt    *onodes1,*olengths1;
  PetscMPIInt    idex,idex2,end;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);

  /* Check if the row/col indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    /* if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"isrow is not sorted"); */
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    /* if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"iscol is not sorted"); */
  }

  len  = (2*ismax+1)*(sizeof(PetscInt*)+ sizeof(PetscInt));
  ierr = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol = irow + ismax;
  nrow = (PetscInt*)(icol + ismax);
  ncol = nrow + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }
  /* evaluate communication - mesg to who, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc(size*4*sizeof(PetscMPIInt),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then a message to proc i */
  w3   = w2 + size;  /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      l   = 0;
      row = irow_i[j];
      while (row >= C->rmap.range[l+1]) l++;
      proc = l;
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to self */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate Memory for outgoing messages */
  len  = 2*size*sizeof(PetscInt*) + 2*msz*sizeof(PetscInt) + size*sizeof(PetscInt);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size;  /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(PetscInt*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp  = (PetscInt*)(ptr + size);
  ctr  = tmp + 2*msz;

  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      l   = 0;
      row = irow_i[j];
      while (row >= C->rmap.range[l+1]) l++;
      proc = l;
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }
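  /* Communication plan from here on (summary added for clarity): the request
     built above goes out with tag0; each owning process replies first with the
     number of columns per requested row (tag1), then with the column indices
     (tag2) and the numerical values (tag3). The requesting side posts the
     matching receives before it waits, so the phases overlap. */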
  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post Receives to capture the buffer size */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);CHKERRQ(ierr);
  rbuf2[0] = tmp + msz;
  for (i=1; i<nrqs; ++i) {
    rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
  }
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Send to other procs the buf size they should allocate */

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  len  = 2*nrqr*sizeof(PetscInt) + (nrqr+1)*sizeof(PetscInt*);
  ierr = PetscMalloc(len,&sbuf2);CHKERRQ(ierr);
  req_size   = (PetscInt*)(sbuf2 + nrqr);
  req_source = req_size + nrqr;

  {
    Mat_SeqAIJ *sA = (Mat_SeqAIJ*)c->A->data,*sB = (Mat_SeqAIJ*)c->B->data;
    PetscInt   *sAi = sA->i,*sBi = sB->i,id,rstart = C->rmap.rstart;
    PetscInt   *sbuf2_i;

    for (i=0; i<nrqr; ++i) {
      ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      req_size[idex] = 0;
      rbuf1_i        = rbuf1[idex];
      start          = 2*rbuf1_i[0] + 1;
      ierr           = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      ierr           = PetscMalloc((end+1)*sizeof(PetscInt),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i        = sbuf2[idex];
      for (j=start; j<end; j++) {
        id              = rbuf1_i[j] - rstart;
        ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
        sbuf2_i[j]      = ncols;
        req_size[idex] += ncols;
      }
      req_source[idex] = r_status1[i].MPI_SOURCE;
      /* form the header */
      sbuf2_i[0] = req_size[idex];
      for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
      ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);

  /* recv buffer sizes */
  /* Receive messages */

  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
    ierr = PetscMalloc((rbuf2[idex][0]+1)*sizeof(PetscInt),&rbuf3[idex]);CHKERRQ(ierr);
    ierr = PetscMalloc((rbuf2[idex][0]+1)*sizeof(PetscScalar),&rbuf4[idex]);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0],MPIU_SCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
  }
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  /* Wait on sends1 and sends2 */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);

  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Now allocate buffers for a->j, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
  {
    PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i,lwrite;
    PetscInt *cworkA,*cworkB,cstart = C->cmap.rstart,rstart = C->rmap.rstart,*bmap = c->garray;
    PetscInt cend = C->cmap.rend;
    PetscInt *a_j = a->j,*b_j = b->j,ctmp;

    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aj_i = sbuf_aj[i];
      ct1       = 2*rbuf1_i[0] + 1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1[i][2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row];  nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row];  cworkB = b_j + b_i[row];

          /* load the column indices for this row into cols */
          cols = sbuf_aj_i + ct2;

          lwrite = 0;
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp;
          }
          for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l];
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp;
          }

          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);
  /* Allocate buffers for a->a, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf_aa);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(PetscScalar),&sbuf_aa[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
  {
    PetscInt    nzA,nzB,*a_i = a->i,*b_i = b->i,*cworkB,lwrite;
    PetscInt    cstart = C->cmap.rstart,rstart = C->rmap.rstart,*bmap = c->garray;
    PetscInt    cend = C->cmap.rend;
    PetscInt    *b_j = b->j;
    PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a;

    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aa_i = sbuf_aa[i];
      ct1       = 2*rbuf1_i[0]+1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1_i[2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row];  nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkB = b_j + b_i[row];
          vworkA = a_a + a_i[row];
          vworkB = b_a + b_i[row];

          /* load the column values for this row into vals */
          vals = sbuf_aa_i + ct2;

          lwrite = 0;
          for (l=0; l<nzB; l++) {
            if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l];
          }
          for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l];
          for (l=0; l<nzB; l++) {
            if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l];
          }

          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  /* Form the matrix */
  /* create col map */
  {
    PetscInt *icol_i;

    len     = (1+ismax)*sizeof(PetscInt*)+ (1+ismax*C->cmap.N)*sizeof(PetscInt);
    ierr    = PetscMalloc(len,&cmap);CHKERRQ(ierr);
    cmap[0] = (PetscInt*)(cmap + ismax);
    ierr    = PetscMemzero(cmap[0],(1+ismax*C->cmap.N)*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + C->cmap.N; }
    for (i=0; i<ismax; i++) {
      jmax   = ncol[i];
      icol_i = icol[i];
      cmap_i = cmap[i];
      for (j=0; j<jmax; j++) {
        cmap_i[icol_i[j]] = j+1;
      }
    }
  }

  /* Create lens which is required for MatCreate... */
  for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
  len     = (1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt);
  ierr    = PetscMalloc(len,&lens);CHKERRQ(ierr);
  lens[0] = (PetscInt*)(lens + ismax);
  ierr    = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }

  /* Update lens from local data */
  for (i=0; i<ismax; i++) {
    jmax   = nrow[i];
    cmap_i = cmap[i];
    irow_i = irow[i];
    lens_i = lens[i];
    for (j=0; j<jmax; j++) {
      l   = 0;
      row = irow_i[j];
      while (row >= C->rmap.range[l+1]) l++;
      proc = l;
      if (proc == rank) {
        ierr = MatGetRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
        for (k=0; k<ncols; k++) {
          if (cmap_i[cols[k]]) { lens_i[j]++; }
        }
        ierr = MatRestoreRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
      }
    }
  }

  /* Create row map */
  len     = (1+ismax)*sizeof(PetscInt*)+ ismax*C->rmap.N*sizeof(PetscInt);
  ierr    = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (PetscInt*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->rmap.N*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->rmap.N; }
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Update lens from offproc data */
  {
    PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr = MPI_Waitany(nrqs,r_waits3,&idex2,r_status3+tmp2);CHKERRQ(ierr);
      idex    = pa[idex2];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax+1;
      ct2     = 0;
      rbuf2_i = rbuf2[idex2];
      rbuf3_i = rbuf3[idex2];
      for (j=1; j<=jmax; j++) {
        is_no  = sbuf1_i[2*j-1];
        max1   = sbuf1_i[2*j];
        lens_i = lens[is_no];
        cmap_i = cmap[is_no];
        rmap_i = rmap[is_no];
        for (k=0; k<max1; k++,ct1++) {
          row  = rmap_i[sbuf1_i[ct1]]; /* the row in the new submatrix */
          max2 = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
            if (cmap_i[rbuf3_i[ct2]]) {
              lens_i[row]++;
            }
          }
        }
      }
    }
  }
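  /* At this point lens[is][row] holds, for every requested submatrix, the number
     of columns of that row that survive the column map cmap[] - i.e. the exact
     nonzero count per row. These counts drive MatSeqAIJSetPreallocation() below,
     or are checked against the existing matrices when scall == MAT_REUSE_MATRIX. */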
  ierr = PetscFree(r_status3);CHKERRQ(ierr);
  ierr = PetscFree(r_waits3);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
  ierr = PetscFree(s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_waits3);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    PetscTruth flag;

    /*
        Assumes the new rows are the same length as the old rows, hence a bug!
    */
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqAIJ *)(submats[i]->data);
      if ((submats[i]->rmap.n != nrow[i]) || (submats[i]->cmap.n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      }
      ierr = PetscMemcmp(mat->ilen,lens[i],submats[i]->rmap.n*sizeof(PetscInt),&flag);CHKERRQ(ierr);
      if (!flag) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong no of nonzeros");
      }
      /* Initialize the matrix as if it were empty */
      ierr = PetscMemzero(mat->ilen,submats[i]->rmap.n*sizeof(PetscInt));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices */
  /* First assemble the local rows */
  {
    PetscInt    ilen_row,*imat_ilen,*imat_j,*imat_i,old_row;
    PetscScalar *imat_a;

    for (i=0; i<ismax; i++) {
      mat       = (Mat_SeqAIJ*)submats[i]->data;
      imat_ilen = mat->ilen;
      imat_j    = mat->j;
      imat_i    = mat->i;
      imat_a    = mat->a;
      cmap_i    = cmap[i];
      rmap_i    = rmap[i];
      irow_i    = irow[i];
      jmax      = nrow[i];
      for (j=0; j<jmax; j++) {
        l   = 0;
        row = irow_i[j];
        while (row >= C->rmap.range[l+1]) l++;
        proc = l;
        if (proc == rank) {
          old_row  = row;
          row      = rmap_i[row];
          ilen_row = imat_ilen[row];
          ierr     = MatGetRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr);
          mat_i    = imat_i[row];
          mat_a    = imat_a + mat_i;
          mat_j    = imat_j + mat_i;
          for (k=0; k<ncols; k++) {
            if ((tcol = cmap_i[cols[k]])) {
              *mat_j++ = tcol - 1;
              *mat_a++ = vals[k];
              ilen_row++;
            }
          }
          ierr = MatRestoreRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr);
          imat_ilen[row] = ilen_row;
        }
      }
    }
  }

  /* Now assemble the off proc rows */
  {
    PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
    PetscInt    *imat_j,*imat_i;
    PetscScalar *imat_a,*rbuf4_i;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr = MPI_Waitany(nrqs,r_waits4,&idex2,r_status4+tmp2);CHKERRQ(ierr);
      idex    = pa[idex2];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax + 1;
      ct2     = 0;
      rbuf2_i = rbuf2[idex2];
      rbuf3_i = rbuf3[idex2];
      rbuf4_i = rbuf4[idex2];
      for (j=1; j<=jmax; j++) {
        is_no     = sbuf1_i[2*j-1];
        rmap_i    = rmap[is_no];
        cmap_i    = cmap[is_no];
        mat       = (Mat_SeqAIJ*)submats[is_no]->data;
        imat_ilen = mat->ilen;
        imat_j    = mat->j;
        imat_i    = mat->i;
        imat_a    = mat->a;
        max1      = sbuf1_i[2*j];
        for (k=0; k<max1; k++,ct1++) {
          row   = sbuf1_i[ct1];
          row   = rmap_i[row];
          ilen  = imat_ilen[row];
          mat_i = imat_i[row];
          mat_a = imat_a + mat_i;
          mat_j = imat_j + mat_i;
          max2  = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
            if ((tcol = cmap_i[rbuf3_i[ct2]])) {
              *mat_j++ = tcol - 1;
              *mat_a++ = rbuf4_i[ct2];
              ilen++;
            }
          }
          imat_ilen[row] = ilen;
        }
      }
    }
  }
  ierr = PetscFree(r_status4);CHKERRQ(ierr);
  ierr = PetscFree(r_waits4);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
  ierr = PetscFree(s_waits4);CHKERRQ(ierr);
  ierr = PetscFree(s_status4);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
    ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rbuf3);CHKERRQ(ierr);
  ierr = PetscFree(rbuf4);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);

  ierr = PetscFree(cmap);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);
  ierr = PetscFree(lens);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}