1 /*$Id: baijov.c,v 1.60 2001/03/06 20:15:24 balay Exp balay $*/ 2 3 /* 4 Routines to compute overlapping regions of a parallel MPI matrix 5 and to find submatrices that were shared across processors. 6 */ 7 #include "src/mat/impls/baij/mpi/mpibaij.h" 8 #include "petscbt.h" 9 10 static int MatIncreaseOverlap_MPIBAIJ_Once(Mat,int,IS *); 11 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat,int,char **,int*,int**); 12 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat,int,int **,int**,int*); 13 EXTERN int MatGetRow_MPIBAIJ(Mat,int,int*,int**,Scalar**); 14 EXTERN int MatRestoreRow_MPIBAIJ(Mat,int,int*,int**,Scalar**); 15 16 #undef __FUNC__ 17 #define __FUNC__ "MatCompressIndicesGeneral_MPIBAIJ" 18 static int MatCompressIndicesGeneral_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out) 19 { 20 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data; 21 int ierr,isz,bs = baij->bs,n,i,j,*idx,ival; 22 #if defined (PETSC_USE_CTABLE) 23 PetscTable gid1_lid1; 24 int tt, gid1, *nidx; 25 PetscTablePosition tpos; 26 #else 27 int Nbs,*nidx; 28 PetscBT table; 29 #endif 30 31 PetscFunctionBegin; 32 #if defined (PETSC_USE_CTABLE) 33 ierr = PetscTableCreate(baij->mbs,&gid1_lid1);CHKERRQ(ierr); 34 #else 35 Nbs = baij->Nbs; 36 ierr = PetscMalloc((Nbs+1)*sizeof(int),&nidx);CHKERRQ(ierr); 37 ierr = PetscBTCreate(Nbs,table);CHKERRQ(ierr); 38 #endif 39 for (i=0; i<imax; i++) { 40 isz = 0; 41 #if defined (PETSC_USE_CTABLE) 42 ierr = PetscTableRemoveAll(gid1_lid1);CHKERRQ(ierr); 43 #else 44 ierr = PetscBTMemzero(Nbs,table);CHKERRQ(ierr); 45 #endif 46 ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); 47 ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr); 48 for (j=0; j<n ; j++) { 49 ival = idx[j]/bs; /* convert the indices into block indices */ 50 #if defined (PETSC_USE_CTABLE) 51 ierr = PetscTableFind(gid1_lid1,ival+1,&tt);CHKERRQ(ierr); 52 if (!tt) { 53 ierr = PetscTableAdd(gid1_lid1,ival+1,isz+1);CHKERRQ(ierr); 54 isz++; 55 } 56 #else 57 if (ival>Nbs) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"index greater than mat-dim"); 58 if(!PetscBTLookupSet(table,ival)) { nidx[isz++] = ival;} 59 #endif 60 } 61 ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr); 62 #if defined (PETSC_USE_CTABLE) 63 ierr = PetscMalloc((isz+1)*sizeof(int),&nidx);CHKERRQ(ierr); 64 ierr = PetscTableGetHeadPosition(gid1_lid1,&tpos);CHKERRQ(ierr); 65 j = 0; 66 while (tpos) { 67 ierr = PetscTableGetNext(gid1_lid1,&tpos,&gid1,&tt);CHKERRQ(ierr); 68 if (tt-- > isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"index greater than array-dim"); } 69 nidx[tt] = gid1 - 1; 70 j++; 71 } 72 if (j != isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"table error: jj != isz"); } 73 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));CHKERRQ(ierr); 74 ierr = PetscFree(nidx);CHKERRQ(ierr); 75 #else 76 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));CHKERRQ(ierr); 77 #endif 78 } 79 #if defined (PETSC_USE_CTABLE) 80 ierr = PetscTableDelete(gid1_lid1);CHKERRQ(ierr); 81 #else 82 ierr = PetscBTDestroy(table);CHKERRQ(ierr); 83 ierr = PetscFree(nidx);CHKERRQ(ierr); 84 #endif 85 PetscFunctionReturn(0); 86 } 87 88 #undef __FUNC__ 89 #define __FUNC__ "MatCompressIndicesSorted_MPIBAIJ" 90 static int MatCompressIndicesSorted_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out) 91 { 92 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data; 93 int ierr,bs=baij->bs,i,j,k,val,n,*idx,*nidx,*idx_local; 94 PetscTruth flg; 95 #if defined (PETSC_USE_CTABLE) 96 int maxsz; 97 #else 98 int Nbs=baij->Nbs; 99 #endif 100 PetscFunctionBegin; 101 for (i=0; i<imax; i++) { 102 ierr = ISSorted(is_in[i],&flg);CHKERRQ(ierr); 103 if (!flg) 
SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Indices are not sorted"); 104 } 105 #if defined (PETSC_USE_CTABLE) 106 /* Now check max size */ 107 for (i=0,maxsz=0; i<imax; i++) { 108 ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); 109 ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr); 110 if (n%bs !=0) SETERRQ(1,"Indices are not block ordered"); 111 n = n/bs; /* The reduced index size */ 112 if (n > maxsz) maxsz = n; 113 } 114 ierr = PetscMalloc((maxsz+1)*sizeof(int),&nidx);CHKERRQ(ierr); 115 #else 116 ierr = PetscMalloc((Nbs+1)*sizeof(int),&nidx);CHKERRQ(ierr); 117 #endif 118 /* Now check if the indices are in block order */ 119 for (i=0; i<imax; i++) { 120 ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); 121 ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr); 122 if (n%bs !=0) SETERRQ(1,"Indices are not block ordered"); 123 124 n = n/bs; /* The reduced index size */ 125 idx_local = idx; 126 for (j=0; j<n ; j++) { 127 val = idx_local[0]; 128 if (val%bs != 0) SETERRQ(1,"Indices are not block ordered"); 129 for (k=0; k<bs; k++) { 130 if (val+k != idx_local[k]) SETERRQ(1,"Indices are not block ordered"); 131 } 132 nidx[j] = val/bs; 133 idx_local +=bs; 134 } 135 ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr); 136 ierr = ISCreateGeneral(PETSC_COMM_SELF,n,nidx,(is_out+i));CHKERRQ(ierr); 137 } 138 ierr = PetscFree(nidx);CHKERRQ(ierr); 139 140 PetscFunctionReturn(0); 141 } 142 143 #undef __FUNC__ 144 #define __FUNC__ "MatExpandIndices_MPIBAIJ" 145 static int MatExpandIndices_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out) 146 { 147 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)C->data; 148 int ierr,bs = baij->bs,n,i,j,k,*idx,*nidx; 149 #if defined (PETSC_USE_CTABLE) 150 int maxsz; 151 #else 152 int Nbs = baij->Nbs; 153 #endif 154 PetscFunctionBegin; 155 156 #if defined (PETSC_USE_CTABLE) 157 /* Now check max size */ 158 for (i=0,maxsz=0; i<imax; i++) { 159 ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); 160 ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr); 161 if (n*bs > maxsz) maxsz = n*bs; 162 } 163 ierr = PetscMalloc((maxsz+1)*sizeof(int),&nidx);CHKERRQ(ierr); 164 #else 165 ierr = PetscMalloc((Nbs*bs+1)*sizeof(int),&nidx);CHKERRQ(ierr); 166 #endif 167 168 for (i=0; i<imax; i++) { 169 ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr); 170 ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr); 171 for (j=0; j<n ; ++j){ 172 for (k=0; k<bs; k++) 173 nidx[j*bs+k] = idx[j]*bs+k; 174 } 175 ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr); 176 ierr = ISCreateGeneral(PETSC_COMM_SELF,n*bs,nidx,is_out+i);CHKERRQ(ierr); 177 } 178 ierr = PetscFree(nidx);CHKERRQ(ierr); 179 PetscFunctionReturn(0); 180 } 181 182 183 #undef __FUNC__ 184 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ" 185 int MatIncreaseOverlap_MPIBAIJ(Mat C,int imax,IS *is,int ov) 186 { 187 int i,ierr; 188 IS *is_new; 189 190 PetscFunctionBegin; 191 ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr); 192 /* Convert the indices into block format */ 193 ierr = MatCompressIndicesGeneral_MPIBAIJ(C,imax,is,is_new);CHKERRQ(ierr); 194 if (ov < 0){ SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");} 195 for (i=0; i<ov; ++i) { 196 ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr); 197 } 198 for (i=0; i<imax; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);} 199 ierr = MatExpandIndices_MPIBAIJ(C,imax,is_new,is);CHKERRQ(ierr); 200 for (i=0; i<imax; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);} 201 ierr = PetscFree(is_new);CHKERRQ(ierr); 202 PetscFunctionReturn(0); 203 } 204 205 /* 206 Sample message format: 207 If a processor A wants 
   processor B to process some elements corresponding
   to index sets is[1], is[5]
   mesg [0] = 2   (no of index sets in the mesg)
   -----------
   mesg [1] = 1   => is[1]
   mesg [2] = sizeof(is[1]);
   -----------
   mesg [5] = 5   => is[5]
   mesg [6] = sizeof(is[5]);
   -----------
   mesg [7]
   mesg [n]       data(is[1])
   -----------
   mesg[n+1]
   mesg[m]        data(is[5])
   -----------

   Notes:
   nrqs - no of requests sent (or to be sent out)
   nrqr - no of requests received (which have to be or which have been processed)
*/
#undef __FUNC__
#define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Once"
static int MatIncreaseOverlap_MPIBAIJ_Once(Mat C,int imax,IS *is)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  int         **idx,*n,*w1,*w2,*w3,*w4,*rtable,**data,len,*idx_i;
  int         size,rank,Mbs,i,j,k,ierr,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
  int         *ctr,*pa,*tmp,nrqr,*isz,*isz1,**xdata,**rbuf2;
  int         *onodes1,*olengths1,tag1,tag2,*onodes2,*olengths2;
  PetscBT     *table;
  MPI_Comm    comm;
  MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status  *s_status,*recv_status;

  PetscFunctionBegin;
  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  len    = (imax+1)*sizeof(int*) + (imax + Mbs)*sizeof(int);
  ierr   = PetscMalloc(len,&idx);CHKERRQ(ierr);
  n      = (int*)(idx + imax);
  rtable = n + imax;

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* Create hash table for the mapping :row -> proc*/
  for (i=0,j=0; i<size; i++) {
    len = c->rowners[i+1];
    for (; j<len; j++) {
      rtable[j] = i;
    }
  }

  /* evaluate communication - mesg to who,length of mesg, and buffer space
     required.
Based on this, buffers are allocated, and data copied into them*/ 271 ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr);/* mesg size */ 272 w2 = w1 + size; /* if w2[i] marked, then a message to proc i*/ 273 w3 = w2 + size; /* no of IS that needs to be sent to proc i */ 274 w4 = w3 + size; /* temp work space used in determining w1, w2, w3 */ 275 ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/ 276 for (i=0; i<imax; i++) { 277 ierr = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/ 278 idx_i = idx[i]; 279 len = n[i]; 280 for (j=0; j<len; j++) { 281 row = idx_i[j]; 282 if (row < 0) { 283 SETERRQ(1,"Index set cannot have negative entries"); 284 } 285 proc = rtable[row]; 286 w4[proc]++; 287 } 288 for (j=0; j<size; j++){ 289 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 290 } 291 } 292 293 nrqs = 0; /* no of outgoing messages */ 294 msz = 0; /* total mesg length (for all proc */ 295 w1[rank] = 0; /* no mesg sent to intself */ 296 w3[rank] = 0; 297 for (i=0; i<size; i++) { 298 if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 299 } 300 /* pa - is list of processors to communicate with */ 301 ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr); 302 for (i=0,j=0; i<size; i++) { 303 if (w1[i]) {pa[j] = i; j++;} 304 } 305 306 /* Each message would have a header = 1 + 2*(no of IS) + data */ 307 for (i=0; i<nrqs; i++) { 308 j = pa[i]; 309 w1[j] += w2[j] + 2*w3[j]; 310 msz += w1[j]; 311 } 312 313 /* Determine the number of messages to expect, their lengths, from from-ids */ 314 ierr = PetscGatherNumberOfMessages(comm,nrqs,w2,w1,&nrqr);CHKERRQ(ierr); 315 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 316 317 /* Now post the Irecvs corresponding to these messages */ 318 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr); 319 320 /* Allocate Memory for outgoing messages */ 321 len = 2*size*sizeof(int*) + (size+msz)*sizeof(int); 322 ierr = PetscMalloc(len,&outdat);CHKERRQ(ierr); 323 ptr = outdat + size; /* Pointers to the data in outgoing buffers */ 324 ierr = PetscMemzero(outdat,2*size*sizeof(int*));CHKERRQ(ierr); 325 tmp = (int*)(outdat + 2*size); 326 ctr = tmp + msz; 327 328 { 329 int *iptr = tmp,ict = 0; 330 for (i=0; i<nrqs; i++) { 331 j = pa[i]; 332 iptr += ict; 333 outdat[j] = iptr; 334 ict = w1[j]; 335 } 336 } 337 338 /* Form the outgoing messages */ 339 /*plug in the headers*/ 340 for (i=0; i<nrqs; i++) { 341 j = pa[i]; 342 outdat[j][0] = 0; 343 ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr); 344 ptr[j] = outdat[j] + 2*w3[j] + 1; 345 } 346 347 /* Memory for doing local proc's work*/ 348 { 349 int *d_p; 350 char *t_p; 351 352 len = (imax)*(sizeof(PetscBT) + sizeof(int*)+ sizeof(int)) + 353 (Mbs)*imax*sizeof(int) + (Mbs/BITSPERBYTE+1)*imax*sizeof(char) + 1; 354 ierr = PetscMalloc(len,&table);CHKERRQ(ierr); 355 ierr = PetscMemzero(table,len);CHKERRQ(ierr); 356 data = (int **)(table + imax); 357 isz = (int *)(data + imax); 358 d_p = (int *)(isz + imax); 359 t_p = (char *)(d_p + Mbs*imax); 360 for (i=0; i<imax; i++) { 361 table[i] = t_p + (Mbs/BITSPERBYTE+1)*i; 362 data[i] = d_p + (Mbs)*i; 363 } 364 } 365 366 /* Parse the IS and update local tables and the outgoing buf with the data*/ 367 { 368 int n_i,*data_i,isz_i,*outdat_j,ctr_j; 369 PetscBT table_i; 370 371 for (i=0; i<imax; i++) { 372 ierr = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr); 373 n_i = n[i]; 374 table_i = table[i]; 375 idx_i = idx[i]; 376 data_i 
= data[i]; 377 isz_i = isz[i]; 378 for (j=0; j<n_i; j++) { /* parse the indices of each IS */ 379 row = idx_i[j]; 380 proc = rtable[row]; 381 if (proc != rank) { /* copy to the outgoing buffer */ 382 ctr[proc]++; 383 *ptr[proc] = row; 384 ptr[proc]++; 385 } 386 else { /* Update the local table */ 387 if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;} 388 } 389 } 390 /* Update the headers for the current IS */ 391 for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */ 392 if ((ctr_j = ctr[j])) { 393 outdat_j = outdat[j]; 394 k = ++outdat_j[0]; 395 outdat_j[2*k] = ctr_j; 396 outdat_j[2*k-1] = i; 397 } 398 } 399 isz[i] = isz_i; 400 } 401 } 402 403 /* Now post the sends */ 404 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr); 405 for (i=0; i<nrqs; ++i) { 406 j = pa[i]; 407 ierr = MPI_Isend(outdat[j],w1[j],MPI_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr); 408 } 409 410 /* No longer need the original indices*/ 411 for (i=0; i<imax; ++i) { 412 ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr); 413 } 414 ierr = PetscFree(idx);CHKERRQ(ierr); 415 416 for (i=0; i<imax; ++i) { 417 ierr = ISDestroy(is[i]);CHKERRQ(ierr); 418 } 419 420 /* Do Local work*/ 421 ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr); 422 423 /* Receive messages*/ 424 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr); 425 ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr); 426 427 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr); 428 ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr); 429 430 /* Phase 1 sends are complete - deallocate buffers */ 431 ierr = PetscFree(outdat);CHKERRQ(ierr); 432 ierr = PetscFree(w1);CHKERRQ(ierr); 433 434 ierr = PetscMalloc((nrqr+1)*sizeof(int *),&xdata);CHKERRQ(ierr); 435 ierr = PetscMalloc((nrqr+1)*sizeof(int),&isz1);CHKERRQ(ierr); 436 ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr); 437 ierr = PetscFree(rbuf);CHKERRQ(ierr); 438 439 /* Send the data back*/ 440 /* Do a global reduction to know the buffer space req for incoming messages*/ 441 { 442 int *rw1; 443 444 ierr = PetscMalloc(size*sizeof(int),&rw1);CHKERRQ(ierr); 445 ierr = PetscMemzero(rw1,size*sizeof(int));CHKERRQ(ierr); 446 447 for (i=0; i<nrqr; ++i) { 448 proc = recv_status[i].MPI_SOURCE; 449 if (proc != onodes1[i]) SETERRQ(1,"MPI_SOURCE mismatch"); 450 rw1[proc] = isz1[i]; 451 } 452 453 ierr = PetscFree(onodes1);CHKERRQ(ierr); 454 ierr = PetscFree(olengths1);CHKERRQ(ierr); 455 456 /* Determine the number of messages to expect, their lengths, from from-ids */ 457 ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr); 458 PetscFree(rw1); 459 } 460 /* Now post the Irecvs corresponding to these messages */ 461 ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr); 462 463 /* Now post the sends */ 464 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr); 465 for (i=0; i<nrqr; ++i) { 466 j = recv_status[i].MPI_SOURCE; 467 ierr = MPI_Isend(xdata[i],isz1[i],MPI_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr); 468 } 469 470 /* receive work done on other processors*/ 471 { 472 int index,is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax; 473 PetscBT table_i; 474 MPI_Status *status2; 475 476 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr); 477 478 for (i=0; i<nrqs; ++i) { 479 ierr = MPI_Waitany(nrqs,r_waits2,&index,status2+i);CHKERRQ(ierr); 480 /* Process the message*/ 481 rbuf2_i = 
rbuf2[index]; 482 ct1 = 2*rbuf2_i[0]+1; 483 jmax = rbuf2[index][0]; 484 for (j=1; j<=jmax; j++) { 485 max = rbuf2_i[2*j]; 486 is_no = rbuf2_i[2*j-1]; 487 isz_i = isz[is_no]; 488 data_i = data[is_no]; 489 table_i = table[is_no]; 490 for (k=0; k<max; k++,ct1++) { 491 row = rbuf2_i[ct1]; 492 if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;} 493 } 494 isz[is_no] = isz_i; 495 } 496 } 497 ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr); 498 ierr = PetscFree(status2);CHKERRQ(ierr); 499 } 500 501 for (i=0; i<imax; ++i) { 502 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);CHKERRQ(ierr); 503 } 504 505 506 ierr = PetscFree(onodes2);CHKERRQ(ierr); 507 ierr = PetscFree(olengths2);CHKERRQ(ierr); 508 509 ierr = PetscFree(pa);CHKERRQ(ierr); 510 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 511 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 512 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 513 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 514 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 515 ierr = PetscFree(table);CHKERRQ(ierr); 516 ierr = PetscFree(s_status);CHKERRQ(ierr); 517 ierr = PetscFree(recv_status);CHKERRQ(ierr); 518 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 519 ierr = PetscFree(xdata);CHKERRQ(ierr); 520 ierr = PetscFree(isz1);CHKERRQ(ierr); 521 PetscFunctionReturn(0); 522 } 523 524 #undef __FUNC__ 525 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Local" 526 /* 527 MatIncreaseOverlap_MPIBAIJ_Local - Called by MatincreaseOverlap, to do 528 the work on the local processor. 529 530 Inputs: 531 C - MAT_MPIBAIJ; 532 imax - total no of index sets processed at a time; 533 table - an array of char - size = Mbs bits. 534 535 Output: 536 isz - array containing the count of the solution elements correspondign 537 to each index set; 538 data - pointer to the solutions 539 */ 540 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat C,int imax,PetscBT *table,int *isz,int **data) 541 { 542 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 543 Mat A = c->A,B = c->B; 544 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 545 int start,end,val,max,rstart,cstart,*ai,*aj; 546 int *bi,*bj,*garray,i,j,k,row,*data_i,isz_i; 547 PetscBT table_i; 548 549 PetscFunctionBegin; 550 rstart = c->rstart; 551 cstart = c->cstart; 552 ai = a->i; 553 aj = a->j; 554 bi = b->i; 555 bj = b->j; 556 garray = c->garray; 557 558 559 for (i=0; i<imax; i++) { 560 data_i = data[i]; 561 table_i = table[i]; 562 isz_i = isz[i]; 563 for (j=0,max=isz[i]; j<max; j++) { 564 row = data_i[j] - rstart; 565 start = ai[row]; 566 end = ai[row+1]; 567 for (k=start; k<end; k++) { /* Amat */ 568 val = aj[k] + cstart; 569 if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;} 570 } 571 start = bi[row]; 572 end = bi[row+1]; 573 for (k=start; k<end; k++) { /* Bmat */ 574 val = garray[bj[k]]; 575 if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;} 576 } 577 } 578 isz[i] = isz_i; 579 } 580 PetscFunctionReturn(0); 581 } 582 #undef __FUNC__ 583 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Receive" 584 /* 585 MatIncreaseOverlap_MPIBAIJ_Receive - Process the recieved messages, 586 and return the output 587 588 Input: 589 C - the matrix 590 nrqr - no of messages being processed. 
591 rbuf - an array of pointers to the recieved requests 592 593 Output: 594 xdata - array of messages to be sent back 595 isz1 - size of each message 596 597 For better efficiency perhaps we should malloc seperately each xdata[i], 598 then if a remalloc is required we need only copy the data for that one row 599 rather then all previous rows as it is now where a single large chunck of 600 memory is used. 601 602 */ 603 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,int nrqr,int **rbuf,int **xdata,int * isz1) 604 { 605 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 606 Mat A = c->A,B = c->B; 607 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 608 int rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k; 609 int row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end; 610 int val,max1,max2,rank,Mbs,no_malloc =0,*tmp,new_estimate,ctr; 611 int *rbuf_i,kmax,rbuf_0,ierr; 612 PetscBT xtable; 613 614 PetscFunctionBegin; 615 rank = c->rank; 616 Mbs = c->Mbs; 617 rstart = c->rstart; 618 cstart = c->cstart; 619 ai = a->i; 620 aj = a->j; 621 bi = b->i; 622 bj = b->j; 623 garray = c->garray; 624 625 626 for (i=0,ct=0,total_sz=0; i<nrqr; ++i) { 627 rbuf_i = rbuf[i]; 628 rbuf_0 = rbuf_i[0]; 629 ct += rbuf_0; 630 for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; } 631 } 632 633 if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs; 634 else max1 = 1; 635 mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1); 636 ierr = PetscMalloc(mem_estimate*sizeof(int),&xdata[0]);CHKERRQ(ierr); 637 ++no_malloc; 638 ierr = PetscBTCreate(Mbs,xtable);CHKERRQ(ierr); 639 ierr = PetscMemzero(isz1,nrqr*sizeof(int));CHKERRQ(ierr); 640 641 ct3 = 0; 642 for (i=0; i<nrqr; i++) { /* for easch mesg from proc i */ 643 rbuf_i = rbuf[i]; 644 rbuf_0 = rbuf_i[0]; 645 ct1 = 2*rbuf_0+1; 646 ct2 = ct1; 647 ct3 += ct1; 648 for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/ 649 ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr); 650 oct2 = ct2; 651 kmax = rbuf_i[2*j]; 652 for (k=0; k<kmax; k++,ct1++) { 653 row = rbuf_i[ct1]; 654 if (!PetscBTLookupSet(xtable,row)) { 655 if (!(ct3 < mem_estimate)) { 656 new_estimate = (int)(1.5*mem_estimate)+1; 657 ierr = PetscMalloc(new_estimate * sizeof(int),&tmp);CHKERRQ(ierr); 658 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr); 659 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 660 xdata[0] = tmp; 661 mem_estimate = new_estimate; ++no_malloc; 662 for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];} 663 } 664 xdata[i][ct2++] = row; 665 ct3++; 666 } 667 } 668 for (k=oct2,max2=ct2; k<max2; k++) { 669 row = xdata[i][k] - rstart; 670 start = ai[row]; 671 end = ai[row+1]; 672 for (l=start; l<end; l++) { 673 val = aj[l] + cstart; 674 if (!PetscBTLookupSet(xtable,val)) { 675 if (!(ct3 < mem_estimate)) { 676 new_estimate = (int)(1.5*mem_estimate)+1; 677 ierr = PetscMalloc(new_estimate * sizeof(int),&tmp);CHKERRQ(ierr); 678 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr); 679 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 680 xdata[0] = tmp; 681 mem_estimate = new_estimate; ++no_malloc; 682 for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];} 683 } 684 xdata[i][ct2++] = val; 685 ct3++; 686 } 687 } 688 start = bi[row]; 689 end = bi[row+1]; 690 for (l=start; l<end; l++) { 691 val = garray[bj[l]]; 692 if (!PetscBTLookupSet(xtable,val)) { 693 if (!(ct3 < mem_estimate)) { 694 new_estimate = (int)(1.5*mem_estimate)+1; 695 ierr = PetscMalloc(new_estimate * sizeof(int),&tmp);CHKERRQ(ierr); 696 ierr = 
PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header*/
      xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
  PetscLogInfo(0,"MatIncreaseOverlap_MPIBAIJ:[%d] Allocated %d bytes, required %d, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
  PetscFunctionReturn(0);
}

static int MatGetSubMatrices_MPIBAIJ_local(Mat,int,IS *,IS *,MatReuse,Mat *);

#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIBAIJ"
int MatGetSubMatrices_MPIBAIJ(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
{
  IS          *isrow_new,*iscol_new;
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  int         nmax,nstages_local,nstages,i,pos,max_no,ierr;

  PetscFunctionBegin;
  /* The compression and expansion should be avoided. It does not point out
     errors and might change the indices, hence it is buggy */

  ierr      = PetscMalloc(2*(ismax+1)*sizeof(IS),&isrow_new);CHKERRQ(ierr);
  iscol_new = isrow_new + ismax;
  ierr = MatCompressIndicesSorted_MPIBAIJ(C,ismax,isrow,isrow_new);CHKERRQ(ierr);
  ierr = MatCompressIndicesSorted_MPIBAIJ(C,ismax,iscol,iscol_new);CHKERRQ(ierr);

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->Nbs * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);
  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }

  for (i=0; i<ismax; i++) {
    ierr = ISDestroy(isrow_new[i]);CHKERRQ(ierr);
    ierr = ISDestroy(iscol_new[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(isrow_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#if defined (PETSC_USE_CTABLE)
#undef __FUNC__
#define __FUNC__ "PetscGetProc"
int PetscGetProc(const int gid, const int numprocs, const int proc_gnode[], int *proc)
{
  int nGlobalNd = proc_gnode[numprocs];
  int fproc     = (int) ((float)gid * (float)numprocs / (float)nGlobalNd + 0.5);

  PetscFunctionBegin;
  /* if(fproc < 0) SETERRQ(1,"fproc < 0");*/
  if (fproc > numprocs) fproc = numprocs;
  while (gid < proc_gnode[fproc] || gid >= proc_gnode[fproc+1]) {
    if (gid < proc_gnode[fproc]) fproc--;
    else                         fproc++;
  }
  /* if(fproc<0 || fproc>=numprocs) { SETERRQ(1,"fproc < 0 || fproc >= numprocs"); }*/
  *proc = fproc;
  PetscFunctionReturn(0);
}
#endif

/* -------------------------------------------------------------------------*/
#undef __FUNC__
#define __FUNC__ "MatGetSubMatrices_MPIBAIJ_local"
static
int MatGetSubMatrices_MPIBAIJ_local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats) 791 { 792 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 793 Mat A = c->A; 794 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat; 795 int **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,start,end,size; 796 int **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,ierr,**rbuf1,row,proc; 797 int nrqs,msz,**ptr,index,*req_size,*ctr,*pa,*tmp,tcol,nrqr; 798 int **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2; 799 int **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i; 800 int len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 801 int bs=c->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB; 802 int cstart = c->cstart,nzA,nzB,*a_i=a->i,*b_i=b->i,imark; 803 int *bmap = c->garray,ctmp,rstart=c->rstart,tag0,tag1,tag2,tag3; 804 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 805 MPI_Request *r_waits4,*s_waits3,*s_waits4; 806 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2; 807 MPI_Status *r_status3,*r_status4,*s_status4; 808 MPI_Comm comm; 809 MatScalar **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i,*vworkA,*vworkB; 810 MatScalar *a_a=a->a,*b_a=b->a; 811 PetscTruth flag; 812 int *onodes1,*olengths1; 813 814 #if defined (PETSC_USE_CTABLE) 815 int tt; 816 PetscTable *rowmaps,*colmaps,lrow1_grow1,lcol1_gcol1; 817 #else 818 int **cmap,*cmap_i,*rtable,*rmap_i,**rmap, Mbs = c->Mbs; 819 #endif 820 821 PetscFunctionBegin; 822 comm = C->comm; 823 tag0 = C->tag; 824 size = c->size; 825 rank = c->rank; 826 827 /* Get some new tags to keep the communication clean */ 828 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 829 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 830 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 831 832 /* Check if the col indices are sorted */ 833 for (i=0; i<ismax; i++) { 834 ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr); 835 if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IS is not sorted"); 836 } 837 838 len = (2*ismax+1)*(sizeof(int*)+ sizeof(int)); 839 #if !defined (PETSC_USE_CTABLE) 840 len += (Mbs+1)*sizeof(int); 841 #endif 842 ierr = PetscMalloc(len,&irow);CHKERRQ(ierr); 843 icol = irow + ismax; 844 nrow = (int*)(icol + ismax); 845 ncol = nrow + ismax; 846 #if !defined (PETSC_USE_CTABLE) 847 rtable = ncol + ismax; 848 /* Create hash table for the mapping :row -> proc*/ 849 for (i=0,j=0; i<size; i++) { 850 jmax = c->rowners[i+1]; 851 for (; j<jmax; j++) { 852 rtable[j] = i; 853 } 854 } 855 #endif 856 857 for (i=0; i<ismax; i++) { 858 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 859 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 860 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 861 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 862 } 863 864 /* evaluate communication - mesg to who,length of mesg,and buffer space 865 required. 
     Based on this, buffers are allocated, and data copied into them*/
  ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;      /* if w2[i] marked, then a message to proc i*/
  w3   = w2 + size;      /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;      /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0;              /* no of outgoing messages */
  msz      = 0;              /* total mesg length for all procs */
  w1[rank] = 0;              /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2* w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and the from-ids */
  ierr = PetscGatherNumberOfMessages(comm,nrqs,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate Memory for outgoing messages */
  len  = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp  = (int*)(ptr + size);
  ctr  = tmp + 2*msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialise the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
      row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc != rank) { /* copy to the outgoing buf*/
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {  /* assignment, not comparison: ctr_j is the count for proc j */
sbuf1_j = sbuf1[j]; 968 k = ++sbuf1_j[0]; 969 sbuf1_j[2*k] = ctr_j; 970 sbuf1_j[2*k-1] = i; 971 } 972 } 973 } 974 975 /* Now post the sends */ 976 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr); 977 for (i=0; i<nrqs; ++i) { 978 j = pa[i]; 979 ierr = MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 980 } 981 982 /* Post Recieves to capture the buffer size */ 983 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr); 984 ierr = PetscMalloc((nrqs+1)*sizeof(int *),&rbuf2);CHKERRQ(ierr); 985 rbuf2[0] = tmp + msz; 986 for (i=1; i<nrqs; ++i) { 987 j = pa[i]; 988 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 989 } 990 for (i=0; i<nrqs; ++i) { 991 j = pa[i]; 992 ierr = MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr); 993 } 994 995 /* Send to other procs the buf size they should allocate */ 996 997 /* Receive messages*/ 998 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr); 999 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr); 1000 len = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*); 1001 ierr = PetscMalloc(len,&sbuf2);CHKERRQ(ierr); 1002 req_size = (int*)(sbuf2 + nrqr); 1003 req_source = req_size + nrqr; 1004 1005 { 1006 Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data; 1007 int *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i; 1008 1009 for (i=0; i<nrqr; ++i) { 1010 ierr = MPI_Waitany(nrqr,r_waits1,&index,r_status1+i);CHKERRQ(ierr); 1011 req_size[index] = 0; 1012 rbuf1_i = rbuf1[index]; 1013 start = 2*rbuf1_i[0] + 1; 1014 ierr = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr); 1015 ierr = PetscMalloc(end*sizeof(int),&sbuf2[index]);CHKERRQ(ierr); 1016 sbuf2_i = sbuf2[index]; 1017 for (j=start; j<end; j++) { 1018 id = rbuf1_i[j] - rstart; 1019 ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id]; 1020 sbuf2_i[j] = ncols; 1021 req_size[index] += ncols; 1022 } 1023 req_source[index] = r_status1[i].MPI_SOURCE; 1024 /* form the header */ 1025 sbuf2_i[0] = req_size[index]; 1026 for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; } 1027 ierr = MPI_Isend(sbuf2_i,end,MPI_INT,req_source[index],tag1,comm,s_waits2+i);CHKERRQ(ierr); 1028 } 1029 } 1030 ierr = PetscFree(r_status1);CHKERRQ(ierr); 1031 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 1032 1033 /* recv buffer sizes */ 1034 /* Receive messages*/ 1035 1036 ierr = PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);CHKERRQ(ierr); 1037 ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr); 1038 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr); 1039 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr); 1040 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr); 1041 1042 for (i=0; i<nrqs; ++i) { 1043 ierr = MPI_Waitany(nrqs,r_waits2,&index,r_status2+i);CHKERRQ(ierr); 1044 ierr = PetscMalloc(rbuf2[index][0]*sizeof(int),&rbuf3[index]);CHKERRQ(ierr); 1045 ierr = PetscMalloc(rbuf2[index][0]*bs2*sizeof(MatScalar),&rbuf4[index]);CHKERRQ(ierr); 1046 ierr = MPI_Irecv(rbuf3[index],rbuf2[index][0],MPI_INT, 1047 r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+index);CHKERRQ(ierr); 1048 ierr = MPI_Irecv(rbuf4[index],rbuf2[index][0]*bs2,MPIU_MATSCALAR, 1049 r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+index);CHKERRQ(ierr); 1050 } 1051 ierr = PetscFree(r_status2);CHKERRQ(ierr); 1052 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 1053 1054 /* Wait on sends1 and sends2 */ 1055 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr); 1056 ierr = 
PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr); 1057 1058 ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr); 1059 ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr); 1060 ierr = PetscFree(s_status1);CHKERRQ(ierr); 1061 ierr = PetscFree(s_status2);CHKERRQ(ierr); 1062 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 1063 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 1064 1065 /* Now allocate buffers for a->j, and send them off */ 1066 ierr = PetscMalloc((nrqr+1)*sizeof(int *),&sbuf_aj);CHKERRQ(ierr); 1067 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1068 ierr = PetscMalloc((j+1)*sizeof(int),&sbuf_aj[0]);CHKERRQ(ierr); 1069 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 1070 1071 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr); 1072 { 1073 for (i=0; i<nrqr; i++) { 1074 rbuf1_i = rbuf1[i]; 1075 sbuf_aj_i = sbuf_aj[i]; 1076 ct1 = 2*rbuf1_i[0] + 1; 1077 ct2 = 0; 1078 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1079 kmax = rbuf1[i][2*j]; 1080 for (k=0; k<kmax; k++,ct1++) { 1081 row = rbuf1_i[ct1] - rstart; 1082 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 1083 ncols = nzA + nzB; 1084 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 1085 1086 /* load the column indices for this row into cols*/ 1087 cols = sbuf_aj_i + ct2; 1088 for (l=0; l<nzB; l++) { 1089 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp; 1090 else break; 1091 } 1092 imark = l; 1093 for (l=0; l<nzA; l++) cols[imark+l] = cstart + cworkA[l]; 1094 for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]]; 1095 ct2 += ncols; 1096 } 1097 } 1098 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr); 1099 } 1100 } 1101 ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr); 1102 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr); 1103 1104 /* Allocate buffers for a->a, and send them off */ 1105 ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar *),&sbuf_aa);CHKERRQ(ierr); 1106 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1107 ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr); 1108 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2; 1109 1110 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr); 1111 { 1112 for (i=0; i<nrqr; i++) { 1113 rbuf1_i = rbuf1[i]; 1114 sbuf_aa_i = sbuf_aa[i]; 1115 ct1 = 2*rbuf1_i[0]+1; 1116 ct2 = 0; 1117 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1118 kmax = rbuf1_i[2*j]; 1119 for (k=0; k<kmax; k++,ct1++) { 1120 row = rbuf1_i[ct1] - rstart; 1121 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 1122 ncols = nzA + nzB; 1123 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 1124 vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2; 1125 1126 /* load the column values for this row into vals*/ 1127 vals = sbuf_aa_i+ct2*bs2; 1128 for (l=0; l<nzB; l++) { 1129 if ((bmap[cworkB[l]]) < cstart) { 1130 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1131 } 1132 else break; 1133 } 1134 imark = l; 1135 for (l=0; l<nzA; l++) { 1136 ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1137 } 1138 for (l=imark; l<nzB; l++) { 1139 ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1140 } 1141 ct2 += ncols; 1142 } 1143 } 1144 ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr); 1145 } 1146 } 1147 ierr = 
PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr); 1148 ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr); 1149 ierr = PetscFree(rbuf1);CHKERRQ(ierr); 1150 1151 /* Form the matrix */ 1152 /* create col map */ 1153 { 1154 int *icol_i; 1155 #if defined (PETSC_USE_CTABLE) 1156 /* Create row map*/ 1157 ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&colmaps);CHKERRQ(ierr); 1158 for (i=0; i<ismax+1; i++) { 1159 ierr = PetscTableCreate(((i<ismax) ? ncol[i] : ncol[i-1])+1,&colmaps[i]);CHKERRQ(ierr); 1160 } 1161 #else 1162 len = (1+ismax)*sizeof(int*)+ ismax*c->Nbs*sizeof(int); 1163 ierr = PetscMalloc(len,&cmap);CHKERRQ(ierr); 1164 cmap[0] = (int *)(cmap + ismax); 1165 ierr = PetscMemzero(cmap[0],(1+ismax*c->Nbs)*sizeof(int));CHKERRQ(ierr); 1166 for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; } 1167 #endif 1168 for (i=0; i<ismax; i++) { 1169 jmax = ncol[i]; 1170 icol_i = icol[i]; 1171 #if defined (PETSC_USE_CTABLE) 1172 lcol1_gcol1 = colmaps[i]; 1173 for (j=0; j<jmax; j++) { 1174 ierr = PetscTableAdd(lcol1_gcol1,icol_i[j]+1,j+1);CHKERRQ(ierr); 1175 } 1176 #else 1177 cmap_i = cmap[i]; 1178 for (j=0; j<jmax; j++) { 1179 cmap_i[icol_i[j]] = j+1; 1180 } 1181 #endif 1182 } 1183 } 1184 1185 /* Create lens which is required for MatCreate... */ 1186 for (i=0,j=0; i<ismax; i++) { j += nrow[i]; } 1187 len = (1+ismax)*sizeof(int*)+ j*sizeof(int); 1188 ierr = PetscMalloc(len,&lens);CHKERRQ(ierr); 1189 lens[0] = (int *)(lens + ismax); 1190 ierr = PetscMemzero(lens[0],j*sizeof(int));CHKERRQ(ierr); 1191 for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; } 1192 1193 /* Update lens from local data */ 1194 for (i=0; i<ismax; i++) { 1195 jmax = nrow[i]; 1196 #if defined (PETSC_USE_CTABLE) 1197 lcol1_gcol1 = colmaps[i]; 1198 #else 1199 cmap_i = cmap[i]; 1200 #endif 1201 irow_i = irow[i]; 1202 lens_i = lens[i]; 1203 for (j=0; j<jmax; j++) { 1204 row = irow_i[j]; 1205 #if defined (PETSC_USE_CTABLE) 1206 ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr); 1207 #else 1208 proc = rtable[row]; 1209 #endif 1210 if (proc == rank) { 1211 /* Get indices from matA and then from matB */ 1212 row = row - rstart; 1213 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 1214 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 1215 #if defined (PETSC_USE_CTABLE) 1216 for (k=0; k<nzA; k++) { 1217 ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr); 1218 if (tt) { lens_i[j]++; } 1219 } 1220 for (k=0; k<nzB; k++) { 1221 ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr); 1222 if (tt) { lens_i[j]++; } 1223 } 1224 #else 1225 for (k=0; k<nzA; k++) { 1226 if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; } 1227 } 1228 for (k=0; k<nzB; k++) { 1229 if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; } 1230 } 1231 #endif 1232 } 1233 } 1234 } 1235 #if defined (PETSC_USE_CTABLE) 1236 /* Create row map*/ 1237 ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rowmaps);CHKERRQ(ierr); 1238 for (i=0; i<ismax+1; i++){ 1239 ierr = PetscTableCreate((i<ismax) ? 
nrow[i] : nrow[i-1],&rowmaps[i]);CHKERRQ(ierr); 1240 } 1241 #else 1242 /* Create row map*/ 1243 len = (1+ismax)*sizeof(int*)+ ismax*Mbs*sizeof(int); 1244 ierr = PetscMalloc(len,&rmap);CHKERRQ(ierr); 1245 rmap[0] = (int *)(rmap + ismax); 1246 ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(int));CHKERRQ(ierr); 1247 for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;} 1248 #endif 1249 for (i=0; i<ismax; i++) { 1250 irow_i = irow[i]; 1251 jmax = nrow[i]; 1252 #if defined (PETSC_USE_CTABLE) 1253 lrow1_grow1 = rowmaps[i]; 1254 for (j=0; j<jmax; j++) { 1255 ierr = PetscTableAdd(lrow1_grow1,irow_i[j]+1,j+1);CHKERRQ(ierr); 1256 } 1257 #else 1258 rmap_i = rmap[i]; 1259 for (j=0; j<jmax; j++) { 1260 rmap_i[irow_i[j]] = j; 1261 } 1262 #endif 1263 } 1264 1265 /* Update lens from offproc data */ 1266 { 1267 int *rbuf2_i,*rbuf3_i,*sbuf1_i; 1268 1269 for (tmp2=0; tmp2<nrqs; tmp2++) { 1270 ierr = MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);CHKERRQ(ierr); 1271 index = pa[i]; 1272 sbuf1_i = sbuf1[index]; 1273 jmax = sbuf1_i[0]; 1274 ct1 = 2*jmax+1; 1275 ct2 = 0; 1276 rbuf2_i = rbuf2[i]; 1277 rbuf3_i = rbuf3[i]; 1278 for (j=1; j<=jmax; j++) { 1279 is_no = sbuf1_i[2*j-1]; 1280 max1 = sbuf1_i[2*j]; 1281 lens_i = lens[is_no]; 1282 #if defined (PETSC_USE_CTABLE) 1283 lcol1_gcol1 = colmaps[is_no]; 1284 lrow1_grow1 = rowmaps[is_no]; 1285 #else 1286 cmap_i = cmap[is_no]; 1287 rmap_i = rmap[is_no]; 1288 #endif 1289 for (k=0; k<max1; k++,ct1++) { 1290 #if defined (PETSC_USE_CTABLE) 1291 ierr = PetscTableFind(lrow1_grow1,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 1292 row--; 1293 if(row < 0) { SETERRQ(1,"row not found in table"); } 1294 #else 1295 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 1296 #endif 1297 max2 = rbuf2_i[ct1]; 1298 for (l=0; l<max2; l++,ct2++) { 1299 #if defined (PETSC_USE_CTABLE) 1300 ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr); 1301 if (tt) { 1302 lens_i[row]++; 1303 } 1304 #else 1305 if (cmap_i[rbuf3_i[ct2]]) { 1306 lens_i[row]++; 1307 } 1308 #endif 1309 } 1310 } 1311 } 1312 } 1313 } 1314 ierr = PetscFree(r_status3);CHKERRQ(ierr); 1315 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 1316 ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr); 1317 ierr = PetscFree(s_status3);CHKERRQ(ierr); 1318 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 1319 1320 /* Create the submatrices */ 1321 if (scall == MAT_REUSE_MATRIX) { 1322 /* 1323 Assumes new rows are same length as the old rows, hence bug! 1324 */ 1325 for (i=0; i<ismax; i++) { 1326 mat = (Mat_SeqBAIJ *)(submats[i]->data); 1327 if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || mat->bs != bs)) { 1328 SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size"); 1329 } 1330 ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(int),&flag);CHKERRQ(ierr); 1331 if (flag == PETSC_FALSE) { 1332 SETERRQ(PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. 
wrong no of nonzeros"); 1333 } 1334 /* Initial matrix as if empty */ 1335 ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(int));CHKERRQ(ierr); 1336 submats[i]->factor = C->factor; 1337 } 1338 } else { 1339 for (i=0; i<ismax; i++) { 1340 ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,a->bs,nrow[i]*bs,ncol[i]*bs,0,lens[i],submats+i);CHKERRQ(ierr); 1341 } 1342 } 1343 1344 /* Assemble the matrices */ 1345 /* First assemble the local rows */ 1346 { 1347 int ilen_row,*imat_ilen,*imat_j,*imat_i; 1348 MatScalar *imat_a; 1349 1350 for (i=0; i<ismax; i++) { 1351 mat = (Mat_SeqBAIJ*)submats[i]->data; 1352 imat_ilen = mat->ilen; 1353 imat_j = mat->j; 1354 imat_i = mat->i; 1355 imat_a = mat->a; 1356 1357 #if defined (PETSC_USE_CTABLE) 1358 lcol1_gcol1 = colmaps[i]; 1359 lrow1_grow1 = rowmaps[i]; 1360 #else 1361 cmap_i = cmap[i]; 1362 rmap_i = rmap[i]; 1363 #endif 1364 irow_i = irow[i]; 1365 jmax = nrow[i]; 1366 for (j=0; j<jmax; j++) { 1367 row = irow_i[j]; 1368 #if defined (PETSC_USE_CTABLE) 1369 ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr); 1370 #else 1371 proc = rtable[row]; 1372 #endif 1373 if (proc == rank) { 1374 row = row - rstart; 1375 nzA = a_i[row+1] - a_i[row]; 1376 nzB = b_i[row+1] - b_i[row]; 1377 cworkA = a_j + a_i[row]; 1378 cworkB = b_j + b_i[row]; 1379 vworkA = a_a + a_i[row]*bs2; 1380 vworkB = b_a + b_i[row]*bs2; 1381 #if defined (PETSC_USE_CTABLE) 1382 ierr = PetscTableFind(lrow1_grow1,row+rstart+1,&row);CHKERRQ(ierr); 1383 row--; 1384 if (row < 0) { SETERRQ(1,"row not found in table"); } 1385 #else 1386 row = rmap_i[row + rstart]; 1387 #endif 1388 mat_i = imat_i[row]; 1389 mat_a = imat_a + mat_i*bs2; 1390 mat_j = imat_j + mat_i; 1391 ilen_row = imat_ilen[row]; 1392 1393 /* load the column indices for this row into cols*/ 1394 for (l=0; l<nzB; l++) { 1395 if ((ctmp = bmap[cworkB[l]]) < cstart) { 1396 #if defined (PETSC_USE_CTABLE) 1397 ierr = PetscTableFind(lcol1_gcol1,ctmp+1,&tcol);CHKERRQ(ierr); 1398 if (tcol) { 1399 #else 1400 if ((tcol = cmap_i[ctmp])) { 1401 #endif 1402 *mat_j++ = tcol - 1; 1403 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1404 mat_a += bs2; 1405 ilen_row++; 1406 } 1407 } else break; 1408 } 1409 imark = l; 1410 for (l=0; l<nzA; l++) { 1411 #if defined (PETSC_USE_CTABLE) 1412 ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr); 1413 if (tcol) { 1414 #else 1415 if ((tcol = cmap_i[cstart + cworkA[l]])) { 1416 #endif 1417 *mat_j++ = tcol - 1; 1418 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1419 mat_a += bs2; 1420 ilen_row++; 1421 } 1422 } 1423 for (l=imark; l<nzB; l++) { 1424 #if defined (PETSC_USE_CTABLE) 1425 ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr); 1426 if (tcol) { 1427 #else 1428 if ((tcol = cmap_i[bmap[cworkB[l]]])) { 1429 #endif 1430 *mat_j++ = tcol - 1; 1431 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1432 mat_a += bs2; 1433 ilen_row++; 1434 } 1435 } 1436 imat_ilen[row] = ilen_row; 1437 } 1438 } 1439 1440 } 1441 } 1442 1443 /* Now assemble the off proc rows*/ 1444 { 1445 int *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen; 1446 int *imat_j,*imat_i; 1447 MatScalar *imat_a,*rbuf4_i; 1448 1449 for (tmp2=0; tmp2<nrqs; tmp2++) { 1450 ierr = MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);CHKERRQ(ierr); 1451 index = pa[i]; 1452 sbuf1_i = sbuf1[index]; 1453 jmax = sbuf1_i[0]; 1454 ct1 = 2*jmax + 1; 1455 ct2 = 0; 1456 rbuf2_i = rbuf2[i]; 1457 rbuf3_i = rbuf3[i]; 1458 rbuf4_i = rbuf4[i]; 1459 for (j=1; j<=jmax; j++) { 
1460 is_no = sbuf1_i[2*j-1]; 1461 #if defined (PETSC_USE_CTABLE) 1462 lrow1_grow1 = rowmaps[is_no]; 1463 lcol1_gcol1 = colmaps[is_no]; 1464 #else 1465 rmap_i = rmap[is_no]; 1466 cmap_i = cmap[is_no]; 1467 #endif 1468 mat = (Mat_SeqBAIJ*)submats[is_no]->data; 1469 imat_ilen = mat->ilen; 1470 imat_j = mat->j; 1471 imat_i = mat->i; 1472 imat_a = mat->a; 1473 max1 = sbuf1_i[2*j]; 1474 for (k=0; k<max1; k++,ct1++) { 1475 row = sbuf1_i[ct1]; 1476 #if defined (PETSC_USE_CTABLE) 1477 ierr = PetscTableFind(lrow1_grow1,row+1,&row);CHKERRQ(ierr); 1478 row--; 1479 if(row < 0) { SETERRQ(1,"row not found in table"); } 1480 #else 1481 row = rmap_i[row]; 1482 #endif 1483 ilen = imat_ilen[row]; 1484 mat_i = imat_i[row]; 1485 mat_a = imat_a + mat_i*bs2; 1486 mat_j = imat_j + mat_i; 1487 max2 = rbuf2_i[ct1]; 1488 for (l=0; l<max2; l++,ct2++) { 1489 #if defined (PETSC_USE_CTABLE) 1490 ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1491 if (tcol) { 1492 #else 1493 if ((tcol = cmap_i[rbuf3_i[ct2]])) { 1494 #endif 1495 *mat_j++ = tcol - 1; 1496 /* *mat_a++= rbuf4_i[ct2]; */ 1497 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1498 mat_a += bs2; 1499 ilen++; 1500 } 1501 } 1502 imat_ilen[row] = ilen; 1503 } 1504 } 1505 } 1506 } 1507 ierr = PetscFree(r_status4);CHKERRQ(ierr); 1508 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 1509 ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr); 1510 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 1511 ierr = PetscFree(s_status4);CHKERRQ(ierr); 1512 1513 /* Restore the indices */ 1514 for (i=0; i<ismax; i++) { 1515 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 1516 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 1517 } 1518 1519 /* Destroy allocated memory */ 1520 ierr = PetscFree(irow);CHKERRQ(ierr); 1521 ierr = PetscFree(w1);CHKERRQ(ierr); 1522 ierr = PetscFree(pa);CHKERRQ(ierr); 1523 1524 ierr = PetscFree(sbuf1);CHKERRQ(ierr); 1525 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 1526 for (i=0; i<nrqr; ++i) { 1527 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 1528 } 1529 for (i=0; i<nrqs; ++i) { 1530 ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr); 1531 ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); 1532 } 1533 1534 ierr = PetscFree(sbuf2);CHKERRQ(ierr); 1535 ierr = PetscFree(rbuf3);CHKERRQ(ierr); 1536 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 1537 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 1538 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 1539 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 1540 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 1541 1542 #if defined (PETSC_USE_CTABLE) 1543 for (i=0; i<ismax+1; i++){ 1544 ierr = PetscTableDelete(rowmaps[i]);CHKERRQ(ierr); 1545 ierr = PetscTableDelete(colmaps[i]);CHKERRQ(ierr); 1546 } 1547 ierr = PetscFree(colmaps);CHKERRQ(ierr); 1548 ierr = PetscFree(rowmaps);CHKERRQ(ierr); 1549 /* Mark Adams */ 1550 #else 1551 ierr = PetscFree(rmap);CHKERRQ(ierr); 1552 ierr = PetscFree(cmap);CHKERRQ(ierr); 1553 #endif 1554 ierr = PetscFree(lens);CHKERRQ(ierr); 1555 1556 for (i=0; i<ismax; i++) { 1557 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1558 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1559 } 1560 1561 PetscFunctionReturn(0); 1562 } 1563 1564
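/*
   Usage sketch (not part of the compiled code above): a minimal, hypothetical
   example of how these routines are normally reached through the public
   MatIncreaseOverlap() and MatGetSubMatrices() interfaces, assuming a parallel
   MPIBAIJ matrix A whose locally owned (point) rows start at rstart and number
   nlocal. The variable names A, nlocal, and rstart are illustrative only;
   error handling is abbreviated.

      IS  is;
      Mat *submat;
      int ierr;

      ierr = ISCreateStride(PETSC_COMM_SELF,nlocal,rstart,1,&is);CHKERRQ(ierr);
      ierr = MatIncreaseOverlap(A,1,&is,2);CHKERRQ(ierr);   sketch: grow the local index set by 2 levels of overlap
      ierr = ISSort(is);CHKERRQ(ierr);                      MatGetSubMatrices() requires sorted indices
      ierr = MatGetSubMatrices(A,1,&is,&is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
      ierr = ISDestroy(is);CHKERRQ(ierr);
*/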