/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to extract submatrices that are shared across processors.
*/
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat,int,IS *);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat,int,char **,int*,int**);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat,int,int **,int**,int*);
EXTERN PetscErrorCode MatGetRow_MPIAIJ(Mat,int,int*,int**,PetscScalar**);
EXTERN PetscErrorCode MatRestoreRow_MPIAIJ(Mat,int,int*,int**,PetscScalar**);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ"
PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat C,int imax,IS is[],int ov)
{
  PetscErrorCode ierr;
  int            i;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIAIJ_Once(C,imax,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If a processor A wants processor B to process some elements corresponding
  to index sets is[1] and is[5]:

  mesg[0] = 2    (number of index sets in the message)
  -----------
  mesg[1] = 1    => is[1]
  mesg[2] = sizeof(is[1]);
  -----------
  mesg[3] = 5    => is[5]
  mesg[4] = sizeof(is[5]);
  -----------
  mesg[5]
   ...           data(is[1])
  mesg[n]
  -----------
  mesg[n+1]
   ...           data(is[5])
  mesg[m]
  -----------

  Notes:
  nrqs - number of requests sent (or to be sent out)
  nrqr - number of requests received (which have to be or have been processed)
*/
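/*
   A concrete instance of the message format above (hypothetical sizes,
   purely illustrative): if this processor needs processor B to process
   3 rows of is[1] and 2 rows of is[5], the outgoing buffer reads

     mesg = { 2,             two index sets in this message
              1, 3,          is[1], 3 rows follow
              5, 2,          is[5], 2 rows follow
              10, 11, 12,    data(is[1]): the rows owned by B
              40, 41 };      data(is[5]): the rows owned by B

   i.e. length = 1 + 2*(number of IS) + total row count, which is the
   header formula used when sizing w1[] below.
*/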
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat C,int imax,IS is[])
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  int            **idx,*n,*w1,*w2,*w3,*w4,*rtable,**data,len,*idx_i;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank;
  int            m,i,j,k,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
  int            *ctr,*pa,*tmp,nrqr,*isz,*isz1,**xdata,**rbuf2;
  int            *onodes1,*olengths1,tag1,tag2,*onodes2,*olengths2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;

  PetscFunctionBegin;
  comm = C->comm;
  size = c->size;
  rank = c->rank;
  m    = C->M;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  len  = (imax+1)*sizeof(int*) + (imax + m)*sizeof(int);
  ierr = PetscMalloc(len,&idx);CHKERRQ(ierr);
  n      = (int*)(idx + imax);
  rtable = n + imax;

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* Create a lookup table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    len = c->rowners[i+1];
    for (; j<len; j++) {
      rtable[j] = i;
    }
  }

  /* Evaluate communication - message to whom, length of message, and buffer
     space required. Based on this, buffers are allocated, and data is copied
     into them. */
  ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then a message to proc i */
  w3   = w2 + size;  /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) {
        SETERRQ(1,"Index set cannot have negative entries");
      }
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  /* pa - is a list of processors to communicate with */
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
  ierr = PetscMalloc(len,&outdat);CHKERRQ(ierr);
  ptr  = outdat + size;  /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(outdat,2*size*sizeof(int*));CHKERRQ(ierr);
  tmp  = (int*)(outdat + 2*size);
  ctr  = tmp + msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }

  /* Memory for doing the local proc's work */
  {
    int  *d_p;
    char *t_p;

    len  = (imax)*(sizeof(PetscBT) + sizeof(int*) + sizeof(int)) +
           (m)*imax*sizeof(int) + (m/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char) + 1;
    ierr = PetscMalloc(len,&table);CHKERRQ(ierr);
    ierr = PetscMemzero(table,len);CHKERRQ(ierr);
    data = (int **)(table + imax);
    isz  = (int *)(data + imax);
    d_p  = (int *)(isz + imax);
    t_p  = (char *)(d_p + m*imax);
    for (i=0; i<imax; i++) {
      table[i] = t_p + (m/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + (m)*i;
    }
  }
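  /*
     The tables just allocated are PETSc bit arrays; PetscBTLookupSet(bt,i)
     returns the previous value of bit i and sets it. Hence the idiom used
     throughout this file,

       if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }

     appends row to data_i exactly once no matter how many times it is
     encountered, keeping the accumulated index sets duplicate-free.
  */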

  /* Parse the IS and update local tables and the outgoing buffers with the data */
  {
    int     n_i,*data_i,isz_i,*outdat_j,ctr_j;
    PetscBT table_i;

    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
      data_i  = data[i];
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) { /* parse the indices of each IS */
        row  = idx_i[j];
        proc = rtable[row];
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* update the local table */
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* Could optimize this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPI_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree(idx);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }

  /* Do the local work */
  ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree(outdat);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqr+1)*sizeof(int*),&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(int),&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    int *rw1;

    ierr = PetscMalloc(size*sizeof(int),&rw1);CHKERRQ(ierr);
    ierr = PetscMemzero(rw1,size*sizeof(int));CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;
      if (proc != onodes1[i]) SETERRQ(1,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j    = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPI_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }
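  /*
     Each reply posted above reuses the request layout: xdata[i][0] is the
     number of index sets, followed by (is#, count) pairs and then the
     augmented row lists, so the receive loop below parses it with the
     same offsets. A sketch of one reply covering two index sets
     (hypothetical counts, r denoting row indices):

       xdata[i] = { 2,  1,4,  5,3,  r,r,r,r,  r,r,r };
  */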

  /* Receive the work done on other processors */
  {
    int        idex,is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
    PetscBT    table_i;
    MPI_Status *status2;

    ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0]+1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        data_i  = data[is_no];
        table_i = table[is_no];
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
        isz[is_no] = isz_i;
      }
    }

    ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

  for (i=0; i<imax; ++i) {
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Local"
/*
   MatIncreaseOverlap_MPIAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

   Inputs:
     C     - MAT_MPIAIJ;
     imax  - total number of index sets processed at a time;
     table - an array of char - size = m bits.

   Output:
     isz   - array containing the count of the solution elements
             corresponding to each index set;
     data  - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat C,int imax,PetscBT *table,int *isz,int **data)
{
  Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
  Mat        A = c->A,B = c->B;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  int        start,end,val,max,rstart,cstart,*ai,*aj;
  int        *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
  PetscBT    table_i;

  PetscFunctionBegin;
  rstart = c->rstart;
  cstart = c->cstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
    data_i  = data[i];
    table_i = table[i];
    isz_i   = isz[i];
    for (j=0,max=isz[i]; j<max; j++) {
      row   = data_i[j] - rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(0);
}
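/*
   A small worked instance of the sweep above (hypothetical data): with
   rstart = cstart = 10, suppose global row 12 is already in the set, so
   its local row is 2. If aj holds {0,3} for that row and bj holds {1}
   with garray = {4,25}, the sweep appends global columns 10 and 13
   (diagonal block, shifted by cstart) and 25 (off-diagonal block,
   translated through garray), skipping any that are in the set already.
*/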

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIAIJ_Receive"
/*
   MatIncreaseOverlap_MPIAIJ_Receive - Process the received messages,
   and return the output.

   Input:
     C     - the matrix
     nrqr  - number of messages being processed.
     rbuf  - an array of pointers to the received requests

   Output:
     xdata - array of messages to be sent back
     isz1  - size of each message

   For better efficiency perhaps we should malloc separately each xdata[i],
   then if a remalloc is required we need only copy the data for that one
   row rather than all previous rows as it is now, where a single large
   chunk of memory is used.
*/
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat C,int nrqr,int **rbuf,int **xdata,int *isz1)
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  Mat            A = c->A,B = c->B;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  int            rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  int            row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  int            val,max1,max2,m,no_malloc = 0,*tmp,new_estimate,ctr;
  int            *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  rank   = c->rank;
  m      = C->M;
  rstart = c->rstart;
  cstart = c->cstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct    += rbuf_0;
    for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
  }

  if (C->m) max1 = ct*(a->nz + b->nz)/C->m;
  else      max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
  ierr = PetscMalloc(mem_estimate*sizeof(int),&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(m,xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(int));CHKERRQ(ierr);
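  /*
     A sketch of the per-message allocation suggested in the note above
     (a hypothetical alternative, not what the code below does; est_i
     stands for some per-message size estimate): sizing each reply
     independently means a growth in one message never forces the
     earlier messages to be copied.

       for (i=0; i<nrqr; i++) {
         ierr = PetscMalloc(est_i*sizeof(int),&xdata[i]);CHKERRQ(ierr);
         ... on overflow, grow and copy only xdata[i] ...
       }
  */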

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct1    = 2*rbuf_0+1;
    ct2    = ct1;
    ct3   += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(m,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (int)(1.5*mem_estimate)+1;
            ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
            ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
            ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0]     = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) { /* Amat */
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (int)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) { /* Bmat */
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (int)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* could undo the vector isz1 and use only a variable */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
  PetscLogInfo(0,"MatIncreaseOverlap_MPIAIJ:[%d] Allocated %d bytes, required %d bytes, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
EXTERN PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,int,const IS[],const IS[],MatReuse,Mat*);
EXTERN PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
/*
    Every processor gets the entire matrix
*/
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrix_MPIAIJ_All"
PetscErrorCode MatGetSubMatrix_MPIAIJ_All(Mat A,MatReuse scall,Mat *Bin[])
{
  Mat            B;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
  Mat_SeqAIJ     *b,*ad = (Mat_SeqAIJ*)a->A->data,*bd = (Mat_SeqAIJ*)a->B->data;
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  int            sendcount,*recvcounts = 0,*displs = 0,size,i,*rstarts = a->rowners,n,cnt,j;
  int            m,*b_sendj,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;
  PetscScalar    *sendbuf,*recvbuf,*a_sendbuf,*b_sendbuf;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(A->comm,&rank);CHKERRQ(ierr);

  if (scall == MAT_INITIAL_MATRIX) {
    /* ----------------------------------------------------------------
         Tell every processor the number of nonzeros per row
    */
    ierr = PetscMalloc(A->M*sizeof(int),&lens);CHKERRQ(ierr);
    for (i=a->rstart; i<a->rend; i++) {
      lens[i] = ad->i[i-a->rstart+1] - ad->i[i-a->rstart] + bd->i[i-a->rstart+1] - bd->i[i-a->rstart];
    }
    sendcount = a->rend - a->rstart;
    ierr      = PetscMalloc(2*size*sizeof(int),&recvcounts);CHKERRQ(ierr);
    displs    = recvcounts + size;
    for (i=0; i<size; i++) {
      recvcounts[i] = a->rowners[i+1] - a->rowners[i];
      displs[i]     = a->rowners[i];
    }
    ierr = MPI_Allgatherv(lens+a->rstart,sendcount,MPI_INT,lens,recvcounts,displs,MPI_INT,A->comm);CHKERRQ(ierr);

    /* ---------------------------------------------------------------
         Create the sequential matrix of the same type as the local block diagonal
    */
    ierr  = MatCreate(PETSC_COMM_SELF,A->M,A->N,PETSC_DETERMINE,PETSC_DETERMINE,&B);CHKERRQ(ierr);
    ierr  = MatSetType(B,a->A->type_name);CHKERRQ(ierr);
    ierr  = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr);
    ierr  = PetscMalloc(sizeof(Mat),Bin);CHKERRQ(ierr);
    **Bin = B;
    b     = (Mat_SeqAIJ *)B->data;
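    /*
       The copy below emits each local row in globally ascending column
       order: first the B-part entries whose global column (garray[...])
       lies left of the diagonal block, then the A part shifted into
       global numbering, then the remaining B-part entries. E.g.
       (hypothetical) with a->rstart = 10 and garray = {2,25}, a row with
       A columns {0,1} and B columns {0,1} is emitted as global columns
       {2, 10, 11, 25}.
    */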

    /*--------------------------------------------------------------------
        Copy over my part of the matrix column indices
    */
    sendcount  = ad->nz + bd->nz;
    jsendbuf   = b->j + b->i[rstarts[rank]];
    a_jsendbuf = ad->j;
    b_jsendbuf = bd->j;
    n          = a->rend - a->rstart;
    cnt        = 0;
    for (i=0; i<n; i++) {

      /* put in lower diagonal portion */
      m = bd->i[i+1] - bd->i[i];
      while (m > 0) {
        /* is it above the diagonal (in bd (compressed) numbering)? */
        if (garray[*b_jsendbuf] > a->rstart + i) break;
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
        m--;
      }

      /* put in diagonal portion */
      for (j=ad->i[i]; j<ad->i[i+1]; j++) {
        jsendbuf[cnt++] = a->rstart + *a_jsendbuf++;
      }

      /* put in upper diagonal portion */
      while (m-- > 0) {
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
      }
    }
    if (cnt != sendcount) SETERRQ2(1,"Corrupted PETSc matrix: nz given %d actual nz %d",sendcount,cnt);

    /*--------------------------------------------------------------------
        Gather all column indices to all processors
    */
    for (i=0; i<size; i++) {
      recvcounts[i] = 0;
      for (j=a->rowners[i]; j<a->rowners[i+1]; j++) {
        recvcounts[i] += lens[j];
      }
    }
    displs[0] = 0;
    for (i=1; i<size; i++) {
      displs[i] = displs[i-1] + recvcounts[i-1];
    }
    ierr = MPI_Allgatherv(jsendbuf,sendcount,MPI_INT,b->j,recvcounts,displs,MPI_INT,A->comm);CHKERRQ(ierr);

    /*--------------------------------------------------------------------
        Assemble the matrix into usable form (note that the numerical
        values are not yet set)
    */
    /* set the b->ilen (length of each row) values */
    ierr = PetscMemcpy(b->ilen,lens,A->M*sizeof(int));CHKERRQ(ierr);
    /* set the b->i indices */
    b->i[0] = 0;
    for (i=1; i<=A->M; i++) {
      b->i[i] = b->i[i-1] + lens[i-1];
    }
    ierr = PetscFree(lens);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  } else {
    B = **Bin;
    b = (Mat_SeqAIJ *)B->data;
  }

  /*--------------------------------------------------------------------
      Copy over my part of the matrix numerical values
  */
  sendcount = ad->nz + bd->nz;
  sendbuf   = b->a + b->i[rstarts[rank]];
  a_sendbuf = ad->a;
  b_sendbuf = bd->a;
  b_sendj   = bd->j;
  n         = a->rend - a->rstart;
  cnt       = 0;
  for (i=0; i<n; i++) {

    /* put in lower diagonal portion */
    m = bd->i[i+1] - bd->i[i];
    while (m > 0) {
      /* is it above the diagonal (in bd (compressed) numbering)? */
      if (garray[*b_sendj] > a->rstart + i) break;
      sendbuf[cnt++] = *b_sendbuf++;
      m--;
      b_sendj++;
    }

    /* put in diagonal portion */
    for (j=ad->i[i]; j<ad->i[i+1]; j++) {
      sendbuf[cnt++] = *a_sendbuf++;
    }

    /* put in upper diagonal portion */
    while (m-- > 0) {
      sendbuf[cnt++] = *b_sendbuf++;
      b_sendj++;
    }
  }
  if (cnt != sendcount) SETERRQ2(1,"Corrupted PETSc matrix: nz given %d actual nz %d",sendcount,cnt);

  /* -----------------------------------------------------------------
      Gather all numerical values to all processors
  */
  if (!recvcounts) {
    ierr   = PetscMalloc(2*size*sizeof(int),&recvcounts);CHKERRQ(ierr);
    displs = recvcounts + size;
  }
  for (i=0; i<size; i++) {
    recvcounts[i] = b->i[rstarts[i+1]] - b->i[rstarts[i]];
  }
  displs[0] = 0;
  for (i=1; i<size; i++) {
    displs[i] = displs[i-1] + recvcounts[i-1];
  }
  recvbuf = b->a;
  ierr    = MPI_Allgatherv(sendbuf,sendcount,MPIU_SCALAR,recvbuf,recvcounts,displs,MPIU_SCALAR,A->comm);CHKERRQ(ierr);
  ierr    = PetscFree(recvcounts);CHKERRQ(ierr);
  if (A->symmetric) {
    ierr = MatSetOption(B,MAT_SYMMETRIC);CHKERRQ(ierr);
  } else if (A->hermitian) {
    ierr = MatSetOption(B,MAT_HERMITIAN);CHKERRQ(ierr);
  } else if (A->structurally_symmetric) {
    ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
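/*
   A minimal caller-side sketch (hypothetical usage, assuming a square
   MPIAIJ matrix A): requesting the identity row and column sets makes
   the identity-IS test in MatGetSubMatrices_MPIAIJ below take the fast
   all-gather path implemented above.

     IS  is;
     Mat *subA;
     ierr = ISCreateStride(PETSC_COMM_SELF,A->M,0,1,&is);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(A,1,&is,&is,MAT_INITIAL_MATRIX,&subA);CHKERRQ(ierr);
*/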

#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIAIJ"
PetscErrorCode MatGetSubMatrices_MPIAIJ(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;
  int            nmax,nstages_local,nstages,i,pos,max_no,nrow,ncol;
  PetscTruth     rowflag,colflag,wantallmatrix = PETSC_FALSE,twantallmatrix;

  PetscFunctionBegin;
  /*
       Check for the special case where each processor gets the entire matrix
  */
  if (ismax == 1 && C->M == C->N) {
    ierr = ISIdentity(*isrow,&rowflag);CHKERRQ(ierr);
    ierr = ISIdentity(*iscol,&colflag);CHKERRQ(ierr);
    ierr = ISGetLocalSize(*isrow,&nrow);CHKERRQ(ierr);
    ierr = ISGetLocalSize(*iscol,&ncol);CHKERRQ(ierr);
    if (rowflag && colflag && nrow == C->M && ncol == C->N) {
      wantallmatrix = PETSC_TRUE;
      ierr = PetscOptionsGetLogical(C->prefix,"-use_fast_submatrix",&wantallmatrix,PETSC_NULL);CHKERRQ(ierr);
    }
  }
  ierr = MPI_Allreduce(&wantallmatrix,&twantallmatrix,1,MPI_INT,MPI_MIN,C->comm);CHKERRQ(ierr);
  if (twantallmatrix) {
    ierr = MatGetSubMatrix_MPIAIJ_All(C,scall,submat);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which the submatrices are built */
  nmax = 20*1000000 / (C->N * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax)?1:0);

  /* Make sure every processor loops through the same number of stages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);

  for (i=0,pos=0; i<nstages; i++) {
    if      (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax)      max_no = 0;
    else                        max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIAIJ_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
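/*
   The staging above caps the dense scratch arrays at roughly 20 MB per
   stage. A hypothetical instance: with C->N = 1,000,000 columns and
   4-byte ints, nmax = 20000000/4000000 = 5 index sets per stage, so
   ismax = 12 requests run in nstages = 3 stages of sizes 5, 5 and 2;
   the MPI_Allreduce makes every processor execute the same number of
   stages even if it requested fewer submatrices.
*/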
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIAIJ_Local"
PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  Mat            A = c->A;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)c->B->data,*mat;
  int            **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  int            **sbuf1,**sbuf2,m,i,j,k,l,ct1,ct2,**rbuf1,row,proc;
  int            nrqs,msz,**ptr,idex,*req_size,*ctr,*pa,*tmp,tcol,nrqr;
  int            **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2,**rmap;
  int            **cmap,**lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
  int            len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*cmap_i,*lens_i;
  int            *rmap_i,tag0,tag1,tag2,tag3;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
  MPI_Request    *r_waits4,*s_waits3,*s_waits4;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
  MPI_Status     *r_status3,*r_status4,*s_status4;
  MPI_Comm       comm;
  PetscScalar    **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i;
  PetscTruth     sorted;
  int            *onodes1,*olengths1;

  PetscFunctionBegin;
  comm = C->comm;
  tag0 = C->tag;
  size = c->size;
  rank = c->rank;
  m    = C->M;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);

  /* Check if the row and column indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(isrow[i],&sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"ISrow is not sorted");
    ierr = ISSorted(iscol[i],&sorted);CHKERRQ(ierr);
    /* if (!sorted) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IScol is not sorted"); */
  }

  len  = (2*ismax+1)*(sizeof(int*) + sizeof(int)) + (m+1)*sizeof(int);
  ierr = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol   = irow + ismax;
  nrow   = (int*)(icol + ismax);
  ncol   = nrow + ismax;
  rtable = ncol + ismax;

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Create a lookup table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }

  /* Evaluate communication - message to whom, length of message, and buffer
     space required. Based on this, buffers are allocated, and data is copied
     into them. */
  ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then a message to proc i */
  w3   = w2 + size;  /* no of IS that needs to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0; /* no of outgoing messages */
  msz      = 0; /* total mesg length (for all procs) */
  w1[rank] = 0; /* no mesg sent to self */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size; /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp  = (int*)(ptr + size);
  ctr  = tmp + 2*msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the buffer sizes */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(int*),&rbuf2);CHKERRQ(ierr);
  rbuf2[0] = tmp + msz;
  for (i=1; i<nrqs; ++i) {
    rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]];
  }
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Receive the requests and send the other procs the buffer sizes they
     should allocate */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  len  = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
  ierr = PetscMalloc(len,&sbuf2);CHKERRQ(ierr);
  req_size   = (int*)(sbuf2 + nrqr);
  req_source = req_size + nrqr;

  {
    Mat_SeqAIJ *sA = (Mat_SeqAIJ*)c->A->data,*sB = (Mat_SeqAIJ*)c->B->data;
    int        *sAi = sA->i,*sBi = sB->i,id,rstart = c->rstart;
    int        *sbuf2_i;

    for (i=0; i<nrqr; ++i) {
      ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      req_size[idex] = 0;
      rbuf1_i        = rbuf1[idex];
      start          = 2*rbuf1_i[0] + 1;
      ierr           = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr);
      ierr           = PetscMalloc((end+1)*sizeof(int),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i        = sbuf2[idex];
      for (j=start; j<end; j++) {
        id              = rbuf1_i[j] - rstart;
        ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
        sbuf2_i[j]      = ncols;
        req_size[idex] += ncols;
      }
      req_source[idex] = r_status1[i].MPI_SOURCE;
      /* form the header */
      sbuf2_i[0] = req_size[idex];
      for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
      ierr = MPI_Isend(sbuf2_i,end,MPI_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
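  /*
     The size reply formed above mirrors the request: slot 0 is
     overwritten with the total number of nonzeros the later data
     messages will carry, the (is#,count) header pairs are echoed, and
     each requested row's slot is replaced by that row's column count.
     E.g. (hypothetical) a request { 1, 2,2, r0,r1 } whose two rows have
     3 and 5 nonzeros is answered with { 8, 2,2, 3,5 }.
  */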

  /* Receive the buffer sizes and post receives for the column indices
     and the numerical values */
  ierr = PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscScalar*),&rbuf4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
    ierr = PetscMalloc((rbuf2[idex][0]+1)*sizeof(int),&rbuf3[idex]);CHKERRQ(ierr);
    ierr = PetscMalloc((rbuf2[idex][0]+1)*sizeof(PetscScalar),&rbuf4[idex]);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPI_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0],MPIU_SCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
  }
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);

  /* Wait on sends1 and sends2 */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);

  ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Now allocate buffers for a->j, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(int*),&sbuf_aj);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(int),&sbuf_aj[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
  {
    int nzA,nzB,*a_i = a->i,*b_i = b->i,imark;
    int *cworkA,*cworkB,cstart = c->cstart,rstart = c->rstart,*bmap = c->garray;
    int *a_j = a->j,*b_j = b->j,ctmp;

    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aj_i = sbuf_aj[i];
      ct1       = 2*rbuf1_i[0] + 1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1[i][2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

          /* load the column indices for this row into cols */
          cols = sbuf_aj_i + ct2;

          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
            else break;
          }
          imark = l;
          for (l=0; l<nzA; l++)     cols[imark+l] = cstart + cworkA[l];
          for (l=imark; l<nzB; l++) cols[nzA+l]   = bmap[cworkB[l]];

          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);

  /* Allocate buffers for a->a, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscScalar*),&sbuf_aa);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(PetscScalar),&sbuf_aa[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
  {
    int         nzA,nzB,*a_i = a->i,*b_i = b->i,*cworkB,imark;
    int         cstart = c->cstart,rstart = c->rstart,*bmap = c->garray;
    int         *b_j = b->j;
    PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a;

    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aa_i = sbuf_aa[i];
      ct1       = 2*rbuf1_i[0]+1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1_i[2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkB = b_j + b_i[row];
          vworkA = a_a + a_i[row];
          vworkB = b_a + b_i[row];

          /* load the column values for this row into vals */
          vals = sbuf_aa_i + ct2;

          for (l=0; l<nzB; l++) {
            if ((bmap[cworkB[l]]) < cstart) vals[l] = vworkB[l];
            else break;
          }
          imark = l;
          for (l=0; l<nzA; l++)     vals[imark+l] = vworkA[l];
          for (l=imark; l<nzB; l++) vals[nzA+l]   = vworkB[l];

          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  /* Form the matrix */
  /* create col map */
  {
    int *icol_i;

    len     = (1+ismax)*sizeof(int*) + ismax*C->N*sizeof(int);
    ierr    = PetscMalloc(len,&cmap);CHKERRQ(ierr);
    cmap[0] = (int*)(cmap + ismax);
    ierr    = PetscMemzero(cmap[0],(1+ismax*C->N)*sizeof(int));CHKERRQ(ierr);
    for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + C->N; }
    for (i=0; i<ismax; i++) {
      jmax   = ncol[i];
      icol_i = icol[i];
      cmap_i = cmap[i];
      for (j=0; j<jmax; j++) {
        cmap_i[icol_i[j]] = j+1;
      }
    }
  }
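  /*
     cmap[i] is a dense global-to-local column map for submatrix i:
     entry c holds (local column + 1) if global column c appears in
     iscol[i], and 0 otherwise, so "if ((tcol = cmap_i[cols[k]]))" below
     is both the membership test and the translation; the +1 offset
     keeps local column 0 distinguishable from "absent". E.g.
     (hypothetical) iscol[i] = {3,7} gives cmap[i][3] = 1 and
     cmap[i][7] = 2, with all other entries 0.
  */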

  /* Create lens, which is required for MatCreate... */
  for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
  len     = (1+ismax)*sizeof(int*) + j*sizeof(int);
  ierr    = PetscMalloc(len,&lens);CHKERRQ(ierr);
  lens[0] = (int*)(lens + ismax);
  ierr    = PetscMemzero(lens[0],j*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }

  /* Update lens from local data */
  for (i=0; i<ismax; i++) {
    jmax   = nrow[i];
    cmap_i = cmap[i];
    irow_i = irow[i];
    lens_i = lens[i];
    for (j=0; j<jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      if (proc == rank) {
        ierr = MatGetRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
        for (k=0; k<ncols; k++) {
          if (cmap_i[cols[k]]) { lens_i[j]++; }
        }
        ierr = MatRestoreRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
      }
    }
  }

  /* Create row map */
  len     = (1+ismax)*sizeof(int*) + ismax*C->M*sizeof(int);
  ierr    = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (int*)(rmap + ismax);
  ierr    = PetscMemzero(rmap[0],ismax*C->M*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + C->M; }
  for (i=0; i<ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
  }

  /* Update lens from off-processor data */
  {
    int *rbuf2_i,*rbuf3_i,*sbuf1_i;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr    = MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);CHKERRQ(ierr);
      idex    = pa[i];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax+1;
      ct2     = 0;
      rbuf2_i = rbuf2[i];
      rbuf3_i = rbuf3[i];
      for (j=1; j<=jmax; j++) {
        is_no  = sbuf1_i[2*j-1];
        max1   = sbuf1_i[2*j];
        lens_i = lens[is_no];
        cmap_i = cmap[is_no];
        rmap_i = rmap[is_no];
        for (k=0; k<max1; k++,ct1++) {
          row  = rmap_i[sbuf1_i[ct1]]; /* the row in the new matrix to be updated */
          max2 = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
            if (cmap_i[rbuf3_i[ct2]]) {
              lens_i[row]++;
            }
          }
        }
      }
    }
  }
  ierr = PetscFree(r_status3);CHKERRQ(ierr);
  ierr = PetscFree(r_waits3);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_waits3);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    PetscTruth flag;

    /*
        Assumes new rows are the same length as the old rows, hence bug!
    */
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqAIJ *)(submats[i]->data);
      if ((submats[i]->m != nrow[i]) || (submats[i]->n != ncol[i])) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
      }
      ierr = PetscMemcmp(mat->ilen,lens[i],submats[i]->m*sizeof(int),&flag);CHKERRQ(ierr);
      if (flag == PETSC_FALSE) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong number of nonzeros");
      }
      /* Initialize the matrix as if it were empty */
      ierr = PetscMemzero(mat->ilen,submats[i]->m*sizeof(int));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE,submats+i);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],A->type_name);CHKERRQ(ierr);
      ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr);
    }
  }
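  /*
     A compressed sketch of the fill that follows (hypothetical data):
     for a row whose global columns are {3,7,8} with cmap_i as in the
     example above ({3 -> 1, 7 -> 2}), only columns 3 and 7 survive;
     they are written as local columns 0 and 1 together with their
     values, and imat_ilen[row] advances by 2.
  */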
wrong no of nonzeros"); 1244 } 1245 /* Initial matrix as if empty */ 1246 ierr = PetscMemzero(mat->ilen,submats[i]->m*sizeof(int));CHKERRQ(ierr); 1247 submats[i]->factor = C->factor; 1248 } 1249 } else { 1250 for (i=0; i<ismax; i++) { 1251 ierr = MatCreate(PETSC_COMM_SELF,nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE,submats+i);CHKERRQ(ierr); 1252 ierr = MatSetType(submats[i],A->type_name);CHKERRQ(ierr); 1253 ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr); 1254 } 1255 } 1256 1257 /* Assemble the matrices */ 1258 /* First assemble the local rows */ 1259 { 1260 int ilen_row,*imat_ilen,*imat_j,*imat_i,old_row; 1261 PetscScalar *imat_a; 1262 1263 for (i=0; i<ismax; i++) { 1264 mat = (Mat_SeqAIJ*)submats[i]->data; 1265 imat_ilen = mat->ilen; 1266 imat_j = mat->j; 1267 imat_i = mat->i; 1268 imat_a = mat->a; 1269 cmap_i = cmap[i]; 1270 rmap_i = rmap[i]; 1271 irow_i = irow[i]; 1272 jmax = nrow[i]; 1273 for (j=0; j<jmax; j++) { 1274 row = irow_i[j]; 1275 proc = rtable[row]; 1276 if (proc == rank) { 1277 old_row = row; 1278 row = rmap_i[row]; 1279 ilen_row = imat_ilen[row]; 1280 ierr = MatGetRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 1281 mat_i = imat_i[row] ; 1282 mat_a = imat_a + mat_i; 1283 mat_j = imat_j + mat_i; 1284 for (k=0; k<ncols; k++) { 1285 if ((tcol = cmap_i[cols[k]])) { 1286 *mat_j++ = tcol - 1; 1287 *mat_a++ = vals[k]; 1288 ilen_row++; 1289 } 1290 } 1291 ierr = MatRestoreRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 1292 imat_ilen[row] = ilen_row; 1293 } 1294 } 1295 } 1296 } 1297 1298 /* Now assemble the off proc rows*/ 1299 { 1300 int *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen; 1301 int *imat_j,*imat_i; 1302 PetscScalar *imat_a,*rbuf4_i; 1303 1304 for (tmp2=0; tmp2<nrqs; tmp2++) { 1305 ierr = MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);CHKERRQ(ierr); 1306 idex = pa[i]; 1307 sbuf1_i = sbuf1[idex]; 1308 jmax = sbuf1_i[0]; 1309 ct1 = 2*jmax + 1; 1310 ct2 = 0; 1311 rbuf2_i = rbuf2[i]; 1312 rbuf3_i = rbuf3[i]; 1313 rbuf4_i = rbuf4[i]; 1314 for (j=1; j<=jmax; j++) { 1315 is_no = sbuf1_i[2*j-1]; 1316 rmap_i = rmap[is_no]; 1317 cmap_i = cmap[is_no]; 1318 mat = (Mat_SeqAIJ*)submats[is_no]->data; 1319 imat_ilen = mat->ilen; 1320 imat_j = mat->j; 1321 imat_i = mat->i; 1322 imat_a = mat->a; 1323 max1 = sbuf1_i[2*j]; 1324 for (k=0; k<max1; k++,ct1++) { 1325 row = sbuf1_i[ct1]; 1326 row = rmap_i[row]; 1327 ilen = imat_ilen[row]; 1328 mat_i = imat_i[row] ; 1329 mat_a = imat_a + mat_i; 1330 mat_j = imat_j + mat_i; 1331 max2 = rbuf2_i[ct1]; 1332 for (l=0; l<max2; l++,ct2++) { 1333 if ((tcol = cmap_i[rbuf3_i[ct2]])) { 1334 *mat_j++ = tcol - 1; 1335 *mat_a++ = rbuf4_i[ct2]; 1336 ilen++; 1337 } 1338 } 1339 imat_ilen[row] = ilen; 1340 } 1341 } 1342 } 1343 } 1344 ierr = PetscFree(r_status4);CHKERRQ(ierr); 1345 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 1346 ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr); 1347 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 1348 ierr = PetscFree(s_status4);CHKERRQ(ierr); 1349 1350 /* Restore the indices */ 1351 for (i=0; i<ismax; i++) { 1352 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 1353 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 1354 } 1355 1356 /* Destroy allocated memory */ 1357 ierr = PetscFree(irow);CHKERRQ(ierr); 1358 ierr = PetscFree(w1);CHKERRQ(ierr); 1359 ierr = PetscFree(pa);CHKERRQ(ierr); 1360 1361 ierr = PetscFree(sbuf1);CHKERRQ(ierr); 1362 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 1363 for (i=0; i<nrqr; ++i) { 1364 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 1365 } 1366 for (i=0; i<nrqs; 
  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
    ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rbuf3);CHKERRQ(ierr);
  ierr = PetscFree(rbuf4);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);

  ierr = PetscFree(cmap);CHKERRQ(ierr);
  ierr = PetscFree(rmap);CHKERRQ(ierr);
  ierr = PetscFree(lens);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}