1 /* 2 Routines to compute overlapping regions of a parallel MPI matrix 3 and to find submatrices that were shared across processors. 4 */ 5 #include <../src/mat/impls/aij/seq/aij.h> 6 #include <../src/mat/impls/aij/mpi/mpiaij.h> 7 #include <petscbt.h> 8 #include <petscsf.h> 9 10 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat,PetscInt,IS*); 11 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**,PetscTable*); 12 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*); 13 extern PetscErrorCode MatGetRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 14 extern PetscErrorCode MatRestoreRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 15 16 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat,PetscInt,IS*); 17 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat,PetscInt,IS*); 18 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat,PetscInt,PetscMPIInt,PetscMPIInt *,PetscInt *, PetscInt *,PetscInt **,PetscInt **); 19 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat,PetscInt,IS*,PetscInt,PetscInt *); 20 21 22 PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov) 23 { 24 PetscErrorCode ierr; 25 PetscInt i; 26 27 PetscFunctionBegin; 28 if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified"); 29 for (i=0; i<ov; ++i) { 30 ierr = MatIncreaseOverlap_MPIAIJ_Once(C,imax,is);CHKERRQ(ierr); 31 } 32 PetscFunctionReturn(0); 33 } 34 35 PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat C,PetscInt imax,IS is[],PetscInt ov) 36 { 37 PetscErrorCode ierr; 38 PetscInt i; 39 40 PetscFunctionBegin; 41 if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified"); 42 for (i=0; i<ov; ++i) { 43 ierr = MatIncreaseOverlap_MPIAIJ_Once_Scalable(C,imax,is);CHKERRQ(ierr); 44 } 45 PetscFunctionReturn(0); 46 } 47 48 49 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat mat,PetscInt nidx,IS is[]) 50 { 51 PetscErrorCode ierr; 52 MPI_Comm comm; 53 PetscInt *length,length_i,tlength,*remoterows,nrrows,reducednrrows,*rrow_ranks,*rrow_isids,i,j,owner; 54 PetscInt *tosizes,*tosizes_temp,*toffsets,*fromsizes,*todata,*fromdata; 55 PetscInt nrecvrows,*sbsizes = 0,*sbdata = 0; 56 const PetscInt *indices_i,**indices; 57 PetscLayout rmap; 58 PetscMPIInt rank,size,*toranks,*fromranks,nto,nfrom; 59 PetscSF sf; 60 PetscSFNode *remote; 61 62 PetscFunctionBegin; 63 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 64 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 65 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 66 /* get row map to determine where rows should be going */ 67 ierr = MatGetLayouts(mat,&rmap,NULL);CHKERRQ(ierr); 68 /* retrieve IS data and put all together so that we 69 * can optimize communication 70 * */ 71 ierr = PetscCalloc2(nidx,(PetscInt ***)&indices,nidx,&length);CHKERRQ(ierr); 72 for (i=0,tlength=0; i<nidx; i++){ 73 ierr = ISGetLocalSize(is[i],&length[i]);CHKERRQ(ierr); 74 tlength += length[i]; 75 ierr = ISGetIndices(is[i],&indices[i]);CHKERRQ(ierr); 76 } 77 /* find these rows on remote processors */ 78 ierr = PetscCalloc3(tlength,&remoterows,tlength,&rrow_ranks,tlength,&rrow_isids);CHKERRQ(ierr); 79 ierr = PetscCalloc3(size,&toranks,2*size,&tosizes,size,&tosizes_temp);CHKERRQ(ierr); 80 nrrows = 0; 81 for (i=0; i<nidx; i++){ 82 length_i = length[i]; 83 indices_i = indices[i]; 84 for (j=0; 
j<length_i; j++){ 85 owner = -1; 86 ierr = PetscLayoutFindOwner(rmap,indices_i[j],&owner);CHKERRQ(ierr); 87 /* remote processors */ 88 if (owner != rank){ 89 tosizes_temp[owner]++; /* number of rows to owner */ 90 rrow_ranks[nrrows] = owner; /* processor */ 91 rrow_isids[nrrows] = i; /* is id */ 92 remoterows[nrrows++] = indices_i[j]; /* row */ 93 } 94 } 95 ierr = ISRestoreIndices(is[i],&indices[i]);CHKERRQ(ierr); 96 } 97 ierr = PetscFree2(indices,length);CHKERRQ(ierr); 98 /* test if we need to exchange messages 99 * generally speaking, we do not need to exchange 100 * data when overlap is 1 101 * */ 102 ierr = MPIU_Allreduce(&nrrows,&reducednrrows,1,MPIU_INT,MPIU_MAX,comm);CHKERRQ(ierr); 103 /* we do not have any messages 104 * It usually corresponds to overlap 1 105 * */ 106 if (!reducednrrows){ 107 ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr); 108 ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr); 109 ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr); 110 PetscFunctionReturn(0); 111 } 112 nto = 0; 113 /* send sizes and ranks for building a two-sided communcation */ 114 for (i=0; i<size; i++){ 115 if (tosizes_temp[i]){ 116 tosizes[nto*2] = tosizes_temp[i]*2; /* size */ 117 tosizes_temp[i] = nto; /* a map from processor to index */ 118 toranks[nto++] = i; /* processor */ 119 } 120 } 121 ierr = PetscCalloc1(nto+1,&toffsets);CHKERRQ(ierr); 122 for (i=0; i<nto; i++){ 123 toffsets[i+1] = toffsets[i]+tosizes[2*i]; /* offsets */ 124 tosizes[2*i+1] = toffsets[i]; /* offsets to send */ 125 } 126 /* send information to other processors */ 127 ierr = PetscCommBuildTwoSided(comm,2,MPIU_INT,nto,toranks,tosizes,&nfrom,&fromranks,&fromsizes);CHKERRQ(ierr); 128 nrecvrows = 0; 129 for (i=0; i<nfrom; i++) nrecvrows += fromsizes[2*i]; 130 ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr); 131 nrecvrows = 0; 132 for (i=0; i<nfrom; i++){ 133 for (j=0; j<fromsizes[2*i]; j++){ 134 remote[nrecvrows].rank = fromranks[i]; 135 remote[nrecvrows++].index = fromsizes[2*i+1]+j; 136 } 137 } 138 ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr); 139 ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr); 140 /* use two-sided communication by default since OPENMPI has some bugs for one-sided one */ 141 ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr); 142 ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr); 143 /* message pair <no of is, row> */ 144 ierr = PetscCalloc2(2*nrrows,&todata,nrecvrows,&fromdata);CHKERRQ(ierr); 145 for (i=0; i<nrrows; i++){ 146 owner = rrow_ranks[i]; /* processor */ 147 j = tosizes_temp[owner]; /* index */ 148 todata[toffsets[j]++] = rrow_isids[i]; 149 todata[toffsets[j]++] = remoterows[i]; 150 } 151 ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr); 152 ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr); 153 ierr = PetscFree(toffsets);CHKERRQ(ierr); 154 ierr = PetscSFBcastBegin(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr); 155 ierr = PetscSFBcastEnd(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr); 156 ierr = PetscSFDestroy(&sf);CHKERRQ(ierr); 157 /* send rows belonging to the remote so that then we could get the overlapping data back */ 158 ierr = MatIncreaseOverlap_MPIAIJ_Send_Scalable(mat,nidx,nfrom,fromranks,fromsizes,fromdata,&sbsizes,&sbdata);CHKERRQ(ierr); 159 ierr = PetscFree2(todata,fromdata);CHKERRQ(ierr); 160 ierr = PetscFree(fromsizes);CHKERRQ(ierr); 161 ierr = 
PetscCommBuildTwoSided(comm,2,MPIU_INT,nfrom,fromranks,sbsizes,&nto,&toranks,&tosizes);CHKERRQ(ierr); 162 ierr = PetscFree(fromranks);CHKERRQ(ierr); 163 nrecvrows = 0; 164 for (i=0; i<nto; i++) nrecvrows += tosizes[2*i]; 165 ierr = PetscCalloc1(nrecvrows,&todata);CHKERRQ(ierr); 166 ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr); 167 nrecvrows = 0; 168 for (i=0; i<nto; i++){ 169 for (j=0; j<tosizes[2*i]; j++){ 170 remote[nrecvrows].rank = toranks[i]; 171 remote[nrecvrows++].index = tosizes[2*i+1]+j; 172 } 173 } 174 ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr); 175 ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr); 176 /* use two-sided communication by default since OPENMPI has some bugs for one-sided one */ 177 ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr); 178 ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr); 179 /* overlap communication and computation */ 180 ierr = PetscSFBcastBegin(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr); 181 ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr); 182 ierr = PetscSFBcastEnd(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr); 183 ierr = PetscSFDestroy(&sf);CHKERRQ(ierr); 184 ierr = PetscFree2(sbdata,sbsizes);CHKERRQ(ierr); 185 ierr = MatIncreaseOverlap_MPIAIJ_Receive_Scalable(mat,nidx,is,nrecvrows,todata);CHKERRQ(ierr); 186 ierr = PetscFree(toranks);CHKERRQ(ierr); 187 ierr = PetscFree(tosizes);CHKERRQ(ierr); 188 ierr = PetscFree(todata);CHKERRQ(ierr); 189 PetscFunctionReturn(0); 190 } 191 192 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat mat,PetscInt nidx, IS is[], PetscInt nrecvs, PetscInt *recvdata) 193 { 194 PetscInt *isz,isz_i,i,j,is_id, data_size; 195 PetscInt col,lsize,max_lsize,*indices_temp, *indices_i; 196 const PetscInt *indices_i_temp; 197 PetscErrorCode ierr; 198 199 PetscFunctionBegin; 200 max_lsize = 0; 201 ierr = PetscMalloc1(nidx,&isz);CHKERRQ(ierr); 202 for (i=0; i<nidx; i++){ 203 ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr); 204 max_lsize = lsize>max_lsize ? 
lsize:max_lsize; 205 isz[i] = lsize; 206 } 207 ierr = PetscMalloc1((max_lsize+nrecvs)*nidx,&indices_temp);CHKERRQ(ierr); 208 for (i=0; i<nidx; i++){ 209 ierr = ISGetIndices(is[i],&indices_i_temp);CHKERRQ(ierr); 210 ierr = PetscMemcpy(indices_temp+i*(max_lsize+nrecvs),indices_i_temp, sizeof(PetscInt)*isz[i]);CHKERRQ(ierr); 211 ierr = ISRestoreIndices(is[i],&indices_i_temp);CHKERRQ(ierr); 212 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 213 } 214 /* retrieve information to get row id and its overlap */ 215 for (i=0; i<nrecvs; ){ 216 is_id = recvdata[i++]; 217 data_size = recvdata[i++]; 218 indices_i = indices_temp+(max_lsize+nrecvs)*is_id; 219 isz_i = isz[is_id]; 220 for (j=0; j< data_size; j++){ 221 col = recvdata[i++]; 222 indices_i[isz_i++] = col; 223 } 224 isz[is_id] = isz_i; 225 } 226 /* remove duplicate entities */ 227 for (i=0; i<nidx; i++){ 228 indices_i = indices_temp+(max_lsize+nrecvs)*i; 229 isz_i = isz[i]; 230 ierr = PetscSortRemoveDupsInt(&isz_i,indices_i);CHKERRQ(ierr); 231 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz_i,indices_i,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr); 232 } 233 ierr = PetscFree(isz);CHKERRQ(ierr); 234 ierr = PetscFree(indices_temp);CHKERRQ(ierr); 235 PetscFunctionReturn(0); 236 } 237 238 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat mat,PetscInt nidx, PetscMPIInt nfrom,PetscMPIInt *fromranks,PetscInt *fromsizes, PetscInt *fromrows, PetscInt **sbrowsizes, PetscInt **sbrows) 239 { 240 PetscLayout rmap,cmap; 241 PetscInt i,j,k,l,*rows_i,*rows_data_ptr,**rows_data,max_fszs,rows_pos,*rows_pos_i; 242 PetscInt is_id,tnz,an,bn,rstart,cstart,row,start,end,col,totalrows,*sbdata; 243 PetscInt *indv_counts,indvc_ij,*sbsizes,*indices_tmp,*offsets; 244 const PetscInt *gcols,*ai,*aj,*bi,*bj; 245 Mat amat,bmat; 246 PetscMPIInt rank; 247 PetscBool done; 248 MPI_Comm comm; 249 PetscErrorCode ierr; 250 251 PetscFunctionBegin; 252 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr); 253 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr); 254 ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr); 255 /* Even if the mat is symmetric, we still assume it is not symmetric */ 256 ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 257 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"can not get row IJ \n"); 258 ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 259 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"can not get row IJ \n"); 260 /* total number of nonzero values is used to estimate the memory usage in the next step */ 261 tnz = ai[an]+bi[bn]; 262 ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr); 263 ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr); 264 ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr); 265 /* to find the longest message */ 266 max_fszs = 0; 267 for (i=0; i<nfrom; i++) max_fszs = fromsizes[2*i]>max_fszs ? fromsizes[2*i]:max_fszs; 268 /* better way to estimate number of nonzero in the mat??? 
*/ 269 ierr = PetscCalloc5(max_fszs*nidx,&rows_data_ptr,nidx,&rows_data,nidx,&rows_pos_i,nfrom*nidx,&indv_counts,tnz,&indices_tmp);CHKERRQ(ierr); 270 for (i=0; i<nidx; i++) rows_data[i] = rows_data_ptr+max_fszs*i; 271 rows_pos = 0; 272 totalrows = 0; 273 for (i=0; i<nfrom; i++){ 274 ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr); 275 /* group data together */ 276 for (j=0; j<fromsizes[2*i]; j+=2){ 277 is_id = fromrows[rows_pos++];/* no of is */ 278 rows_i = rows_data[is_id]; 279 rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++];/* row */ 280 } 281 /* estimate a space to avoid multiple allocations */ 282 for (j=0; j<nidx; j++){ 283 indvc_ij = 0; 284 rows_i = rows_data[j]; 285 for (l=0; l<rows_pos_i[j]; l++){ 286 row = rows_i[l]-rstart; 287 start = ai[row]; 288 end = ai[row+1]; 289 for (k=start; k<end; k++){ /* Amat */ 290 col = aj[k] + cstart; 291 indices_tmp[indvc_ij++] = col;/* do not count the rows from the original rank */ 292 } 293 start = bi[row]; 294 end = bi[row+1]; 295 for (k=start; k<end; k++) { /* Bmat */ 296 col = gcols[bj[k]]; 297 indices_tmp[indvc_ij++] = col; 298 } 299 } 300 ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr); 301 indv_counts[i*nidx+j] = indvc_ij; 302 totalrows += indvc_ij; 303 } 304 } 305 /* message triple <no of is, number of rows, rows> */ 306 ierr = PetscCalloc2(totalrows+nidx*nfrom*2,&sbdata,2*nfrom,&sbsizes);CHKERRQ(ierr); 307 totalrows = 0; 308 rows_pos = 0; 309 /* use this code again */ 310 for (i=0;i<nfrom;i++){ 311 ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr); 312 for (j=0; j<fromsizes[2*i]; j+=2){ 313 is_id = fromrows[rows_pos++]; 314 rows_i = rows_data[is_id]; 315 rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++]; 316 } 317 /* add data */ 318 for (j=0; j<nidx; j++){ 319 if (!indv_counts[i*nidx+j]) continue; 320 indvc_ij = 0; 321 sbdata[totalrows++] = j; 322 sbdata[totalrows++] = indv_counts[i*nidx+j]; 323 sbsizes[2*i] += 2; 324 rows_i = rows_data[j]; 325 for (l=0; l<rows_pos_i[j]; l++){ 326 row = rows_i[l]-rstart; 327 start = ai[row]; 328 end = ai[row+1]; 329 for (k=start; k<end; k++){ /* Amat */ 330 col = aj[k] + cstart; 331 indices_tmp[indvc_ij++] = col; 332 } 333 start = bi[row]; 334 end = bi[row+1]; 335 for (k=start; k<end; k++) { /* Bmat */ 336 col = gcols[bj[k]]; 337 indices_tmp[indvc_ij++] = col; 338 } 339 } 340 ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr); 341 sbsizes[2*i] += indvc_ij; 342 ierr = PetscMemcpy(sbdata+totalrows,indices_tmp,sizeof(PetscInt)*indvc_ij);CHKERRQ(ierr); 343 totalrows += indvc_ij; 344 } 345 } 346 ierr = PetscCalloc1(nfrom+1,&offsets);CHKERRQ(ierr); 347 for (i=0; i<nfrom; i++){ 348 offsets[i+1] = offsets[i] + sbsizes[2*i]; 349 sbsizes[2*i+1] = offsets[i]; 350 } 351 ierr = PetscFree(offsets);CHKERRQ(ierr); 352 if (sbrowsizes) *sbrowsizes = sbsizes; 353 if (sbrows) *sbrows = sbdata; 354 ierr = PetscFree5(rows_data_ptr,rows_data,rows_pos_i,indv_counts,indices_tmp);CHKERRQ(ierr); 355 ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr); 356 ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr); 357 PetscFunctionReturn(0); 358 } 359 360 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat mat,PetscInt nidx, IS is[]) 361 { 362 const PetscInt *gcols,*ai,*aj,*bi,*bj, *indices; 363 PetscInt tnz,an,bn,i,j,row,start,end,rstart,cstart,col,k,*indices_temp; 364 PetscInt lsize,lsize_tmp,owner; 365 PetscMPIInt rank; 366 Mat amat,bmat; 367 PetscBool done; 368 PetscLayout 
cmap,rmap;
369 MPI_Comm comm;
370 PetscErrorCode ierr;
371
372 PetscFunctionBegin;
373 ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
374 ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
375 ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr);
376 ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
377 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
378 ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
379 if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
380 /* is this a safe way to compute the number of nonzero values? */
381 tnz = ai[an]+bi[bn];
382 ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr);
383 ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr);
384 ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr);
385 /* this is a better memory estimate than the old implementation,
386 * which used the global size of the matrix
387 * */
388 ierr = PetscMalloc1(tnz,&indices_temp);CHKERRQ(ierr);
389 for (i=0; i<nidx; i++) {
390 ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr);
391 ierr = ISGetIndices(is[i],&indices);CHKERRQ(ierr);
392 lsize_tmp = 0;
393 for (j=0; j<lsize; j++) {
394 owner = -1;
395 row = indices[j];
396 ierr = PetscLayoutFindOwner(rmap,row,&owner);CHKERRQ(ierr);
397 if (owner != rank) continue;
398 /* local row number */
399 row -= rstart;
400 start = ai[row];
401 end = ai[row+1];
402 for (k=start; k<end; k++) { /* Amat */
403 col = aj[k] + cstart;
404 indices_temp[lsize_tmp++] = col;
405 }
406 start = bi[row];
407 end = bi[row+1];
408 for (k=start; k<end; k++) { /* Bmat */
409 col = gcols[bj[k]];
410 indices_temp[lsize_tmp++] = col;
411 }
412 }
413 ierr = ISRestoreIndices(is[i],&indices);CHKERRQ(ierr);
414 ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
415 ierr = PetscSortRemoveDupsInt(&lsize_tmp,indices_temp);CHKERRQ(ierr);
416 ierr = ISCreateGeneral(PETSC_COMM_SELF,lsize_tmp,indices_temp,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr);
417 }
418 ierr = PetscFree(indices_temp);CHKERRQ(ierr);
419 ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
420 ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
421 PetscFunctionReturn(0);
422 }
423
424
425 /*
426 Sample message format:
427 If processor A wants processor B to process some elements corresponding
428 to index sets is[1] and is[5]:
429 mesg [0] = 2 (number of index sets in the mesg)
430 -----------
431 mesg [1] = 1 => is[1]
432 mesg [2] = sizeof(is[1]);
433 -----------
434 mesg [3] = 5 => is[5]
435 mesg [4] = sizeof(is[5]);
436 -----------
437 mesg [5]
438 mesg [n] data(is[1])
439 -----------
440 mesg[n+1]
441 mesg[m] data(is[5])
442 -----------
443
444 Notes:
445 nrqs - number of requests sent (or to be sent out)
446 nrqr - number of requests received (which have to be, or already have been, processed)
447 */
448 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat C,PetscInt imax,IS is[])
449 {
450 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
451 PetscMPIInt *w1,*w2,nrqr,*w3,*w4,*onodes1,*olengths1,*onodes2,*olengths2;
452 const PetscInt **idx,*idx_i;
453 PetscInt *n,**data,len;
454 #if defined(PETSC_USE_CTABLE)
455 PetscTable *table_data,table_data_i;
456 PetscInt *tdata,tcount,tcount_max;
457 #else
458 PetscInt *data_i,*d_p;
459 #endif
460 PetscErrorCode ierr;
461 PetscMPIInt size,rank,tag1,tag2;
462 PetscInt M,i,j,k,**rbuf,row,proc = 0,nrqs,msz,**outdat,**ptr;
463 PetscInt
*ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2; 464 PetscBT *table; 465 MPI_Comm comm; 466 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2; 467 MPI_Status *s_status,*recv_status; 468 char *t_p; 469 470 PetscFunctionBegin; 471 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 472 size = c->size; 473 rank = c->rank; 474 M = C->rmap->N; 475 476 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 477 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 478 479 ierr = PetscMalloc2(imax,&idx,imax,&n);CHKERRQ(ierr); 480 481 for (i=0; i<imax; i++) { 482 ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr); 483 ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr); 484 } 485 486 /* evaluate communication - mesg to who,length of mesg, and buffer space 487 required. Based on this, buffers are allocated, and data copied into them */ 488 ierr = PetscMalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); 489 ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 490 ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 491 ierr = PetscMemzero(w3,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 492 for (i=0; i<imax; i++) { 493 ierr = PetscMemzero(w4,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialise work vector*/ 494 idx_i = idx[i]; 495 len = n[i]; 496 for (j=0; j<len; j++) { 497 row = idx_i[j]; 498 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries"); 499 ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr); 500 w4[proc]++; 501 } 502 for (j=0; j<size; j++) { 503 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 504 } 505 } 506 507 nrqs = 0; /* no of outgoing messages */ 508 msz = 0; /* total mesg length (for all proc */ 509 w1[rank] = 0; /* no mesg sent to intself */ 510 w3[rank] = 0; 511 for (i=0; i<size; i++) { 512 if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 513 } 514 /* pa - is list of processors to communicate with */ 515 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); 516 for (i=0,j=0; i<size; i++) { 517 if (w1[i]) {pa[j] = i; j++;} 518 } 519 520 /* Each message would have a header = 1 + 2*(no of IS) + data */ 521 for (i=0; i<nrqs; i++) { 522 j = pa[i]; 523 w1[j] += w2[j] + 2*w3[j]; 524 msz += w1[j]; 525 } 526 527 /* Determine the number of messages to expect, their lengths, from from-ids */ 528 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 529 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 530 531 /* Now post the Irecvs corresponding to these messages */ 532 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr); 533 534 /* Allocate Memory for outgoing messages */ 535 ierr = PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);CHKERRQ(ierr); 536 ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr); 537 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 538 539 { 540 PetscInt *iptr = tmp,ict = 0; 541 for (i=0; i<nrqs; i++) { 542 j = pa[i]; 543 iptr += ict; 544 outdat[j] = iptr; 545 ict = w1[j]; 546 } 547 } 548 549 /* Form the outgoing messages */ 550 /* plug in the headers */ 551 for (i=0; i<nrqs; i++) { 552 j = pa[i]; 553 outdat[j][0] = 0; 554 ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 555 ptr[j] = outdat[j] + 2*w3[j] + 1; 556 } 557 558 /* Memory for doing local proc's work */ 559 { 560 PetscInt M_BPB_imax = 0; 561 #if defined(PETSC_USE_CTABLE) 
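/* Explanatory note: with ctable support each index set gets a PetscTable hash map (table_data[i])
   that records "global row r is the k-th entry of is[i]" as the pair (r+1,k+1); the +1 shift keeps
   0 free to mean "not present".  Only the PetscBT bit arrays table[i] (M bits each, carved out of
   t_p) are allocated densely.  The #else branch below instead keeps a dense PetscInt array data[i]
   of length M per index set, which is simpler but needs O(M*imax) memory. */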
562 ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr); 563 ierr = PetscMalloc1(imax,&table_data);CHKERRQ(ierr); 564 for (i=0; i<imax; i++) { 565 ierr = PetscTableCreate(n[i]+1,M+1,&table_data[i]);CHKERRQ(ierr); 566 } 567 ierr = PetscCalloc4(imax,&table, imax,&data, imax,&isz, M_BPB_imax,&t_p);CHKERRQ(ierr); 568 for (i=0; i<imax; i++) { 569 table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i; 570 } 571 #else 572 PetscInt Mimax = 0; 573 ierr = PetscIntMultError(M,imax, &Mimax);CHKERRQ(ierr); 574 ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr); 575 ierr = PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mimax,&d_p, M_BPB_imax,&t_p);CHKERRQ(ierr); 576 for (i=0; i<imax; i++) { 577 table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i; 578 data[i] = d_p + M*i; 579 } 580 #endif 581 } 582 583 /* Parse the IS and update local tables and the outgoing buf with the data */ 584 { 585 PetscInt n_i,isz_i,*outdat_j,ctr_j; 586 PetscBT table_i; 587 588 for (i=0; i<imax; i++) { 589 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 590 n_i = n[i]; 591 table_i = table[i]; 592 idx_i = idx[i]; 593 #if defined(PETSC_USE_CTABLE) 594 table_data_i = table_data[i]; 595 #else 596 data_i = data[i]; 597 #endif 598 isz_i = isz[i]; 599 for (j=0; j<n_i; j++) { /* parse the indices of each IS */ 600 row = idx_i[j]; 601 ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr); 602 if (proc != rank) { /* copy to the outgoing buffer */ 603 ctr[proc]++; 604 *ptr[proc] = row; 605 ptr[proc]++; 606 } else if (!PetscBTLookupSet(table_i,row)) { 607 #if defined(PETSC_USE_CTABLE) 608 ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 609 #else 610 data_i[isz_i] = row; /* Update the local table */ 611 #endif 612 isz_i++; 613 } 614 } 615 /* Update the headers for the current IS */ 616 for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */ 617 if ((ctr_j = ctr[j])) { 618 outdat_j = outdat[j]; 619 k = ++outdat_j[0]; 620 outdat_j[2*k] = ctr_j; 621 outdat_j[2*k-1] = i; 622 } 623 } 624 isz[i] = isz_i; 625 } 626 } 627 628 /* Now post the sends */ 629 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 630 for (i=0; i<nrqs; ++i) { 631 j = pa[i]; 632 ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr); 633 } 634 635 /* No longer need the original indices */ 636 for (i=0; i<imax; ++i) { 637 ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr); 638 } 639 ierr = PetscFree2(idx,n);CHKERRQ(ierr); 640 641 for (i=0; i<imax; ++i) { 642 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 643 } 644 645 /* Do Local work */ 646 #if defined(PETSC_USE_CTABLE) 647 ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,NULL,table_data);CHKERRQ(ierr); 648 #else 649 ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,data,NULL);CHKERRQ(ierr); 650 #endif 651 652 /* Receive messages */ 653 ierr = PetscMalloc1(nrqr+1,&recv_status);CHKERRQ(ierr); 654 if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);} 655 656 ierr = PetscMalloc1(nrqs+1,&s_status);CHKERRQ(ierr); 657 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);} 658 659 /* Phase 1 sends are complete - deallocate buffers */ 660 ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr); 661 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 662 663 ierr = PetscMalloc1(nrqr+1,&xdata);CHKERRQ(ierr); 664 ierr = PetscMalloc1(nrqr+1,&isz1);CHKERRQ(ierr); 665 ierr = MatIncreaseOverlap_MPIAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr); 666 ierr = 
PetscFree(rbuf[0]);CHKERRQ(ierr); 667 ierr = PetscFree(rbuf);CHKERRQ(ierr); 668 669 670 /* Send the data back */ 671 /* Do a global reduction to know the buffer space req for incoming messages */ 672 { 673 PetscMPIInt *rw1; 674 675 ierr = PetscCalloc1(size,&rw1);CHKERRQ(ierr); 676 677 for (i=0; i<nrqr; ++i) { 678 proc = recv_status[i].MPI_SOURCE; 679 680 if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch"); 681 rw1[proc] = isz1[i]; 682 } 683 ierr = PetscFree(onodes1);CHKERRQ(ierr); 684 ierr = PetscFree(olengths1);CHKERRQ(ierr); 685 686 /* Determine the number of messages to expect, their lengths, from from-ids */ 687 ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr); 688 ierr = PetscFree(rw1);CHKERRQ(ierr); 689 } 690 /* Now post the Irecvs corresponding to these messages */ 691 ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr); 692 693 /* Now post the sends */ 694 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 695 for (i=0; i<nrqr; ++i) { 696 j = recv_status[i].MPI_SOURCE; 697 ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr); 698 } 699 700 /* receive work done on other processors */ 701 { 702 PetscInt is_no,ct1,max,*rbuf2_i,isz_i,jmax; 703 PetscMPIInt idex; 704 PetscBT table_i; 705 MPI_Status *status2; 706 707 ierr = PetscMalloc1((PetscMax(nrqr,nrqs)+1),&status2);CHKERRQ(ierr); 708 for (i=0; i<nrqs; ++i) { 709 ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr); 710 /* Process the message */ 711 rbuf2_i = rbuf2[idex]; 712 ct1 = 2*rbuf2_i[0]+1; 713 jmax = rbuf2[idex][0]; 714 for (j=1; j<=jmax; j++) { 715 max = rbuf2_i[2*j]; 716 is_no = rbuf2_i[2*j-1]; 717 isz_i = isz[is_no]; 718 table_i = table[is_no]; 719 #if defined(PETSC_USE_CTABLE) 720 table_data_i = table_data[is_no]; 721 #else 722 data_i = data[is_no]; 723 #endif 724 for (k=0; k<max; k++,ct1++) { 725 row = rbuf2_i[ct1]; 726 if (!PetscBTLookupSet(table_i,row)) { 727 #if defined(PETSC_USE_CTABLE) 728 ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 729 #else 730 data_i[isz_i] = row; 731 #endif 732 isz_i++; 733 } 734 } 735 isz[is_no] = isz_i; 736 } 737 } 738 739 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);} 740 ierr = PetscFree(status2);CHKERRQ(ierr); 741 } 742 743 #if defined(PETSC_USE_CTABLE) 744 tcount_max = 0; 745 for (i=0; i<imax; ++i) { 746 table_data_i = table_data[i]; 747 ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr); 748 if (tcount_max < tcount) tcount_max = tcount; 749 } 750 ierr = PetscMalloc1(tcount_max+1,&tdata);CHKERRQ(ierr); 751 #endif 752 753 for (i=0; i<imax; ++i) { 754 #if defined(PETSC_USE_CTABLE) 755 PetscTablePosition tpos; 756 table_data_i = table_data[i]; 757 758 ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr); 759 while (tpos) { 760 ierr = PetscTableGetNext(table_data_i,&tpos,&k,&j);CHKERRQ(ierr); 761 tdata[--j] = --k; 762 } 763 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],tdata,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 764 #else 765 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 766 #endif 767 } 768 769 ierr = PetscFree(onodes2);CHKERRQ(ierr); 770 ierr = PetscFree(olengths2);CHKERRQ(ierr); 771 772 ierr = PetscFree(pa);CHKERRQ(ierr); 773 ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr); 774 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 775 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 776 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 777 ierr = 
PetscFree(s_waits2);CHKERRQ(ierr); 778 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 779 ierr = PetscFree(s_status);CHKERRQ(ierr); 780 ierr = PetscFree(recv_status);CHKERRQ(ierr); 781 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 782 ierr = PetscFree(xdata);CHKERRQ(ierr); 783 ierr = PetscFree(isz1);CHKERRQ(ierr); 784 #if defined(PETSC_USE_CTABLE) 785 for (i=0; i<imax; i++) { 786 ierr = PetscTableDestroy((PetscTable*)&table_data[i]);CHKERRQ(ierr); 787 } 788 ierr = PetscFree(table_data);CHKERRQ(ierr); 789 ierr = PetscFree(tdata);CHKERRQ(ierr); 790 ierr = PetscFree4(table,data,isz,t_p);CHKERRQ(ierr); 791 #else 792 ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr); 793 #endif 794 PetscFunctionReturn(0); 795 } 796 797 /* 798 MatIncreaseOverlap_MPIAIJ_Local - Called by MatincreaseOverlap, to do 799 the work on the local processor. 800 801 Inputs: 802 C - MAT_MPIAIJ; 803 imax - total no of index sets processed at a time; 804 table - an array of char - size = m bits. 805 806 Output: 807 isz - array containing the count of the solution elements corresponding 808 to each index set; 809 data or table_data - pointer to the solutions 810 */ 811 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data,PetscTable *table_data) 812 { 813 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 814 Mat A = c->A,B = c->B; 815 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data; 816 PetscInt start,end,val,max,rstart,cstart,*ai,*aj; 817 PetscInt *bi,*bj,*garray,i,j,k,row,isz_i; 818 PetscBT table_i; 819 #if defined(PETSC_USE_CTABLE) 820 PetscTable table_data_i; 821 PetscErrorCode ierr; 822 PetscTablePosition tpos; 823 PetscInt tcount,*tdata; 824 #else 825 PetscInt *data_i; 826 #endif 827 828 PetscFunctionBegin; 829 rstart = C->rmap->rstart; 830 cstart = C->cmap->rstart; 831 ai = a->i; 832 aj = a->j; 833 bi = b->i; 834 bj = b->j; 835 garray = c->garray; 836 837 for (i=0; i<imax; i++) { 838 #if defined(PETSC_USE_CTABLE) 839 /* copy existing entries of table_data_i into tdata[] */ 840 table_data_i = table_data[i]; 841 ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr); 842 if (tcount != isz[i]) SETERRQ3(PETSC_COMM_SELF,0," tcount %d != isz[%d] %d",tcount,i,isz[i]); 843 844 ierr = PetscMalloc1(tcount,&tdata);CHKERRQ(ierr); 845 ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr); 846 while (tpos) { 847 ierr = PetscTableGetNext(table_data_i,&tpos,&row,&j);CHKERRQ(ierr); 848 tdata[--j] = --row; 849 if (j > tcount - 1) SETERRQ2(PETSC_COMM_SELF,0," j %d >= tcount %d",j,tcount); 850 } 851 #else 852 data_i = data[i]; 853 #endif 854 table_i = table[i]; 855 isz_i = isz[i]; 856 max = isz[i]; 857 858 for (j=0; j<max; j++) { 859 #if defined(PETSC_USE_CTABLE) 860 row = tdata[j] - rstart; 861 #else 862 row = data_i[j] - rstart; 863 #endif 864 start = ai[row]; 865 end = ai[row+1]; 866 for (k=start; k<end; k++) { /* Amat */ 867 val = aj[k] + cstart; 868 if (!PetscBTLookupSet(table_i,val)) { 869 #if defined(PETSC_USE_CTABLE) 870 ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 871 #else 872 data_i[isz_i] = val; 873 #endif 874 isz_i++; 875 } 876 } 877 start = bi[row]; 878 end = bi[row+1]; 879 for (k=start; k<end; k++) { /* Bmat */ 880 val = garray[bj[k]]; 881 if (!PetscBTLookupSet(table_i,val)) { 882 #if defined(PETSC_USE_CTABLE) 883 ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr); 884 #else 885 data_i[isz_i] = val; 886 #endif 887 isz_i++; 888 } 889 } 890 } 891 isz[i] = isz_i; 892 893 #if 
defined(PETSC_USE_CTABLE)
894 ierr = PetscFree(tdata);CHKERRQ(ierr);
895 #endif
896 }
897 PetscFunctionReturn(0);
898 }
899
900 /*
901 MatIncreaseOverlap_MPIAIJ_Receive - Process the received messages,
902 and return the output
903
904 Input:
905 C - the matrix
906 nrqr - number of messages being processed.
907 rbuf - an array of pointers to the received requests
908
909 Output:
910 xdata - array of messages to be sent back
911 isz1 - size of each message
912
913 For better efficiency perhaps we should malloc each xdata[i] separately,
914 then if a realloc is required we need only copy the data for that one row
915 rather than all previous rows, as is done now where a single large chunk of
916 memory is used.
917
918 */
919 static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
920 {
921 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
922 Mat A = c->A,B = c->B;
923 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
924 PetscErrorCode ierr;
925 PetscInt rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
926 PetscInt row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
927 PetscInt val,max1,max2,m,no_malloc =0,*tmp,new_estimate,ctr;
928 PetscInt *rbuf_i,kmax,rbuf_0;
929 PetscBT xtable;
930
931 PetscFunctionBegin;
932 m = C->rmap->N;
933 rstart = C->rmap->rstart;
934 cstart = C->cmap->rstart;
935 ai = a->i;
936 aj = a->j;
937 bi = b->i;
938 bj = b->j;
939 garray = c->garray;
940
941
942 for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
943 rbuf_i = rbuf[i];
944 rbuf_0 = rbuf_i[0];
945 ct += rbuf_0;
946 for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j];
947 }
948
949 if (C->rmap->n) max1 = ct*(a->nz + b->nz)/C->rmap->n;
950 else max1 = 1;
951 mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
952 ierr = PetscMalloc1(mem_estimate,&xdata[0]);CHKERRQ(ierr);
953 ++no_malloc;
954 ierr = PetscBTCreate(m,&xtable);CHKERRQ(ierr);
955 ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);
956
957 ct3 = 0;
958 for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
959 rbuf_i = rbuf[i];
960 rbuf_0 = rbuf_i[0];
961 ct1 = 2*rbuf_0+1;
962 ct2 = ct1;
963 ct3 += ct1;
964 for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
965 ierr = PetscBTMemzero(m,xtable);CHKERRQ(ierr);
966 oct2 = ct2;
967 kmax = rbuf_i[2*j];
968 for (k=0; k<kmax; k++,ct1++) {
969 row = rbuf_i[ct1];
970 if (!PetscBTLookupSet(xtable,row)) {
971 if (!(ct3 < mem_estimate)) {
972 new_estimate = (PetscInt)(1.5*mem_estimate)+1;
973 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr);
974 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
975 ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
976 xdata[0] = tmp;
977 mem_estimate = new_estimate; ++no_malloc;
978 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
979 }
980 xdata[i][ct2++] = row;
981 ct3++;
982 }
983 }
984 for (k=oct2,max2=ct2; k<max2; k++) {
985 row = xdata[i][k] - rstart;
986 start = ai[row];
987 end = ai[row+1];
988 for (l=start; l<end; l++) {
989 val = aj[l] + cstart;
990 if (!PetscBTLookupSet(xtable,val)) {
991 if (!(ct3 < mem_estimate)) {
992 new_estimate = (PetscInt)(1.5*mem_estimate)+1;
993 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr);
994 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
995 ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
996 xdata[0] = tmp;
997 mem_estimate = new_estimate; ++no_malloc;
998 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
999 }
1000 xdata[i][ct2++] = val;
1001 ct3++;
1002 }
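/* Note: whenever the single send buffer xdata[0] is grown above, every per-message
   pointer xdata[ctr] with ctr<=i is rebased onto the new allocation using the message
   sizes recorded in isz1[], so the messages assembled so far stay contiguous and valid. */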
1003 } 1004 start = bi[row]; 1005 end = bi[row+1]; 1006 for (l=start; l<end; l++) { 1007 val = garray[bj[l]]; 1008 if (!PetscBTLookupSet(xtable,val)) { 1009 if (!(ct3 < mem_estimate)) { 1010 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 1011 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 1012 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 1013 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 1014 xdata[0] = tmp; 1015 mem_estimate = new_estimate; ++no_malloc; 1016 for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 1017 } 1018 xdata[i][ct2++] = val; 1019 ct3++; 1020 } 1021 } 1022 } 1023 /* Update the header*/ 1024 xdata[i][2*j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/ 1025 xdata[i][2*j-1] = rbuf_i[2*j-1]; 1026 } 1027 xdata[i][0] = rbuf_0; 1028 xdata[i+1] = xdata[i] + ct2; 1029 isz1[i] = ct2; /* size of each message */ 1030 } 1031 ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr); 1032 ierr = PetscInfo3(C,"Allocated %D bytes, required %D bytes, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr); 1033 PetscFunctionReturn(0); 1034 } 1035 /* -------------------------------------------------------------------------*/ 1036 extern PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*); 1037 extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType); 1038 /* 1039 Every processor gets the entire matrix 1040 */ 1041 PetscErrorCode MatGetSubMatrix_MPIAIJ_All(Mat A,MatGetSubMatrixOption flag,MatReuse scall,Mat *Bin[]) 1042 { 1043 Mat B; 1044 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 1045 Mat_SeqAIJ *b,*ad = (Mat_SeqAIJ*)a->A->data,*bd = (Mat_SeqAIJ*)a->B->data; 1046 PetscErrorCode ierr; 1047 PetscMPIInt size,rank,*recvcounts = 0,*displs = 0; 1048 PetscInt sendcount,i,*rstarts = A->rmap->range,n,cnt,j; 1049 PetscInt m,*b_sendj,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf; 1050 MatScalar *sendbuf,*recvbuf,*a_sendbuf,*b_sendbuf; 1051 1052 PetscFunctionBegin; 1053 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 1054 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr); 1055 1056 if (scall == MAT_INITIAL_MATRIX) { 1057 /* ---------------------------------------------------------------- 1058 Tell every processor the number of nonzeros per row 1059 */ 1060 ierr = PetscMalloc1(A->rmap->N,&lens);CHKERRQ(ierr); 1061 for (i=A->rmap->rstart; i<A->rmap->rend; i++) { 1062 lens[i] = ad->i[i-A->rmap->rstart+1] - ad->i[i-A->rmap->rstart] + bd->i[i-A->rmap->rstart+1] - bd->i[i-A->rmap->rstart]; 1063 } 1064 ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr); 1065 for (i=0; i<size; i++) { 1066 recvcounts[i] = A->rmap->range[i+1] - A->rmap->range[i]; 1067 displs[i] = A->rmap->range[i]; 1068 } 1069 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1070 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1071 #else 1072 sendcount = A->rmap->rend - A->rmap->rstart; 1073 ierr = MPI_Allgatherv(lens+A->rmap->rstart,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1074 #endif 1075 /* --------------------------------------------------------------- 1076 Create the sequential matrix of the same type as the local block diagonal 1077 */ 1078 ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr); 1079 ierr = MatSetSizes(B,A->rmap->N,A->cmap->N,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1080 ierr = 
MatSetBlockSizesFromMats(B,A,A);CHKERRQ(ierr); 1081 ierr = MatSetType(B,((PetscObject)a->A)->type_name);CHKERRQ(ierr); 1082 ierr = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr); 1083 ierr = PetscMalloc1(1,Bin);CHKERRQ(ierr); 1084 **Bin = B; 1085 b = (Mat_SeqAIJ*)B->data; 1086 1087 /*-------------------------------------------------------------------- 1088 Copy my part of matrix column indices over 1089 */ 1090 sendcount = ad->nz + bd->nz; 1091 jsendbuf = b->j + b->i[rstarts[rank]]; 1092 a_jsendbuf = ad->j; 1093 b_jsendbuf = bd->j; 1094 n = A->rmap->rend - A->rmap->rstart; 1095 cnt = 0; 1096 for (i=0; i<n; i++) { 1097 1098 /* put in lower diagonal portion */ 1099 m = bd->i[i+1] - bd->i[i]; 1100 while (m > 0) { 1101 /* is it above diagonal (in bd (compressed) numbering) */ 1102 if (garray[*b_jsendbuf] > A->rmap->rstart + i) break; 1103 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 1104 m--; 1105 } 1106 1107 /* put in diagonal portion */ 1108 for (j=ad->i[i]; j<ad->i[i+1]; j++) { 1109 jsendbuf[cnt++] = A->rmap->rstart + *a_jsendbuf++; 1110 } 1111 1112 /* put in upper diagonal portion */ 1113 while (m-- > 0) { 1114 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 1115 } 1116 } 1117 if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt); 1118 1119 /*-------------------------------------------------------------------- 1120 Gather all column indices to all processors 1121 */ 1122 for (i=0; i<size; i++) { 1123 recvcounts[i] = 0; 1124 for (j=A->rmap->range[i]; j<A->rmap->range[i+1]; j++) { 1125 recvcounts[i] += lens[j]; 1126 } 1127 } 1128 displs[0] = 0; 1129 for (i=1; i<size; i++) { 1130 displs[i] = displs[i-1] + recvcounts[i-1]; 1131 } 1132 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1133 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1134 #else 1135 ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1136 #endif 1137 /*-------------------------------------------------------------------- 1138 Assemble the matrix into useable form (note numerical values not yet set) 1139 */ 1140 /* set the b->ilen (length of each row) values */ 1141 ierr = PetscMemcpy(b->ilen,lens,A->rmap->N*sizeof(PetscInt));CHKERRQ(ierr); 1142 /* set the b->i indices */ 1143 b->i[0] = 0; 1144 for (i=1; i<=A->rmap->N; i++) { 1145 b->i[i] = b->i[i-1] + lens[i-1]; 1146 } 1147 ierr = PetscFree(lens);CHKERRQ(ierr); 1148 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1149 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1150 1151 } else { 1152 B = **Bin; 1153 b = (Mat_SeqAIJ*)B->data; 1154 } 1155 1156 /*-------------------------------------------------------------------- 1157 Copy my part of matrix numerical values into the values location 1158 */ 1159 if (flag == MAT_GET_VALUES) { 1160 sendcount = ad->nz + bd->nz; 1161 sendbuf = b->a + b->i[rstarts[rank]]; 1162 a_sendbuf = ad->a; 1163 b_sendbuf = bd->a; 1164 b_sendj = bd->j; 1165 n = A->rmap->rend - A->rmap->rstart; 1166 cnt = 0; 1167 for (i=0; i<n; i++) { 1168 1169 /* put in lower diagonal portion */ 1170 m = bd->i[i+1] - bd->i[i]; 1171 while (m > 0) { 1172 /* is it above diagonal (in bd (compressed) numbering) */ 1173 if (garray[*b_sendj] > A->rmap->rstart + i) break; 1174 sendbuf[cnt++] = *b_sendbuf++; 1175 m--; 1176 b_sendj++; 1177 } 1178 1179 /* put in diagonal portion */ 1180 for (j=ad->i[i]; j<ad->i[i+1]; j++) { 1181 sendbuf[cnt++] = 
*a_sendbuf++; 1182 } 1183 1184 /* put in upper diagonal portion */ 1185 while (m-- > 0) { 1186 sendbuf[cnt++] = *b_sendbuf++; 1187 b_sendj++; 1188 } 1189 } 1190 if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt); 1191 1192 /* ----------------------------------------------------------------- 1193 Gather all numerical values to all processors 1194 */ 1195 if (!recvcounts) { 1196 ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr); 1197 } 1198 for (i=0; i<size; i++) { 1199 recvcounts[i] = b->i[rstarts[i+1]] - b->i[rstarts[i]]; 1200 } 1201 displs[0] = 0; 1202 for (i=1; i<size; i++) { 1203 displs[i] = displs[i-1] + recvcounts[i-1]; 1204 } 1205 recvbuf = b->a; 1206 #if defined(PETSC_HAVE_MPI_IN_PLACE) 1207 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1208 #else 1209 ierr = MPI_Allgatherv(sendbuf,sendcount,MPIU_SCALAR,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1210 #endif 1211 } /* endof (flag == MAT_GET_VALUES) */ 1212 ierr = PetscFree2(recvcounts,displs);CHKERRQ(ierr); 1213 1214 if (A->symmetric) { 1215 ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 1216 } else if (A->hermitian) { 1217 ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr); 1218 } else if (A->structurally_symmetric) { 1219 ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 1220 } 1221 PetscFunctionReturn(0); 1222 } 1223 1224 PetscErrorCode MatDestroy_MPIAIJ_MatGetSubmatrices(Mat C) 1225 { 1226 PetscErrorCode ierr; 1227 Mat_SeqAIJ *c = (Mat_SeqAIJ*)C->data; 1228 Mat_SubMat *submatj = c->submatis1; 1229 PetscInt i; 1230 1231 PetscFunctionBegin; 1232 if (!submatj->id) { /* delete data that are linked only to submats[id=0] */ 1233 ierr = PetscFree4(submatj->sbuf1,submatj->ptr,submatj->tmp,submatj->ctr);CHKERRQ(ierr); 1234 1235 for (i=0; i<submatj->nrqr; ++i) { 1236 ierr = PetscFree(submatj->sbuf2[i]);CHKERRQ(ierr); 1237 } 1238 ierr = PetscFree3(submatj->sbuf2,submatj->req_size,submatj->req_source1);CHKERRQ(ierr); 1239 1240 if (submatj->rbuf1) { 1241 ierr = PetscFree(submatj->rbuf1[0]);CHKERRQ(ierr); 1242 ierr = PetscFree(submatj->rbuf1);CHKERRQ(ierr); 1243 } 1244 1245 for (i=0; i<submatj->nrqs; ++i) { 1246 ierr = PetscFree(submatj->rbuf3[i]);CHKERRQ(ierr); 1247 } 1248 ierr = PetscFree3(submatj->req_source2,submatj->rbuf2,submatj->rbuf3);CHKERRQ(ierr); 1249 ierr = PetscFree(submatj->pa);CHKERRQ(ierr); 1250 } 1251 1252 #if defined(PETSC_USE_CTABLE) 1253 ierr = PetscTableDestroy((PetscTable*)&submatj->rmap);CHKERRQ(ierr); 1254 if (submatj->cmap_loc) {ierr = PetscFree(submatj->cmap_loc);CHKERRQ(ierr);} 1255 ierr = PetscFree(submatj->rmap_loc);CHKERRQ(ierr); 1256 #else 1257 ierr = PetscFree(submatj->rmap);CHKERRQ(ierr); 1258 #endif 1259 1260 if (!submatj->allcolumns) { 1261 #if defined(PETSC_USE_CTABLE) 1262 ierr = PetscTableDestroy((PetscTable*)&submatj->cmap);CHKERRQ(ierr); 1263 #else 1264 ierr = PetscFree(submatj->cmap);CHKERRQ(ierr); 1265 #endif 1266 } 1267 ierr = submatj->destroy(C);CHKERRQ(ierr); 1268 ierr = PetscFree(submatj->row2proc);CHKERRQ(ierr); 1269 1270 ierr = PetscFree(submatj);CHKERRQ(ierr); 1271 PetscFunctionReturn(0); 1272 } 1273 1274 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool allcolumns,Mat *submats) 1275 { 1276 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 1277 
Mat submat,A = c->A,B = c->B; 1278 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data,*subc; 1279 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,nzA,nzB; 1280 PetscInt cstart = C->cmap->rstart,cend = C->cmap->rend,rstart = C->rmap->rstart,*bmap = c->garray; 1281 const PetscInt *icol,*irow; 1282 PetscInt nrow,ncol,start; 1283 PetscErrorCode ierr; 1284 PetscMPIInt rank,size,tag1,tag2,tag3,tag4,*w1,*w2,nrqr; 1285 PetscInt **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,ct3,**rbuf1,row,proc; 1286 PetscInt nrqs=0,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol,*iptr; 1287 PetscInt **rbuf3,*req_source1,*req_source2,**sbuf_aj,**rbuf2,max1,nnz; 1288 PetscInt *lens,rmax,ncols,*cols,Crow; 1289 #if defined(PETSC_USE_CTABLE) 1290 PetscTable cmap,rmap; 1291 PetscInt *cmap_loc,*rmap_loc; 1292 #else 1293 PetscInt *cmap,*rmap; 1294 #endif 1295 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*sbuf1_i,*rbuf2_i,*rbuf3_i; 1296 PetscInt *cworkB,lwrite,*subcols,*row2proc; 1297 PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a,*subvals=NULL; 1298 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 1299 MPI_Request *r_waits4,*s_waits3 = NULL,*s_waits4; 1300 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3 = NULL,*s_status2; 1301 MPI_Status *r_status3 = NULL,*r_status4,*s_status4; 1302 MPI_Comm comm; 1303 PetscScalar **rbuf4,**sbuf_aa,*vals,*sbuf_aa_i,*rbuf4_i; 1304 PetscMPIInt *onodes1,*olengths1,idex,end; 1305 Mat_SubMat *smatis1; 1306 PetscBool isrowsorted; 1307 1308 PetscFunctionBegin; 1309 if (ismax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"This routine only works when all processes have ismax=1"); 1310 1311 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1312 size = c->size; 1313 rank = c->rank; 1314 1315 ierr = ISSorted(isrow[0],&isrowsorted);CHKERRQ(ierr); 1316 if (!isrowsorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow[0] must be sorted"); 1317 1318 ierr = ISGetIndices(isrow[0],&irow);CHKERRQ(ierr); 1319 ierr = ISGetLocalSize(isrow[0],&nrow);CHKERRQ(ierr); 1320 if (allcolumns) { 1321 icol = NULL; 1322 ncol = C->cmap->N; 1323 } else { 1324 ierr = ISGetIndices(iscol[0],&icol);CHKERRQ(ierr); 1325 ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr); 1326 } 1327 1328 if (scall == MAT_INITIAL_MATRIX) { 1329 PetscInt *sbuf2_i,*cworkA,lwrite,ctmp; 1330 1331 /* Get some new tags to keep the communication clean */ 1332 tag1 = ((PetscObject)C)->tag; 1333 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 1334 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 1335 1336 /* evaluate communication - mesg to who, length of mesg, and buffer space 1337 required. Based on this, buffers are allocated, and data copied into them */ 1338 ierr = PetscCalloc2(size,&w1,size,&w2);CHKERRQ(ierr); 1339 ierr = PetscMalloc1(nrow,&row2proc);CHKERRQ(ierr); 1340 1341 /* w1[proc] = num of rows owned by proc -- to be requested */ 1342 proc = 0; 1343 nrqs = 0; /* num of outgoing messages */ 1344 for (j=0; j<nrow; j++) { 1345 row = irow[j]; /* sorted! 
*/ 1346 while (row >= C->rmap->range[proc+1]) proc++; 1347 w1[proc]++; 1348 row2proc[j] = proc; /* map row index to proc */ 1349 1350 if (proc != rank && !w2[proc]) { 1351 w2[proc] = 1; nrqs++; 1352 } 1353 } 1354 w1[rank] = 0; /* rows owned by self will not be requested */ 1355 1356 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 1357 for (proc=0,j=0; proc<size; proc++) { 1358 if (w1[proc]) { pa[j++] = proc;} 1359 } 1360 1361 /* Each message would have a header = 1 + 2*(num of IS) + data (here,num of IS = 1) */ 1362 msz = 0; /* total mesg length (for all procs) */ 1363 for (i=0; i<nrqs; i++) { 1364 proc = pa[i]; 1365 w1[proc] += 3; 1366 msz += w1[proc]; 1367 } 1368 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 1369 1370 /* Determine nrqr, the number of messages to expect, their lengths, from from-ids */ 1371 /* if w2[proc]=1, a message of length w1[proc] will be sent to proc; */ 1372 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 1373 1374 /* Input: nrqs: nsend; nrqr: nrecv; w1: msg length to be sent; 1375 Output: onodes1: recv node-ids; olengths1: corresponding recv message length */ 1376 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 1377 1378 /* Now post the Irecvs corresponding to these messages */ 1379 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 1380 1381 ierr = PetscFree(onodes1);CHKERRQ(ierr); 1382 ierr = PetscFree(olengths1);CHKERRQ(ierr); 1383 1384 /* Allocate Memory for outgoing messages */ 1385 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 1386 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 1387 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 1388 1389 /* subf1[pa[0]] = tmp, subf1[pa[i]] = subf1[pa[i-1]] + w1[pa[i-1]] */ 1390 iptr = tmp; 1391 for (i=0; i<nrqs; i++) { 1392 proc = pa[i]; 1393 sbuf1[proc] = iptr; 1394 iptr += w1[proc]; 1395 } 1396 1397 /* Form the outgoing messages */ 1398 /* Initialize the header space */ 1399 for (i=0; i<nrqs; i++) { 1400 proc = pa[i]; 1401 ierr = PetscMemzero(sbuf1[proc],3*sizeof(PetscInt));CHKERRQ(ierr); 1402 ptr[proc] = sbuf1[proc] + 3; 1403 } 1404 1405 /* Parse the isrow and copy data into outbuf */ 1406 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 1407 for (j=0; j<nrow; j++) { /* parse the indices of each IS */ 1408 proc = row2proc[j]; 1409 if (proc != rank) { /* copy to the outgoing buf*/ 1410 *ptr[proc] = irow[j]; 1411 ctr[proc]++; ptr[proc]++; 1412 } 1413 } 1414 1415 /* Update the headers for the current IS */ 1416 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 1417 if ((ctr_j = ctr[j])) { 1418 sbuf1_j = sbuf1[j]; 1419 k = ++sbuf1_j[0]; 1420 sbuf1_j[2*k] = ctr_j; 1421 sbuf1_j[2*k-1] = 0; 1422 } 1423 } 1424 1425 /* Now post the sends */ 1426 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 1427 for (i=0; i<nrqs; ++i) { 1428 proc = pa[i]; 1429 ierr = MPI_Isend(sbuf1[proc],w1[proc],MPIU_INT,proc,tag1,comm,s_waits1+i);CHKERRQ(ierr); 1430 } 1431 1432 /* Post Receives to capture the buffer size */ 1433 ierr = PetscMalloc4(nrqs+1,&r_status2,nrqr+1,&s_waits2,nrqs+1,&r_waits2,nrqr+1,&s_status2);CHKERRQ(ierr); 1434 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 1435 1436 rbuf2[0] = tmp + msz; 1437 for (i=1; i<nrqs; ++i) rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]]; 1438 1439 for (i=0; i<nrqs; ++i) { 1440 proc = pa[i]; 1441 ierr = 
MPI_Irecv(rbuf2[i],w1[proc],MPIU_INT,proc,tag2,comm,r_waits2+i);CHKERRQ(ierr); 1442 } 1443 1444 ierr = PetscFree2(w1,w2);CHKERRQ(ierr); 1445 1446 /* Send to other procs the buf size they should allocate */ 1447 /* Receive messages*/ 1448 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 1449 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 1450 1451 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 1452 for (i=0; i<nrqr; ++i) { 1453 req_size[i] = 0; 1454 rbuf1_i = rbuf1[i]; 1455 start = 2*rbuf1_i[0] + 1; 1456 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 1457 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 1458 sbuf2_i = sbuf2[i]; 1459 for (j=start; j<end; j++) { 1460 k = rbuf1_i[j] - rstart; 1461 ncols = ai[k+1] - ai[k] + bi[k+1] - bi[k]; 1462 sbuf2_i[j] = ncols; 1463 req_size[i] += ncols; 1464 } 1465 req_source1[i] = r_status1[i].MPI_SOURCE; 1466 1467 /* form the header */ 1468 sbuf2_i[0] = req_size[i]; 1469 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 1470 1471 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 1472 } 1473 1474 ierr = PetscFree(r_status1);CHKERRQ(ierr); 1475 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 1476 1477 /* rbuf2 is received, Post recv column indices a->j */ 1478 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 1479 1480 ierr = PetscMalloc4(nrqs+1,&r_waits3,nrqr+1,&s_waits3,nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr); 1481 for (i=0; i<nrqs; ++i) { 1482 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 1483 req_source2[i] = r_status2[i].MPI_SOURCE; 1484 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 1485 } 1486 1487 /* Wait on sends1 and sends2 */ 1488 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 1489 ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr); 1490 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 1491 ierr = PetscFree(s_status1);CHKERRQ(ierr); 1492 1493 ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr); 1494 ierr = PetscFree4(r_status2,s_waits2,r_waits2,s_status2);CHKERRQ(ierr); 1495 1496 /* Now allocate sending buffers for a->j, and send them off */ 1497 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 1498 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1499 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 1500 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 1501 1502 for (i=0; i<nrqr; i++) { /* for each requested message */ 1503 rbuf1_i = rbuf1[i]; 1504 sbuf_aj_i = sbuf_aj[i]; 1505 ct1 = 2*rbuf1_i[0] + 1; 1506 ct2 = 0; 1507 /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ1(PETSC_COMM_SELF,0,"max1 %d != 1",max1); */ 1508 1509 kmax = rbuf1[i][2]; 1510 for (k=0; k<kmax; k++,ct1++) { /* for each row */ 1511 row = rbuf1_i[ct1] - rstart; 1512 nzA = ai[row+1] - ai[row]; 1513 nzB = bi[row+1] - bi[row]; 1514 ncols = nzA + nzB; 1515 cworkA = aj + ai[row]; cworkB = bj + bi[row]; 1516 1517 /* load the column indices for this row into cols*/ 1518 cols = sbuf_aj_i + ct2; 1519 1520 lwrite = 0; 1521 for (l=0; l<nzB; l++) { 1522 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp; 1523 } 1524 for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l]; 1525 for (l=0; l<nzB; l++) { 1526 if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp; 1527 } 1528 1529 ct2 += ncols; 1530 } 1531 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 1532 } 1533 1534 /* create column map (cmap): global col of C 
-> local col of submat */ 1535 #if defined(PETSC_USE_CTABLE) 1536 if (!allcolumns) { 1537 ierr = PetscTableCreate(ncol+1,C->cmap->N+1,&cmap);CHKERRQ(ierr); 1538 ierr = PetscCalloc1(C->cmap->n,&cmap_loc);CHKERRQ(ierr); 1539 for (j=0; j<ncol; j++) { /* use array cmap_loc[] for local col indices */ 1540 if (icol[j] >= cstart && icol[j] <cend) { 1541 cmap_loc[icol[j] - cstart] = j+1; 1542 } else { /* use PetscTable for non-local col indices */ 1543 ierr = PetscTableAdd(cmap,icol[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1544 } 1545 } 1546 } else { 1547 cmap = NULL; 1548 cmap_loc = NULL; 1549 } 1550 ierr = PetscCalloc1(C->rmap->n,&rmap_loc);CHKERRQ(ierr); 1551 #else 1552 if (!allcolumns) { 1553 ierr = PetscCalloc1(C->cmap->N,&cmap);CHKERRQ(ierr); 1554 for (j=0; j<ncol; j++) cmap[icol[j]] = j+1; 1555 } else { 1556 cmap = NULL; 1557 } 1558 #endif 1559 1560 /* Create lens for MatSeqAIJSetPreallocation() */ 1561 ierr = PetscCalloc1(nrow,&lens);CHKERRQ(ierr); 1562 1563 /* Compute lens from local part of C */ 1564 for (j=0; j<nrow; j++) { 1565 row = irow[j]; 1566 proc = row2proc[j]; 1567 if (proc == rank) { 1568 /* diagonal part A = c->A */ 1569 ncols = ai[row-rstart+1] - ai[row-rstart]; 1570 cols = aj + ai[row-rstart]; 1571 if (!allcolumns) { 1572 for (k=0; k<ncols; k++) { 1573 #if defined(PETSC_USE_CTABLE) 1574 tcol = cmap_loc[cols[k]]; 1575 #else 1576 tcol = cmap[cols[k]+cstart]; 1577 #endif 1578 if (tcol) lens[j]++; 1579 } 1580 } else { /* allcolumns */ 1581 lens[j] = ncols; 1582 } 1583 1584 /* off-diagonal part B = c->B */ 1585 ncols = bi[row-rstart+1] - bi[row-rstart]; 1586 cols = bj + bi[row-rstart]; 1587 if (!allcolumns) { 1588 for (k=0; k<ncols; k++) { 1589 #if defined(PETSC_USE_CTABLE) 1590 ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr); 1591 #else 1592 tcol = cmap[bmap[cols[k]]]; 1593 #endif 1594 if (tcol) lens[j]++; 1595 } 1596 } else { /* allcolumns */ 1597 lens[j] += ncols; 1598 } 1599 } 1600 } 1601 1602 /* Create row map (rmap): global row of C -> local row of submat */ 1603 #if defined(PETSC_USE_CTABLE) 1604 ierr = PetscTableCreate(nrow+1,C->rmap->N+1,&rmap);CHKERRQ(ierr); 1605 for (j=0; j<nrow; j++) { 1606 row = irow[j]; 1607 proc = row2proc[j]; 1608 if (proc == rank) { /* a local row */ 1609 rmap_loc[row - rstart] = j; 1610 } else { 1611 ierr = PetscTableAdd(rmap,irow[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1612 } 1613 } 1614 #else 1615 ierr = PetscCalloc1(C->rmap->N,&rmap);CHKERRQ(ierr); 1616 for (j=0; j<nrow; j++) { 1617 rmap[irow[j]] = j; 1618 } 1619 #endif 1620 1621 /* Update lens from offproc data */ 1622 /* recv a->j is done */ 1623 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 1624 for (i=0; i<nrqs; i++) { 1625 proc = pa[i]; 1626 sbuf1_i = sbuf1[proc]; 1627 /* jmax = sbuf1_i[0]; if (jmax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"jmax !=1"); */ 1628 ct1 = 2 + 1; 1629 ct2 = 0; 1630 rbuf2_i = rbuf2[i]; /* received length of C->j */ 1631 rbuf3_i = rbuf3[i]; /* received C->j */ 1632 1633 /* is_no = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */ 1634 max1 = sbuf1_i[2]; 1635 for (k=0; k<max1; k++,ct1++) { 1636 #if defined(PETSC_USE_CTABLE) 1637 ierr = PetscTableFind(rmap,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 1638 row--; 1639 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1640 #else 1641 row = rmap[sbuf1_i[ct1]]; /* the row index in submat */ 1642 #endif 1643 /* Now, store row index of submat in sbuf1_i[ct1] */ 1644 sbuf1_i[ct1] = row; 1645 1646 nnz = rbuf2_i[ct1]; 1647 if 
(!allcolumns) { 1648 for (l=0; l<nnz; l++,ct2++) { 1649 #if defined(PETSC_USE_CTABLE) 1650 if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) { 1651 tcol = cmap_loc[rbuf3_i[ct2] - cstart]; 1652 } else { 1653 ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1654 } 1655 #else 1656 tcol = cmap[rbuf3_i[ct2]]; /* column index in submat */ 1657 #endif 1658 if (tcol) lens[row]++; 1659 } 1660 } else { /* allcolumns */ 1661 lens[row] += nnz; 1662 } 1663 } 1664 } 1665 ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr); 1666 ierr = PetscFree4(r_waits3,s_waits3,r_status3,s_status3);CHKERRQ(ierr); 1667 1668 /* Create the submatrices */ 1669 ierr = MatCreate(PETSC_COMM_SELF,&submat);CHKERRQ(ierr); 1670 ierr = MatSetSizes(submat,nrow,ncol,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1671 1672 ierr = ISGetBlockSize(isrow[0],&i);CHKERRQ(ierr); 1673 ierr = ISGetBlockSize(iscol[0],&j);CHKERRQ(ierr); 1674 ierr = MatSetBlockSizes(submat,i,j);CHKERRQ(ierr); 1675 ierr = MatSetType(submat,((PetscObject)A)->type_name);CHKERRQ(ierr); 1676 ierr = MatSeqAIJSetPreallocation(submat,0,lens);CHKERRQ(ierr); 1677 1678 /* create struct Mat_SubMat and attached it to submat */ 1679 ierr = PetscNew(&smatis1);CHKERRQ(ierr); 1680 subc = (Mat_SeqAIJ*)submat->data; 1681 subc->submatis1 = smatis1; 1682 1683 smatis1->id = 0; 1684 smatis1->nrqs = nrqs; 1685 smatis1->nrqr = nrqr; 1686 smatis1->rbuf1 = rbuf1; 1687 smatis1->rbuf2 = rbuf2; 1688 smatis1->rbuf3 = rbuf3; 1689 smatis1->sbuf2 = sbuf2; 1690 smatis1->req_source2 = req_source2; 1691 1692 smatis1->sbuf1 = sbuf1; 1693 smatis1->ptr = ptr; 1694 smatis1->tmp = tmp; 1695 smatis1->ctr = ctr; 1696 1697 smatis1->pa = pa; 1698 smatis1->req_size = req_size; 1699 smatis1->req_source1 = req_source1; 1700 1701 smatis1->allcolumns = allcolumns; 1702 smatis1->singleis = PETSC_TRUE; 1703 smatis1->row2proc = row2proc; 1704 smatis1->rmap = rmap; 1705 smatis1->cmap = cmap; 1706 #if defined(PETSC_USE_CTABLE) 1707 smatis1->rmap_loc = rmap_loc; 1708 smatis1->cmap_loc = cmap_loc; 1709 #endif 1710 1711 smatis1->destroy = submat->ops->destroy; 1712 submat->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices; 1713 submat->factortype = C->factortype; 1714 1715 /* compute rmax */ 1716 rmax = 0; 1717 for (i=0; i<nrow; i++) rmax = PetscMax(rmax,lens[i]); 1718 1719 } else { /* scall == MAT_REUSE_MATRIX */ 1720 submat = submats[0]; 1721 if (submat->rmap->n != nrow || submat->cmap->n != ncol) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. 
wrong size"); 1722 1723 subc = (Mat_SeqAIJ*)submat->data; 1724 rmax = subc->rmax; 1725 smatis1 = subc->submatis1; 1726 nrqs = smatis1->nrqs; 1727 nrqr = smatis1->nrqr; 1728 rbuf1 = smatis1->rbuf1; 1729 rbuf2 = smatis1->rbuf2; 1730 rbuf3 = smatis1->rbuf3; 1731 req_source2 = smatis1->req_source2; 1732 1733 sbuf1 = smatis1->sbuf1; 1734 sbuf2 = smatis1->sbuf2; 1735 ptr = smatis1->ptr; 1736 tmp = smatis1->tmp; 1737 ctr = smatis1->ctr; 1738 1739 pa = smatis1->pa; 1740 req_size = smatis1->req_size; 1741 req_source1 = smatis1->req_source1; 1742 1743 allcolumns = smatis1->allcolumns; 1744 row2proc = smatis1->row2proc; 1745 rmap = smatis1->rmap; 1746 cmap = smatis1->cmap; 1747 #if defined(PETSC_USE_CTABLE) 1748 rmap_loc = smatis1->rmap_loc; 1749 cmap_loc = smatis1->cmap_loc; 1750 #endif 1751 } 1752 1753 /* Post recv matrix values */ 1754 ierr = PetscMalloc3(nrqs+1,&rbuf4, rmax,&subcols, rmax,&subvals);CHKERRQ(ierr); 1755 ierr = PetscMalloc4(nrqs+1,&r_waits4,nrqr+1,&s_waits4,nrqs+1,&r_status4,nrqr+1,&s_status4);CHKERRQ(ierr); 1756 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 1757 for (i=0; i<nrqs; ++i) { 1758 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr); 1759 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 1760 } 1761 1762 /* Allocate sending buffers for a->a, and send them off */ 1763 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 1764 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1765 ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr); 1766 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]; 1767 1768 for (i=0; i<nrqr; i++) { 1769 rbuf1_i = rbuf1[i]; 1770 sbuf_aa_i = sbuf_aa[i]; 1771 ct1 = 2*rbuf1_i[0]+1; 1772 ct2 = 0; 1773 /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"max1 !=1"); */ 1774 1775 kmax = rbuf1_i[2]; 1776 for (k=0; k<kmax; k++,ct1++) { 1777 row = rbuf1_i[ct1] - rstart; 1778 nzA = ai[row+1] - ai[row]; 1779 nzB = bi[row+1] - bi[row]; 1780 ncols = nzA + nzB; 1781 cworkB = bj + bi[row]; 1782 vworkA = a_a + ai[row]; 1783 vworkB = b_a + bi[row]; 1784 1785 /* load the column values for this row into vals*/ 1786 vals = sbuf_aa_i + ct2; 1787 1788 lwrite = 0; 1789 for (l=0; l<nzB; l++) { 1790 if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l]; 1791 } 1792 for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l]; 1793 for (l=0; l<nzB; l++) { 1794 if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l]; 1795 } 1796 1797 ct2 += ncols; 1798 } 1799 ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 1800 } 1801 1802 /* Assemble submat */ 1803 /* First assemble the local rows */ 1804 for (j=0; j<nrow; j++) { 1805 row = irow[j]; 1806 proc = row2proc[j]; 1807 if (proc == rank) { 1808 Crow = row - rstart; /* local row index of C */ 1809 #if defined(PETSC_USE_CTABLE) 1810 row = rmap_loc[Crow]; /* row index of submat */ 1811 #else 1812 row = rmap[row]; 1813 #endif 1814 1815 if (allcolumns) { 1816 /* diagonal part A = c->A */ 1817 ncols = ai[Crow+1] - ai[Crow]; 1818 cols = aj + ai[Crow]; 1819 vals = a->a + ai[Crow]; 1820 i = 0; 1821 for (k=0; k<ncols; k++) { 1822 subcols[i] = cols[k] + cstart; 1823 subvals[i++] = vals[k]; 1824 } 1825 1826 /* off-diagonal part B = c->B */ 1827 ncols = bi[Crow+1] - bi[Crow]; 1828 cols = bj + bi[Crow]; 1829 vals = b->a + bi[Crow]; 1830 for (k=0; k<ncols; k++) { 1831 subcols[i] = bmap[cols[k]]; 1832 subvals[i++] = vals[k]; 1833 } 1834 1835 ierr = 
MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1836 1837 } else { /* !allcolumns */ 1838 #if defined(PETSC_USE_CTABLE) 1839 /* diagonal part A = c->A */ 1840 ncols = ai[Crow+1] - ai[Crow]; 1841 cols = aj + ai[Crow]; 1842 vals = a->a + ai[Crow]; 1843 i = 0; 1844 for (k=0; k<ncols; k++) { 1845 tcol = cmap_loc[cols[k]]; 1846 if (tcol) { 1847 subcols[i] = --tcol; 1848 subvals[i++] = vals[k]; 1849 } 1850 } 1851 1852 /* off-diagonal part B = c->B */ 1853 ncols = bi[Crow+1] - bi[Crow]; 1854 cols = bj + bi[Crow]; 1855 vals = b->a + bi[Crow]; 1856 for (k=0; k<ncols; k++) { 1857 ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr); 1858 if (tcol) { 1859 subcols[i] = --tcol; 1860 subvals[i++] = vals[k]; 1861 } 1862 } 1863 #else 1864 /* diagonal part A = c->A */ 1865 ncols = ai[Crow+1] - ai[Crow]; 1866 cols = aj + ai[Crow]; 1867 vals = a->a + ai[Crow]; 1868 i = 0; 1869 for (k=0; k<ncols; k++) { 1870 tcol = cmap[cols[k]+cstart]; 1871 if (tcol) { 1872 subcols[i] = --tcol; 1873 subvals[i++] = vals[k]; 1874 } 1875 } 1876 1877 /* off-diagonal part B = c->B */ 1878 ncols = bi[Crow+1] - bi[Crow]; 1879 cols = bj + bi[Crow]; 1880 vals = b->a + bi[Crow]; 1881 for (k=0; k<ncols; k++) { 1882 tcol = cmap[bmap[cols[k]]]; 1883 if (tcol) { 1884 subcols[i] = --tcol; 1885 subvals[i++] = vals[k]; 1886 } 1887 } 1888 #endif 1889 ierr = MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1890 } 1891 } 1892 } 1893 1894 /* Now assemble the off-proc rows */ 1895 for (i=0; i<nrqs; i++) { /* for each requested message */ 1896 /* recv values from other processes */ 1897 ierr = MPI_Waitany(nrqs,r_waits4,&idex,r_status4+i);CHKERRQ(ierr); 1898 proc = pa[idex]; 1899 sbuf1_i = sbuf1[proc]; 1900 /* jmax = sbuf1_i[0]; if (jmax != 1)SETERRQ1(PETSC_COMM_SELF,0,"jmax %d != 1",jmax); */ 1901 ct1 = 2 + 1; 1902 ct2 = 0; /* count of received C->j */ 1903 ct3 = 0; /* count of received C->j that will be inserted into submat */ 1904 rbuf2_i = rbuf2[idex]; /* int** received length of C->j from other processes */ 1905 rbuf3_i = rbuf3[idex]; /* int** received C->j from other processes */ 1906 rbuf4_i = rbuf4[idex]; /* scalar** received C->a from other processes */ 1907 1908 /* is_no = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */ 1909 max1 = sbuf1_i[2]; /* num of rows */ 1910 for (k=0; k<max1; k++,ct1++) { /* for each recved row */ 1911 row = sbuf1_i[ct1]; /* row index of submat */ 1912 if (!allcolumns) { 1913 idex = 0; 1914 if (scall == MAT_INITIAL_MATRIX) { 1915 nnz = rbuf2_i[ct1]; /* num of C entries in this row */ 1916 for (l=0; l<nnz; l++,ct2++) { /* for each recved column */ 1917 #if defined(PETSC_USE_CTABLE) 1918 if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) { 1919 tcol = cmap_loc[rbuf3_i[ct2] - cstart]; 1920 } else { 1921 ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1922 } 1923 #else 1924 tcol = cmap[rbuf3_i[ct2]]; 1925 #endif 1926 if (tcol) { 1927 subcols[idex] = --tcol; 1928 subvals[idex++] = rbuf4_i[ct2]; 1929 1930 /* We receive an entire column of C, but a subset of it needs to be inserted into submat. 
1931 For reuse, we replace received C->j with index that should be inserted to submat */ 1932 rbuf3_i[ct3++] = ct2; 1933 } 1934 } 1935 ierr = MatSetValues_SeqAIJ(submat,1,&row,idex,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr); 1936 1937 } else { /* scall == MAT_REUSE_MATRIX */ 1938 submat = submats[0]; 1939 subc = (Mat_SeqAIJ*)submat->data; 1940 1941 nnz = subc->i[row+1] - subc->i[row]; /* num of submat entries in this row */ 1942 for (l=0; l<nnz; l++) { 1943 ct2 = rbuf3_i[ct3++]; /* index of rbuf4_i[] which needs to be inserted into submat */ 1944 subvals[idex++] = rbuf4_i[ct2]; 1945 } 1946 1947 bj = subc->j + subc->i[row]; 1948 ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,bj,subvals,INSERT_VALUES);CHKERRQ(ierr); 1949 } 1950 } else { /* allcolumns */ 1951 nnz = rbuf2_i[ct1]; /* num of C entries in this row */ 1952 ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,rbuf3_i+ct2,rbuf4_i+ct2,INSERT_VALUES);CHKERRQ(ierr); 1953 ct2 += nnz; 1954 } 1955 } 1956 } 1957 1958 /* sending a->a are done */ 1959 ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr); 1960 ierr = PetscFree4(r_waits4,s_waits4,r_status4,s_status4);CHKERRQ(ierr); 1961 1962 ierr = MatAssemblyBegin(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1963 ierr = MatAssemblyEnd(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1964 submats[0] = submat; 1965 1966 /* Restore the indices */ 1967 ierr = ISRestoreIndices(isrow[0],&irow);CHKERRQ(ierr); 1968 if (!allcolumns) { 1969 ierr = ISRestoreIndices(iscol[0],&icol);CHKERRQ(ierr); 1970 } 1971 1972 /* Destroy allocated memory */ 1973 for (i=0; i<nrqs; ++i) { 1974 ierr = PetscFree3(rbuf4[i],subcols,subvals);CHKERRQ(ierr); 1975 } 1976 ierr = PetscFree3(rbuf4,subcols,subvals);CHKERRQ(ierr); 1977 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 1978 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 1979 1980 if (scall == MAT_INITIAL_MATRIX) { 1981 ierr = PetscFree(lens);CHKERRQ(ierr); 1982 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 1983 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 1984 } 1985 PetscFunctionReturn(0); 1986 } 1987 1988 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 1989 { 1990 PetscErrorCode ierr; 1991 PetscInt ncol; 1992 PetscBool colflag,allcolumns=PETSC_FALSE; 1993 1994 PetscFunctionBegin; 1995 /* Allocate memory to hold all the submatrices */ 1996 if (scall == MAT_INITIAL_MATRIX) { 1997 ierr = PetscMalloc1(1,submat);CHKERRQ(ierr); 1998 } 1999 2000 /* Check for special case: each processor gets entire matrix columns */ 2001 ierr = ISIdentity(iscol[0],&colflag);CHKERRQ(ierr); 2002 ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr); 2003 if (colflag && ncol == C->cmap->N) allcolumns = PETSC_TRUE; 2004 2005 ierr = MatGetSubMatrices_MPIAIJ_SingleIS_Local(C,ismax,isrow,iscol,scall,allcolumns,*submat);CHKERRQ(ierr); 2006 PetscFunctionReturn(0); 2007 } 2008 2009 PetscErrorCode MatGetSubMatrices_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 2010 { 2011 PetscErrorCode ierr; 2012 PetscInt nmax,nstages,i,pos,max_no,nrow,ncol,in[2],out[2]; 2013 PetscBool rowflag,colflag,wantallmatrix=PETSC_FALSE; 2014 Mat_SeqAIJ *subc; 2015 Mat_SubMat *smat; 2016 2017 PetscFunctionBegin; 2018 /* Check for special case: each processor has a single IS */ 2019 if (C->submat_singleis) { /* flag is set in PCSetUp_ASM() to skip MPIU_Allreduce() */ 2020 ierr = MatGetSubMatrices_MPIAIJ_SingleIS(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr); 2021 C->submat_singleis = PETSC_FALSE; /* resume its default value in 
case C will be used for non-singlis */ 2022 PetscFunctionReturn(0); 2023 } 2024 2025 /* Collect global wantallmatrix and nstages */ 2026 if (!C->cmap->N) nmax=20*1000000/sizeof(PetscInt); 2027 else nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt)); 2028 if (!nmax) nmax = 1; 2029 2030 if (scall == MAT_INITIAL_MATRIX) { 2031 /* Collect global wantallmatrix and nstages */ 2032 if (ismax == 1 && C->rmap->N == C->cmap->N) { 2033 ierr = ISIdentity(*isrow,&rowflag);CHKERRQ(ierr); 2034 ierr = ISIdentity(*iscol,&colflag);CHKERRQ(ierr); 2035 ierr = ISGetLocalSize(*isrow,&nrow);CHKERRQ(ierr); 2036 ierr = ISGetLocalSize(*iscol,&ncol);CHKERRQ(ierr); 2037 if (rowflag && colflag && nrow == C->rmap->N && ncol == C->cmap->N) { 2038 wantallmatrix = PETSC_TRUE; 2039 2040 ierr = PetscOptionsGetBool(((PetscObject)C)->options,((PetscObject)C)->prefix,"-use_fast_submatrix",&wantallmatrix,NULL);CHKERRQ(ierr); 2041 } 2042 } 2043 2044 /* Determine the number of stages through which submatrices are done 2045 Each stage will extract nmax submatrices. 2046 nmax is determined by the matrix column dimension. 2047 If the original matrix has 20M columns, only one submatrix per stage is allowed, etc. 2048 */ 2049 nstages = ismax/nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */ 2050 2051 in[0] = -1*(PetscInt)wantallmatrix; 2052 in[1] = nstages; 2053 ierr = MPIU_Allreduce(in,out,2,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 2054 wantallmatrix = (PetscBool)(-out[0]); 2055 nstages = out[1]; /* Make sure every processor loops through the global nstages */ 2056 2057 } else { /* MAT_REUSE_MATRIX */ 2058 subc = (Mat_SeqAIJ*)((*submat)[0]->data); 2059 smat = subc->submatis1; 2060 if (!smat) { 2061 /* smat is not generated by MatGetSubMatrix_MPIAIJ_All(...,MAT_INITIAL_MATRIX,...) 
*/ 2062 wantallmatrix = PETSC_TRUE; 2063 } else if (smat->singleis) { 2064 ierr = MatGetSubMatrices_MPIAIJ_SingleIS(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr); 2065 PetscFunctionReturn(0); 2066 } else { 2067 nstages = smat->nstages; 2068 } 2069 } 2070 2071 if (wantallmatrix) { 2072 ierr = MatGetSubMatrix_MPIAIJ_All(C,MAT_GET_VALUES,scall,submat);CHKERRQ(ierr); 2073 PetscFunctionReturn(0); 2074 } 2075 2076 /* Allocate memory to hold all the submatrices and dummy submatrices */ 2077 if (scall == MAT_INITIAL_MATRIX) { 2078 ierr = PetscCalloc1(ismax+nstages,submat);CHKERRQ(ierr); 2079 } 2080 2081 #if 0 2082 PetscMPIInt rank; 2083 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)C),&rank);CHKERRQ(ierr); 2084 printf("[%d] reuse %d, ismax %d, CN %d, nstages %d, nmax %d\n",rank,scall,ismax,C->cmap->N,nstages,nmax); 2085 #endif 2086 for (i=0,pos=0; i<nstages; i++) { 2087 if (pos+nmax <= ismax) max_no = nmax; 2088 else if (pos == ismax) max_no = 0; 2089 else max_no = ismax-pos; 2090 /* if (!max_no) printf("[%d] max_no=0, %d-stage\n",rank,i); */ 2091 ierr = MatGetSubMatrices_MPIAIJ_Local(C,max_no,isrow+pos,iscol+pos,scall,*submat+pos);CHKERRQ(ierr); 2092 pos += max_no; 2093 } 2094 2095 if (scall == MAT_INITIAL_MATRIX) { 2096 /* save nstages for reuse */ 2097 subc = (Mat_SeqAIJ*)((*submat)[0]->data); 2098 smat = subc->submatis1; 2099 if (!smat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"smat does not exist"); 2100 smat->nstages = nstages; 2101 } 2102 PetscFunctionReturn(0); 2103 } 2104 2105 /* -------------------------------------------------------------------------*/ 2106 PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats) 2107 { 2108 Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data; 2109 Mat A = c->A; 2110 Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)c->B->data,*subc; 2111 const PetscInt **icol,**irow; 2112 PetscInt *nrow,*ncol,start; 2113 PetscErrorCode ierr; 2114 PetscMPIInt rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr; 2115 PetscInt **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,**rbuf1,row,proc=-1; 2116 PetscInt nrqs=0,msz,**ptr=NULL,*req_size=NULL,*ctr=NULL,*pa,*tmp=NULL,tcol; 2117 PetscInt **rbuf3=NULL,*req_source1=NULL,*req_source2,**sbuf_aj,**rbuf2=NULL,max1,max2; 2118 PetscInt **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax; 2119 #if defined(PETSC_USE_CTABLE) 2120 PetscTable *cmap,cmap_i=NULL,*rmap,rmap_i; 2121 #else 2122 PetscInt **cmap,*cmap_i=NULL,**rmap,*rmap_i; 2123 #endif 2124 const PetscInt *irow_i; 2125 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 2126 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 2127 MPI_Request *r_waits4,*s_waits3,*s_waits4; 2128 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2; 2129 MPI_Status *r_status3,*r_status4,*s_status4; 2130 MPI_Comm comm; 2131 PetscScalar **rbuf4,*rbuf4_i,**sbuf_aa,*vals,*mat_a,*imat_a,*sbuf_aa_i; 2132 PetscMPIInt *onodes1,*olengths1,end; 2133 PetscInt **row2proc,*row2proc_i,ilen_row,*imat_ilen,*imat_j,*imat_i,old_row; 2134 Mat_SubMat *smat_i; 2135 PetscBool *issorted,*allcolumns,colflag,iscsorted=PETSC_TRUE; 2136 PetscInt *sbuf1_i,*rbuf2_i,*rbuf3_i,ilen; 2137 2138 PetscFunctionBegin; 2139 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 2140 size = c->size; 2141 rank = c->rank; 2142 2143 ierr = PetscMalloc4(ismax,&row2proc,ismax,&cmap,ismax,&rmap,ismax+1,&allcolumns);CHKERRQ(ierr); 2144 ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);CHKERRQ(ierr); 2145 2146 for (i=0;
i<ismax; i++) { 2147 ierr = ISSorted(iscol[i],&issorted[i]);CHKERRQ(ierr); 2148 if (!issorted[i]) iscsorted = issorted[i]; 2149 2150 ierr = ISSorted(isrow[i],&issorted[i]);CHKERRQ(ierr); 2151 2152 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 2153 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 2154 2155 /* Check for special case: allcolumn */ 2156 ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr); 2157 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 2158 if (colflag && ncol[i] == C->cmap->N) { 2159 allcolumns[i] = PETSC_TRUE; 2160 icol[i] = NULL; 2161 } else { 2162 allcolumns[i] = PETSC_FALSE; 2163 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 2164 } 2165 } 2166 2167 if (scall == MAT_REUSE_MATRIX) { 2168 /* Assumes new rows are same length as the old rows */ 2169 for (i=0; i<ismax; i++) { 2170 if (!submats[i]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"submats[%D] is null, cannot reuse",i); 2171 subc = (Mat_SeqAIJ*)(submats[i]->data); 2172 if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size"); 2173 2174 /* Initial matrix as if empty */ 2175 ierr = PetscMemzero(subc->ilen,submats[i]->rmap->n*sizeof(PetscInt));CHKERRQ(ierr); 2176 2177 smat_i = subc->submatis1; 2178 2179 nrqs = smat_i->nrqs; 2180 nrqr = smat_i->nrqr; 2181 rbuf1 = smat_i->rbuf1; 2182 rbuf2 = smat_i->rbuf2; 2183 rbuf3 = smat_i->rbuf3; 2184 req_source2 = smat_i->req_source2; 2185 2186 sbuf1 = smat_i->sbuf1; 2187 sbuf2 = smat_i->sbuf2; 2188 ptr = smat_i->ptr; 2189 tmp = smat_i->tmp; 2190 ctr = smat_i->ctr; 2191 2192 pa = smat_i->pa; 2193 req_size = smat_i->req_size; 2194 req_source1 = smat_i->req_source1; 2195 2196 allcolumns[i] = smat_i->allcolumns; 2197 row2proc[i] = smat_i->row2proc; 2198 rmap[i] = smat_i->rmap; 2199 cmap[i] = smat_i->cmap; 2200 } 2201 2202 if (!ismax){ /* Get dummy submatrices and retrieve struct submatis1 */ 2203 if (!submats[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"submats are null, cannot reuse"); 2204 subc = (Mat_SeqAIJ*)(submats[0]->data); 2205 smat_i = subc->submatis1; 2206 2207 nrqs = smat_i->nrqs; 2208 nrqr = smat_i->nrqr; 2209 rbuf1 = smat_i->rbuf1; 2210 rbuf2 = smat_i->rbuf2; 2211 rbuf3 = smat_i->rbuf3; 2212 req_source2 = smat_i->req_source2; 2213 2214 sbuf1 = smat_i->sbuf1; 2215 sbuf2 = smat_i->sbuf2; 2216 ptr = smat_i->ptr; 2217 tmp = smat_i->tmp; 2218 ctr = smat_i->ctr; 2219 2220 pa = smat_i->pa; 2221 req_size = smat_i->req_size; 2222 req_source1 = smat_i->req_source1; 2223 2224 allcolumns[0] = PETSC_TRUE; 2225 } 2226 } else { /* scall == MAT_INITIAL_MATRIX */ 2227 /* Get some new tags to keep the communication clean */ 2228 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 2229 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 2230 2231 /* evaluate communication - mesg to who, length of mesg, and buffer space 2232 required. 
Based on this, buffers are allocated, and data copied into them*/ 2233 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); /* mesg size, initialize work vectors */ 2234 2235 for (i=0; i<ismax; i++) { 2236 jmax = nrow[i]; 2237 irow_i = irow[i]; 2238 2239 ierr = PetscMalloc1(jmax,&row2proc_i);CHKERRQ(ierr); 2240 row2proc[i] = row2proc_i; 2241 2242 if (issorted[i]) proc = 0; 2243 for (j=0; j<jmax; j++) { 2244 if (!issorted[i]) proc = 0; 2245 row = irow_i[j]; 2246 while (row >= C->rmap->range[proc+1]) proc++; 2247 w4[proc]++; 2248 row2proc_i[j] = proc; /* map row index to proc */ 2249 } 2250 for (j=0; j<size; j++) { 2251 if (w4[j]) { w1[j] += w4[j]; w3[j]++; w4[j] = 0;} 2252 } 2253 } 2254 2255 nrqs = 0; /* no of outgoing messages */ 2256 msz = 0; /* total mesg length (for all procs) */ 2257 w1[rank] = 0; /* no mesg sent to self */ 2258 w3[rank] = 0; 2259 for (i=0; i<size; i++) { 2260 if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 2261 } 2262 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 2263 for (i=0,j=0; i<size; i++) { 2264 if (w1[i]) { pa[j] = i; j++; } 2265 } 2266 2267 /* Each message would have a header = 1 + 2*(no of IS) + data */ 2268 for (i=0; i<nrqs; i++) { 2269 j = pa[i]; 2270 w1[j] += w2[j] + 2* w3[j]; 2271 msz += w1[j]; 2272 } 2273 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 2274 2275 /* Determine the number of messages to expect, their lengths, from from-ids */ 2276 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 2277 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 2278 2279 /* Now post the Irecvs corresponding to these messages */ 2280 tag0 = ((PetscObject)C)->tag; 2281 ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 2282 2283 ierr = PetscFree(onodes1);CHKERRQ(ierr); 2284 ierr = PetscFree(olengths1);CHKERRQ(ierr); 2285 2286 /* Allocate Memory for outgoing messages */ 2287 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 2288 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 2289 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 2290 2291 { 2292 PetscInt *iptr = tmp; 2293 k = 0; 2294 for (i=0; i<nrqs; i++) { 2295 j = pa[i]; 2296 iptr += k; 2297 sbuf1[j] = iptr; 2298 k = w1[j]; 2299 } 2300 } 2301 2302 /* Form the outgoing messages. 
Initialize the header space */ 2303 for (i=0; i<nrqs; i++) { 2304 j = pa[i]; 2305 sbuf1[j][0] = 0; 2306 ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 2307 ptr[j] = sbuf1[j] + 2*w3[j] + 1; 2308 } 2309 2310 /* Parse the isrow and copy data into outbuf */ 2311 for (i=0; i<ismax; i++) { 2312 row2proc_i = row2proc[i]; 2313 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 2314 irow_i = irow[i]; 2315 jmax = nrow[i]; 2316 for (j=0; j<jmax; j++) { /* parse the indices of each IS */ 2317 proc = row2proc_i[j]; 2318 if (proc != rank) { /* copy to the outgoing buf*/ 2319 ctr[proc]++; 2320 *ptr[proc] = irow_i[j]; 2321 ptr[proc]++; 2322 } 2323 } 2324 /* Update the headers for the current IS */ 2325 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 2326 if ((ctr_j = ctr[j])) { 2327 sbuf1_j = sbuf1[j]; 2328 k = ++sbuf1_j[0]; 2329 sbuf1_j[2*k] = ctr_j; 2330 sbuf1_j[2*k-1] = i; 2331 } 2332 } 2333 } 2334 2335 /* Now post the sends */ 2336 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 2337 for (i=0; i<nrqs; ++i) { 2338 j = pa[i]; 2339 ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 2340 } 2341 2342 /* Post Receives to capture the buffer size */ 2343 ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr); 2344 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 2345 rbuf2[0] = tmp + msz; 2346 for (i=1; i<nrqs; ++i) { 2347 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 2348 } 2349 for (i=0; i<nrqs; ++i) { 2350 j = pa[i]; 2351 ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);CHKERRQ(ierr); 2352 } 2353 2354 /* Send to other procs the buf size they should allocate */ 2355 /* Receive messages*/ 2356 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 2357 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 2358 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 2359 { 2360 PetscInt *sAi = a->i,*sBi = b->i,id,rstart = C->rmap->rstart; 2361 PetscInt *sbuf2_i; 2362 2363 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 2364 for (i=0; i<nrqr; ++i) { 2365 req_size[i] = 0; 2366 rbuf1_i = rbuf1[i]; 2367 start = 2*rbuf1_i[0] + 1; 2368 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 2369 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 2370 sbuf2_i = sbuf2[i]; 2371 for (j=start; j<end; j++) { 2372 id = rbuf1_i[j] - rstart; 2373 ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id]; 2374 sbuf2_i[j] = ncols; 2375 req_size[i] += ncols; 2376 } 2377 req_source1[i] = r_status1[i].MPI_SOURCE; 2378 /* form the header */ 2379 sbuf2_i[0] = req_size[i]; 2380 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 2381 2382 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 2383 } 2384 } 2385 ierr = PetscFree(r_status1);CHKERRQ(ierr); 2386 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 2387 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 2388 2389 /* Receive messages*/ 2390 ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr); 2391 ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr); 2392 2393 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 2394 for (i=0; i<nrqs; ++i) { 2395 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 2396 req_source2[i] = r_status2[i].MPI_SOURCE; 2397 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 2398 } 2399 ierr = PetscFree(r_status2);CHKERRQ(ierr); 2400 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 2401 2402 /* Wait on sends1 and sends2 */ 2403 
ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 2404 ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr); 2405 2406 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);} 2407 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);} 2408 ierr = PetscFree(s_status1);CHKERRQ(ierr); 2409 ierr = PetscFree(s_status2);CHKERRQ(ierr); 2410 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 2411 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 2412 2413 /* Now allocate sending buffers for a->j, and send them off */ 2414 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 2415 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 2416 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 2417 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 2418 2419 ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr); 2420 { 2421 PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i,lwrite; 2422 PetscInt *cworkA,*cworkB,cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray; 2423 PetscInt cend = C->cmap->rend; 2424 PetscInt *a_j = a->j,*b_j = b->j,ctmp; 2425 2426 for (i=0; i<nrqr; i++) { 2427 rbuf1_i = rbuf1[i]; 2428 sbuf_aj_i = sbuf_aj[i]; 2429 ct1 = 2*rbuf1_i[0] + 1; 2430 ct2 = 0; 2431 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 2432 kmax = rbuf1[i][2*j]; 2433 for (k=0; k<kmax; k++,ct1++) { 2434 row = rbuf1_i[ct1] - rstart; 2435 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 2436 ncols = nzA + nzB; 2437 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 2438 2439 /* load the column indices for this row into cols */ 2440 cols = sbuf_aj_i + ct2; 2441 2442 lwrite = 0; 2443 for (l=0; l<nzB; l++) { 2444 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp; 2445 } 2446 for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l]; 2447 for (l=0; l<nzB; l++) { 2448 if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp; 2449 } 2450 2451 ct2 += ncols; 2452 } 2453 } 2454 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 2455 } 2456 } 2457 ierr = PetscMalloc2(nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr); 2458 2459 /* create col map: global col of C -> local col of submatrices */ 2460 { 2461 const PetscInt *icol_i; 2462 #if defined(PETSC_USE_CTABLE) 2463 for (i=0; i<ismax; i++) { 2464 if (!allcolumns[i]) { 2465 ierr = PetscTableCreate(ncol[i]+1,C->cmap->N+1,&cmap[i]);CHKERRQ(ierr); 2466 2467 jmax = ncol[i]; 2468 icol_i = icol[i]; 2469 cmap_i = cmap[i]; 2470 for (j=0; j<jmax; j++) { 2471 ierr = PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 2472 } 2473 } else cmap[i] = NULL; 2474 } 2475 #else 2476 for (i=0; i<ismax; i++) { 2477 if (!allcolumns[i]) { 2478 ierr = PetscCalloc1(C->cmap->N,&cmap[i]);CHKERRQ(ierr); 2479 jmax = ncol[i]; 2480 icol_i = icol[i]; 2481 cmap_i = cmap[i]; 2482 for (j=0; j<jmax; j++) { 2483 cmap_i[icol_i[j]] = j+1; 2484 } 2485 } else cmap[i] = NULL; 2486 } 2487 #endif 2488 } 2489 2490 /* Create lens which is required for MatCreate... 
*/ 2491 for (i=0,j=0; i<ismax; i++) j += nrow[i]; 2492 ierr = PetscMalloc1(ismax,&lens);CHKERRQ(ierr); 2493 2494 if (ismax) { 2495 ierr = PetscCalloc1(j,&lens[0]);CHKERRQ(ierr); 2496 } 2497 for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1]; 2498 2499 /* Update lens from local data */ 2500 for (i=0; i<ismax; i++) { 2501 row2proc_i = row2proc[i]; 2502 jmax = nrow[i]; 2503 if (!allcolumns[i]) cmap_i = cmap[i]; 2504 irow_i = irow[i]; 2505 lens_i = lens[i]; 2506 for (j=0; j<jmax; j++) { 2507 row = irow_i[j]; 2508 proc = row2proc_i[j]; 2509 if (proc == rank) { 2510 ierr = MatGetRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr); 2511 if (!allcolumns[i]) { 2512 for (k=0; k<ncols; k++) { 2513 #if defined(PETSC_USE_CTABLE) 2514 ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr); 2515 #else 2516 tcol = cmap_i[cols[k]]; 2517 #endif 2518 if (tcol) lens_i[j]++; 2519 } 2520 } else { /* allcolumns */ 2521 lens_i[j] = ncols; 2522 } 2523 ierr = MatRestoreRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr); 2524 } 2525 } 2526 } 2527 2528 /* Create row map: global row of C -> local row of submatrices */ 2529 #if defined(PETSC_USE_CTABLE) 2530 for (i=0; i<ismax; i++) { 2531 ierr = PetscTableCreate(nrow[i]+1,C->rmap->N+1,&rmap[i]);CHKERRQ(ierr); 2532 irow_i = irow[i]; 2533 jmax = nrow[i]; 2534 for (j=0; j<jmax; j++) { 2535 ierr = PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 2536 } 2537 } 2538 #else 2539 for (i=0; i<ismax; i++) { 2540 ierr = PetscCalloc1(C->rmap->N,&rmap[i]);CHKERRQ(ierr); 2541 rmap_i = rmap[i]; 2542 irow_i = irow[i]; 2543 jmax = nrow[i]; 2544 for (j=0; j<jmax; j++) { 2545 rmap_i[irow_i[j]] = j; 2546 } 2547 } 2548 #endif 2549 2550 /* Update lens from offproc data */ 2551 { 2552 PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i; 2553 2554 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 2555 for (tmp2=0; tmp2<nrqs; tmp2++) { 2556 sbuf1_i = sbuf1[pa[tmp2]]; 2557 jmax = sbuf1_i[0]; 2558 ct1 = 2*jmax+1; 2559 ct2 = 0; 2560 rbuf2_i = rbuf2[tmp2]; 2561 rbuf3_i = rbuf3[tmp2]; 2562 for (j=1; j<=jmax; j++) { 2563 is_no = sbuf1_i[2*j-1]; 2564 max1 = sbuf1_i[2*j]; 2565 lens_i = lens[is_no]; 2566 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2567 rmap_i = rmap[is_no]; 2568 for (k=0; k<max1; k++,ct1++) { 2569 #if defined(PETSC_USE_CTABLE) 2570 ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 2571 row--; 2572 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 2573 #else 2574 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 2575 #endif 2576 max2 = rbuf2_i[ct1]; 2577 for (l=0; l<max2; l++,ct2++) { 2578 if (!allcolumns[is_no]) { 2579 #if defined(PETSC_USE_CTABLE) 2580 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 2581 #else 2582 tcol = cmap_i[rbuf3_i[ct2]]; 2583 #endif 2584 if (tcol) lens_i[row]++; 2585 } else { /* allcolumns */ 2586 lens_i[row]++; /* lens_i[row] += max2 ? 
*/ 2587 } 2588 } 2589 } 2590 } 2591 } 2592 } 2593 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 2594 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);} 2595 ierr = PetscFree2(r_status3,s_status3);CHKERRQ(ierr); 2596 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 2597 2598 /* Create the submatrices */ 2599 for (i=0; i<ismax; i++) { 2600 PetscInt rbs,cbs; 2601 2602 ierr = ISGetBlockSize(isrow[i],&rbs);CHKERRQ(ierr); 2603 ierr = ISGetBlockSize(iscol[i],&cbs);CHKERRQ(ierr); 2604 2605 ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr); 2606 ierr = MatSetSizes(submats[i],nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2607 2608 ierr = MatSetBlockSizes(submats[i],rbs,cbs);CHKERRQ(ierr); 2609 ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr); 2610 ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr); 2611 2612 /* create struct Mat_SubMat and attached it to submat */ 2613 ierr = PetscNew(&smat_i);CHKERRQ(ierr); 2614 subc = (Mat_SeqAIJ*)submats[i]->data; 2615 subc->submatis1 = smat_i; 2616 2617 smat_i->destroy = submats[i]->ops->destroy; 2618 submats[i]->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices; 2619 submats[i]->factortype = C->factortype; 2620 2621 smat_i->id = i; 2622 smat_i->nrqs = nrqs; 2623 smat_i->nrqr = nrqr; 2624 smat_i->rbuf1 = rbuf1; 2625 smat_i->rbuf2 = rbuf2; 2626 smat_i->rbuf3 = rbuf3; 2627 smat_i->sbuf2 = sbuf2; 2628 smat_i->req_source2 = req_source2; 2629 2630 smat_i->sbuf1 = sbuf1; 2631 smat_i->ptr = ptr; 2632 smat_i->tmp = tmp; 2633 smat_i->ctr = ctr; 2634 2635 smat_i->pa = pa; 2636 smat_i->req_size = req_size; 2637 smat_i->req_source1 = req_source1; 2638 2639 smat_i->allcolumns = allcolumns[i]; 2640 smat_i->singleis = PETSC_FALSE; 2641 smat_i->row2proc = row2proc[i]; 2642 smat_i->rmap = rmap[i]; 2643 smat_i->cmap = cmap[i]; 2644 } 2645 2646 if (!ismax) { /* Create dummy submats[0] for reuse struct subc */ 2647 ierr = MatCreate(PETSC_COMM_SELF,&submats[0]);CHKERRQ(ierr); 2648 ierr = MatSetSizes(submats[0],0,0,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2649 ierr = MatSetType(submats[0],((PetscObject)A)->type_name);CHKERRQ(ierr); 2650 ierr = MatSeqAIJSetPreallocation(submats[0],0,NULL);CHKERRQ(ierr); 2651 2652 /* create struct Mat_SubMat and attached it to submat */ 2653 ierr = PetscNew(&smat_i);CHKERRQ(ierr); 2654 subc = (Mat_SeqAIJ*)submats[0]->data; 2655 subc->submatis1 = smat_i; 2656 2657 smat_i->destroy = submats[0]->ops->destroy; 2658 submats[0]->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices; 2659 submats[0]->factortype = C->factortype; 2660 2661 smat_i->id = i; 2662 smat_i->nrqs = nrqs; 2663 smat_i->nrqr = nrqr; 2664 smat_i->rbuf1 = rbuf1; 2665 smat_i->rbuf2 = rbuf2; 2666 smat_i->rbuf3 = rbuf3; 2667 smat_i->sbuf2 = sbuf2; 2668 smat_i->req_source2 = req_source2; 2669 2670 smat_i->sbuf1 = sbuf1; 2671 smat_i->ptr = ptr; 2672 smat_i->tmp = tmp; 2673 smat_i->ctr = ctr; 2674 2675 smat_i->pa = pa; 2676 smat_i->req_size = req_size; 2677 smat_i->req_source1 = req_source1; 2678 2679 smat_i->allcolumns = PETSC_TRUE; 2680 smat_i->singleis = PETSC_FALSE; 2681 smat_i->row2proc = NULL; 2682 smat_i->rmap = NULL; 2683 smat_i->cmap = NULL; 2684 } 2685 2686 if (ismax) {ierr = PetscFree(lens[0]);CHKERRQ(ierr);} 2687 ierr = PetscFree(lens);CHKERRQ(ierr); 2688 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 2689 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 2690 2691 } /* endof scall == MAT_INITIAL_MATRIX */ 2692 2693 /* Post recv matrix values */ 2694 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 
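  /*
     Note on the value-exchange phase (tag4), which mirrors the column-index exchange done with tag3:
     for each of the nrqs processes contacted, the reply header rbuf2[i][0] (filled from req_size[] on
     the sending side) is the total number of nonzeros that process owns in the rows we requested, so a
     receive of exactly rbuf2[i][0] scalars is posted below. Each sender packs the values row by row in
     the same order as the column indices sent with tag3: off-diagonal entries with global column < cstart,
     then the diagonal block (c->A), then off-diagonal entries with global column >= cend.
  */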
2695 ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr); 2696 ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr); 2697 ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr); 2698 ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr); 2699 for (i=0; i<nrqs; ++i) { 2700 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr); 2701 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 2702 } 2703 2704 /* Allocate sending buffers for a->a, and send them off */ 2705 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 2706 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 2707 ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr); 2708 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]; 2709 2710 ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr); 2711 { 2712 PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i, *cworkB,lwrite; 2713 PetscInt cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray; 2714 PetscInt cend = C->cmap->rend; 2715 PetscInt *b_j = b->j; 2716 PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a; 2717 2718 for (i=0; i<nrqr; i++) { 2719 rbuf1_i = rbuf1[i]; 2720 sbuf_aa_i = sbuf_aa[i]; 2721 ct1 = 2*rbuf1_i[0]+1; 2722 ct2 = 0; 2723 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 2724 kmax = rbuf1_i[2*j]; 2725 for (k=0; k<kmax; k++,ct1++) { 2726 row = rbuf1_i[ct1] - rstart; 2727 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 2728 ncols = nzA + nzB; 2729 cworkB = b_j + b_i[row]; 2730 vworkA = a_a + a_i[row]; 2731 vworkB = b_a + b_i[row]; 2732 2733 /* load the column values for this row into vals*/ 2734 vals = sbuf_aa_i+ct2; 2735 2736 lwrite = 0; 2737 for (l=0; l<nzB; l++) { 2738 if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l]; 2739 } 2740 for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l]; 2741 for (l=0; l<nzB; l++) { 2742 if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l]; 2743 } 2744 2745 ct2 += ncols; 2746 } 2747 } 2748 ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 2749 } 2750 } 2751 2752 /* Assemble the matrices */ 2753 /* First assemble the local rows */ 2754 for (i=0; i<ismax; i++) { 2755 row2proc_i = row2proc[i]; 2756 subc = (Mat_SeqAIJ*)submats[i]->data; 2757 imat_ilen = subc->ilen; 2758 imat_j = subc->j; 2759 imat_i = subc->i; 2760 imat_a = subc->a; 2761 2762 if (!allcolumns[i]) cmap_i = cmap[i]; 2763 rmap_i = rmap[i]; 2764 irow_i = irow[i]; 2765 jmax = nrow[i]; 2766 for (j=0; j<jmax; j++) { 2767 row = irow_i[j]; 2768 proc = row2proc_i[j]; 2769 if (proc == rank) { 2770 old_row = row; 2771 #if defined(PETSC_USE_CTABLE) 2772 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 2773 row--; 2774 #else 2775 row = rmap_i[row]; 2776 #endif 2777 ilen_row = imat_ilen[row]; 2778 ierr = MatGetRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 2779 mat_i = imat_i[row]; 2780 mat_a = imat_a + mat_i; 2781 mat_j = imat_j + mat_i; 2782 if (!allcolumns[i]) { 2783 for (k=0; k<ncols; k++) { 2784 #if defined(PETSC_USE_CTABLE) 2785 ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr); 2786 #else 2787 tcol = cmap_i[cols[k]]; 2788 #endif 2789 if (tcol) { 2790 *mat_j++ = tcol - 1; 2791 *mat_a++ = vals[k]; 2792 ilen_row++; 2793 } 2794 } 2795 } else { /* allcolumns */ 2796 for (k=0; k<ncols; k++) { 2797 *mat_j++ = cols[k]; /* global col index! 
*/ 2798 *mat_a++ = vals[k]; 2799 ilen_row++; 2800 } 2801 } 2802 ierr = MatRestoreRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr); 2803 2804 imat_ilen[row] = ilen_row; 2805 } 2806 } 2807 } 2808 2809 /* Now assemble the off proc rows */ 2810 ierr = MPI_Waitall(nrqs,r_waits4,r_status4);CHKERRQ(ierr); 2811 for (tmp2=0; tmp2<nrqs; tmp2++) { 2812 sbuf1_i = sbuf1[pa[tmp2]]; 2813 jmax = sbuf1_i[0]; 2814 ct1 = 2*jmax + 1; 2815 ct2 = 0; 2816 rbuf2_i = rbuf2[tmp2]; 2817 rbuf3_i = rbuf3[tmp2]; 2818 rbuf4_i = rbuf4[tmp2]; 2819 for (j=1; j<=jmax; j++) { 2820 is_no = sbuf1_i[2*j-1]; 2821 rmap_i = rmap[is_no]; 2822 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2823 subc = (Mat_SeqAIJ*)submats[is_no]->data; 2824 imat_ilen = subc->ilen; 2825 imat_j = subc->j; 2826 imat_i = subc->i; 2827 imat_a = subc->a; 2828 max1 = sbuf1_i[2*j]; 2829 for (k=0; k<max1; k++,ct1++) { 2830 row = sbuf1_i[ct1]; 2831 #if defined(PETSC_USE_CTABLE) 2832 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 2833 row--; 2834 #else 2835 row = rmap_i[row]; 2836 #endif 2837 ilen = imat_ilen[row]; 2838 mat_i = imat_i[row]; 2839 mat_a = imat_a + mat_i; 2840 mat_j = imat_j + mat_i; 2841 max2 = rbuf2_i[ct1]; 2842 if (!allcolumns[is_no]) { 2843 for (l=0; l<max2; l++,ct2++) { 2844 #if defined(PETSC_USE_CTABLE) 2845 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 2846 #else 2847 tcol = cmap_i[rbuf3_i[ct2]]; 2848 #endif 2849 if (tcol) { 2850 *mat_j++ = tcol - 1; 2851 *mat_a++ = rbuf4_i[ct2]; 2852 ilen++; 2853 } 2854 } 2855 } else { /* allcolumns */ 2856 for (l=0; l<max2; l++,ct2++) { 2857 *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */ 2858 *mat_a++ = rbuf4_i[ct2]; 2859 ilen++; 2860 } 2861 } 2862 imat_ilen[row] = ilen; 2863 } 2864 } 2865 } 2866 2867 if (!iscsorted) { /* sort column indices of the rows */ 2868 for (i=0; i<ismax; i++) { 2869 subc = (Mat_SeqAIJ*)submats[i]->data; 2870 imat_j = subc->j; 2871 imat_i = subc->i; 2872 imat_a = subc->a; 2873 imat_ilen = subc->ilen; 2874 2875 if (allcolumns[i]) continue; 2876 jmax = nrow[i]; 2877 for (j=0; j<jmax; j++) { 2878 mat_i = imat_i[j]; 2879 mat_a = imat_a + mat_i; 2880 mat_j = imat_j + mat_i; 2881 ierr = PetscSortIntWithScalarArray(imat_ilen[j],mat_j,mat_a);CHKERRQ(ierr); 2882 } 2883 } 2884 } 2885 2886 ierr = PetscFree(r_status4);CHKERRQ(ierr); 2887 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 2888 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);} 2889 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 2890 ierr = PetscFree(s_status4);CHKERRQ(ierr); 2891 2892 /* Restore the indices */ 2893 for (i=0; i<ismax; i++) { 2894 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 2895 if (!allcolumns[i]) { 2896 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 2897 } 2898 } 2899 2900 for (i=0; i<ismax; i++) { 2901 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2902 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2903 } 2904 if (!ismax) { /* a dummy matrix */ 2905 ierr = MatAssemblyBegin(submats[0],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2906 ierr = MatAssemblyEnd(submats[0],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2907 } 2908 2909 /* Destroy allocated memory */ 2910 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 2911 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 2912 ierr = PetscFree5(irow,icol,nrow,ncol,issorted);CHKERRQ(ierr); 2913 2914 for (i=0; i<nrqs; ++i) { 2915 ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); 2916 } 2917 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 2918 2919 ierr = 
PetscFree4(row2proc,cmap,rmap,allcolumns);CHKERRQ(ierr); 2920 PetscFunctionReturn(0); 2921 } 2922 2923 /* 2924 Permute A & B into C's *local* index space using rowemb,dcolemb for A and rowemb,ocolemb for B. 2925 Embeddings are supposed to be injections and the above implies that the range of rowemb is a subset 2926 of [0,m), dcolemb is in [0,n) and ocolemb is in [0,N-n). 2927 If pattern == DIFFERENT_NONZERO_PATTERN, C is preallocated according to A&B. 2928 After that B's columns are mapped into C's global column space, so that C is in the "disassembled" 2929 state, and needs to be "assembled" later by compressing B's column space. 2930 2931 This function may be called in lieu of preallocation, so C should not be expected to be preallocated. 2932 Following this call, C->A & C->B have been created, even if empty. 2933 */ 2934 PetscErrorCode MatSetSeqMats_MPIAIJ(Mat C,IS rowemb,IS dcolemb,IS ocolemb,MatStructure pattern,Mat A,Mat B) 2935 { 2936 /* If making this function public, change the error returned in this function away from _PLIB. */ 2937 PetscErrorCode ierr; 2938 Mat_MPIAIJ *aij; 2939 Mat_SeqAIJ *Baij; 2940 PetscBool seqaij,Bdisassembled; 2941 PetscInt m,n,*nz,i,j,ngcol,col,rstart,rend,shift,count; 2942 PetscScalar v; 2943 const PetscInt *rowindices,*colindices; 2944 2945 PetscFunctionBegin; 2946 /* Check to make sure the component matrices (and embeddings) are compatible with C. */ 2947 if (A) { 2948 ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&seqaij);CHKERRQ(ierr); 2949 if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diagonal matrix is of wrong type"); 2950 if (rowemb) { 2951 ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr); 2952 if (m != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with diag matrix row size %D",m,A->rmap->n); 2953 } else { 2954 if (C->rmap->n != A->rmap->n) { 2955 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is row-incompatible with the MPIAIJ matrix"); 2956 } 2957 } 2958 if (dcolemb) { 2959 ierr = ISGetLocalSize(dcolemb,&n);CHKERRQ(ierr); 2960 if (n != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag col IS of size %D is incompatible with diag matrix col size %D",n,A->cmap->n); 2961 } else { 2962 if (C->cmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is col-incompatible with the MPIAIJ matrix"); 2963 } 2964 } 2965 if (B) { 2966 ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJ,&seqaij);CHKERRQ(ierr); 2967 if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diagonal matrix is of wrong type"); 2968 if (rowemb) { 2969 ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr); 2970 if (m != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with off-diag matrix row size %D",m,B->rmap->n); 2971 } else { 2972 if (C->rmap->n != B->rmap->n) { 2973 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is row-incompatible with the MPIAIJ matrix"); 2974 } 2975 } 2976 if (ocolemb) { 2977 ierr = ISGetLocalSize(ocolemb,&n);CHKERRQ(ierr); 2978 if (n != B->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag col IS of size %D is incompatible with off-diag matrix col size %D",n,B->cmap->n); 2979 } else { 2980 if (C->cmap->N - C->cmap->n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is col-incompatible with the MPIAIJ matrix"); 2981 } 2982 } 2983 2984 aij = (Mat_MPIAIJ*)(C->data); 2985 if (!aij->A) { 2986 /* Mimic parts of MatMPIAIJSetPreallocation() */ 2987 ierr =
MatCreate(PETSC_COMM_SELF,&aij->A);CHKERRQ(ierr); 2988 ierr = MatSetSizes(aij->A,C->rmap->n,C->cmap->n,C->rmap->n,C->cmap->n);CHKERRQ(ierr); 2989 ierr = MatSetBlockSizesFromMats(aij->A,C,C);CHKERRQ(ierr); 2990 ierr = MatSetType(aij->A,MATSEQAIJ);CHKERRQ(ierr); 2991 ierr = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->A);CHKERRQ(ierr); 2992 } 2993 if (A) { 2994 ierr = MatSetSeqMat_SeqAIJ(aij->A,rowemb,dcolemb,pattern,A);CHKERRQ(ierr); 2995 } else { 2996 ierr = MatSetUp(aij->A);CHKERRQ(ierr); 2997 } 2998 if (B) { /* Destroy the old matrix or the column map, depending on the sparsity pattern. */ 2999 /* 3000 If pattern == DIFFERENT_NONZERO_PATTERN, we reallocate B and 3001 need to "disassemble" B -- convert it to using C's global indices. 3002 To insert the values we take the safer, albeit more expensive, route of MatSetValues(). 3003 3004 If pattern == SUBSET_NONZERO_PATTERN, we do not "disassemble" B and do not reallocate; 3005 we MatZeroValues(B) first, so there may be a bunch of zeros that, perhaps, could be compacted out. 3006 3007 TODO: Put B's values into aij->B's aij structure in place using the embedding ISs? 3008 At least avoid calling MatSetValues() and the implied searches? 3009 */ 3010 3011 if (B && pattern == DIFFERENT_NONZERO_PATTERN) { 3012 #if defined(PETSC_USE_CTABLE) 3013 ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr); 3014 #else 3015 ierr = PetscFree(aij->colmap);CHKERRQ(ierr); 3016 /* A bit of a HACK: ideally we should deal with case aij->B all in one code block below. */ 3017 if (aij->B) { 3018 ierr = PetscLogObjectMemory((PetscObject)C,-aij->B->cmap->n*sizeof(PetscInt));CHKERRQ(ierr); 3019 } 3020 #endif 3021 ngcol = 0; 3022 if (aij->lvec) { 3023 ierr = VecGetSize(aij->lvec,&ngcol);CHKERRQ(ierr); 3024 } 3025 if (aij->garray) { 3026 ierr = PetscFree(aij->garray);CHKERRQ(ierr); 3027 ierr = PetscLogObjectMemory((PetscObject)C,-ngcol*sizeof(PetscInt));CHKERRQ(ierr); 3028 } 3029 ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr); 3030 ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr); 3031 } 3032 if (aij->B && B && pattern == DIFFERENT_NONZERO_PATTERN) { 3033 ierr = MatDestroy(&aij->B);CHKERRQ(ierr); 3034 } 3035 if (aij->B && B && pattern == SUBSET_NONZERO_PATTERN) { 3036 ierr = MatZeroEntries(aij->B);CHKERRQ(ierr); 3037 } 3038 } 3039 Bdisassembled = PETSC_FALSE; 3040 if (!aij->B) { 3041 ierr = MatCreate(PETSC_COMM_SELF,&aij->B);CHKERRQ(ierr); 3042 ierr = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->B);CHKERRQ(ierr); 3043 ierr = MatSetSizes(aij->B,C->rmap->n,C->cmap->N,C->rmap->n,C->cmap->N);CHKERRQ(ierr); 3044 ierr = MatSetBlockSizesFromMats(aij->B,B,B);CHKERRQ(ierr); 3045 ierr = MatSetType(aij->B,MATSEQAIJ);CHKERRQ(ierr); 3046 Bdisassembled = PETSC_TRUE; 3047 } 3048 if (B) { 3049 Baij = (Mat_SeqAIJ*)(B->data); 3050 if (pattern == DIFFERENT_NONZERO_PATTERN) { 3051 ierr = PetscMalloc1(B->rmap->n,&nz);CHKERRQ(ierr); 3052 for (i=0; i<B->rmap->n; i++) { 3053 nz[i] = Baij->i[i+1] - Baij->i[i]; 3054 } 3055 ierr = MatSeqAIJSetPreallocation(aij->B,0,nz);CHKERRQ(ierr); 3056 ierr = PetscFree(nz);CHKERRQ(ierr); 3057 } 3058 3059 ierr = PetscLayoutGetRange(C->rmap,&rstart,&rend);CHKERRQ(ierr); 3060 shift = rend-rstart; 3061 count = 0; 3062 rowindices = NULL; 3063 colindices = NULL; 3064 if (rowemb) { 3065 ierr = ISGetIndices(rowemb,&rowindices);CHKERRQ(ierr); 3066 } 3067 if (ocolemb) { 3068 ierr = ISGetIndices(ocolemb,&colindices);CHKERRQ(ierr); 3069 } 3070 for (i=0; i<B->rmap->n; i++) { 3071 PetscInt row; 3072 row = i; 3073 if (rowindices) row = rowindices[i]; 3074 for 
(j=Baij->i[i]; j<Baij->i[i+1]; j++) { 3075 col = Baij->j[count]; 3076 if (colindices) col = colindices[col]; 3077 if (Bdisassembled && col>=rstart) col += shift; 3078 v = Baij->a[count]; 3079 ierr = MatSetValues(aij->B,1,&row,1,&col,&v,INSERT_VALUES);CHKERRQ(ierr); 3080 ++count; 3081 } 3082 } 3083 /* No assembly for aij->B is necessary. */ 3084 /* FIXME: set aij->B's nonzerostate correctly. */ 3085 } else { 3086 ierr = MatSetUp(aij->B);CHKERRQ(ierr); 3087 } 3088 C->preallocated = PETSC_TRUE; 3089 C->was_assembled = PETSC_FALSE; 3090 C->assembled = PETSC_FALSE; 3091 /* 3092 C will need to be assembled so that aij->B can be compressed into local form in MatSetUpMultiply_MPIAIJ(). 3093 Furthermore, its nonzerostate will need to be based on that of aij->A's and aij->B's. 3094 */ 3095 PetscFunctionReturn(0); 3096 } 3097 3098 /* 3099 B uses local indices with column indices ranging between 0 and N-n; they must be interpreted using garray. 3100 */ 3101 PetscErrorCode MatGetSeqMats_MPIAIJ(Mat C,Mat *A,Mat *B) 3102 { 3103 Mat_MPIAIJ *aij = (Mat_MPIAIJ*) (C->data); 3104 3105 PetscFunctionBegin; 3106 PetscValidPointer(A,2); 3107 PetscValidPointer(B,3); 3108 /* FIXME: make sure C is assembled */ 3109 *A = aij->A; 3110 *B = aij->B; 3111 /* Note that we don't incref *A and *B, so be careful! */ 3112 PetscFunctionReturn(0); 3113 } 3114 3115 /* 3116 Extract MPI submatrices encoded by pairs of IS that may live on subcomms of C. 3117 NOT SCALABLE due to the use of ISGetNonlocalIS() (see below). 3118 */ 3119 PetscErrorCode MatGetSubMatricesMPI_MPIXAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[], 3120 PetscErrorCode(*getsubmats_seq)(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat**), 3121 PetscErrorCode(*getlocalmats)(Mat,Mat*,Mat*), 3122 PetscErrorCode(*setseqmat)(Mat,IS,IS,MatStructure,Mat), 3123 PetscErrorCode(*setseqmats)(Mat,IS,IS,IS,MatStructure,Mat,Mat)) 3124 { 3125 PetscErrorCode ierr; 3126 PetscMPIInt isize,flag; 3127 PetscInt i,ii,cismax,ispar; 3128 Mat *A,*B; 3129 IS *isrow_p,*iscol_p,*cisrow,*ciscol,*ciscol_p; 3130 3131 PetscFunctionBegin; 3132 if (!ismax) PetscFunctionReturn(0); 3133 3134 for (i = 0, cismax = 0; i < ismax; ++i) { 3135 PetscMPIInt isize; 3136 ierr = MPI_Comm_compare(((PetscObject)isrow[i])->comm,((PetscObject)iscol[i])->comm,&flag);CHKERRQ(ierr); 3137 if (flag != MPI_IDENT) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Row and column index sets must have the same communicator"); 3138 ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm, &isize);CHKERRQ(ierr); 3139 if (isize > 1) ++cismax; 3140 } 3141 3142 /* 3143 If cismax is zero on all C's ranks, then and only then can we use purely sequential matrix extraction. 3144 ispar counts the number of parallel ISs across C's comm. 3145 */ 3146 ierr = MPIU_Allreduce(&cismax,&ispar,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 3147 if (!ispar) { /* Sequential ISs only across C's comm, so can call the sequential matrix extraction subroutine. */ 3148 ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr); 3149 PetscFunctionReturn(0); 3150 } 3151 3152 /* if (ispar) */ 3153 /* 3154 Construct the "complements" -- the off-processor indices -- of the iscol ISs for parallel ISs only. 3155 These are used to extract the off-diag portion of the resulting parallel matrix. 3156 The row IS for the off-diag portion is the same as for the diag portion, 3157 so we merely alias (without increfing) the row IS, while skipping those that are sequential. 
/*
  Extract MPI submatrices encoded by pairs of IS that may live on subcomms of C.
  NOT SCALABLE due to the use of ISGetNonlocalIS() (see below).
*/
PetscErrorCode MatGetSubMatricesMPI_MPIXAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[],
                                            PetscErrorCode(*getsubmats_seq)(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat**),
                                            PetscErrorCode(*getlocalmats)(Mat,Mat*,Mat*),
                                            PetscErrorCode(*setseqmat)(Mat,IS,IS,MatStructure,Mat),
                                            PetscErrorCode(*setseqmats)(Mat,IS,IS,IS,MatStructure,Mat,Mat))
{
  PetscErrorCode ierr;
  PetscMPIInt    isize,flag;
  PetscInt       i,ii,cismax,ispar;
  Mat            *A,*B;
  IS             *isrow_p,*iscol_p,*cisrow,*ciscol,*ciscol_p;

  PetscFunctionBegin;
  if (!ismax) PetscFunctionReturn(0);

  for (i = 0, cismax = 0; i < ismax; ++i) {
    PetscMPIInt isize;
    ierr = MPI_Comm_compare(((PetscObject)isrow[i])->comm,((PetscObject)iscol[i])->comm,&flag);CHKERRQ(ierr);
    if (flag != MPI_IDENT) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Row and column index sets must have the same communicator");
    ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
    if (isize > 1) ++cismax;
  }

  /*
    If cismax is zero on all of C's ranks, then and only then can we use purely sequential matrix extraction.
    ispar is the maximum of cismax over C's comm: it is nonzero iff at least one rank holds a parallel IS.
  */
  ierr = MPIU_Allreduce(&cismax,&ispar,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);
  if (!ispar) { /* Sequential ISs only across C's comm, so we can call the sequential matrix extraction subroutine. */
    ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  /* if (ispar) */
  /*
    Construct the "complements" -- the off-processor indices -- of the iscol ISs for parallel ISs only.
    These are used to extract the off-diag portion of the resulting parallel matrix.
    The row IS for the off-diag portion is the same as for the diag portion,
    so we merely alias (without increfing) the row IS, while skipping those that are sequential.
  */
  ierr = PetscMalloc2(cismax,&cisrow,cismax,&ciscol);CHKERRQ(ierr);
  ierr = PetscMalloc1(cismax,&ciscol_p);CHKERRQ(ierr);
  for (i = 0, ii = 0; i < ismax; ++i) {
    ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
    if (isize > 1) {
      /*
        TODO: This is the part that is ***NOT SCALABLE***.
        To fix this we need to extract just the indices of C's nonzero columns
        that lie on the intersection of isrow[i] and ciscol[ii] -- the nonlocal
        part of iscol[i] -- without actually computing ciscol[ii]. This also has
        to be done without serializing on the IS list, so, most likely, it is best
        done by rewriting MatGetSubMatrices_MPIAIJ() directly.
      */
      ierr = ISGetNonlocalIS(iscol[i],&(ciscol[ii]));CHKERRQ(ierr);
      /*
        Now we have to
        (a) make sure ciscol[ii] is sorted, since, even if the off-proc indices
            were sorted on each rank, concatenated they might no longer be sorted;
        (b) use ISSortPermutation() to construct ciscol_p, the mapping from the
            indices in nondecreasing order to the original index positions.
        If ciscol[ii] is strictly increasing, the permutation IS is NULL.
      */
      ierr = ISSortPermutation(ciscol[ii],PETSC_FALSE,ciscol_p+ii);CHKERRQ(ierr);
      ierr = ISSort(ciscol[ii]);CHKERRQ(ierr);
      ++ii;
    }
  }
  ierr = PetscMalloc2(ismax,&isrow_p,ismax,&iscol_p);CHKERRQ(ierr);
  for (i = 0, ii = 0; i < ismax; ++i) {
    PetscInt       j,issize;
    const PetscInt *indices;

    /* Permute the indices into nondecreasing order. Reject row and col indices with duplicates. */
    ierr = ISSortPermutation(isrow[i],PETSC_FALSE,isrow_p+i);CHKERRQ(ierr);
    ierr = ISSort(isrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&issize);CHKERRQ(ierr);
    ierr = ISGetIndices(isrow[i],&indices);CHKERRQ(ierr);
    for (j = 1; j < issize; ++j) {
      if (indices[j] == indices[j-1]) {
        SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in row IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]);
      }
    }
    ierr = ISRestoreIndices(isrow[i],&indices);CHKERRQ(ierr);

    ierr = ISSortPermutation(iscol[i],PETSC_FALSE,iscol_p+i);CHKERRQ(ierr);
    ierr = ISSort(iscol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&issize);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&indices);CHKERRQ(ierr);
    for (j = 1; j < issize; ++j) {
      if (indices[j-1] == indices[j]) {
        SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in col IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]);
      }
    }
    ierr = ISRestoreIndices(iscol[i],&indices);CHKERRQ(ierr);
    ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
    if (isize > 1) {
      cisrow[ii] = isrow[i];
      ++ii;
    }
  }
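  /*
    Illustrative example (assumed numbers, not from the original source): if isrow[i] initially
    contains {7,3,5}, ISSort() turns it into {3,5,7} and the permutation isrow_p[i] produced by
    ISSortPermutation() is {1,2,0}: position k of the sorted IS came from position isrow_p[i][k]
    of the caller's original ordering.  These permutations are used below to place the extracted
    values back in the order the caller requested; a NULL permutation means the IS was already sorted.
  */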
  /*
    Allocate the arrays to hold the resulting parallel matrices, as well as the intermediate
    arrays of sequential matrices underlying them.
    Which arrays to allocate is based on the value of MatReuse scall and whether the ISs are sorted
    and/or contain duplicates.

    There are as many diag matrices as there are original index sets. There are only as many parallel
    and off-diag matrices as there are parallel (comm size > 1) index sets.

    ARRAYS that can hold Seq matrices get allocated in any event -- either here or by getsubmats_seq():
    - If the array of MPI matrices already exists and is being reused, we need to allocate the array
      and extract the underlying seq matrices into it to serve as placeholders, into which getsubmats_seq
      will deposit the extracted diag and off-diag parts. Thus, we allocate the A&B arrays and fill them
      with A[i] and B[ii] extracted from the corresponding MPI submat.
    - However, if the rows, A's column indices or B's column indices are not sorted, the extracted A[i] & B[ii]
      will have a different order from what getsubmats_seq expects. To handle this case -- indicated
      by a nonzero isrow_p[i], iscol_p[i], or ciscol_p[ii] -- we duplicate A[i] --> AA[i], B[ii] --> BB[ii]
      (retrieve composed AA[i] or BB[ii]) and reuse them here. AA[i] and BB[ii] are then used to permute their
      values into A[i] and B[ii] sitting inside the corresponding submat.
    - If no reuse is taking place then getsubmats_seq will allocate the A&B arrays and create the corresponding
      A[i], B[ii], AA[i] or BB[ii] matrices.
  */
  /* The parallel matrix array is allocated here only if no reuse is taking place. If reused, it is passed in by the caller. */
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscMalloc1(ismax,submat);CHKERRQ(ierr);
  }

  /* Now obtain the sequential A and B submatrices separately. */
  /* scall == MAT_REUSE_MATRIX is not handled yet, because getsubmats_seq() requires reuse of A and B. */
  ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);
  ierr = (*getsubmats_seq)(C,cismax,cisrow,ciscol,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
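  /*
    At this point A[i] is the sequential "diag" piece extracted with (isrow[i],iscol[i]) for every
    i < ismax, while B[ii] is the sequential "off-diag" piece extracted with (cisrow[ii],ciscol[ii])
    and exists only for the parallel ISs; ii advances only when isrow[i] lives on a communicator of
    size > 1, mirroring the loops above and below.
  */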
  /*
    If scall == MAT_REUSE_MATRIX AND the permutations are NULL, we are done, since the sequential
    matrices A & B have been extracted directly into the parallel matrices containing them, or
    simply into the sequential matrix identical with the corresponding A (if isize == 1).
    Note that in that case colmap does not need to be rebuilt, since the matrices are expected
    to have the same sparsity pattern.
    Otherwise, A and/or B have to be properly embedded into C's index spaces and the correct colmap
    must be constructed for C. This is done by setseqmat(s).
  */
  for (i = 0, ii = 0; i < ismax; ++i) {
    /*
      TODO: cache ciscol, permutation ISs and maybe cisrow? What about isrow & iscol?
      That way we can avoid sorting and computing permutations when reusing.
      To this end:
      - remove the old cache, if it exists, when extracting submatrices with MAT_INITIAL_MATRIX
      - if caching arrays to hold the ISs, make and compose a container for them so that it can
        be destroyed upon destruction of C (use PetscContainerSetUserDestroy() to clear out the contents).
    */
    MatStructure pattern;
    pattern = DIFFERENT_NONZERO_PATTERN;

    ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
    /* Construct submat[i] from the Seq pieces A (and B, if necessary). */
    if (isize > 1) {
      if (scall == MAT_INITIAL_MATRIX) {
        ierr = MatCreate(((PetscObject)isrow[i])->comm,(*submat)+i);CHKERRQ(ierr);
        ierr = MatSetSizes((*submat)[i],A[i]->rmap->n,A[i]->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = MatSetType((*submat)[i],MATMPIAIJ);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp((*submat)[i]->rmap);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp((*submat)[i]->cmap);CHKERRQ(ierr);
      }
      /* For each parallel isrow[i], insert the extracted sequential matrices into the parallel matrix. */
      {
        Mat AA,BB;
        AA = A[i];
        BB = B[ii];
        if (AA || BB) {
          ierr = setseqmats((*submat)[i],isrow_p[i],iscol_p[i],ciscol_p[ii],pattern,AA,BB);CHKERRQ(ierr);
          ierr = MatAssemblyBegin((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
          ierr = MatAssemblyEnd((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        }

        ierr = MatDestroy(&AA);CHKERRQ(ierr);
        ierr = MatDestroy(&BB);CHKERRQ(ierr);
      }
      ierr = ISDestroy(ciscol+ii);CHKERRQ(ierr);
      ierr = ISDestroy(ciscol_p+ii);CHKERRQ(ierr);
      ++ii;
    } else { /* if (isize == 1) */
      if (scall == MAT_REUSE_MATRIX) {
        ierr = MatDestroy(&(*submat)[i]);CHKERRQ(ierr);
      }
      if (isrow_p[i] || iscol_p[i]) {
        ierr = MatDuplicate(A[i],MAT_DO_NOT_COPY_VALUES,(*submat)+i);CHKERRQ(ierr);
        ierr = setseqmat((*submat)[i],isrow_p[i],iscol_p[i],pattern,A[i]);CHKERRQ(ierr);
        /* Otherwise A is extracted straight into (*submat)[i]. */
        /* TODO: Compose A[i] on (*submat)[i] for future use, if ((isrow_p[i] || iscol_p[i]) && MAT_INITIAL_MATRIX). */
        ierr = MatDestroy(A+i);CHKERRQ(ierr);
      } else (*submat)[i] = A[i];
    }
    ierr = ISDestroy(&isrow_p[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&iscol_p[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree2(cisrow,ciscol);CHKERRQ(ierr);
  ierr = PetscFree2(isrow_p,iscol_p);CHKERRQ(ierr);
  ierr = PetscFree(ciscol_p);CHKERRQ(ierr);
  ierr = PetscFree(A);CHKERRQ(ierr);
  ierr = PetscFree(B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetSubMatricesMPI_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatGetSubMatricesMPI_MPIXAIJ(C,ismax,isrow,iscol,scall,submat,MatGetSubMatrices_MPIAIJ,MatGetSeqMats_MPIAIJ,MatSetSeqMat_SeqAIJ,MatSetSeqMats_MPIAIJ);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
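/*
  Usage sketch (illustrative only; assumes this routine is reached through the public
  MatGetSubMatricesMPI() interface of this PETSc version, with isrow/iscol index sets that may
  live on sub-communicators of C):

    Mat *submats;
    ierr = MatGetSubMatricesMPI(C,nis,isrow,iscol,MAT_INITIAL_MATRIX,&submats);CHKERRQ(ierr);
    // submats[i] lives on the communicator of isrow[i]: MPIAIJ when that comm has size > 1, SeqAIJ otherwise.
*/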