/*
 * overlapsplit.c: increase the overlap of a 'big' subdomain across several processor cores
 *
 * Author: Fande Kong <fdkong.jd@gmail.com>
 */

#include <petscsf.h>
#include <petsc/private/matimpl.h>

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlapSplit_Single"

/*
 * Increase the overlap of a sub-matrix across a sub-communicator;
 * the sub-matrix can be a graph or a numerical matrix.
 * */
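/*
 * Outline of the steps below:
 *   1) increase the overlap on each rank with the matrix type's own routine;
 *   2) move the index set onto the sub-communicator and gather all indices there;
 *   3) on the root of the sub-communicator, sort the gathered indices, drop
 *      duplicates, and remember which rank each surviving index came from;
 *   4) send every surviving index back to its source rank with a star forest (PetscSF);
 *   5) extract the corresponding parallel sub-matrix, repartition it, and build
 *      the final overlapping index set from the partitioning.
 */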
PetscErrorCode  MatIncreaseOverlapSplit_Single(Mat mat,IS *is,PetscInt ov)
{
  PetscInt         i,nindx,*indices_sc,*indices_ov,localsize,*localsizes_sc,localsize_tmp;
  PetscInt         *indices_ov_rd,nroots,nleaves,*localoffsets,*indices_recv,*sources_sc,*sources_sc_rd;
  const PetscInt   *indices;
  PetscMPIInt      srank,ssize,issamecomm,k,grank;
  IS               is_sc,allis_sc,partitioning;
  MPI_Comm         gcomm,dcomm,scomm;
  PetscSF          sf;
  PetscSFNode      *remote;
  Mat              *smat;
  MatPartitioning  part;
  PetscErrorCode   ierr;

  PetscFunctionBegin;
  /* get a sub-communicator before calling the individual MatIncreaseOverlap,
   * since the sub-communicator may be changed
   * */
  ierr = PetscObjectGetComm((PetscObject)(*is),&dcomm);CHKERRQ(ierr);
  /* make a copy before the original one is deleted */
  ierr = PetscCommDuplicate(dcomm,&scomm,NULL);CHKERRQ(ierr);
  /* get the global communicator; mat is expected to be a global (parallel) matrix */
  ierr = PetscObjectGetComm((PetscObject)mat,&gcomm);CHKERRQ(ierr);
#if 0
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
  ierr = PetscPrintf(PETSC_COMM_SELF,"before mat->ops->increaseoverlap\n");CHKERRQ(ierr);
#endif
  /* increase the overlap on each individual subdomain */
  ierr = (*mat->ops->increaseoverlap)(mat,1,is,ov);CHKERRQ(ierr);
#if 0
  ierr = PetscPrintf(gcomm,"after mat->ops->increaseoverlap \n");CHKERRQ(ierr);
  ierr = ISView(*is,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  /* compare communicators */
  ierr = MPI_Comm_compare(gcomm,scomm,&issamecomm);CHKERRQ(ierr);
  /* if the sub-communicator is the same as the global communicator,
   * the user does not want to use a sub-communicator
   * */
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT) PetscFunctionReturn(0);
  /* if the sub-communicator is PETSC_COMM_SELF,
   * the user does not care about the sub-communicator either
   * */
  ierr = MPI_Comm_compare(scomm,PETSC_COMM_SELF,&issamecomm);CHKERRQ(ierr);
  if(issamecomm == MPI_IDENT || issamecomm == MPI_CONGRUENT){PetscFunctionReturn(0);}
  /* local rank and size in the sub-communicator */
  ierr = MPI_Comm_rank(scomm,&srank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(scomm,&ssize);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(gcomm,&grank);CHKERRQ(ierr);
  /* create a new IS based on the sub-communicator,
   * since the old IS is often based on PETSC_COMM_SELF
   * */
  ierr = ISGetLocalSize(*is,&nindx);CHKERRQ(ierr);
  ierr = PetscCalloc1(nindx,&indices_sc);CHKERRQ(ierr);
  ierr = ISGetIndices(*is,&indices);CHKERRQ(ierr);
  ierr = PetscMemcpy(indices_sc,indices,sizeof(PetscInt)*nindx);CHKERRQ(ierr);
  ierr = ISRestoreIndices(*is,&indices);CHKERRQ(ierr);
  /* we do not need the old IS any more */
  ierr = ISDestroy(is);CHKERRQ(ierr);
  /* create an index set based on the sub-communicator */
  ierr = ISCreateGeneral(scomm,nindx,indices_sc,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
  /* gather all indices within the sub-communicator (concatenated in rank order) */
  ierr = ISAllGather(is_sc,&allis_sc);CHKERRQ(ierr);
#if 0
  ierr = ISView(allis_sc,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  /* gather local sizes */
  ierr = PetscMalloc1(ssize,&localsizes_sc);CHKERRQ(ierr);
  /* get the individual local sizes of all index sets */
  ierr = MPI_Gather(&nindx,1,MPIU_INT,localsizes_sc,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
#if 0
  if(!srank){
    for(i=0; i<ssize; i++){
      ierr = PetscPrintf(PETSC_COMM_SELF," localsize[%d]: %d \n",i,localsizes_sc[i]);CHKERRQ(ierr);
    }
  }
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  /* only the root does these computations */
  if(!srank){
   /* get the local size of the big index set */
   ierr = ISGetLocalSize(allis_sc,&localsize);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov,localsize,&sources_sc);CHKERRQ(ierr);
   ierr = PetscCalloc2(localsize,&indices_ov_rd,localsize,&sources_sc_rd);CHKERRQ(ierr);
   ierr = ISGetIndices(allis_sc,&indices);CHKERRQ(ierr);
   ierr = PetscMemcpy(indices_ov,indices,sizeof(PetscInt)*localsize);CHKERRQ(ierr);
   ierr = ISRestoreIndices(allis_sc,&indices);CHKERRQ(ierr);
   /* we do not need it any more */
   ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
   /* assign a source rank to every gathered index (the gathered set keeps rank order) */
   localsize_tmp = 0;
   for(k=0; k<ssize; k++){
     for(i=0; i<localsizes_sc[k]; i++){
       sources_sc[localsize_tmp++] = k;
     }
   }
   /* sort the indices and carry the source ranks along, so duplicates become adjacent */
   ierr = PetscSortIntWithArray(localsize,indices_ov,sources_sc);CHKERRQ(ierr);
#if 0
   ierr = PetscIntView(localsize,indices_ov,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
   ierr = PetscIntView(localsize,sources_sc,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
#endif
   /* count local sizes for the reduced (deduplicated) indices */
   ierr = PetscMemzero(localsizes_sc,sizeof(PetscInt)*ssize);CHKERRQ(ierr);
   /* initialize the first entry */
   if(localsize){
     indices_ov_rd[0] = indices_ov[0];
     sources_sc_rd[0] = sources_sc[0];
     localsizes_sc[sources_sc[0]]++;
   }
   localsize_tmp = 1;
   /* remove duplicate indices, keeping one source rank per surviving index */
   for(i=1; i<localsize; i++){
     if(indices_ov[i] != indices_ov[i-1]){
       indices_ov_rd[localsize_tmp]   = indices_ov[i];
       sources_sc_rd[localsize_tmp++] = sources_sc[i];
       localsizes_sc[sources_sc[i]]++;
     }
   }
   ierr = PetscFree2(indices_ov,sources_sc);CHKERRQ(ierr);
   ierr = PetscCalloc1(ssize+1,&localoffsets);CHKERRQ(ierr);
   for(k=0; k<ssize; k++){
     localoffsets[k+1] = localoffsets[k] + localsizes_sc[k];
   }
   /* construct a star forest to send the data back */
   nleaves = localoffsets[ssize];
   ierr = PetscMemzero(localoffsets,(ssize+1)*sizeof(PetscInt));CHKERRQ(ierr);
   nroots  = localsizes_sc[srank];
   ierr = PetscCalloc1(nleaves,&remote);CHKERRQ(ierr);
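   /* each leaf lives on the root and stores one deduplicated index: remote[i]
    * points that leaf back at the rank the index came from, and localoffsets
    * (zeroed above) is reused as a running write cursor into each rank's root
    * space, so the PetscSFReduce below returns every index to its contributor */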
   for(i=0; i<nleaves; i++){
     remote[i].rank  = sources_sc_rd[i];
     remote[i].index = localoffsets[sources_sc_rd[i]]++;
   }
   ierr = PetscFree(localoffsets);CHKERRQ(ierr);
#if 0
   if(grank==2){
     ierr = PetscIntView(localsize_tmp,indices_ov_rd,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
   }
#endif
  }else{
   ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
   /* allocate a zero-length array, since PetscSFSetGraph below takes ownership of it */
   ierr = PetscCalloc1(0,&remote);CHKERRQ(ierr);
   nleaves       = 0;
   indices_ov_rd = NULL;
   sources_sc_rd = NULL;
  }
  /* scatter the sizes to everybody */
  ierr = MPI_Scatter(localsizes_sc,1,MPIU_INT,&nroots,1,MPIU_INT,0,scomm);CHKERRQ(ierr);
  /* free memory */
  ierr = PetscFree(localsizes_sc);CHKERRQ(ierr);
  ierr = PetscCalloc1(nroots,&indices_recv);CHKERRQ(ierr);
  /*ierr = MPI_Comm_dup(scomm,&dcomm);CHKERRQ(ierr);*/
  /* send the data back to everybody */
  ierr = PetscSFCreate(scomm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nroots,nleaves,PETSC_NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
#if 0
  ierr = PetscSFView(sf,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
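  /* the reduce moves leaf data (the deduplicated indices held on the root) onto the
   * roots, i.e. every rank of scomm receives exactly the indices that originated
   * from it; MPIU_REPLACE suffices because each root is targeted by a single leaf */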
  ierr = PetscSFReduceBegin(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf,MPIU_INT,indices_ov_rd,indices_recv,MPIU_REPLACE);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  /* free memory */
  ierr = PetscFree2(indices_ov_rd,sources_sc_rd);CHKERRQ(ierr);
  /* create an index set with the received indices */
  ierr = ISCreateGeneral(scomm,nroots,indices_recv,PETSC_OWN_POINTER,&is_sc);CHKERRQ(ierr);
#if 0
  ierr = ISView(is_sc,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  /* construct a parallel submatrix */
  ierr = PetscCalloc1(1,&smat);CHKERRQ(ierr);
#if 0
  ierr = ISView(allis_sc,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
  /*ierr = ISView(is_sc,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);*/
  MPI_Comm   comm1, comm2;
  ierr = PetscObjectGetComm((PetscObject)is_sc,&comm1);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)allis_sc,&comm2);CHKERRQ(ierr);
  /*ierr = PetscCommDuplicate(comm1,&comm2,NULL);CHKERRQ(ierr);*/
  /*ierr = MPI_Comm_dup(comm1,&comm2);CHKERRQ(ierr);*/
  ierr = MPI_Comm_compare(comm2,comm1,&issamecomm);CHKERRQ(ierr);
  if(issamecomm == MPI_IDENT){
    ierr = PetscPrintf(gcomm,"the same communicator \n");CHKERRQ(ierr);
  }else{
    ierr = PetscPrintf(gcomm,"different communicator \n");CHKERRQ(ierr);
  }
#endif
#if 0
  ierr = ISView(is_sc,PETSC_NULL);CHKERRQ(ierr);
#endif
  /* get the square parallel sub-matrix on the sub-communicator; its rows and columns are both given by is_sc */
  ierr = MatGetSubMatricesMPI(mat,1,&is_sc,&is_sc,MAT_INITIAL_MATRIX,&smat);CHKERRQ(ierr);
  /* we do not need it any more */
  ierr = ISDestroy(&allis_sc);CHKERRQ(ierr);
#if 0
  ierr = MatView(smat[0],PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  /* create a partitioner to repartition the sub-matrix */
  ierr = MatPartitioningCreate(scomm,&part);CHKERRQ(ierr);
  ierr = MatPartitioningSetAdjacency(part,smat[0]);CHKERRQ(ierr);
#if defined(PETSC_HAVE_PARMETIS)
  /* if a ParMETIS installation exists, we try to use ParMETIS,
   * because its repartitioning routine usually works better
   * */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGPARMETIS);CHKERRQ(ierr);
  /* try to use the repartition function instead of the partition function */
  ierr = MatPartitioningParmetisSetRepartition(part);CHKERRQ(ierr);
#else
  /* otherwise we at least provide a default partitioner to rebalance the computation */
  ierr = MatPartitioningSetType(part,MATPARTITIONINGAVERAGE);CHKERRQ(ierr);
#endif
  /* the user can pick any partitioner through the options database */
  ierr = MatPartitioningSetFromOptions(part);CHKERRQ(ierr);
  /* apply the partitioning */
  ierr = MatPartitioningApply(part,&partitioning);CHKERRQ(ierr);
  ierr = MatPartitioningDestroy(&part);CHKERRQ(ierr);
  ierr = MatDestroy(&(smat[0]));CHKERRQ(ierr);
  ierr = PetscFree(smat);CHKERRQ(ierr);
#if 0
  ierr = ISView(partitioning,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
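  /* combine the partitioning (the target rank of every local row of the sub-matrix)
   * with is_sc (the corresponding global row numbers), so that each rank of the
   * sub-communicator ends up with the overlapping rows the partitioner assigned to it */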
  /* get the local rows, including the overlap */
  ierr = ISBuildTwoSided(partitioning,is_sc,is);CHKERRQ(ierr);
#if 0
  ierr = ISView(*is,PETSC_NULL);CHKERRQ(ierr);
  ierr = MPI_Barrier(gcomm);CHKERRQ(ierr);
#endif
  /* clean up */
  ierr = ISDestroy(&is_sc);CHKERRQ(ierr);
  ierr = ISDestroy(&partitioning);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
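
/*
 * A minimal usage sketch (illustration only, not part of the library): assume a
 * parallel matrix A living on PETSC_COMM_WORLD and a sub-communicator subcomm
 * that groups several ranks into one 'big' subdomain; A, subcomm, nlocal and
 * rows below are placeholders supplied by the caller.
 *
 *   IS is;
 *   ierr = ISCreateGeneral(subcomm,nlocal,rows,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
 *   ierr = MatIncreaseOverlapSplit_Single(A,&is,1);CHKERRQ(ierr);
 *   // on return, is lives on subcomm and holds the overlapping rows,
 *   // rebalanced across the ranks of subcomm by the partitioner
 *   ierr = ISDestroy(&is);CHKERRQ(ierr);
 */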
259