xref: /petsc/src/sys/objects/subcomm.c (revision fe998a80077c9ee0917a39496df43fc256e1b478)
1 
2 /*
3      Provides utility routines for split MPI communicator.
4 */
5 #include <petscsys.h>    /*I   "petscsys.h"    I*/
6 #include <petsc-private/threadcommimpl.h> /* Petsc_ThreadComm_keyval */
7 #include <petscviewer.h>
8 
9 const char *const PetscSubcommTypes[] = {"GENERAL","CONTIGUOUS","INTERLACED","PetscSubcommType","PETSC_SUBCOMM_",0};
10 
11 extern PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm);
12 extern PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm);
13 #undef __FUNCT__
14 #define __FUNCT__ "PetscSubcommSetFromOptions"
15 PetscErrorCode PetscSubcommSetFromOptions(PetscSubcomm psubcomm)
16 {
17   PetscErrorCode ierr;
18   PetscSubcommType type;
19   PetscBool      flg;
20 
21   PetscFunctionBegin;
22   if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"Must call PetscSubcommCreate firt");
23   type = psubcomm->type;
24   ierr = PetscOptionsEnum("-psubcomm_type","PETSc subcommunicator","PetscSubcommSetType",PetscSubcommTypes,(PetscEnum)type,(PetscEnum*)&type,&flg);CHKERRQ(ierr);
25   if (flg && psubcomm->type != type) {
26     /* free old structures */
27     ierr = PetscCommDestroy(&(psubcomm)->dupparent);CHKERRQ(ierr);
28     ierr = PetscCommDestroy(&(psubcomm)->comm);CHKERRQ(ierr);
29     ierr = PetscFree((psubcomm)->subsize);CHKERRQ(ierr);
30     switch (type) {
31     case PETSC_SUBCOMM_GENERAL:
32       SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Runtime option PETSC_SUBCOMM_GENERAL is not supported, use PetscSubcommSetTypeGeneral()");
33     case PETSC_SUBCOMM_CONTIGUOUS:
34       ierr = PetscSubcommCreate_contiguous(psubcomm);CHKERRQ(ierr);
35       break;
36     case PETSC_SUBCOMM_INTERLACED:
37       ierr = PetscSubcommCreate_interlaced(psubcomm);CHKERRQ(ierr);
38       break;
39     default:
40       SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"PetscSubcommType %s is not supported yet",PetscSubcommTypes[type]);
41     }
42   }
43 
44   ierr = PetscOptionsHasName(NULL, "-psubcomm_view", &flg);CHKERRQ(ierr);
45   if (flg) {
46     ierr = PetscSubcommView(psubcomm,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
47   }
48   PetscFunctionReturn(0);
49 }
50 
51 #undef __FUNCT__
52 #define __FUNCT__ "PetscSubcommView"
53 PetscErrorCode PetscSubcommView(PetscSubcomm psubcomm,PetscViewer viewer)
54 {
55   PetscErrorCode    ierr;
56   PetscBool         iascii;
57   PetscViewerFormat format;
58 
59   PetscFunctionBegin;
60   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
61   if (iascii) {
62     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
63     if (format == PETSC_VIEWER_DEFAULT) {
64       MPI_Comm    comm=psubcomm->parent;
65       PetscMPIInt rank,size,subsize,subrank,duprank;
66 
67       ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
68       ierr = PetscViewerASCIIPrintf(viewer,"PetscSubcomm type %s with total %d MPI processes:\n",PetscSubcommTypes[psubcomm->type],size);CHKERRQ(ierr);
69       ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
70       ierr = MPI_Comm_size(psubcomm->comm,&subsize);CHKERRQ(ierr);
71       ierr = MPI_Comm_rank(psubcomm->comm,&subrank);CHKERRQ(ierr);
72       ierr = MPI_Comm_rank(psubcomm->dupparent,&duprank);CHKERRQ(ierr);
73       ierr = PetscSynchronizedPrintf(comm,"  [%d], color %d, sub-size %d, sub-rank %d, duprank %d\n",rank,psubcomm->color,subsize,subrank,duprank);
74       ierr = PetscSynchronizedFlush(comm,PETSC_STDOUT);CHKERRQ(ierr);
75     }
76   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Not supported yet");
77   PetscFunctionReturn(0);
78 }
79 
80 #undef __FUNCT__
81 #define __FUNCT__ "PetscSubcommSetNumber"
82 /*@C
83   PetscSubcommSetNumber - Set total number of subcommunicators.
84 
85    Collective on MPI_Comm
86 
87    Input Parameter:
88 +  psubcomm - PetscSubcomm context
89 -  nsubcomm - the total number of subcommunicators in psubcomm
90 
91    Level: advanced
92 
93 .keywords: communicator
94 
95 .seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetType(),PetscSubcommSetTypeGeneral()
96 @*/
97 PetscErrorCode  PetscSubcommSetNumber(PetscSubcomm psubcomm,PetscInt nsubcomm)
98 {
99   PetscErrorCode ierr;
100   MPI_Comm       comm=psubcomm->parent;
101   PetscMPIInt    msub,size;
102 
103   PetscFunctionBegin;
104   if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate() first");
105   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
106   ierr = PetscMPIIntCast(nsubcomm,&msub);CHKERRQ(ierr);
107   if (msub < 1 || msub > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE, "Num of subcommunicators %d cannot be < 1 or > input comm size %d",msub,size);
108 
109   psubcomm->n = msub;
110   PetscFunctionReturn(0);
111 }
112 
113 #undef __FUNCT__
114 #define __FUNCT__ "PetscSubcommSetType"
115 /*@C
116   PetscSubcommSetType - Set type of subcommunicators.
117 
118    Collective on MPI_Comm
119 
120    Input Parameter:
121 +  psubcomm - PetscSubcomm context
122 -  subcommtype - subcommunicator type, PETSC_SUBCOMM_CONTIGUOUS,PETSC_SUBCOMM_INTERLACED
123 
124    Level: advanced
125 
126 .keywords: communicator
127 
128 .seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetTypeGeneral()
129 @*/
130 PetscErrorCode  PetscSubcommSetType(PetscSubcomm psubcomm,PetscSubcommType subcommtype)
131 {
132   PetscErrorCode ierr;
133 
134   PetscFunctionBegin;
135   if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
136   if (psubcomm->n < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",psubcomm->n);
137 
138   if (subcommtype == PETSC_SUBCOMM_CONTIGUOUS) {
139     ierr = PetscSubcommCreate_contiguous(psubcomm);CHKERRQ(ierr);
140   } else if (subcommtype == PETSC_SUBCOMM_INTERLACED) {
141     ierr = PetscSubcommCreate_interlaced(psubcomm);CHKERRQ(ierr);
142   } else SETERRQ1(psubcomm->parent,PETSC_ERR_SUP,"PetscSubcommType %s is not supported yet",PetscSubcommTypes[subcommtype]);
143   PetscFunctionReturn(0);
144 }
145 
146 #undef __FUNCT__
147 #define __FUNCT__ "PetscSubcommSetTypeGeneral"
148 /*@C
149   PetscSubcommSetTypeGeneral - Set a PetscSubcomm from user's specifications
150 
151    Collective on MPI_Comm
152 
153    Input Parameter:
154 +  psubcomm - PetscSubcomm context
155 .  color   - control of subset assignment (nonnegative integer). Processes with the same color are in the same subcommunicator.
156 -  subrank - rank in the subcommunicator
157 
158    Level: advanced
159 
160 .keywords: communicator, create
161 
162 .seealso: PetscSubcommCreate(),PetscSubcommDestroy(),PetscSubcommSetNumber(),PetscSubcommSetType()
163 @*/
164 PetscErrorCode PetscSubcommSetTypeGeneral(PetscSubcomm psubcomm,PetscMPIInt color,PetscMPIInt subrank)
165 {
166   PetscErrorCode ierr;
167   MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;
168   PetscMPIInt    size,icolor,duprank,*recvbuf,sendbuf[3],mysubsize,rank,*subsize;
169   PetscMPIInt    i,nsubcomm=psubcomm->n;
170 
171   PetscFunctionBegin;
172   if (!psubcomm) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"PetscSubcomm is not created. Call PetscSubcommCreate()");
173   if (nsubcomm < 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subcommunicators %d is incorrect. Call PetscSubcommSetNumber()",nsubcomm);
174 
175   ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);
176 
177   /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
178   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
179   ierr = PetscMalloc1(2*size,&recvbuf);CHKERRQ(ierr);
180 
181   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
182   ierr = MPI_Comm_size(subcomm,&mysubsize);CHKERRQ(ierr);
183 
184   sendbuf[0] = color;
185   sendbuf[1] = mysubsize;
186   ierr = MPI_Allgather(sendbuf,2,MPI_INT,recvbuf,2,MPI_INT,comm);CHKERRQ(ierr);
187 
188   ierr = PetscMalloc1(nsubcomm,&subsize);CHKERRQ(ierr);
189   for (i=0; i<2*size; i+=2) {
190     subsize[recvbuf[i]] = recvbuf[i+1];
191   }
192   ierr = PetscFree(recvbuf);CHKERRQ(ierr);
193 
194   duprank = 0;
195   for (icolor=0; icolor<nsubcomm; icolor++) {
196     if (icolor != color) { /* not color of this process */
197       duprank += subsize[icolor];
198     } else {
199       duprank += subrank;
200       break;
201     }
202   }
203   ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
204 
205   ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
206   ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
207   ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
208   ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);
209 
210   psubcomm->color   = color;
211   psubcomm->subsize = subsize;
212   psubcomm->type    = PETSC_SUBCOMM_GENERAL;
213   PetscFunctionReturn(0);
214 }
215 
216 #undef __FUNCT__
217 #define __FUNCT__ "PetscSubcommDestroy"
218 PetscErrorCode  PetscSubcommDestroy(PetscSubcomm *psubcomm)
219 {
220   PetscErrorCode ierr;
221 
222   PetscFunctionBegin;
223   if (!*psubcomm) PetscFunctionReturn(0);
224   ierr = PetscCommDestroy(&(*psubcomm)->dupparent);CHKERRQ(ierr);
225   ierr = PetscCommDestroy(&(*psubcomm)->comm);CHKERRQ(ierr);
226   ierr = PetscFree((*psubcomm)->subsize);CHKERRQ(ierr);
227   ierr = PetscFree((*psubcomm));CHKERRQ(ierr);
228   PetscFunctionReturn(0);
229 }
230 
231 #undef __FUNCT__
232 #define __FUNCT__ "PetscSubcommCreate"
233 /*@C
234   PetscSubcommCreate - Create a PetscSubcomm context.
235 
236    Collective on MPI_Comm
237 
238    Input Parameter:
239 .  comm - MPI communicator
240 
241    Output Parameter:
242 .  psubcomm - location to store the PetscSubcomm context
243 
244    Level: advanced
245 
246 .keywords: communicator, create
247 
248 .seealso: PetscSubcommDestroy()
249 @*/
250 PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscSubcomm *psubcomm)
251 {
252   PetscErrorCode ierr;
253   PetscMPIInt    rank,size;
254 
255   PetscFunctionBegin;
256   ierr = PetscNew(psubcomm);CHKERRQ(ierr);
257 
258   /* set defaults */
259   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
260   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
261 
262   (*psubcomm)->parent    = comm;
263   (*psubcomm)->dupparent = comm;
264   (*psubcomm)->comm      = PETSC_COMM_SELF;
265   (*psubcomm)->n         = size;
266   (*psubcomm)->color     = rank;
267   (*psubcomm)->subsize   = NULL;
268   (*psubcomm)->type      = PETSC_SUBCOMM_INTERLACED;
269   PetscFunctionReturn(0);
270 }
271 
#undef __FUNCT__
#define __FUNCT__ "PetscSubcommCreate_contiguous"
/*
   PetscSubcommCreate_contiguous - Splits psubcomm->parent into psubcomm->n
   subcommunicators of contiguous rank blocks: ranks [0..s0-1] form subcomm 0,
   the next s1 ranks form subcomm 1, and so on. With this layout duprank == rank,
   so dupcomm preserves the parent ordering.
*/
PetscErrorCode PetscSubcommCreate_contiguous(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank=-1,subrank=-1;
  PetscMPIInt    np_subcomm,nleftover,i,color=-1,rankstart,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc1(1+nsubcomm,&subsize);CHKERRQ(ierr);

  /* distribute size ranks as evenly as possible: the first nleftover subcomms get one extra */
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* get color and subrank of this proc: walk the contiguous blocks until rank falls inside one */
  rankstart = 0;
  for (i=0; i<nsubcomm; i++) {
    if (rank >= rankstart && rank < rankstart+subsize[i]) {
      color   = i;
      subrank = rank - rankstart;
      duprank = rank;  /* contiguous layout already matches the parent ordering */
      break;
    } else rankstart += subsize[i];
  }

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    /* attach the parent's thread communicator to both new comms, bumping its
       reference count once per attachment so later comm destruction balances it */
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  /* keep PETSc-managed duplicates and release the raw MPI communicators */
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_CONTIGUOUS;
  PetscFunctionReturn(0);
}
328 
#undef __FUNCT__
#define __FUNCT__ "PetscSubcommCreate_interlaced"
/*
   PetscSubcommCreate_interlaced - Splits psubcomm->parent into psubcomm->n
   subcommunicators by dealing ranks round-robin: rank r gets color r%nsubcomm
   and subrank r/nsubcomm. dupcomm then renumbers processes so each
   subcommunicator occupies a contiguous block of duprank values.

   Note:
   In PCREDUNDANT, to avoid data scattering from subcomm back to original comm, we create subcommunicators
   by iteratively taking a process into a subcommunicator.
   Example: size=4, nsubcomm=(*psubcomm)->n=3
     comm=(*psubcomm)->parent:
      rank:     [0]  [1]  [2]  [3]
      color:     0    1    2    0

     subcomm=(*psubcomm)->comm:
      subrank:  [0]  [0]  [0]  [1]

     dupcomm=(*psubcomm)->dupparent:
      duprank:  [0]  [2]  [3]  [1]

     Here, subcomm[color = 0] has subsize=2, owns process [0] and [3]
           subcomm[color = 1] has subsize=1, owns process [1]
           subcomm[color = 2] has subsize=1, owns process [2]
           dupcomm has same number of processes as comm, and its duprank maps
           processes in subcomm contiguously into a 1d array:
            duprank: [0] [1]      [2]         [3]
            rank:    [0] [3]      [1]         [2]
                    subcomm[0] subcomm[1]  subcomm[2]
*/

PetscErrorCode PetscSubcommCreate_interlaced(PetscSubcomm psubcomm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,*subsize,duprank,subrank;
  PetscMPIInt    np_subcomm,nleftover,i,j,color,nsubcomm=psubcomm->n;
  MPI_Comm       subcomm=0,dupcomm=0,comm=psubcomm->parent;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  /* get size of each subcommunicator */
  ierr = PetscMalloc1(1+nsubcomm,&subsize);CHKERRQ(ierr);

  /* distribute size ranks as evenly as possible: the first nleftover subcomms get one extra */
  np_subcomm = size/nsubcomm;
  nleftover  = size - nsubcomm*np_subcomm;
  for (i=0; i<nsubcomm; i++) {
    subsize[i] = np_subcomm;
    if (i<nleftover) subsize[i]++;
  }

  /* find color for this proc: round-robin assignment of ranks to subcomms */
  color   = rank%nsubcomm;
  subrank = rank/nsubcomm;

  ierr = MPI_Comm_split(comm,color,subrank,&subcomm);CHKERRQ(ierr);

  /* duprank = (sizes of all subcomms before ours) + our position within ours,
     giving each subcomm a contiguous block of duprank values (j tracks i here) */
  j = 0; duprank = 0;
  for (i=0; i<nsubcomm; i++) {
    if (j == color) {
      duprank += subrank;
      break;
    }
    duprank += subsize[i]; j++;
  }

  /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
  ierr = MPI_Comm_split(comm,0,duprank,&dupcomm);CHKERRQ(ierr);
  {
    /* attach the parent's thread communicator to both new comms, bumping its
       reference count once per attachment so later comm destruction balances it */
    PetscThreadComm tcomm;
    ierr = PetscCommGetThreadComm(comm,&tcomm);CHKERRQ(ierr);
    ierr = MPI_Attr_put(dupcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
    ierr = MPI_Attr_put(subcomm,Petsc_ThreadComm_keyval,tcomm);CHKERRQ(ierr);
    tcomm->refct++;
  }
  /* keep PETSc-managed duplicates and release the raw MPI communicators */
  ierr = PetscCommDuplicate(dupcomm,&psubcomm->dupparent,NULL);CHKERRQ(ierr);
  ierr = PetscCommDuplicate(subcomm,&psubcomm->comm,NULL);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&dupcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_free(&subcomm);CHKERRQ(ierr);

  psubcomm->color   = color;
  psubcomm->subsize = subsize;
  psubcomm->type    = PETSC_SUBCOMM_INTERLACED;
  PetscFunctionReturn(0);
}
412 
413 
414