xref: /petsc/src/sys/utils/mpitr.c (revision d71ae5a4db6382e7f06317b8d368875286fe9008)
1 
2 /*
3     Code for tracing mistakes in MPI usage. For example, sends that are never received,
4   nonblocking messages that are not correctly waited for, etc.
5 */
6 
7 #include <petscsys.h> /*I "petscsys.h" I*/
8 
9 #if defined(PETSC_USE_LOG) && !defined(PETSC_HAVE_MPIUNI)
10 
11 /*@C
12    PetscMPIDump - Dumps a listing of incomplete MPI operations, such as sends that
13    have never been received, etc.
14 
15    Collective on `PETSC_COMM_WORLD`
16 
17    Input Parameter:
.  fd - file pointer.  If fd is NULL, stdout is assumed.
19 
20    Options Database Key:
21 .  -mpidump - Dumps MPI incompleteness during call to PetscFinalize()
22 
23     Level: developer
24 
25 .seealso: `PetscMallocDump()`
26  @*/
27 PetscErrorCode PetscMPIDump(FILE *fd)
28 {
29   PetscMPIInt rank;
30   double      tsends, trecvs, work;
31   int         err;
32 
33   PetscFunctionBegin;
34   PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
35   if (!fd) fd = PETSC_STDOUT;
36 
37   /* Did we wait on all the non-blocking sends and receives? */
38   PetscCall(PetscSequentialPhaseBegin(PETSC_COMM_WORLD, 1));
39   if (petsc_irecv_ct + petsc_isend_ct != petsc_sum_of_waits_ct) {
40     PetscCall(PetscFPrintf(PETSC_COMM_SELF, fd, "[%d]You have not waited on all non-blocking sends and receives", rank));
41     PetscCall(PetscFPrintf(PETSC_COMM_SELF, fd, "[%d]Number non-blocking sends %g receives %g number of waits %g\n", rank, petsc_isend_ct, petsc_irecv_ct, petsc_sum_of_waits_ct));
42     err = fflush(fd);
43     PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fflush() failed on file");
44   }
45   PetscCall(PetscSequentialPhaseEnd(PETSC_COMM_WORLD, 1));
46   /* Did we receive all the messages that we sent? */
47   work = petsc_irecv_ct + petsc_recv_ct;
48   PetscCallMPI(MPI_Reduce(&work, &trecvs, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
49   work = petsc_isend_ct + petsc_send_ct;
50   PetscCallMPI(MPI_Reduce(&work, &tsends, 1, MPI_DOUBLE, MPI_SUM, 0, PETSC_COMM_WORLD));
51   if (rank == 0 && tsends != trecvs) {
52     PetscCall(PetscFPrintf(PETSC_COMM_SELF, fd, "Total number sends %g not equal receives %g\n", tsends, trecvs));
53     err = fflush(fd);
54     PetscCheck(!err, PETSC_COMM_SELF, PETSC_ERR_SYS, "fflush() failed on file");
55   }
56   PetscFunctionReturn(0);
57 }
58 
59 #else
60 
61 PetscErrorCode PetscMPIDump(FILE *fd)
62 {
63   PetscFunctionBegin;
64   PetscFunctionReturn(0);
65 }
66 
67 #endif
68 
69 #if defined(PETSC_HAVE_MPI_PROCESS_SHARED_MEMORY)
/*
    The Open MPI implementation of MPI_Win_allocate_shared() does not provide __float128 alignment, so we provide
    a utility that ensures alignment up to the data item size.
*/
/*
   MPIU_Win_allocate_shared - wrapper around MPI_Win_allocate_shared() that over-allocates and then
   bumps the returned base pointer up to a multiple of the datatype size szind.

   Input Parameters:
     sz    - number of bytes requested by the caller
     szind - size (in bytes) of one data item; the returned pointer is aligned to this
     info  - MPI info object passed through
     comm  - communicator over which the shared window is created

   Output Parameters:
     ptr - address of a void* that receives the aligned base pointer
     win - the created MPI window

   NOTE(review): the pointer-bump arithmetic works in units of float (4 bytes), so it assumes
   szind is a multiple of 4 and that the base pointer is at least 4-byte aligned.
   NOTE(review): only 16 extra bytes are allocated, which covers the worst-case shift only
   when szind <= 16 (e.g. __float128); for larger szind the shifted region could exceed the
   allocation -- TODO confirm callers never pass szind > 16.
*/
PetscErrorCode MPIU_Win_allocate_shared(MPI_Aint sz, PetscMPIInt szind, MPI_Info info, MPI_Comm comm, void *ptr, MPI_Win *win)
{
  float *tmp;

  PetscFunctionBegin;
  /* allocate 16 bytes beyond the request so the base can be shifted forward for alignment */
  PetscCallMPI(MPI_Win_allocate_shared(16 + sz, szind, info, comm, &tmp, win));
  /* advance tmp (in float units) to the next szind-byte boundary, if it is not already on one */
  tmp += ((size_t)tmp) % szind ? szind / 4 - ((((size_t)tmp) % szind) / 4) : 0;
  *(void **)ptr = (void *)tmp;
  PetscFunctionReturn(0);
}
84 
85 PETSC_EXTERN PetscErrorCode MPIU_Win_shared_query(MPI_Win win, PetscMPIInt rank, MPI_Aint *sz, PetscMPIInt *szind, void *ptr)
86 {
87   float *tmp;
88 
89   PetscFunctionBegin;
90   PetscCallMPI(MPI_Win_shared_query(win, rank, sz, szind, &tmp));
91   PetscCheck(*szind > 0, PETSC_COMM_SELF, PETSC_ERR_LIB, "szkind %d must be positive", *szind);
92   tmp += ((size_t)tmp) % *szind ? *szind / 4 - ((((size_t)tmp) % *szind) / 4) : 0;
93   *(void **)ptr = (void *)tmp;
94   PetscFunctionReturn(0);
95 }
96 
97 #endif
98