xref: /petsc/src/sys/utils/server.c (revision 9f0612e409f6220a780be6348417bea34ef34962)
1*9f0612e4SBarry Smith /*
2*9f0612e4SBarry Smith     Code for allocating Unix shared memory on MPI rank 0 and later accessing it from other MPI processes
3*9f0612e4SBarry Smith */
4*9f0612e4SBarry Smith #include <petscsys.h>
5*9f0612e4SBarry Smith 
6*9f0612e4SBarry Smith PetscBool PCMPIServerActive    = PETSC_FALSE; // PETSc is running in server mode
PetscBool PCMPIServerInSolve   = PETSC_FALSE; // A parallel server solve is occurring
8*9f0612e4SBarry Smith PetscBool PCMPIServerUseShmget = PETSC_TRUE;  // Use Unix shared memory for distributing objects
9*9f0612e4SBarry Smith 
10*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
11*9f0612e4SBarry Smith   #include <sys/shm.h>
12*9f0612e4SBarry Smith   #include <sys/mman.h>
13*9f0612e4SBarry Smith   #include <errno.h>
14*9f0612e4SBarry Smith 
15*9f0612e4SBarry Smith typedef struct _PetscShmgetAllocation *PetscShmgetAllocation;
16*9f0612e4SBarry Smith struct _PetscShmgetAllocation {
17*9f0612e4SBarry Smith   void                 *addr; // address on this process; points to same physical address on all processes
18*9f0612e4SBarry Smith   int                   shmkey, shmid;
19*9f0612e4SBarry Smith   size_t                sz;
20*9f0612e4SBarry Smith   PetscShmgetAllocation next;
21*9f0612e4SBarry Smith };
22*9f0612e4SBarry Smith static PetscShmgetAllocation allocations = NULL;
23*9f0612e4SBarry Smith 
24*9f0612e4SBarry Smith typedef struct {
25*9f0612e4SBarry Smith   size_t shmkey[3];
26*9f0612e4SBarry Smith   size_t sz[3];
27*9f0612e4SBarry Smith } BcastInfo;
28*9f0612e4SBarry Smith 
29*9f0612e4SBarry Smith #endif
30*9f0612e4SBarry Smith 
31*9f0612e4SBarry Smith /*@C
32*9f0612e4SBarry Smith   PetscShmgetAddressesFinalize - frees any shared memory that was allocated by `PetscShmgetAllocateArray()` but
33*9f0612e4SBarry Smith   not deallocated with `PetscShmgetDeallocateArray()`
34*9f0612e4SBarry Smith 
35*9f0612e4SBarry Smith   Level: developer
36*9f0612e4SBarry Smith 
37*9f0612e4SBarry Smith   Notes:
38*9f0612e4SBarry Smith   This prevents any shared memory allocated, but not deallocated, from remaining on the system and preventing
39*9f0612e4SBarry Smith   its future use.
40*9f0612e4SBarry Smith 
41*9f0612e4SBarry Smith   If the program crashes outstanding shared memory allocations may remain.
42*9f0612e4SBarry Smith 
43*9f0612e4SBarry Smith .seealso: `PetscShmgetAllocateArray()`, `PetscShmgetDeallocateArray()`, `PetscShmgetUnmapAddresses()`
44*9f0612e4SBarry Smith @*/
45*9f0612e4SBarry Smith PetscErrorCode PetscShmgetAddressesFinalize(void)
46*9f0612e4SBarry Smith {
47*9f0612e4SBarry Smith   PetscFunctionBegin;
48*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
49*9f0612e4SBarry Smith   PetscShmgetAllocation next = allocations, previous = NULL;
50*9f0612e4SBarry Smith 
51*9f0612e4SBarry Smith   while (next) {
52*9f0612e4SBarry Smith     PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory key %d shmid %d %s", next->shmkey, next->shmid, strerror(errno));
53*9f0612e4SBarry Smith     previous = next;
54*9f0612e4SBarry Smith     next     = next->next;
55*9f0612e4SBarry Smith     PetscCall(PetscFree(previous));
56*9f0612e4SBarry Smith   }
57*9f0612e4SBarry Smith #endif
58*9f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
59*9f0612e4SBarry Smith }
60*9f0612e4SBarry Smith 
61*9f0612e4SBarry Smith /* takes a void so can work bsan safe with PetscObjectContainerCompose() */
PetscErrorCode PCMPIServerAddressesDestroy(void *ctx)
{
  // ctx is a PCMPIServerAddresses created for the MPI linear solver server
  PCMPIServerAddresses *addresses = (PCMPIServerAddresses *)ctx;

  PetscFunctionBegin;
#if defined(PETSC_HAVE_SHMGET)
  // detach this process from each shared-memory address, then free the container itself
  PetscCall(PetscShmgetUnmapAddresses(addresses->n, addresses->addr));
  PetscCall(PetscFree(addresses));
#endif
  // NOTE(review): when PETSC_HAVE_SHMGET is not defined, ctx is never freed here and
  // `addresses` is unused (possible leak / unused-variable warning) -- confirm who owns
  // the container in that configuration
  PetscFunctionReturn(PETSC_SUCCESS);
}
73*9f0612e4SBarry Smith 
74*9f0612e4SBarry Smith /*@C
75*9f0612e4SBarry Smith   PetscShmgetMapAddresses - given shared address on the first MPI process determines the
76*9f0612e4SBarry Smith   addresses on the other MPI processes that map to the same physical memory
77*9f0612e4SBarry Smith 
78*9f0612e4SBarry Smith   Input Parameters:
79*9f0612e4SBarry Smith + comm       - the `MPI_Comm` to scatter the address
80*9f0612e4SBarry Smith . n          - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
81*9f0612e4SBarry Smith - baseaddres - the addresses on the first MPI process, ignored on all but first process
82*9f0612e4SBarry Smith 
83*9f0612e4SBarry Smith   Output Parameter:
84*9f0612e4SBarry Smith . addres - the addresses on each MPI process, the array of void * must already be allocated
85*9f0612e4SBarry Smith 
86*9f0612e4SBarry Smith   Level: developer
87*9f0612e4SBarry Smith 
88*9f0612e4SBarry Smith   Note:
89*9f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
90*9f0612e4SBarry Smith 
91*9f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetUnmapAddresses()`
92*9f0612e4SBarry Smith @*/
93*9f0612e4SBarry Smith PetscErrorCode PetscShmgetMapAddresses(MPI_Comm comm, PetscInt n, const void **baseaddres, void **addres)
94*9f0612e4SBarry Smith {
95*9f0612e4SBarry Smith   PetscFunctionBegin;
96*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
97*9f0612e4SBarry Smith   if (PetscGlobalRank == 0) {
98*9f0612e4SBarry Smith     BcastInfo bcastinfo = {
99*9f0612e4SBarry Smith       {0, 0, 0},
100*9f0612e4SBarry Smith       {0, 0, 0}
101*9f0612e4SBarry Smith     };
102*9f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
103*9f0612e4SBarry Smith       PetscShmgetAllocation allocation = allocations;
104*9f0612e4SBarry Smith 
105*9f0612e4SBarry Smith       while (allocation) {
106*9f0612e4SBarry Smith         if (allocation->addr == baseaddres[i]) {
107*9f0612e4SBarry Smith           bcastinfo.shmkey[i] = allocation->shmkey;
108*9f0612e4SBarry Smith           bcastinfo.sz[i]     = allocation->sz;
109*9f0612e4SBarry Smith           addres[i]           = (void *)baseaddres[i];
110*9f0612e4SBarry Smith           break;
111*9f0612e4SBarry Smith         }
112*9f0612e4SBarry Smith         allocation = allocation->next;
113*9f0612e4SBarry Smith       }
114*9f0612e4SBarry Smith       PetscCheck(allocation, comm, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared address %p", baseaddres[i]);
115*9f0612e4SBarry Smith     }
116*9f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Mapping PCMPI Server array %p\n", addres[0]));
117*9f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
118*9f0612e4SBarry Smith   } else {
119*9f0612e4SBarry Smith     BcastInfo bcastinfo = {
120*9f0612e4SBarry Smith       {0, 0, 0},
121*9f0612e4SBarry Smith       {0, 0, 0}
122*9f0612e4SBarry Smith     };
123*9f0612e4SBarry Smith     int    shmkey = 0;
124*9f0612e4SBarry Smith     size_t sz     = 0;
125*9f0612e4SBarry Smith 
126*9f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
127*9f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
128*9f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
129*9f0612e4SBarry Smith 
130*9f0612e4SBarry Smith       shmkey = (int)bcastinfo.shmkey[i];
131*9f0612e4SBarry Smith       sz     = bcastinfo.sz[i];
132*9f0612e4SBarry Smith       while (next) {
133*9f0612e4SBarry Smith         if (next->shmkey == shmkey) addres[i] = (void *)next->addr;
134*9f0612e4SBarry Smith         previous = next;
135*9f0612e4SBarry Smith         next     = next->next;
136*9f0612e4SBarry Smith       }
137*9f0612e4SBarry Smith       if (!next) {
138*9f0612e4SBarry Smith         PetscShmgetAllocation allocation;
139*9f0612e4SBarry Smith         PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
140*9f0612e4SBarry Smith         allocation->shmkey = shmkey;
141*9f0612e4SBarry Smith         allocation->sz     = sz;
142*9f0612e4SBarry Smith         allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666);
143*9f0612e4SBarry Smith         PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d of size %d", allocation->shmkey, (int)allocation->sz);
144*9f0612e4SBarry Smith         allocation->addr = shmat(allocation->shmid, (void *)0, 0);
145*9f0612e4SBarry Smith         PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d", allocation->shmkey);
146*9f0612e4SBarry Smith         addres[i] = allocation->addr;
147*9f0612e4SBarry Smith         if (previous) previous->next = allocation;
148*9f0612e4SBarry Smith         else allocations = allocation;
149*9f0612e4SBarry Smith       }
150*9f0612e4SBarry Smith     }
151*9f0612e4SBarry Smith   }
152*9f0612e4SBarry Smith #endif
153*9f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
154*9f0612e4SBarry Smith }
155*9f0612e4SBarry Smith 
156*9f0612e4SBarry Smith /*@C
  PetscShmgetUnmapAddresses - given shared addresses on an MPI process, unmaps them
158*9f0612e4SBarry Smith 
159*9f0612e4SBarry Smith   Input Parameters:
160*9f0612e4SBarry Smith + n      - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
161*9f0612e4SBarry Smith - addres - the addresses
162*9f0612e4SBarry Smith 
163*9f0612e4SBarry Smith   Level: developer
164*9f0612e4SBarry Smith 
165*9f0612e4SBarry Smith   Note:
166*9f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
167*9f0612e4SBarry Smith 
168*9f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetMapAddresses()`
169*9f0612e4SBarry Smith @*/
170*9f0612e4SBarry Smith PetscErrorCode PetscShmgetUnmapAddresses(PetscInt n, void **addres)
171*9f0612e4SBarry Smith {
172*9f0612e4SBarry Smith   PetscFunctionBegin;
173*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
174*9f0612e4SBarry Smith   if (PetscGlobalRank > 0) {
175*9f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
176*9f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
177*9f0612e4SBarry Smith       PetscBool             found = PETSC_FALSE;
178*9f0612e4SBarry Smith 
179*9f0612e4SBarry Smith       while (next) {
180*9f0612e4SBarry Smith         if (next->addr == addres[i]) {
181*9f0612e4SBarry Smith           PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s", strerror(errno));
182*9f0612e4SBarry Smith           if (previous) previous->next = next->next;
183*9f0612e4SBarry Smith           else allocations = next->next;
184*9f0612e4SBarry Smith           PetscCall(PetscFree(next));
185*9f0612e4SBarry Smith           found = PETSC_TRUE;
186*9f0612e4SBarry Smith           break;
187*9f0612e4SBarry Smith         }
188*9f0612e4SBarry Smith         previous = next;
189*9f0612e4SBarry Smith         next     = next->next;
190*9f0612e4SBarry Smith       }
191*9f0612e4SBarry Smith       PetscCheck(found, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to find address %p to unmap", addres[i]);
192*9f0612e4SBarry Smith     }
193*9f0612e4SBarry Smith   }
194*9f0612e4SBarry Smith #endif
195*9f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
196*9f0612e4SBarry Smith }
197*9f0612e4SBarry Smith 
198*9f0612e4SBarry Smith /*@C
  PetscShmgetAllocateArray - allocates shared memory accessible by all MPI processes in the server
200*9f0612e4SBarry Smith 
201*9f0612e4SBarry Smith   Not Collective, only called on the first MPI process
202*9f0612e4SBarry Smith 
203*9f0612e4SBarry Smith   Input Parameters:
204*9f0612e4SBarry Smith + sz  - the number of elements in the array
205*9f0612e4SBarry Smith - asz - the size of an entry in the array, for example `sizeof(PetscScalar)`
206*9f0612e4SBarry Smith 
207*9f0612e4SBarry Smith   Output Parameters:
208*9f0612e4SBarry Smith . addr - the address of the array
209*9f0612e4SBarry Smith 
210*9f0612e4SBarry Smith   Level: developer
211*9f0612e4SBarry Smith 
212*9f0612e4SBarry Smith   Notes:
213*9f0612e4SBarry Smith   Uses `PetscMalloc()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
214*9f0612e4SBarry Smith 
215*9f0612e4SBarry Smith   Sometimes when a program crashes, shared memory IDs may remain, making it impossible to rerun the program.
216*9f0612e4SBarry Smith   Use $PETSC_DIR/lib/petsc/bin/petscfreesharedmemory to free that memory
217*9f0612e4SBarry Smith 
218*9f0612e4SBarry Smith   Use the Unix command `ipcs -m` to see what memory IDs are currently allocated and `ipcrm -m ID` to remove a memory ID
219*9f0612e4SBarry Smith 
220*9f0612e4SBarry Smith   Use the Unix command `ipcrm --all` or `for i in $(ipcs -m | tail -$(expr $(ipcs -m | wc -l) - 3) | tr -s ' ' | cut -d" " -f3); do ipcrm -M $i; done`
221*9f0612e4SBarry Smith   to delete all the currently allocated memory IDs.
222*9f0612e4SBarry Smith 
223*9f0612e4SBarry Smith   Under Apple macOS the following file must be copied to /Library/LaunchDaemons/sharedmemory.plist and the machine rebooted before using shared memory
224*9f0612e4SBarry Smith .vb
225*9f0612e4SBarry Smith <?xml version="1.0" encoding="UTF-8"?>
226*9f0612e4SBarry Smith <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
227*9f0612e4SBarry Smith <plist version="1.0">
228*9f0612e4SBarry Smith <dict>
229*9f0612e4SBarry Smith  <key>Label</key>
230*9f0612e4SBarry Smith  <string>shmemsetup</string>
231*9f0612e4SBarry Smith  <key>UserName</key>
232*9f0612e4SBarry Smith  <string>root</string>
233*9f0612e4SBarry Smith  <key>GroupName</key>
234*9f0612e4SBarry Smith  <string>wheel</string>
235*9f0612e4SBarry Smith  <key>ProgramArguments</key>
236*9f0612e4SBarry Smith  <array>
237*9f0612e4SBarry Smith  <string>/usr/sbin/sysctl</string>
238*9f0612e4SBarry Smith  <string>-w</string>
239*9f0612e4SBarry Smith  <string>kern.sysv.shmmax=4194304000</string>
240*9f0612e4SBarry Smith  <string>kern.sysv.shmmni=2064</string>
241*9f0612e4SBarry Smith  <string>kern.sysv.shmseg=2064</string>
242*9f0612e4SBarry Smith  <string>kern.sysv.shmall=131072000</string>
243*9f0612e4SBarry Smith   </array>
244*9f0612e4SBarry Smith  <key>KeepAlive</key>
245*9f0612e4SBarry Smith  <false/>
246*9f0612e4SBarry Smith  <key>RunAtLoad</key>
247*9f0612e4SBarry Smith  <true/>
248*9f0612e4SBarry Smith </dict>
249*9f0612e4SBarry Smith </plist>
250*9f0612e4SBarry Smith .ve
251*9f0612e4SBarry Smith 
252*9f0612e4SBarry Smith   Fortran Note:
253*9f0612e4SBarry Smith   The calling sequence is `PetscShmgetAllocateArray[Scalar,Int](PetscInt start, PetscInt len, Petsc[Scalar,Int], pointer :: d1(:), ierr)`
254*9f0612e4SBarry Smith 
255*9f0612e4SBarry Smith   Developer Note:
256*9f0612e4SBarry Smith   More specifically this uses `PetscMalloc()` if `!PCMPIServerUseShmget` || `!PCMPIServerActive` || `PCMPIServerInSolve`
257*9f0612e4SBarry Smith   where `PCMPIServerInSolve` indicates that the solve is nested inside a MPI linear solver server solve and hence should
258*9f0612e4SBarry Smith   not allocate the vector and matrix memory in shared memory.
259*9f0612e4SBarry Smith 
260*9f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetDeallocateArray()`
261*9f0612e4SBarry Smith @*/
262*9f0612e4SBarry Smith PetscErrorCode PetscShmgetAllocateArray(size_t sz, size_t asz, void **addr)
263*9f0612e4SBarry Smith {
264*9f0612e4SBarry Smith   PetscFunctionBegin;
265*9f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscMalloc(sz * asz, addr));
266*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
267*9f0612e4SBarry Smith   else {
268*9f0612e4SBarry Smith     PetscShmgetAllocation allocation;
269*9f0612e4SBarry Smith     static int            shmkeys = 10;
270*9f0612e4SBarry Smith 
271*9f0612e4SBarry Smith     PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
272*9f0612e4SBarry Smith     allocation->shmkey = shmkeys++;
273*9f0612e4SBarry Smith     allocation->sz     = sz * asz;
274*9f0612e4SBarry Smith     allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666 | IPC_CREAT);
275*9f0612e4SBarry Smith     PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to schmget() of size %d with key %d %s see PetscShmgetAllocateArray()", (int)allocation->sz, allocation->shmkey, strerror(errno));
276*9f0612e4SBarry Smith     allocation->addr = shmat(allocation->shmid, (void *)0, 0);
277*9f0612e4SBarry Smith     PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to shmat() of shmid %d %s", (int)allocation->shmid, strerror(errno));
278*9f0612e4SBarry Smith   #if PETSC_SIZEOF_VOID_P == 8
279*9f0612e4SBarry Smith     PetscCheck((uint64_t)allocation->addr != 0xffffffffffffffff, PETSC_COMM_SELF, PETSC_ERR_LIB, "shmat() of shmid %d returned 0xffffffffffffffff %s", (int)allocation->shmid, strerror(errno));
280*9f0612e4SBarry Smith   #endif
281*9f0612e4SBarry Smith 
282*9f0612e4SBarry Smith     if (!allocations) allocations = allocation;
283*9f0612e4SBarry Smith     else {
284*9f0612e4SBarry Smith       PetscShmgetAllocation next = allocations;
285*9f0612e4SBarry Smith       while (next->next) next = next->next;
286*9f0612e4SBarry Smith       next->next = allocation;
287*9f0612e4SBarry Smith     }
288*9f0612e4SBarry Smith     *addr = allocation->addr;
289*9f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Allocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, allocation->shmkey, allocation->shmid, (int)allocation->sz));
290*9f0612e4SBarry Smith   }
291*9f0612e4SBarry Smith #endif
292*9f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
293*9f0612e4SBarry Smith }
294*9f0612e4SBarry Smith 
295*9f0612e4SBarry Smith /*@C
  PetscShmgetDeallocateArray - deallocates shared memory accessible by all MPI processes in the server
297*9f0612e4SBarry Smith 
298*9f0612e4SBarry Smith   Not Collective, only called on the first MPI process
299*9f0612e4SBarry Smith 
300*9f0612e4SBarry Smith   Input Parameter:
301*9f0612e4SBarry Smith . addr - the address of array
302*9f0612e4SBarry Smith 
303*9f0612e4SBarry Smith   Level: developer
304*9f0612e4SBarry Smith 
305*9f0612e4SBarry Smith   Note:
306*9f0612e4SBarry Smith   Uses `PetscFree()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
307*9f0612e4SBarry Smith 
308*9f0612e4SBarry Smith   Fortran Note:
309*9f0612e4SBarry Smith   The calling sequence is `PetscShmgetDeallocateArray[Scalar,Int](Petsc[Scalar,Int], pointer :: d1(:), ierr)`
310*9f0612e4SBarry Smith 
311*9f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetAllocateArray()`
312*9f0612e4SBarry Smith @*/
313*9f0612e4SBarry Smith PetscErrorCode PetscShmgetDeallocateArray(void **addr)
314*9f0612e4SBarry Smith {
315*9f0612e4SBarry Smith   PetscFunctionBegin;
316*9f0612e4SBarry Smith   if (!*addr) PetscFunctionReturn(PETSC_SUCCESS);
317*9f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscFree(*addr));
318*9f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
319*9f0612e4SBarry Smith   else {
320*9f0612e4SBarry Smith     PetscShmgetAllocation next = allocations, previous = NULL;
321*9f0612e4SBarry Smith 
322*9f0612e4SBarry Smith     while (next) {
323*9f0612e4SBarry Smith       if (next->addr == *addr) {
324*9f0612e4SBarry Smith         PetscCall(PetscInfo(NULL, "Deallocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, next->shmkey, next->shmid, (int)next->sz));
325*9f0612e4SBarry Smith         PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory addr %p key %d shmid %d %s", *addr, next->shmkey, next->shmid, strerror(errno));
326*9f0612e4SBarry Smith         *addr = NULL;
327*9f0612e4SBarry Smith         if (previous) previous->next = next->next;
328*9f0612e4SBarry Smith         else allocations = next->next;
329*9f0612e4SBarry Smith         PetscCall(PetscFree(next));
330*9f0612e4SBarry Smith         PetscFunctionReturn(PETSC_SUCCESS);
331*9f0612e4SBarry Smith       }
332*9f0612e4SBarry Smith       previous = next;
333*9f0612e4SBarry Smith       next     = next->next;
334*9f0612e4SBarry Smith     }
335*9f0612e4SBarry Smith     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared memory address %p", *addr);
336*9f0612e4SBarry Smith   }
337*9f0612e4SBarry Smith #endif
338*9f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
339*9f0612e4SBarry Smith }
340*9f0612e4SBarry Smith 
341*9f0612e4SBarry Smith #if defined(PETSC_USE_FORTRAN_BINDINGS)
342*9f0612e4SBarry Smith   #include <petsc/private/f90impl.h>
343*9f0612e4SBarry Smith 
344*9f0612e4SBarry Smith   #if defined(PETSC_HAVE_FORTRAN_CAPS)
345*9f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   PETSCSHMGETALLOCATEARRAYSCALAR
346*9f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ PETSCSHMGETDEALLOCATEARRAYSCALAR
347*9f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      PETSCSHMGETALLOCATEARRAYINT
348*9f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    PETSCSHMGETDEALLOCATEARRAYINT
349*9f0612e4SBarry Smith   #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
350*9f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   petscshmgetallocatearrayscalar
351*9f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ petscshmgetdeallocatearrayscalar
352*9f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      petscshmgetallocatearrayint
353*9f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    petscshmgetdeallocatearrayint
354*9f0612e4SBarry Smith   #endif
355*9f0612e4SBarry Smith 
356*9f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayscalar_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
357*9f0612e4SBarry Smith {
358*9f0612e4SBarry Smith   PetscScalar *aa;
359*9f0612e4SBarry Smith 
360*9f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscScalar), (void **)&aa);
361*9f0612e4SBarry Smith   if (*ierr) return;
362*9f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_SCALAR, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
363*9f0612e4SBarry Smith }
364*9f0612e4SBarry Smith 
365*9f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayscalar_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
366*9f0612e4SBarry Smith {
367*9f0612e4SBarry Smith   PetscScalar *aa;
368*9f0612e4SBarry Smith 
369*9f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_SCALAR, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
370*9f0612e4SBarry Smith   if (*ierr) return;
371*9f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
372*9f0612e4SBarry Smith   if (*ierr) return;
373*9f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_SCALAR PETSC_F90_2PTR_PARAM(ptrd));
374*9f0612e4SBarry Smith }
375*9f0612e4SBarry Smith 
376*9f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayint_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
377*9f0612e4SBarry Smith {
378*9f0612e4SBarry Smith   PetscScalar *aa;
379*9f0612e4SBarry Smith 
380*9f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscInt), (void **)&aa);
381*9f0612e4SBarry Smith   if (*ierr) return;
382*9f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_INT, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
383*9f0612e4SBarry Smith }
384*9f0612e4SBarry Smith 
385*9f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayint_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
386*9f0612e4SBarry Smith {
387*9f0612e4SBarry Smith   PetscScalar *aa;
388*9f0612e4SBarry Smith 
389*9f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_INT, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
390*9f0612e4SBarry Smith   if (*ierr) return;
391*9f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
392*9f0612e4SBarry Smith   if (*ierr) return;
393*9f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_INT PETSC_F90_2PTR_PARAM(ptrd));
394*9f0612e4SBarry Smith }
395*9f0612e4SBarry Smith 
396*9f0612e4SBarry Smith #endif
397