xref: /petsc/src/sys/utils/server.c (revision 7a5338279d92d13360d231b9bd26d284f35eaa49)
19f0612e4SBarry Smith /*
29f0612e4SBarry Smith     Code for allocating Unix shared memory on MPI rank 0 and later accessing it from other MPI processes
39f0612e4SBarry Smith */
49f0612e4SBarry Smith #include <petscsys.h>
59f0612e4SBarry Smith 
69f0612e4SBarry Smith PetscBool PCMPIServerActive    = PETSC_FALSE; // PETSc is running in server mode
7927f4375SPierre Jolivet PetscBool PCMPIServerInSolve   = PETSC_FALSE; // A parallel server solve is occurring
89f0612e4SBarry Smith PetscBool PCMPIServerUseShmget = PETSC_TRUE;  // Use Unix shared memory for distributing objects
99f0612e4SBarry Smith 
109f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
119f0612e4SBarry Smith   #include <sys/shm.h>
129f0612e4SBarry Smith   #include <sys/mman.h>
139f0612e4SBarry Smith   #include <errno.h>
149f0612e4SBarry Smith 
159f0612e4SBarry Smith typedef struct _PetscShmgetAllocation *PetscShmgetAllocation;
169f0612e4SBarry Smith struct _PetscShmgetAllocation {
179f0612e4SBarry Smith   void                 *addr; // address on this process; points to same physical address on all processes
189f0612e4SBarry Smith   int                   shmkey, shmid;
199f0612e4SBarry Smith   size_t                sz;
209f0612e4SBarry Smith   PetscShmgetAllocation next;
219f0612e4SBarry Smith };
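// Linked list of the shared memory segments this process has created (on MPI rank 0) or attached to (on the other ranks)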
229f0612e4SBarry Smith static PetscShmgetAllocation allocations = NULL;
239f0612e4SBarry Smith 
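// Shared memory keys and sizes for up to three segments, broadcast from MPI rank 0 as six MPIU_SIZE_T values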
249f0612e4SBarry Smith typedef struct {
259f0612e4SBarry Smith   size_t shmkey[3];
269f0612e4SBarry Smith   size_t sz[3];
279f0612e4SBarry Smith } BcastInfo;
289f0612e4SBarry Smith 
299f0612e4SBarry Smith #endif
309f0612e4SBarry Smith 
319f0612e4SBarry Smith /*@C
329f0612e4SBarry Smith   PetscShmgetAddressesFinalize - frees any shared memory that was allocated by `PetscShmgetAllocateArray()` but
339f0612e4SBarry Smith   not deallocated with `PetscShmgetDeallocateArray()`
349f0612e4SBarry Smith 
359f0612e4SBarry Smith   Level: developer
369f0612e4SBarry Smith 
379f0612e4SBarry Smith   Notes:
389f0612e4SBarry Smith   This prevents shared memory that was allocated but never deallocated from remaining on the system, where it would
399f0612e4SBarry Smith   block future allocations.
409f0612e4SBarry Smith 
419f0612e4SBarry Smith   If the program crashes, outstanding shared memory allocations may remain on the system.
429f0612e4SBarry Smith 
439f0612e4SBarry Smith .seealso: `PetscShmgetAllocateArray()`, `PetscShmgetDeallocateArray()`, `PetscShmgetUnmapAddresses()`
449f0612e4SBarry Smith @*/
459f0612e4SBarry Smith PetscErrorCode PetscShmgetAddressesFinalize(void)
469f0612e4SBarry Smith {
479f0612e4SBarry Smith   PetscFunctionBegin;
489f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
499f0612e4SBarry Smith   PetscShmgetAllocation next = allocations, previous = NULL;
509f0612e4SBarry Smith 
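  // mark every remaining segment for removal with shmctl(IPC_RMID) and free its bookkeeping node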
519f0612e4SBarry Smith   while (next) {
527255af2bSBarry Smith     PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory key %d shmid %d %s, see PCMPIServerBegin()", next->shmkey, next->shmid, strerror(errno));
539f0612e4SBarry Smith     previous = next;
549f0612e4SBarry Smith     next     = next->next;
559f0612e4SBarry Smith     PetscCall(PetscFree(previous));
569f0612e4SBarry Smith   }
579f0612e4SBarry Smith #endif
589f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
599f0612e4SBarry Smith }
609f0612e4SBarry Smith 
619f0612e4SBarry Smith /* takes a void ** so its signature matches the destroy callback expected by PetscObjectContainerCompose(), avoiding a sanitizer-unsafe function-pointer cast */
6249abdd8aSBarry Smith PetscErrorCode PCMPIServerAddressesDestroy(void **ctx)
639f0612e4SBarry Smith {
6449abdd8aSBarry Smith   PCMPIServerAddresses *addresses = (PCMPIServerAddresses *)*ctx;
659f0612e4SBarry Smith 
669f0612e4SBarry Smith   PetscFunctionBegin;
679f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
689f0612e4SBarry Smith   PetscCall(PetscShmgetUnmapAddresses(addresses->n, addresses->addr));
699f0612e4SBarry Smith   PetscCall(PetscFree(addresses));
709f0612e4SBarry Smith #endif
719f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
729f0612e4SBarry Smith }
739f0612e4SBarry Smith 
749f0612e4SBarry Smith /*@C
759f0612e4SBarry Smith   PetscShmgetMapAddresses - given shared addresses on the first MPI process, determines the
769f0612e4SBarry Smith   addresses on the other MPI processes that map to the same physical memory
779f0612e4SBarry Smith 
789f0612e4SBarry Smith   Input Parameters:
799f0612e4SBarry Smith + comm       - the `MPI_Comm` over which the addresses are communicated
809f0612e4SBarry Smith . n          - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
819f0612e4SBarry Smith - baseaddres - the addresses on the first MPI process, ignored on all but the first process
829f0612e4SBarry Smith 
839f0612e4SBarry Smith   Output Parameter:
849f0612e4SBarry Smith . addres - the addresses on this MPI process; the array of `void *` must already be allocated
859f0612e4SBarry Smith 
869f0612e4SBarry Smith   Level: developer
879f0612e4SBarry Smith 
889f0612e4SBarry Smith   Note:
899f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
909f0612e4SBarry Smith 
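  Example Usage:
  A minimal sketch, not taken from the PETSc sources; it assumes the MPI linear solver server is running with shared memory
  enabled, and that `comm` and `rank` (illustrative names) are the server communicator and this process's rank in it
.vb
  void *base[1] = {NULL}, *addr[1] = {NULL};

  if (rank == 0) PetscCall(PetscShmgetAllocateArray(100, sizeof(PetscScalar), &base[0])); // allocate the shared array on the first MPI process
  PetscCall(PetscShmgetMapAddresses(comm, 1, (const void **)base, addr));                 // every MPI process obtains an address for the same memory
  PetscCall(PetscShmgetUnmapAddresses(1, addr));                                          // detach it again; a no-op on the first MPI process
.ve
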
919f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetUnmapAddresses()`
929f0612e4SBarry Smith @*/
939f0612e4SBarry Smith PetscErrorCode PetscShmgetMapAddresses(MPI_Comm comm, PetscInt n, const void **baseaddres, void **addres)
949f0612e4SBarry Smith {
959f0612e4SBarry Smith   PetscFunctionBegin;
969f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
979f0612e4SBarry Smith   if (PetscGlobalRank == 0) {
989f0612e4SBarry Smith     BcastInfo bcastinfo = {
999f0612e4SBarry Smith       {0, 0, 0},
1009f0612e4SBarry Smith       {0, 0, 0}
1019f0612e4SBarry Smith     };
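    // the first MPI process looks up each base address in its allocation list and broadcasts the corresponding key and size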
1029f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1039f0612e4SBarry Smith       PetscShmgetAllocation allocation = allocations;
1049f0612e4SBarry Smith 
1059f0612e4SBarry Smith       while (allocation) {
1069f0612e4SBarry Smith         if (allocation->addr == baseaddres[i]) {
1079f0612e4SBarry Smith           bcastinfo.shmkey[i] = allocation->shmkey;
1089f0612e4SBarry Smith           bcastinfo.sz[i]     = allocation->sz;
1099f0612e4SBarry Smith           addres[i]           = (void *)baseaddres[i];
1109f0612e4SBarry Smith           break;
1119f0612e4SBarry Smith         }
1129f0612e4SBarry Smith         allocation = allocation->next;
1139f0612e4SBarry Smith       }
1147255af2bSBarry Smith       PetscCheck(allocation, comm, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared address %p, see PCMPIServerBegin()", baseaddres[i]);
1159f0612e4SBarry Smith     }
1169f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Mapping PCMPI Server array %p\n", addres[0]));
1179f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
1189f0612e4SBarry Smith   } else {
1199f0612e4SBarry Smith     BcastInfo bcastinfo = {
1209f0612e4SBarry Smith       {0, 0, 0},
1219f0612e4SBarry Smith       {0, 0, 0}
1229f0612e4SBarry Smith     };
1239f0612e4SBarry Smith     int    shmkey = 0;
1249f0612e4SBarry Smith     size_t sz     = 0;
1259f0612e4SBarry Smith 
1269f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
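    // the other MPI processes receive the keys and sizes and attach to each segment, unless it is already mapped here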
1279f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1289f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1299f0612e4SBarry Smith 
1309f0612e4SBarry Smith       shmkey = (int)bcastinfo.shmkey[i];
1319f0612e4SBarry Smith       sz     = bcastinfo.sz[i];
1329f0612e4SBarry Smith       while (next) {
133835f2295SStefano Zampini         if (next->shmkey == shmkey) { addres[i] = next->addr; break; } /* already attached to this key; reuse the existing mapping */
1349f0612e4SBarry Smith         previous = next;
1359f0612e4SBarry Smith         next     = next->next;
1369f0612e4SBarry Smith       }
1379f0612e4SBarry Smith       if (!next) {
1389f0612e4SBarry Smith         PetscShmgetAllocation allocation;
1399f0612e4SBarry Smith         PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
1409f0612e4SBarry Smith         allocation->shmkey = shmkey;
1419f0612e4SBarry Smith         allocation->sz     = sz;
1429f0612e4SBarry Smith         allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666);
1437255af2bSBarry Smith         PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d of size %d %s, see PCMPIServerBegin()", allocation->shmkey, (int)allocation->sz, strerror(errno));
144c8025a54SPierre Jolivet         allocation->addr = shmat(allocation->shmid, NULL, 0);
1457255af2bSBarry Smith         PetscCheck(allocation->addr && allocation->addr != (void *)-1, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmat() PCMPI shared memory key %d %s, see PCMPIServerBegin()", allocation->shmkey, strerror(errno));
1469f0612e4SBarry Smith         addres[i] = allocation->addr;
1479f0612e4SBarry Smith         if (previous) previous->next = allocation;
1489f0612e4SBarry Smith         else allocations = allocation;
1499f0612e4SBarry Smith       }
1509f0612e4SBarry Smith     }
1519f0612e4SBarry Smith   }
1529f0612e4SBarry Smith #endif
1539f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1549f0612e4SBarry Smith }
1559f0612e4SBarry Smith 
1569f0612e4SBarry Smith /*@C
1579f0612e4SBarry Smith   PetscShmgetUnmapAddresses - given shared addresses on an MPI process, unmaps them
1589f0612e4SBarry Smith 
1599f0612e4SBarry Smith   Input Parameters:
1609f0612e4SBarry Smith + n      - the number of addresses, each obtained with `PetscShmgetMapAddresses()`
1619f0612e4SBarry Smith - addres - the addresses
1629f0612e4SBarry Smith 
1639f0612e4SBarry Smith   Level: developer
1649f0612e4SBarry Smith 
1659f0612e4SBarry Smith   Note:
1669f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
1679f0612e4SBarry Smith 
1689f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetMapAddresses()`
1699f0612e4SBarry Smith @*/
1709f0612e4SBarry Smith PetscErrorCode PetscShmgetUnmapAddresses(PetscInt n, void **addres)
1719f0612e4SBarry Smith {
1729f0612e4SBarry Smith   PetscFunctionBegin;
1739f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
1749f0612e4SBarry Smith   if (PetscGlobalRank > 0) {
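    // only ranks other than the first attached with shmat() in PetscShmgetMapAddresses(), so only they detach here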
1759f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1769f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1779f0612e4SBarry Smith       PetscBool             found = PETSC_FALSE;
1789f0612e4SBarry Smith 
1799f0612e4SBarry Smith       while (next) {
1809f0612e4SBarry Smith         if (next->addr == addres[i]) {
1817255af2bSBarry Smith           PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s, see PCMPIServerBegin()", strerror(errno));
1829f0612e4SBarry Smith           if (previous) previous->next = next->next;
1839f0612e4SBarry Smith           else allocations = next->next;
1849f0612e4SBarry Smith           PetscCall(PetscFree(next));
1859f0612e4SBarry Smith           found = PETSC_TRUE;
1869f0612e4SBarry Smith           break;
1879f0612e4SBarry Smith         }
1889f0612e4SBarry Smith         previous = next;
1899f0612e4SBarry Smith         next     = next->next;
1909f0612e4SBarry Smith       }
1917255af2bSBarry Smith       PetscCheck(found, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to find address %p to unmap, see PCMPIServerBegin()", addres[i]);
1929f0612e4SBarry Smith     }
1939f0612e4SBarry Smith   }
1949f0612e4SBarry Smith #endif
1959f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1969f0612e4SBarry Smith }
1979f0612e4SBarry Smith 
1989f0612e4SBarry Smith /*@C
199d7c1f440SPierre Jolivet   PetscShmgetAllocateArray - allocates shared memory accessible by all MPI processes in the server
2009f0612e4SBarry Smith 
2019f0612e4SBarry Smith   Not Collective, only called on the first MPI process
2029f0612e4SBarry Smith 
2039f0612e4SBarry Smith   Input Parameters:
2049f0612e4SBarry Smith + sz  - the number of elements in the array
2059f0612e4SBarry Smith - asz - the size of an entry in the array, for example `sizeof(PetscScalar)`
2069f0612e4SBarry Smith 
2079f0612e4SBarry Smith   Output Parameters:
2089f0612e4SBarry Smith   Output Parameter:
2099f0612e4SBarry Smith 
2109f0612e4SBarry Smith   Level: developer
2119f0612e4SBarry Smith 
2129f0612e4SBarry Smith   Notes:
2139f0612e4SBarry Smith   Uses `PetscMalloc()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
2149f0612e4SBarry Smith 
2159f0612e4SBarry Smith   Sometimes when a program crashes, shared memory IDs may remain, making it impossible to rerun the program.
2167255af2bSBarry Smith   Use
2177255af2bSBarry Smith .vb
2187255af2bSBarry Smith   $PETSC_DIR/lib/petsc/bin/petscfreesharedmemory
2197255af2bSBarry Smith .ve
2207255af2bSBarry Smith   to free that memory. The Linux command `ipcrm --all` or the macOS command `for i in $(ipcs -m | tail -$(expr $(ipcs -m | wc -l) - 3) | tr -s ' ' | cut -d" " -f3); do ipcrm -M $i; done` will also free the memory.
2219f0612e4SBarry Smith 
2229f0612e4SBarry Smith   Use the Unix command `ipcs -m` to see what memory IDs are currently allocated and `ipcrm -m ID` to remove a memory ID
2239f0612e4SBarry Smith 
2247255af2bSBarry Smith   Under Apple macOS, the following file must be copied to /Library/LaunchDaemons/sharedmemory.plist (ensure the file is owned by root, not by the user)
2257255af2bSBarry Smith   and the machine rebooted before shared memory can be used
2269f0612e4SBarry Smith .vb
2279f0612e4SBarry Smith <?xml version="1.0" encoding="UTF-8"?>
2289f0612e4SBarry Smith <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
2299f0612e4SBarry Smith <plist version="1.0">
2309f0612e4SBarry Smith <dict>
2319f0612e4SBarry Smith  <key>Label</key>
2329f0612e4SBarry Smith  <string>shmemsetup</string>
2339f0612e4SBarry Smith  <key>UserName</key>
2349f0612e4SBarry Smith  <string>root</string>
2359f0612e4SBarry Smith  <key>GroupName</key>
2369f0612e4SBarry Smith  <string>wheel</string>
2379f0612e4SBarry Smith  <key>ProgramArguments</key>
2389f0612e4SBarry Smith  <array>
2399f0612e4SBarry Smith  <string>/usr/sbin/sysctl</string>
2409f0612e4SBarry Smith  <string>-w</string>
2419f0612e4SBarry Smith  <string>kern.sysv.shmmax=4194304000</string>
2429f0612e4SBarry Smith  <string>kern.sysv.shmmni=2064</string>
2439f0612e4SBarry Smith  <string>kern.sysv.shmseg=2064</string>
2449f0612e4SBarry Smith  <string>kern.sysv.shmall=131072000</string>
2459f0612e4SBarry Smith  </array>
2469f0612e4SBarry Smith  <key>KeepAlive</key>
2479f0612e4SBarry Smith  <false/>
2489f0612e4SBarry Smith  <key>RunAtLoad</key>
2499f0612e4SBarry Smith  <true/>
2509f0612e4SBarry Smith </dict>
2519f0612e4SBarry Smith </plist>
2529f0612e4SBarry Smith .ve
2539f0612e4SBarry Smith 
2547255af2bSBarry Smith   Use the command
2557255af2bSBarry Smith .vb
2567255af2bSBarry Smith   /usr/sbin/sysctl -a | grep shm
2577255af2bSBarry Smith .ve
2587255af2bSBarry Smith   to confirm that the shared memory limits you have requested are available.
2597255af2bSBarry Smith 
2609f0612e4SBarry Smith   Fortran Note:
2619f0612e4SBarry Smith   The calling sequence is `PetscShmgetAllocateArray[Scalar,Int](PetscInt start, PetscInt len, Petsc[Scalar,Int], pointer :: d1(:), ierr)`
2629f0612e4SBarry Smith 
2639f0612e4SBarry Smith   Developer Note:
2649f0612e4SBarry Smith   More specifically this uses `PetscMalloc()` if `!PCMPIServerUseShmget` || `!PCMPIServerActive` || `PCMPIServerInSolve`
2659f0612e4SBarry Smith   where `PCMPIServerInSolve` indicates that the solve is nested inside an MPI linear solver server solve and hence should
2669f0612e4SBarry Smith   not allocate the vector and matrix memory in shared memory.
2679f0612e4SBarry Smith 
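  Example Usage:
  A minimal sketch, not taken from the PETSc sources; the length 100 and the use of `PetscScalar` are illustrative, and when
  the server is not running the calls reduce to `PetscMalloc()` and `PetscFree()`
.vb
  PetscScalar *values;

  PetscCall(PetscShmgetAllocateArray(100, sizeof(PetscScalar), (void **)&values)); // allocate, on the first MPI process only
  // ... fill values and, if needed, publish it to the other MPI processes with PetscShmgetMapAddresses() ...
  PetscCall(PetscShmgetDeallocateArray((void **)&values));                         // detach and remove the shared memory segment
.ve
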
2689f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetDeallocateArray()`
2699f0612e4SBarry Smith @*/
2709f0612e4SBarry Smith PetscErrorCode PetscShmgetAllocateArray(size_t sz, size_t asz, void **addr)
2719f0612e4SBarry Smith {
2729f0612e4SBarry Smith   PetscFunctionBegin;
2739f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscMalloc(sz * asz, addr));
2749f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
2759f0612e4SBarry Smith   else {
2769f0612e4SBarry Smith     PetscShmgetAllocation allocation;
2779f0612e4SBarry Smith     static int            shmkeys = 10;
2789f0612e4SBarry Smith 
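    // create a fresh System V shared memory segment with the next unused key and attach it to this process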
2799f0612e4SBarry Smith     PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
2809f0612e4SBarry Smith     allocation->shmkey = shmkeys++;
2819f0612e4SBarry Smith     allocation->sz     = sz * asz;
2829f0612e4SBarry Smith     allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666 | IPC_CREAT);
2839f0612e4SBarry Smith     PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to shmget() a segment of size %d with key %d %s, see PetscShmgetAllocateArray()", (int)allocation->sz, allocation->shmkey, strerror(errno));
284c8025a54SPierre Jolivet     allocation->addr = shmat(allocation->shmid, NULL, 0);
285835f2295SStefano Zampini     PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to shmat() shmid %d %s", allocation->shmid, strerror(errno));
2869f0612e4SBarry Smith   #if PETSC_SIZEOF_VOID_P == 8
287*7a533827SSatish Balay     PetscCheck((uint64_t)allocation->addr != 0xffffffffffffffff, PETSC_COMM_SELF, PETSC_ERR_LIB, "shmat() of shmid %d returned 0xffffffffffffffff %s, see PCMPIServerBegin()", allocation->shmid, strerror(errno));
2889f0612e4SBarry Smith   #endif
2899f0612e4SBarry Smith 
2909f0612e4SBarry Smith     if (!allocations) allocations = allocation;
2919f0612e4SBarry Smith     else {
2929f0612e4SBarry Smith       PetscShmgetAllocation next = allocations;
2939f0612e4SBarry Smith       while (next->next) next = next->next;
2949f0612e4SBarry Smith       next->next = allocation;
2959f0612e4SBarry Smith     }
2969f0612e4SBarry Smith     *addr = allocation->addr;
2979f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Allocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, allocation->shmkey, allocation->shmid, (int)allocation->sz));
2989f0612e4SBarry Smith   }
2999f0612e4SBarry Smith #endif
3009f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
3019f0612e4SBarry Smith }
3029f0612e4SBarry Smith 
3039f0612e4SBarry Smith /*@C
304d7c1f440SPierre Jolivet   PetscShmgetDeallocateArray - deallocates shared memory accessible by all MPI processes in the server
3059f0612e4SBarry Smith 
3069f0612e4SBarry Smith   Not Collective, only called on the first MPI process
3079f0612e4SBarry Smith 
3089f0612e4SBarry Smith   Input Parameter:
3099f0612e4SBarry Smith . addr - the address of the array
3109f0612e4SBarry Smith 
3119f0612e4SBarry Smith   Level: developer
3129f0612e4SBarry Smith 
3139f0612e4SBarry Smith   Note:
3149f0612e4SBarry Smith   Uses `PetscFree()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
3159f0612e4SBarry Smith 
3169f0612e4SBarry Smith   Fortran Note:
3179f0612e4SBarry Smith   The calling sequence is `PetscShmgetDeallocateArray[Scalar,Int](Petsc[Scalar,Int], pointer :: d1(:), ierr)`
3189f0612e4SBarry Smith 
3199f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetAllocateArray()`
3209f0612e4SBarry Smith @*/
3219f0612e4SBarry Smith PetscErrorCode PetscShmgetDeallocateArray(void **addr)
3229f0612e4SBarry Smith {
3239f0612e4SBarry Smith   PetscFunctionBegin;
3249f0612e4SBarry Smith   if (!*addr) PetscFunctionReturn(PETSC_SUCCESS);
3259f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscFree(*addr));
3269f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
3279f0612e4SBarry Smith   else {
3289f0612e4SBarry Smith     PetscShmgetAllocation next = allocations, previous = NULL;
3299f0612e4SBarry Smith 
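    // locate the matching segment, detach it, mark it for removal, and drop it from the allocation list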
3309f0612e4SBarry Smith     while (next) {
3319f0612e4SBarry Smith       if (next->addr == *addr) {
3329f0612e4SBarry Smith         PetscCall(PetscInfo(NULL, "Deallocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, next->shmkey, next->shmid, (int)next->sz));
3337255af2bSBarry Smith         PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s, see PCMPIServerBegin()", strerror(errno));
3347255af2bSBarry Smith         PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory addr %p key %d shmid %d %s, see PCMPIServerBegin()", *addr, next->shmkey, next->shmid, strerror(errno));
3359f0612e4SBarry Smith         *addr = NULL;
3369f0612e4SBarry Smith         if (previous) previous->next = next->next;
3379f0612e4SBarry Smith         else allocations = next->next;
3389f0612e4SBarry Smith         PetscCall(PetscFree(next));
3399f0612e4SBarry Smith         PetscFunctionReturn(PETSC_SUCCESS);
3409f0612e4SBarry Smith       }
3419f0612e4SBarry Smith       previous = next;
3429f0612e4SBarry Smith       next     = next->next;
3439f0612e4SBarry Smith     }
3449f0612e4SBarry Smith     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared memory address %p", *addr);
3459f0612e4SBarry Smith   }
3469f0612e4SBarry Smith #endif
3479f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
3489f0612e4SBarry Smith }
3499f0612e4SBarry Smith 
3509f0612e4SBarry Smith #if defined(PETSC_USE_FORTRAN_BINDINGS)
3519f0612e4SBarry Smith   #include <petsc/private/f90impl.h>
3529f0612e4SBarry Smith 
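/* Fortran stubs for PetscShmgetAllocateArray() and PetscShmgetDeallocateArray(); the macros below handle compiler name mangling */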
3539f0612e4SBarry Smith   #if defined(PETSC_HAVE_FORTRAN_CAPS)
3549f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   PETSCSHMGETALLOCATEARRAYSCALAR
3559f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ PETSCSHMGETDEALLOCATEARRAYSCALAR
3569f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      PETSCSHMGETALLOCATEARRAYINT
3579f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    PETSCSHMGETDEALLOCATEARRAYINT
3589f0612e4SBarry Smith   #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
3599f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   petscshmgetallocatearrayscalar
3609f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ petscshmgetdeallocatearrayscalar
3619f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      petscshmgetallocatearrayint
3629f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    petscshmgetdeallocatearrayint
3639f0612e4SBarry Smith   #endif
3649f0612e4SBarry Smith 
3659f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayscalar_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3669f0612e4SBarry Smith {
3679f0612e4SBarry Smith   PetscScalar *aa;
3689f0612e4SBarry Smith 
3699f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscScalar), (void **)&aa);
3709f0612e4SBarry Smith   if (*ierr) return;
3719f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_SCALAR, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
3729f0612e4SBarry Smith }
3739f0612e4SBarry Smith 
3749f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayscalar_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3759f0612e4SBarry Smith {
3769f0612e4SBarry Smith   PetscScalar *aa;
3779f0612e4SBarry Smith 
3789f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_SCALAR, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
3799f0612e4SBarry Smith   if (*ierr) return;
3809f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
3819f0612e4SBarry Smith   if (*ierr) return;
3829f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_SCALAR PETSC_F90_2PTR_PARAM(ptrd));
3839f0612e4SBarry Smith }
3849f0612e4SBarry Smith 
3859f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayint_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3869f0612e4SBarry Smith {
3879f0612e4SBarry Smith   PetscInt *aa;
3889f0612e4SBarry Smith 
3899f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscInt), (void **)&aa);
3909f0612e4SBarry Smith   if (*ierr) return;
3919f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_INT, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
3929f0612e4SBarry Smith }
3939f0612e4SBarry Smith 
3949f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayint_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3959f0612e4SBarry Smith {
3969f0612e4SBarry Smith   PetscInt *aa;
3979f0612e4SBarry Smith 
3989f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_INT, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
3999f0612e4SBarry Smith   if (*ierr) return;
4009f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
4019f0612e4SBarry Smith   if (*ierr) return;
4029f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_INT PETSC_F90_2PTR_PARAM(ptrd));
4039f0612e4SBarry Smith }
4049f0612e4SBarry Smith 
4059f0612e4SBarry Smith #endif
406