xref: /petsc/src/sys/utils/server.c (revision d7c1f4409a34685d8dcd545a97d161d483d89f66)
19f0612e4SBarry Smith /*
29f0612e4SBarry Smith     Code for allocating Unix shared memory on MPI rank 0 and later accessing it from other MPI processes
39f0612e4SBarry Smith */
49f0612e4SBarry Smith #include <petscsys.h>
59f0612e4SBarry Smith 
PetscBool PCMPIServerActive    = PETSC_FALSE; // PETSc is running in server mode
PetscBool PCMPIServerInSolve   = PETSC_FALSE; // A parallel server solve is occurring
PetscBool PCMPIServerUseShmget = PETSC_TRUE;  // Use Unix shared memory for distributing objects
99f0612e4SBarry Smith 
109f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
119f0612e4SBarry Smith   #include <sys/shm.h>
129f0612e4SBarry Smith   #include <sys/mman.h>
139f0612e4SBarry Smith   #include <errno.h>
149f0612e4SBarry Smith 
typedef struct _PetscShmgetAllocation *PetscShmgetAllocation;
// One System V shared-memory segment known to this process
struct _PetscShmgetAllocation {
  void                 *addr;          // address on this process; points to same physical address on all processes
  int                   shmkey, shmid; // System V IPC key and the id returned by shmget()
  size_t                sz;            // size of the segment in bytes
  PetscShmgetAllocation next;          // singly-linked list of live allocations
};
// head of the linked list of all shared-memory segments attached by this process
static PetscShmgetAllocation allocations = NULL;

// keys and sizes broadcast from MPI rank 0 so other ranks can shmget()/shmat() the same segments;
// holds at most 3 entries, limiting PetscShmgetMapAddresses() to 3 addresses per call
typedef struct {
  size_t shmkey[3];
  size_t sz[3];
} BcastInfo;
289f0612e4SBarry Smith 
299f0612e4SBarry Smith #endif
309f0612e4SBarry Smith 
319f0612e4SBarry Smith /*@C
329f0612e4SBarry Smith   PetscShmgetAddressesFinalize - frees any shared memory that was allocated by `PetscShmgetAllocateArray()` but
339f0612e4SBarry Smith   not deallocated with `PetscShmgetDeallocateArray()`
349f0612e4SBarry Smith 
359f0612e4SBarry Smith   Level: developer
369f0612e4SBarry Smith 
379f0612e4SBarry Smith   Notes:
389f0612e4SBarry Smith   This prevents any shared memory allocated, but not deallocated, from remaining on the system and preventing
399f0612e4SBarry Smith   its future use.
409f0612e4SBarry Smith 
419f0612e4SBarry Smith   If the program crashes outstanding shared memory allocations may remain.
429f0612e4SBarry Smith 
439f0612e4SBarry Smith .seealso: `PetscShmgetAllocateArray()`, `PetscShmgetDeallocateArray()`, `PetscShmgetUnmapAddresses()`
449f0612e4SBarry Smith @*/
459f0612e4SBarry Smith PetscErrorCode PetscShmgetAddressesFinalize(void)
469f0612e4SBarry Smith {
479f0612e4SBarry Smith   PetscFunctionBegin;
489f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
499f0612e4SBarry Smith   PetscShmgetAllocation next = allocations, previous = NULL;
509f0612e4SBarry Smith 
519f0612e4SBarry Smith   while (next) {
529f0612e4SBarry Smith     PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory key %d shmid %d %s", next->shmkey, next->shmid, strerror(errno));
539f0612e4SBarry Smith     previous = next;
549f0612e4SBarry Smith     next     = next->next;
559f0612e4SBarry Smith     PetscCall(PetscFree(previous));
569f0612e4SBarry Smith   }
579f0612e4SBarry Smith #endif
589f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
599f0612e4SBarry Smith }
609f0612e4SBarry Smith 
619f0612e4SBarry Smith /* takes a void so can work bsan safe with PetscObjectContainerCompose() */
629f0612e4SBarry Smith PetscErrorCode PCMPIServerAddressesDestroy(void *ctx)
639f0612e4SBarry Smith {
649f0612e4SBarry Smith   PCMPIServerAddresses *addresses = (PCMPIServerAddresses *)ctx;
659f0612e4SBarry Smith 
669f0612e4SBarry Smith   PetscFunctionBegin;
679f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
689f0612e4SBarry Smith   PetscCall(PetscShmgetUnmapAddresses(addresses->n, addresses->addr));
699f0612e4SBarry Smith   PetscCall(PetscFree(addresses));
709f0612e4SBarry Smith #endif
719f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
729f0612e4SBarry Smith }
739f0612e4SBarry Smith 
749f0612e4SBarry Smith /*@C
759f0612e4SBarry Smith   PetscShmgetMapAddresses - given shared address on the first MPI process determines the
769f0612e4SBarry Smith   addresses on the other MPI processes that map to the same physical memory
779f0612e4SBarry Smith 
789f0612e4SBarry Smith   Input Parameters:
799f0612e4SBarry Smith + comm       - the `MPI_Comm` to scatter the address
809f0612e4SBarry Smith . n          - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
819f0612e4SBarry Smith - baseaddres - the addresses on the first MPI process, ignored on all but first process
829f0612e4SBarry Smith 
839f0612e4SBarry Smith   Output Parameter:
849f0612e4SBarry Smith . addres - the addresses on each MPI process, the array of void * must already be allocated
859f0612e4SBarry Smith 
869f0612e4SBarry Smith   Level: developer
879f0612e4SBarry Smith 
889f0612e4SBarry Smith   Note:
899f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
909f0612e4SBarry Smith 
919f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetUnmapAddresses()`
929f0612e4SBarry Smith @*/
939f0612e4SBarry Smith PetscErrorCode PetscShmgetMapAddresses(MPI_Comm comm, PetscInt n, const void **baseaddres, void **addres)
949f0612e4SBarry Smith {
959f0612e4SBarry Smith   PetscFunctionBegin;
969f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
979f0612e4SBarry Smith   if (PetscGlobalRank == 0) {
989f0612e4SBarry Smith     BcastInfo bcastinfo = {
999f0612e4SBarry Smith       {0, 0, 0},
1009f0612e4SBarry Smith       {0, 0, 0}
1019f0612e4SBarry Smith     };
1029f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1039f0612e4SBarry Smith       PetscShmgetAllocation allocation = allocations;
1049f0612e4SBarry Smith 
1059f0612e4SBarry Smith       while (allocation) {
1069f0612e4SBarry Smith         if (allocation->addr == baseaddres[i]) {
1079f0612e4SBarry Smith           bcastinfo.shmkey[i] = allocation->shmkey;
1089f0612e4SBarry Smith           bcastinfo.sz[i]     = allocation->sz;
1099f0612e4SBarry Smith           addres[i]           = (void *)baseaddres[i];
1109f0612e4SBarry Smith           break;
1119f0612e4SBarry Smith         }
1129f0612e4SBarry Smith         allocation = allocation->next;
1139f0612e4SBarry Smith       }
1149f0612e4SBarry Smith       PetscCheck(allocation, comm, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared address %p", baseaddres[i]);
1159f0612e4SBarry Smith     }
1169f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Mapping PCMPI Server array %p\n", addres[0]));
1179f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
1189f0612e4SBarry Smith   } else {
1199f0612e4SBarry Smith     BcastInfo bcastinfo = {
1209f0612e4SBarry Smith       {0, 0, 0},
1219f0612e4SBarry Smith       {0, 0, 0}
1229f0612e4SBarry Smith     };
1239f0612e4SBarry Smith     int    shmkey = 0;
1249f0612e4SBarry Smith     size_t sz     = 0;
1259f0612e4SBarry Smith 
1269f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
1279f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1289f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1299f0612e4SBarry Smith 
1309f0612e4SBarry Smith       shmkey = (int)bcastinfo.shmkey[i];
1319f0612e4SBarry Smith       sz     = bcastinfo.sz[i];
1329f0612e4SBarry Smith       while (next) {
1339f0612e4SBarry Smith         if (next->shmkey == shmkey) addres[i] = (void *)next->addr;
1349f0612e4SBarry Smith         previous = next;
1359f0612e4SBarry Smith         next     = next->next;
1369f0612e4SBarry Smith       }
1379f0612e4SBarry Smith       if (!next) {
1389f0612e4SBarry Smith         PetscShmgetAllocation allocation;
1399f0612e4SBarry Smith         PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
1409f0612e4SBarry Smith         allocation->shmkey = shmkey;
1419f0612e4SBarry Smith         allocation->sz     = sz;
1429f0612e4SBarry Smith         allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666);
1439f0612e4SBarry Smith         PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d of size %d", allocation->shmkey, (int)allocation->sz);
144c8025a54SPierre Jolivet         allocation->addr = shmat(allocation->shmid, NULL, 0);
1459f0612e4SBarry Smith         PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d", allocation->shmkey);
1469f0612e4SBarry Smith         addres[i] = allocation->addr;
1479f0612e4SBarry Smith         if (previous) previous->next = allocation;
1489f0612e4SBarry Smith         else allocations = allocation;
1499f0612e4SBarry Smith       }
1509f0612e4SBarry Smith     }
1519f0612e4SBarry Smith   }
1529f0612e4SBarry Smith #endif
1539f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1549f0612e4SBarry Smith }
1559f0612e4SBarry Smith 
1569f0612e4SBarry Smith /*@C
1579f0612e4SBarry Smith   PetscShmgetUnmapAddresses - given shared addresses on a MPI process unlink it
1589f0612e4SBarry Smith 
1599f0612e4SBarry Smith   Input Parameters:
1609f0612e4SBarry Smith + n      - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
1619f0612e4SBarry Smith - addres - the addresses
1629f0612e4SBarry Smith 
1639f0612e4SBarry Smith   Level: developer
1649f0612e4SBarry Smith 
1659f0612e4SBarry Smith   Note:
1669f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
1679f0612e4SBarry Smith 
1689f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetMapAddresses()`
1699f0612e4SBarry Smith @*/
1709f0612e4SBarry Smith PetscErrorCode PetscShmgetUnmapAddresses(PetscInt n, void **addres)
1719f0612e4SBarry Smith {
1729f0612e4SBarry Smith   PetscFunctionBegin;
1739f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
1749f0612e4SBarry Smith   if (PetscGlobalRank > 0) {
1759f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1769f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1779f0612e4SBarry Smith       PetscBool             found = PETSC_FALSE;
1789f0612e4SBarry Smith 
1799f0612e4SBarry Smith       while (next) {
1809f0612e4SBarry Smith         if (next->addr == addres[i]) {
1819f0612e4SBarry Smith           PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s", strerror(errno));
1829f0612e4SBarry Smith           if (previous) previous->next = next->next;
1839f0612e4SBarry Smith           else allocations = next->next;
1849f0612e4SBarry Smith           PetscCall(PetscFree(next));
1859f0612e4SBarry Smith           found = PETSC_TRUE;
1869f0612e4SBarry Smith           break;
1879f0612e4SBarry Smith         }
1889f0612e4SBarry Smith         previous = next;
1899f0612e4SBarry Smith         next     = next->next;
1909f0612e4SBarry Smith       }
1919f0612e4SBarry Smith       PetscCheck(found, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to find address %p to unmap", addres[i]);
1929f0612e4SBarry Smith     }
1939f0612e4SBarry Smith   }
1949f0612e4SBarry Smith #endif
1959f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1969f0612e4SBarry Smith }
1979f0612e4SBarry Smith 
1989f0612e4SBarry Smith /*@C
199*d7c1f440SPierre Jolivet   PetscShmgetAllocateArray - allocates shared memory accessible by all MPI processes in the server
2009f0612e4SBarry Smith 
2019f0612e4SBarry Smith   Not Collective, only called on the first MPI process
2029f0612e4SBarry Smith 
2039f0612e4SBarry Smith   Input Parameters:
2049f0612e4SBarry Smith + sz  - the number of elements in the array
2059f0612e4SBarry Smith - asz - the size of an entry in the array, for example `sizeof(PetscScalar)`
2069f0612e4SBarry Smith 
2079f0612e4SBarry Smith   Output Parameters:
2089f0612e4SBarry Smith . addr - the address of the array
2099f0612e4SBarry Smith 
2109f0612e4SBarry Smith   Level: developer
2119f0612e4SBarry Smith 
2129f0612e4SBarry Smith   Notes:
2139f0612e4SBarry Smith   Uses `PetscMalloc()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
2149f0612e4SBarry Smith 
2159f0612e4SBarry Smith   Sometimes when a program crashes, shared memory IDs may remain, making it impossible to rerun the program.
2169f0612e4SBarry Smith   Use $PETSC_DIR/lib/petsc/bin/petscfreesharedmemory to free that memory
2179f0612e4SBarry Smith 
2189f0612e4SBarry Smith   Use the Unix command `ipcs -m` to see what memory IDs are currently allocated and `ipcrm -m ID` to remove a memory ID
2199f0612e4SBarry Smith 
2209f0612e4SBarry Smith   Use the Unix command `ipcrm --all` or `for i in $(ipcs -m | tail -$(expr $(ipcs -m | wc -l) - 3) | tr -s ' ' | cut -d" " -f3); do ipcrm -M $i; done`
2219f0612e4SBarry Smith   to delete all the currently allocated memory IDs.
2229f0612e4SBarry Smith 
2239f0612e4SBarry Smith   Under Apple macOS the following file must be copied to /Library/LaunchDaemons/sharedmemory.plist and the machine rebooted before using shared memory
2249f0612e4SBarry Smith .vb
2259f0612e4SBarry Smith <?xml version="1.0" encoding="UTF-8"?>
2269f0612e4SBarry Smith <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
2279f0612e4SBarry Smith <plist version="1.0">
2289f0612e4SBarry Smith <dict>
2299f0612e4SBarry Smith  <key>Label</key>
2309f0612e4SBarry Smith  <string>shmemsetup</string>
2319f0612e4SBarry Smith  <key>UserName</key>
2329f0612e4SBarry Smith  <string>root</string>
2339f0612e4SBarry Smith  <key>GroupName</key>
2349f0612e4SBarry Smith  <string>wheel</string>
2359f0612e4SBarry Smith  <key>ProgramArguments</key>
2369f0612e4SBarry Smith  <array>
2379f0612e4SBarry Smith  <string>/usr/sbin/sysctl</string>
2389f0612e4SBarry Smith  <string>-w</string>
2399f0612e4SBarry Smith  <string>kern.sysv.shmmax=4194304000</string>
2409f0612e4SBarry Smith  <string>kern.sysv.shmmni=2064</string>
2419f0612e4SBarry Smith  <string>kern.sysv.shmseg=2064</string>
2429f0612e4SBarry Smith  <string>kern.sysv.shmall=131072000</string>
2439f0612e4SBarry Smith   </array>
2449f0612e4SBarry Smith  <key>KeepAlive</key>
2459f0612e4SBarry Smith  <false/>
2469f0612e4SBarry Smith  <key>RunAtLoad</key>
2479f0612e4SBarry Smith  <true/>
2489f0612e4SBarry Smith </dict>
2499f0612e4SBarry Smith </plist>
2509f0612e4SBarry Smith .ve
2519f0612e4SBarry Smith 
2529f0612e4SBarry Smith   Fortran Note:
2539f0612e4SBarry Smith   The calling sequence is `PetscShmgetAllocateArray[Scalar,Int](PetscInt start, PetscInt len, Petsc[Scalar,Int], pointer :: d1(:), ierr)`
2549f0612e4SBarry Smith 
2559f0612e4SBarry Smith   Developer Note:
2569f0612e4SBarry Smith   More specifically this uses `PetscMalloc()` if `!PCMPIServerUseShmget` || `!PCMPIServerActive` || `PCMPIServerInSolve`
2579f0612e4SBarry Smith   where `PCMPIServerInSolve` indicates that the solve is nested inside a MPI linear solver server solve and hence should
2589f0612e4SBarry Smith   not allocate the vector and matrix memory in shared memory.
2599f0612e4SBarry Smith 
2609f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetDeallocateArray()`
2619f0612e4SBarry Smith @*/
2629f0612e4SBarry Smith PetscErrorCode PetscShmgetAllocateArray(size_t sz, size_t asz, void **addr)
2639f0612e4SBarry Smith {
2649f0612e4SBarry Smith   PetscFunctionBegin;
2659f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscMalloc(sz * asz, addr));
2669f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
2679f0612e4SBarry Smith   else {
2689f0612e4SBarry Smith     PetscShmgetAllocation allocation;
2699f0612e4SBarry Smith     static int            shmkeys = 10;
2709f0612e4SBarry Smith 
2719f0612e4SBarry Smith     PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
2729f0612e4SBarry Smith     allocation->shmkey = shmkeys++;
2739f0612e4SBarry Smith     allocation->sz     = sz * asz;
2749f0612e4SBarry Smith     allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666 | IPC_CREAT);
2759f0612e4SBarry Smith     PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to schmget() of size %d with key %d %s see PetscShmgetAllocateArray()", (int)allocation->sz, allocation->shmkey, strerror(errno));
276c8025a54SPierre Jolivet     allocation->addr = shmat(allocation->shmid, NULL, 0);
2779f0612e4SBarry Smith     PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to shmat() of shmid %d %s", (int)allocation->shmid, strerror(errno));
2789f0612e4SBarry Smith   #if PETSC_SIZEOF_VOID_P == 8
2799f0612e4SBarry Smith     PetscCheck((uint64_t)allocation->addr != 0xffffffffffffffff, PETSC_COMM_SELF, PETSC_ERR_LIB, "shmat() of shmid %d returned 0xffffffffffffffff %s", (int)allocation->shmid, strerror(errno));
2809f0612e4SBarry Smith   #endif
2819f0612e4SBarry Smith 
2829f0612e4SBarry Smith     if (!allocations) allocations = allocation;
2839f0612e4SBarry Smith     else {
2849f0612e4SBarry Smith       PetscShmgetAllocation next = allocations;
2859f0612e4SBarry Smith       while (next->next) next = next->next;
2869f0612e4SBarry Smith       next->next = allocation;
2879f0612e4SBarry Smith     }
2889f0612e4SBarry Smith     *addr = allocation->addr;
2899f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Allocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, allocation->shmkey, allocation->shmid, (int)allocation->sz));
2909f0612e4SBarry Smith   }
2919f0612e4SBarry Smith #endif
2929f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
2939f0612e4SBarry Smith }
2949f0612e4SBarry Smith 
2959f0612e4SBarry Smith /*@C
296*d7c1f440SPierre Jolivet   PetscShmgetDeallocateArray - deallocates shared memory accessible by all MPI processes in the server
2979f0612e4SBarry Smith 
2989f0612e4SBarry Smith   Not Collective, only called on the first MPI process
2999f0612e4SBarry Smith 
3009f0612e4SBarry Smith   Input Parameter:
3019f0612e4SBarry Smith . addr - the address of array
3029f0612e4SBarry Smith 
3039f0612e4SBarry Smith   Level: developer
3049f0612e4SBarry Smith 
3059f0612e4SBarry Smith   Note:
3069f0612e4SBarry Smith   Uses `PetscFree()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
3079f0612e4SBarry Smith 
3089f0612e4SBarry Smith   Fortran Note:
3099f0612e4SBarry Smith   The calling sequence is `PetscShmgetDeallocateArray[Scalar,Int](Petsc[Scalar,Int], pointer :: d1(:), ierr)`
3109f0612e4SBarry Smith 
3119f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetAllocateArray()`
3129f0612e4SBarry Smith @*/
3139f0612e4SBarry Smith PetscErrorCode PetscShmgetDeallocateArray(void **addr)
3149f0612e4SBarry Smith {
3159f0612e4SBarry Smith   PetscFunctionBegin;
3169f0612e4SBarry Smith   if (!*addr) PetscFunctionReturn(PETSC_SUCCESS);
3179f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscFree(*addr));
3189f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
3199f0612e4SBarry Smith   else {
3209f0612e4SBarry Smith     PetscShmgetAllocation next = allocations, previous = NULL;
3219f0612e4SBarry Smith 
3229f0612e4SBarry Smith     while (next) {
3239f0612e4SBarry Smith       if (next->addr == *addr) {
3249f0612e4SBarry Smith         PetscCall(PetscInfo(NULL, "Deallocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, next->shmkey, next->shmid, (int)next->sz));
3259f0612e4SBarry Smith         PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory addr %p key %d shmid %d %s", *addr, next->shmkey, next->shmid, strerror(errno));
3269f0612e4SBarry Smith         *addr = NULL;
3279f0612e4SBarry Smith         if (previous) previous->next = next->next;
3289f0612e4SBarry Smith         else allocations = next->next;
3299f0612e4SBarry Smith         PetscCall(PetscFree(next));
3309f0612e4SBarry Smith         PetscFunctionReturn(PETSC_SUCCESS);
3319f0612e4SBarry Smith       }
3329f0612e4SBarry Smith       previous = next;
3339f0612e4SBarry Smith       next     = next->next;
3349f0612e4SBarry Smith     }
3359f0612e4SBarry Smith     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared memory address %p", *addr);
3369f0612e4SBarry Smith   }
3379f0612e4SBarry Smith #endif
3389f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
3399f0612e4SBarry Smith }
3409f0612e4SBarry Smith 
3419f0612e4SBarry Smith #if defined(PETSC_USE_FORTRAN_BINDINGS)
3429f0612e4SBarry Smith   #include <petsc/private/f90impl.h>
3439f0612e4SBarry Smith 
3449f0612e4SBarry Smith   #if defined(PETSC_HAVE_FORTRAN_CAPS)
3459f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   PETSCSHMGETALLOCATEARRAYSCALAR
3469f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ PETSCSHMGETDEALLOCATEARRAYSCALAR
3479f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      PETSCSHMGETALLOCATEARRAYINT
3489f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    PETSCSHMGETDEALLOCATEARRAYINT
3499f0612e4SBarry Smith   #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
3509f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   petscshmgetallocatearrayscalar
3519f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ petscshmgetdeallocatearrayscalar
3529f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      petscshmgetallocatearrayint
3539f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    petscshmgetdeallocatearrayint
3549f0612e4SBarry Smith   #endif
3559f0612e4SBarry Smith 
/* Fortran binding: allocate a shared-memory PetscScalar array of length *len and wrap it in the F90 descriptor a with lower bound *start */
PETSC_EXTERN void petscshmgetallocatearrayscalar_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
{
  PetscScalar *aa;

  *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscScalar), (void **)&aa);
  if (*ierr) return;
  *ierr = F90Array1dCreate(aa, MPIU_SCALAR, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
}
3649f0612e4SBarry Smith 
/* Fortran binding: free the shared-memory PetscScalar array behind the F90 descriptor a and destroy the descriptor */
PETSC_EXTERN void petscshmgetdeallocatearrayscalar_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
{
  PetscScalar *aa;

  *ierr = F90Array1dAccess(a, MPIU_SCALAR, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
  if (*ierr) return;
  *ierr = PetscShmgetDeallocateArray((void **)&aa);
  if (*ierr) return;
  *ierr = F90Array1dDestroy(a, MPIU_SCALAR PETSC_F90_2PTR_PARAM(ptrd));
}
3759f0612e4SBarry Smith 
3769f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayint_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3779f0612e4SBarry Smith {
3789f0612e4SBarry Smith   PetscScalar *aa;
3799f0612e4SBarry Smith 
3809f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscInt), (void **)&aa);
3819f0612e4SBarry Smith   if (*ierr) return;
3829f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_INT, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
3839f0612e4SBarry Smith }
3849f0612e4SBarry Smith 
3859f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayint_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3869f0612e4SBarry Smith {
3879f0612e4SBarry Smith   PetscScalar *aa;
3889f0612e4SBarry Smith 
3899f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_INT, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
3909f0612e4SBarry Smith   if (*ierr) return;
3919f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
3929f0612e4SBarry Smith   if (*ierr) return;
3939f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_INT PETSC_F90_2PTR_PARAM(ptrd));
3949f0612e4SBarry Smith }
3959f0612e4SBarry Smith 
3969f0612e4SBarry Smith #endif
397