xref: /petsc/src/sys/utils/server.c (revision 4e8208cbcbc709572b8abe32f33c78b69c819375)
19f0612e4SBarry Smith /*
29f0612e4SBarry Smith     Code for allocating Unix shared memory on MPI rank 0 and later accessing it from other MPI processes
39f0612e4SBarry Smith */
4ce78bad3SBarry Smith #include <petsc/private/petscimpl.h>
59f0612e4SBarry Smith #include <petscsys.h>
69f0612e4SBarry Smith 
79f0612e4SBarry Smith PetscBool PCMPIServerActive    = PETSC_FALSE; // PETSc is running in server mode
8927f4375SPierre Jolivet PetscBool PCMPIServerInSolve   = PETSC_FALSE; // A parallel server solve is occurring
99f0612e4SBarry Smith PetscBool PCMPIServerUseShmget = PETSC_TRUE;  // Use Unix shared memory for distributing objects
109f0612e4SBarry Smith 
119f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
129f0612e4SBarry Smith   #include <sys/shm.h>
139f0612e4SBarry Smith   #include <sys/mman.h>
149f0612e4SBarry Smith   #include <errno.h>
159f0612e4SBarry Smith 
typedef struct _PetscShmgetAllocation *PetscShmgetAllocation;
/* One node in the per-process singly-linked list of System V shared-memory segments created or attached here */
struct _PetscShmgetAllocation {
  void                 *addr;           // address on this process; points to same physical address on all processes
  int                   shmkey, shmid;  // IPC key the segment was created with, and the id returned by shmget()
  size_t                sz;             // size of the segment in bytes
  PetscShmgetAllocation next;           // next node in the list rooted at 'allocations'
};
// Head of this process's list of outstanding shared-memory allocations
static PetscShmgetAllocation allocations = NULL;

// Payload broadcast from rank 0 so the other ranks can attach the same segments;
// sized for at most 3 addresses per PetscShmgetMapAddresses() call (matches the MPI_Bcast count of 6 size_t)
typedef struct {
  size_t shmkey[3];
  size_t sz[3];
} BcastInfo;
299f0612e4SBarry Smith 
309f0612e4SBarry Smith #endif
319f0612e4SBarry Smith 
329f0612e4SBarry Smith /*@C
339f0612e4SBarry Smith   PetscShmgetAddressesFinalize - frees any shared memory that was allocated by `PetscShmgetAllocateArray()` but
349f0612e4SBarry Smith   not deallocated with `PetscShmgetDeallocateArray()`
359f0612e4SBarry Smith 
369f0612e4SBarry Smith   Level: developer
379f0612e4SBarry Smith 
389f0612e4SBarry Smith   Notes:
399f0612e4SBarry Smith   This prevents any shared memory allocated, but not deallocated, from remaining on the system and preventing
409f0612e4SBarry Smith   its future use.
419f0612e4SBarry Smith 
  If the program crashes, outstanding shared memory allocations may remain on the system.
439f0612e4SBarry Smith 
449f0612e4SBarry Smith .seealso: `PetscShmgetAllocateArray()`, `PetscShmgetDeallocateArray()`, `PetscShmgetUnmapAddresses()`
459f0612e4SBarry Smith @*/
PetscShmgetAddressesFinalize(void)469f0612e4SBarry Smith PetscErrorCode PetscShmgetAddressesFinalize(void)
479f0612e4SBarry Smith {
489f0612e4SBarry Smith   PetscFunctionBegin;
499f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
509f0612e4SBarry Smith   PetscShmgetAllocation next = allocations, previous = NULL;
519f0612e4SBarry Smith 
529f0612e4SBarry Smith   while (next) {
537255af2bSBarry Smith     PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory key %d shmid %d %s, see PCMPIServerBegin()", next->shmkey, next->shmid, strerror(errno));
549f0612e4SBarry Smith     previous = next;
559f0612e4SBarry Smith     next     = next->next;
569f0612e4SBarry Smith     PetscCall(PetscFree(previous));
579f0612e4SBarry Smith   }
589f0612e4SBarry Smith #endif
599f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
609f0612e4SBarry Smith }
619f0612e4SBarry Smith 
629f0612e4SBarry Smith /* takes a void so can work bsan safe with PetscObjectContainerCompose() */
/* takes a void so can work bsan safe with PetscObjectContainerCompose() */
PetscErrorCode PCMPIServerAddressesDestroy(PetscCtxRt ctx)
{
  // ctx holds the address of a PCMPIServerAddresses * stashed via PetscObjectContainerCompose()
  PCMPIServerAddresses *addresses = *(PCMPIServerAddresses **)ctx;

  PetscFunctionBegin;
#if defined(PETSC_HAVE_SHMGET)
  // detach all mapped shared-memory addresses on this rank, then free the bookkeeping struct
  PetscCall(PetscShmgetUnmapAddresses(addresses->n, addresses->addr));
  PetscCall(PetscFree(addresses));
#else
  (void)addresses; // silence unused-variable warning; NOTE(review): addresses is not freed on this path — confirm it is never allocated when SHMGET is unavailable
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}
769f0612e4SBarry Smith 
779f0612e4SBarry Smith /*@C
789f0612e4SBarry Smith   PetscShmgetMapAddresses - given shared address on the first MPI process determines the
799f0612e4SBarry Smith   addresses on the other MPI processes that map to the same physical memory
809f0612e4SBarry Smith 
819f0612e4SBarry Smith   Input Parameters:
829f0612e4SBarry Smith + comm       - the `MPI_Comm` to scatter the address
839f0612e4SBarry Smith . n          - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
849f0612e4SBarry Smith - baseaddres - the addresses on the first MPI process, ignored on all but first process
859f0612e4SBarry Smith 
869f0612e4SBarry Smith   Output Parameter:
879f0612e4SBarry Smith . addres - the addresses on each MPI process, the array of void * must already be allocated
889f0612e4SBarry Smith 
899f0612e4SBarry Smith   Level: developer
909f0612e4SBarry Smith 
919f0612e4SBarry Smith   Note:
929f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
939f0612e4SBarry Smith 
949f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetUnmapAddresses()`
959f0612e4SBarry Smith @*/
PetscShmgetMapAddresses(MPI_Comm comm,PetscInt n,const void ** baseaddres,void ** addres)969f0612e4SBarry Smith PetscErrorCode PetscShmgetMapAddresses(MPI_Comm comm, PetscInt n, const void **baseaddres, void **addres)
979f0612e4SBarry Smith {
989f0612e4SBarry Smith   PetscFunctionBegin;
999f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
1009f0612e4SBarry Smith   if (PetscGlobalRank == 0) {
1019f0612e4SBarry Smith     BcastInfo bcastinfo = {
1029f0612e4SBarry Smith       {0, 0, 0},
1039f0612e4SBarry Smith       {0, 0, 0}
1049f0612e4SBarry Smith     };
1059f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1069f0612e4SBarry Smith       PetscShmgetAllocation allocation = allocations;
1079f0612e4SBarry Smith 
1089f0612e4SBarry Smith       while (allocation) {
1099f0612e4SBarry Smith         if (allocation->addr == baseaddres[i]) {
1109f0612e4SBarry Smith           bcastinfo.shmkey[i] = allocation->shmkey;
1119f0612e4SBarry Smith           bcastinfo.sz[i]     = allocation->sz;
1129f0612e4SBarry Smith           addres[i]           = (void *)baseaddres[i];
1139f0612e4SBarry Smith           break;
1149f0612e4SBarry Smith         }
1159f0612e4SBarry Smith         allocation = allocation->next;
1169f0612e4SBarry Smith       }
1177255af2bSBarry Smith       PetscCheck(allocation, comm, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared address %p, see PCMPIServerBegin()", baseaddres[i]);
1189f0612e4SBarry Smith     }
1199f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Mapping PCMPI Server array %p\n", addres[0]));
1209f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
1219f0612e4SBarry Smith   } else {
1229f0612e4SBarry Smith     BcastInfo bcastinfo = {
1239f0612e4SBarry Smith       {0, 0, 0},
1249f0612e4SBarry Smith       {0, 0, 0}
1259f0612e4SBarry Smith     };
1269f0612e4SBarry Smith     int    shmkey = 0;
1279f0612e4SBarry Smith     size_t sz     = 0;
1289f0612e4SBarry Smith 
1299f0612e4SBarry Smith     PetscCallMPI(MPI_Bcast(&bcastinfo, 6, MPIU_SIZE_T, 0, comm));
1309f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1319f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1329f0612e4SBarry Smith 
1339f0612e4SBarry Smith       shmkey = (int)bcastinfo.shmkey[i];
1349f0612e4SBarry Smith       sz     = bcastinfo.sz[i];
1359f0612e4SBarry Smith       while (next) {
136835f2295SStefano Zampini         if (next->shmkey == shmkey) addres[i] = next->addr;
1379f0612e4SBarry Smith         previous = next;
1389f0612e4SBarry Smith         next     = next->next;
1399f0612e4SBarry Smith       }
1409f0612e4SBarry Smith       if (!next) {
1419f0612e4SBarry Smith         PetscShmgetAllocation allocation;
1429f0612e4SBarry Smith         PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
1439f0612e4SBarry Smith         allocation->shmkey = shmkey;
1449f0612e4SBarry Smith         allocation->sz     = sz;
1459f0612e4SBarry Smith         allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666);
1467255af2bSBarry Smith         PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d of size %d, see PCMPIServerBegin()", allocation->shmkey, (int)allocation->sz);
147c8025a54SPierre Jolivet         allocation->addr = shmat(allocation->shmid, NULL, 0);
1487255af2bSBarry Smith         PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to map PCMPI shared memory key %d, see PCMPIServerBegin()", allocation->shmkey);
1499f0612e4SBarry Smith         addres[i] = allocation->addr;
1509f0612e4SBarry Smith         if (previous) previous->next = allocation;
1519f0612e4SBarry Smith         else allocations = allocation;
1529f0612e4SBarry Smith       }
1539f0612e4SBarry Smith     }
1549f0612e4SBarry Smith   }
1559f0612e4SBarry Smith #endif
1569f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1579f0612e4SBarry Smith }
1589f0612e4SBarry Smith 
1599f0612e4SBarry Smith /*@C
  PetscShmgetUnmapAddresses - given shared addresses on an MPI process, unmaps them from the process
1619f0612e4SBarry Smith 
1629f0612e4SBarry Smith   Input Parameters:
1639f0612e4SBarry Smith + n      - the number of addresses, each obtained on MPI process zero by `PetscShmgetAllocateArray()`
1649f0612e4SBarry Smith - addres - the addresses
1659f0612e4SBarry Smith 
1669f0612e4SBarry Smith   Level: developer
1679f0612e4SBarry Smith 
1689f0612e4SBarry Smith   Note:
1699f0612e4SBarry Smith   This routine does nothing if `PETSC_HAVE_SHMGET` is not defined
1709f0612e4SBarry Smith 
1719f0612e4SBarry Smith .seealso: `PetscShmgetDeallocateArray()`, `PetscShmgetAllocateArray()`, `PetscShmgetMapAddresses()`
1729f0612e4SBarry Smith @*/
PetscShmgetUnmapAddresses(PetscInt n,void ** addres)173ce78bad3SBarry Smith PetscErrorCode PetscShmgetUnmapAddresses(PetscInt n, void **addres) PeNS
1749f0612e4SBarry Smith {
1759f0612e4SBarry Smith   PetscFunctionBegin;
1769f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
1779f0612e4SBarry Smith   if (PetscGlobalRank > 0) {
1789f0612e4SBarry Smith     for (PetscInt i = 0; i < n; i++) {
1799f0612e4SBarry Smith       PetscShmgetAllocation next = allocations, previous = NULL;
1809f0612e4SBarry Smith       PetscBool             found = PETSC_FALSE;
1819f0612e4SBarry Smith 
1829f0612e4SBarry Smith       while (next) {
1839f0612e4SBarry Smith         if (next->addr == addres[i]) {
1847255af2bSBarry Smith           PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s, see PCMPIServerBegin()", strerror(errno));
1859f0612e4SBarry Smith           if (previous) previous->next = next->next;
1869f0612e4SBarry Smith           else allocations = next->next;
1879f0612e4SBarry Smith           PetscCall(PetscFree(next));
1889f0612e4SBarry Smith           found = PETSC_TRUE;
1899f0612e4SBarry Smith           break;
1909f0612e4SBarry Smith         }
1919f0612e4SBarry Smith         previous = next;
1929f0612e4SBarry Smith         next     = next->next;
1939f0612e4SBarry Smith       }
1947255af2bSBarry Smith       PetscCheck(found, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to find address %p to unmap, see PCMPIServerBegin()", addres[i]);
1959f0612e4SBarry Smith     }
1969f0612e4SBarry Smith   }
1979f0612e4SBarry Smith #endif
1989f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
1999f0612e4SBarry Smith }
2009f0612e4SBarry Smith 
2019f0612e4SBarry Smith /*@C
202d7c1f440SPierre Jolivet   PetscShmgetAllocateArray - allocates shared memory accessible by all MPI processes in the server
2039f0612e4SBarry Smith 
2049f0612e4SBarry Smith   Not Collective, only called on the first MPI process
2059f0612e4SBarry Smith 
2069f0612e4SBarry Smith   Input Parameters:
2079f0612e4SBarry Smith + sz  - the number of elements in the array
2089f0612e4SBarry Smith - asz - the size of an entry in the array, for example `sizeof(PetscScalar)`
2099f0612e4SBarry Smith 
2109f0612e4SBarry Smith   Output Parameters:
2119f0612e4SBarry Smith . addr - the address of the array
2129f0612e4SBarry Smith 
2139f0612e4SBarry Smith   Level: developer
2149f0612e4SBarry Smith 
2159f0612e4SBarry Smith   Notes:
2169f0612e4SBarry Smith   Uses `PetscMalloc()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
2179f0612e4SBarry Smith 
2189f0612e4SBarry Smith   Sometimes when a program crashes, shared memory IDs may remain, making it impossible to rerun the program.
2197255af2bSBarry Smith   Use
2207255af2bSBarry Smith .vb
221873ad108SBarry Smith   $PETSC_DIR/lib/petsc/bin/petscfreesharedmemory.sh
2227255af2bSBarry Smith .ve to free that memory. The Linux command `ipcrm --all` or macOS command `for i in $(ipcs -m | tail -$(expr $(ipcs -m | wc -l) - 3) | tr -s ' ' | cut -d" " -f3); do ipcrm -M $i; done`
2237255af2bSBarry Smith   will also free the memory.
2249f0612e4SBarry Smith 
2259f0612e4SBarry Smith   Use the Unix command `ipcs -m` to see what memory IDs are currently allocated and `ipcrm -m ID` to remove a memory ID
2269f0612e4SBarry Smith 
2277255af2bSBarry Smith   Under Apple macOS the following file must be copied to /Library/LaunchDaemons/sharedmemory.plist (ensure this file is owned by root and not the user)
2287255af2bSBarry Smith   and the machine rebooted before using shared memory
2299f0612e4SBarry Smith .vb
2309f0612e4SBarry Smith <?xml version="1.0" encoding="UTF-8"?>
2319f0612e4SBarry Smith <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
2329f0612e4SBarry Smith <plist version="1.0">
2339f0612e4SBarry Smith <dict>
2349f0612e4SBarry Smith  <key>Label</key>
2359f0612e4SBarry Smith  <string>shmemsetup</string>
2369f0612e4SBarry Smith  <key>UserName</key>
2379f0612e4SBarry Smith  <string>root</string>
2389f0612e4SBarry Smith  <key>GroupName</key>
2399f0612e4SBarry Smith  <string>wheel</string>
2409f0612e4SBarry Smith  <key>ProgramArguments</key>
2419f0612e4SBarry Smith  <array>
2429f0612e4SBarry Smith  <string>/usr/sbin/sysctl</string>
2439f0612e4SBarry Smith  <string>-w</string>
2449f0612e4SBarry Smith  <string>kern.sysv.shmmax=4194304000</string>
2459f0612e4SBarry Smith  <string>kern.sysv.shmmni=2064</string>
2469f0612e4SBarry Smith  <string>kern.sysv.shmseg=2064</string>
2479f0612e4SBarry Smith  <string>kern.sysv.shmall=131072000</string>
2489f0612e4SBarry Smith   </array>
2499f0612e4SBarry Smith  <key>KeepAlive</key>
2509f0612e4SBarry Smith  <false/>
2519f0612e4SBarry Smith  <key>RunAtLoad</key>
2529f0612e4SBarry Smith  <true/>
2539f0612e4SBarry Smith </dict>
2549f0612e4SBarry Smith </plist>
2559f0612e4SBarry Smith .ve
2569f0612e4SBarry Smith 
2577255af2bSBarry Smith   Use the command
2587255af2bSBarry Smith .vb
2597255af2bSBarry Smith   /usr/sbin/sysctl -a | grep shm
2607255af2bSBarry Smith .ve
2617255af2bSBarry Smith   to confirm that the shared memory limits you have requested are available.
2627255af2bSBarry Smith 
2639f0612e4SBarry Smith   Fortran Note:
2649f0612e4SBarry Smith   The calling sequence is `PetscShmgetAllocateArray[Scalar,Int](PetscInt start, PetscInt len, Petsc[Scalar,Int], pointer :: d1(:), ierr)`
2659f0612e4SBarry Smith 
2669f0612e4SBarry Smith   Developer Note:
2679f0612e4SBarry Smith   More specifically this uses `PetscMalloc()` if `!PCMPIServerUseShmget` || `!PCMPIServerActive` || `PCMPIServerInSolve`
2689f0612e4SBarry Smith   where `PCMPIServerInSolve` indicates that the solve is nested inside a MPI linear solver server solve and hence should
2699f0612e4SBarry Smith   not allocate the vector and matrix memory in shared memory.
2709f0612e4SBarry Smith 
2719f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetDeallocateArray()`
2729f0612e4SBarry Smith @*/
PetscShmgetAllocateArray(size_t sz,size_t asz,void * addr[])273ce78bad3SBarry Smith PetscErrorCode PetscShmgetAllocateArray(size_t sz, size_t asz, void *addr[])
2749f0612e4SBarry Smith {
2759f0612e4SBarry Smith   PetscFunctionBegin;
2769f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscMalloc(sz * asz, addr));
2779f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
2789f0612e4SBarry Smith   else {
2799f0612e4SBarry Smith     PetscShmgetAllocation allocation;
2809f0612e4SBarry Smith     static int            shmkeys = 10;
2819f0612e4SBarry Smith 
2829f0612e4SBarry Smith     PetscCall(PetscCalloc(sizeof(struct _PetscShmgetAllocation), &allocation));
2839f0612e4SBarry Smith     allocation->shmkey = shmkeys++;
2849f0612e4SBarry Smith     allocation->sz     = sz * asz;
2859f0612e4SBarry Smith     allocation->shmid  = shmget(allocation->shmkey, allocation->sz, 0666 | IPC_CREAT);
2869f0612e4SBarry Smith     PetscCheck(allocation->shmid != -1, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to schmget() of size %d with key %d %s see PetscShmgetAllocateArray()", (int)allocation->sz, allocation->shmkey, strerror(errno));
287c8025a54SPierre Jolivet     allocation->addr = shmat(allocation->shmid, NULL, 0);
288835f2295SStefano Zampini     PetscCheck(allocation->addr, PETSC_COMM_SELF, PETSC_ERR_LIB, "Unable to shmat() of shmid %d %s", allocation->shmid, strerror(errno));
2899f0612e4SBarry Smith   #if PETSC_SIZEOF_VOID_P == 8
2907a533827SSatish Balay     PetscCheck((uint64_t)allocation->addr != 0xffffffffffffffff, PETSC_COMM_SELF, PETSC_ERR_LIB, "shmat() of shmid %d returned 0xffffffffffffffff %s, see PCMPIServerBegin()", allocation->shmid, strerror(errno));
2919f0612e4SBarry Smith   #endif
2929f0612e4SBarry Smith 
2939f0612e4SBarry Smith     if (!allocations) allocations = allocation;
2949f0612e4SBarry Smith     else {
2959f0612e4SBarry Smith       PetscShmgetAllocation next = allocations;
2969f0612e4SBarry Smith       while (next->next) next = next->next;
2979f0612e4SBarry Smith       next->next = allocation;
2989f0612e4SBarry Smith     }
2999f0612e4SBarry Smith     *addr = allocation->addr;
3009f0612e4SBarry Smith     PetscCall(PetscInfo(NULL, "Allocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, allocation->shmkey, allocation->shmid, (int)allocation->sz));
3019f0612e4SBarry Smith   }
3029f0612e4SBarry Smith #endif
3039f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
3049f0612e4SBarry Smith }
3059f0612e4SBarry Smith 
3069f0612e4SBarry Smith /*@C
307d7c1f440SPierre Jolivet   PetscShmgetDeallocateArray - deallocates shared memory accessible by all MPI processes in the server
3089f0612e4SBarry Smith 
3099f0612e4SBarry Smith   Not Collective, only called on the first MPI process
3109f0612e4SBarry Smith 
3119f0612e4SBarry Smith   Input Parameter:
3129f0612e4SBarry Smith . addr - the address of array
3139f0612e4SBarry Smith 
3149f0612e4SBarry Smith   Level: developer
3159f0612e4SBarry Smith 
3169f0612e4SBarry Smith   Note:
3179f0612e4SBarry Smith   Uses `PetscFree()` if `PETSC_HAVE_SHMGET` is not defined or the MPI linear solver server is not running
3189f0612e4SBarry Smith 
3199f0612e4SBarry Smith   Fortran Note:
3209f0612e4SBarry Smith   The calling sequence is `PetscShmgetDeallocateArray[Scalar,Int](Petsc[Scalar,Int], pointer :: d1(:), ierr)`
3219f0612e4SBarry Smith 
3229f0612e4SBarry Smith .seealso: [](sec_pcmpi), `PCMPIServerBegin()`, `PCMPI`, `KSPCheckPCMPI()`, `PetscShmgetAllocateArray()`
3239f0612e4SBarry Smith @*/
PetscShmgetDeallocateArray(void * addr[])324ce78bad3SBarry Smith PetscErrorCode PetscShmgetDeallocateArray(void *addr[])
3259f0612e4SBarry Smith {
3269f0612e4SBarry Smith   PetscFunctionBegin;
3279f0612e4SBarry Smith   if (!*addr) PetscFunctionReturn(PETSC_SUCCESS);
3289f0612e4SBarry Smith   if (!PCMPIServerUseShmget || !PCMPIServerActive || PCMPIServerInSolve) PetscCall(PetscFree(*addr));
3299f0612e4SBarry Smith #if defined(PETSC_HAVE_SHMGET)
3309f0612e4SBarry Smith   else {
3319f0612e4SBarry Smith     PetscShmgetAllocation next = allocations, previous = NULL;
3329f0612e4SBarry Smith 
3339f0612e4SBarry Smith     while (next) {
3349f0612e4SBarry Smith       if (next->addr == *addr) {
3359f0612e4SBarry Smith         PetscCall(PetscInfo(NULL, "Deallocating PCMPI Server array %p shmkey %d shmid %d size %d\n", *addr, next->shmkey, next->shmid, (int)next->sz));
3367255af2bSBarry Smith         PetscCheck(!shmdt(next->addr), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to shmdt() location %s, see PCMPIServerBegin()", strerror(errno));
3377255af2bSBarry Smith         PetscCheck(!shmctl(next->shmid, IPC_RMID, NULL), PETSC_COMM_SELF, PETSC_ERR_SYS, "Unable to free shared memory addr %p key %d shmid %d %s, see PCMPIServerBegin()", *addr, next->shmkey, next->shmid, strerror(errno));
3389f0612e4SBarry Smith         *addr = NULL;
3399f0612e4SBarry Smith         if (previous) previous->next = next->next;
3409f0612e4SBarry Smith         else allocations = next->next;
3419f0612e4SBarry Smith         PetscCall(PetscFree(next));
3429f0612e4SBarry Smith         PetscFunctionReturn(PETSC_SUCCESS);
3439f0612e4SBarry Smith       }
3449f0612e4SBarry Smith       previous = next;
3459f0612e4SBarry Smith       next     = next->next;
3469f0612e4SBarry Smith     }
3479f0612e4SBarry Smith     SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Unable to locate PCMPI allocated shared memory address %p", *addr);
3489f0612e4SBarry Smith   }
3499f0612e4SBarry Smith #endif
3509f0612e4SBarry Smith   PetscFunctionReturn(PETSC_SUCCESS);
3519f0612e4SBarry Smith }
3529f0612e4SBarry Smith 
3539f0612e4SBarry Smith #if defined(PETSC_USE_FORTRAN_BINDINGS)
3546dd63270SBarry Smith   #include <petsc/private/ftnimpl.h>
3559f0612e4SBarry Smith 
3569f0612e4SBarry Smith   #if defined(PETSC_HAVE_FORTRAN_CAPS)
3579f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   PETSCSHMGETALLOCATEARRAYSCALAR
3589f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ PETSCSHMGETDEALLOCATEARRAYSCALAR
3599f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      PETSCSHMGETALLOCATEARRAYINT
3609f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    PETSCSHMGETDEALLOCATEARRAYINT
3619f0612e4SBarry Smith   #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
3629f0612e4SBarry Smith     #define petscshmgetallocatearrayscalar_   petscshmgetallocatearrayscalar
3639f0612e4SBarry Smith     #define petscshmgetdeallocatearrayscalar_ petscshmgetdeallocatearrayscalar
3649f0612e4SBarry Smith     #define petscshmgetallocatearrayint_      petscshmgetallocatearrayint
3659f0612e4SBarry Smith     #define petscshmgetdeallocatearrayint_    petscshmgetdeallocatearrayint
3669f0612e4SBarry Smith   #endif
3679f0612e4SBarry Smith 
petscshmgetallocatearrayscalar_(PetscInt * start,PetscInt * len,F90Array1d * a,PetscErrorCode * ierr PETSC_F90_2PTR_PROTO (ptrd))3689f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayscalar_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3699f0612e4SBarry Smith {
3709f0612e4SBarry Smith   PetscScalar *aa;
3719f0612e4SBarry Smith 
3729f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscScalar), (void **)&aa);
3739f0612e4SBarry Smith   if (*ierr) return;
3749f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_SCALAR, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
3759f0612e4SBarry Smith }
3769f0612e4SBarry Smith 
petscshmgetdeallocatearrayscalar_(F90Array1d * a,PetscErrorCode * ierr PETSC_F90_2PTR_PROTO (ptrd))3779f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayscalar_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3789f0612e4SBarry Smith {
3799f0612e4SBarry Smith   PetscScalar *aa;
3809f0612e4SBarry Smith 
3819f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_SCALAR, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
3829f0612e4SBarry Smith   if (*ierr) return;
3839f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
3849f0612e4SBarry Smith   if (*ierr) return;
3859f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_SCALAR PETSC_F90_2PTR_PARAM(ptrd));
3869f0612e4SBarry Smith }
3879f0612e4SBarry Smith 
petscshmgetallocatearrayint_(PetscInt * start,PetscInt * len,F90Array1d * a,PetscErrorCode * ierr PETSC_F90_2PTR_PROTO (ptrd))3889f0612e4SBarry Smith PETSC_EXTERN void petscshmgetallocatearrayint_(PetscInt *start, PetscInt *len, F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3899f0612e4SBarry Smith {
390ce78bad3SBarry Smith   PetscInt *aa;
3919f0612e4SBarry Smith 
3929f0612e4SBarry Smith   *ierr = PetscShmgetAllocateArray(*len, sizeof(PetscInt), (void **)&aa);
3939f0612e4SBarry Smith   if (*ierr) return;
3949f0612e4SBarry Smith   *ierr = F90Array1dCreate(aa, MPIU_INT, *start, *len, a PETSC_F90_2PTR_PARAM(ptrd));
3959f0612e4SBarry Smith }
3969f0612e4SBarry Smith 
petscshmgetdeallocatearrayint_(F90Array1d * a,PetscErrorCode * ierr PETSC_F90_2PTR_PROTO (ptrd))3979f0612e4SBarry Smith PETSC_EXTERN void petscshmgetdeallocatearrayint_(F90Array1d *a, PetscErrorCode *ierr PETSC_F90_2PTR_PROTO(ptrd))
3989f0612e4SBarry Smith {
399ce78bad3SBarry Smith   PetscInt *aa;
4009f0612e4SBarry Smith 
4019f0612e4SBarry Smith   *ierr = F90Array1dAccess(a, MPIU_INT, (void **)&aa PETSC_F90_2PTR_PARAM(ptrd));
4029f0612e4SBarry Smith   if (*ierr) return;
4039f0612e4SBarry Smith   *ierr = PetscShmgetDeallocateArray((void **)&aa);
4049f0612e4SBarry Smith   if (*ierr) return;
4059f0612e4SBarry Smith   *ierr = F90Array1dDestroy(a, MPIU_INT PETSC_F90_2PTR_PARAM(ptrd));
4069f0612e4SBarry Smith }
4079f0612e4SBarry Smith 
4089f0612e4SBarry Smith #endif
409