xref: /honee/include/petsc-ceed-utils.h (revision 0aab724992399e7bc3a3c0143aa36f671b31063e)
1 // SPDX-FileCopyrightText: Copyright (c) 2017-2024, HONEE contributors.
2 // SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause
3 #pragma once
4 
5 #include <ceed.h>
6 #include <petsc-ceed.h>
7 #include <petscdm.h>
8 
/**
  @brief Copy the reference to a `Vec`.
         Note: `vec_copy` must be a valid pointer. If `*vec_copy` holds an existing `Vec`, `VecDestroy()` is called on it first.

  Collective across MPI processes.

  @param[in]   vec       `Vec` to reference
  @param[out]  vec_copy  Copy of reference

  @return An error code: 0 - success, otherwise - failure
**/
static inline PetscErrorCode VecReferenceCopy(Vec vec, Vec *vec_copy) {
  PetscFunctionBeginUser;
  // Take the new reference before destroying the old one so the call is safe when vec == *vec_copy
  PetscCall(PetscObjectReference((PetscObject)vec));
  // No-op if *vec_copy is NULL
  PetscCall(VecDestroy(vec_copy));
  *vec_copy = vec;
  PetscFunctionReturn(PETSC_SUCCESS);
}
27 
/**
  @brief Copy the reference to a `DM`.
         Note: `dm_copy` must be a valid pointer. If `*dm_copy` holds an existing `DM`, `DMDestroy()` is called on it first.

  Collective across MPI processes.

  @param[in]   dm       `DM` to reference
  @param[out]  dm_copy  Copy of reference

  @return An error code: 0 - success, otherwise - failure
**/
static inline PetscErrorCode DMReferenceCopy(DM dm, DM *dm_copy) {
  PetscFunctionBeginUser;
  // Take the new reference before destroying the old one so the call is safe when dm == *dm_copy
  PetscCall(PetscObjectReference((PetscObject)dm));
  // No-op if *dm_copy is NULL
  PetscCall(DMDestroy(dm_copy));
  *dm_copy = dm;
  PetscFunctionReturn(PETSC_SUCCESS);
}
46 
47 /**
48   @brief Translate PetscMemType to CeedMemType
49 
50   @param[in]  mem_type  PetscMemType
51 
52   @return Equivalent CeedMemType
53 **/
54 static inline CeedMemType MemTypePetscToCeed(PetscMemType mem_type) { return PetscMemTypeDevice(mem_type) ? CEED_MEM_DEVICE : CEED_MEM_HOST; }
55 
56 /**
57   @brief Translate array of `PetscInt` to `CeedInt`.
58     If the types differ, `array_ceed` is freed with `free()` and `array_petsc` is allocated with `malloc()`.
59     Caller is responsible for freeing `array_ceed` with `PetscFree()`.
60 
61   Not collective across MPI processes.
62 
63   @param[in]      num_entries  Number of array entries
64   @param[in,out]  array_petsc  Array of `PetscInt`
65   @param[out]     array_ceed   Array of `CeedInt`
66 
67   @return An error code: 0 - success, otherwise - failure
68 **/
69 static inline PetscErrorCode IntArrayCeedToPetsc(PetscCount num_entries, CeedInt **array_ceed, PetscInt **array_petsc) {
70   const CeedInt  int_c = 0;
71   const PetscInt int_p = 0;
72 
73   PetscFunctionBeginUser;
74   if (sizeof(int_c) == sizeof(int_p)) {
75     *array_petsc = (PetscInt *)*array_ceed;
76   } else {
77     *array_petsc = malloc(num_entries * sizeof(PetscInt));
78     for (PetscInt i = 0; i < num_entries; i++) (*array_petsc)[i] = (*array_ceed)[i];
79     free(*array_ceed);
80   }
81   *array_ceed = NULL;
82   PetscFunctionReturn(PETSC_SUCCESS);
83 }
84 
85 /**
86   @brief Translate array of `PetscInt` to `CeedInt`.
87     If the types differ, `array_petsc` is freed with `PetscFree()` and `array_ceed` is allocated with `PetscMalloc1()`.
88     Caller is responsible for freeing `array_ceed` with `PetscFree()`.
89 
90   Not collective across MPI processes.
91 
92   @param[in]      num_entries  Number of array entries
93   @param[in,out]  array_petsc  Array of `PetscInt`
94   @param[out]     array_ceed   Array of `CeedInt`
95 
96   @return An error code: 0 - success, otherwise - failure
97 **/
98 static inline PetscErrorCode IntArrayPetscToCeed(PetscCount num_entries, PetscInt **array_petsc, CeedInt **array_ceed) {
99   const CeedInt  int_c = 0;
100   const PetscInt int_p = 0;
101 
102   PetscFunctionBeginUser;
103   if (sizeof(int_c) == sizeof(int_p)) {
104     *array_ceed = (CeedInt *)*array_petsc;
105   } else {
106     PetscCall(PetscMalloc1(num_entries, array_ceed));
107     for (PetscInt i = 0; i < num_entries; i++) (*array_ceed)[i] = (*array_petsc)[i];
108     PetscCall(PetscFree(*array_petsc));
109   }
110   *array_petsc = NULL;
111   PetscFunctionReturn(PETSC_SUCCESS);
112 }
113 
114 /**
115   @brief Transfer array from PETSc `Vec` to `CeedVector`.
116 
117   Collective across MPI processes.
118 
119   @param[in]   X_petsc   PETSc `Vec`
120   @param[out]  mem_type  PETSc `MemType`
121   @param[out]  x_ceed    `CeedVector`
122 
123   @return An error code: 0 - success, otherwise - failure
124 **/
125 static inline PetscErrorCode VecPetscToCeed(Vec X_petsc, PetscMemType *mem_type, CeedVector x_ceed) {
126   PetscScalar *x;
127 
128   PetscFunctionBeginUser;
129   PetscCall(VecGetArrayAndMemType(X_petsc, &x, mem_type));
130   PetscCallCeed(CeedVectorReturnCeed(x_ceed), CeedVectorSetArray(x_ceed, MemTypePetscToCeed(*mem_type), CEED_USE_POINTER, x));
131   PetscFunctionReturn(PETSC_SUCCESS);
132 }
133 
134 /**
135   @brief Transfer array from `CeedVector` to PETSc `Vec`.
136 
137   Collective across MPI processes.
138 
139   @param[in]   x_ceed    `CeedVector`
140   @param[in]   mem_type  PETSc `MemType`
141   @param[out]  X_petsc   PETSc `Vec`
142 
143   @return An error code: 0 - success, otherwise - failure
144 **/
145 static inline PetscErrorCode VecCeedToPetsc(CeedVector x_ceed, PetscMemType mem_type, Vec X_petsc) {
146   PetscScalar *x;
147 
148   PetscFunctionBeginUser;
149   PetscCallCeed(CeedVectorReturnCeed(x_ceed), CeedVectorTakeArray(x_ceed, MemTypePetscToCeed(mem_type), &x));
150   PetscCall(VecRestoreArrayAndMemType(X_petsc, &x));
151   PetscFunctionReturn(PETSC_SUCCESS);
152 }
153 
154 /**
155   @brief Transfer read only array from PETSc `Vec` to `CeedVector`.
156 
157   Collective across MPI processes.
158 
159   @param[in]   X_petsc   PETSc `Vec`
160   @param[out]  mem_type  PETSc `MemType`
161   @param[out]  x_ceed    `CeedVector`
162 
163   @return An error code: 0 - success, otherwise - failure
164 **/
165 static inline PetscErrorCode VecReadPetscToCeed(Vec X_petsc, PetscMemType *mem_type, CeedVector x_ceed) {
166   PetscScalar *x;
167 
168   PetscFunctionBeginUser;
169   PetscCall(VecGetArrayReadAndMemType(X_petsc, (const PetscScalar **)&x, mem_type));
170   PetscCallCeed(CeedVectorReturnCeed(x_ceed), CeedVectorSetArray(x_ceed, MemTypePetscToCeed(*mem_type), CEED_USE_POINTER, x));
171   PetscFunctionReturn(PETSC_SUCCESS);
172 }
173 
174 /**
175   @brief Transfer read only array from `CeedVector` to PETSc `Vec`.
176 
177   Collective across MPI processes.
178 
179   @param[in]   x_ceed    `CeedVector`
180   @param[in]   mem_type  PETSc `MemType`
181   @param[out]  X_petsc   PETSc `Vec`
182 
183   @return An error code: 0 - success, otherwise - failure
184 **/
185 static inline PetscErrorCode VecReadCeedToPetsc(CeedVector x_ceed, PetscMemType mem_type, Vec X_petsc) {
186   PetscScalar *x;
187 
188   PetscFunctionBeginUser;
189   PetscCallCeed(CeedVectorReturnCeed(x_ceed), CeedVectorTakeArray(x_ceed, MemTypePetscToCeed(mem_type), &x));
190   PetscCall(VecRestoreArrayReadAndMemType(X_petsc, (const PetscScalar **)&x));
191   PetscFunctionReturn(PETSC_SUCCESS);
192 }
193 
194 /**
195   @brief Copy PETSc `Vec` data into `CeedVector`
196 
197   @param[in]   X_petsc PETSc `Vec`
198   @param[out]  x_ceed  `CeedVector`
199 
200   @return An error code: 0 - success, otherwise - failure
201 **/
202 static inline PetscErrorCode VecCopyPetscToCeed(Vec X_petsc, CeedVector x_ceed) {
203   PetscScalar *x;
204   PetscMemType mem_type;
205   PetscInt     X_size;
206   CeedSize     x_size;
207   Ceed         ceed;
208 
209   PetscFunctionBeginUser;
210   PetscCall(CeedVectorGetCeed(x_ceed, &ceed));
211   PetscCall(VecGetLocalSize(X_petsc, &X_size));
212   PetscCallCeed(ceed, CeedVectorGetLength(x_ceed, &x_size));
213   PetscCheck(X_size == x_size, PETSC_COMM_WORLD, PETSC_ERR_ARG_SIZ, "X_petsc (%" PetscInt_FMT ") and x_ceed (%" CeedSize_FMT ") must be same size",
214              X_size, x_size);
215 
216   PetscCall(VecGetArrayReadAndMemType(X_petsc, (const PetscScalar **)&x, &mem_type));
217   PetscCallCeed(ceed, CeedVectorSetArray(x_ceed, MemTypePetscToCeed(mem_type), CEED_COPY_VALUES, x));
218   PetscCall(VecRestoreArrayReadAndMemType(X_petsc, (const PetscScalar **)&x));
219   PetscCheck(CeedDestroy(&ceed) == CEED_ERROR_SUCCESS, PetscObjectComm((PetscObject)X_petsc), PETSC_ERR_LIB, "Destroying Ceed object failed");
220   PetscFunctionReturn(PETSC_SUCCESS);
221 }
222 
/**
  @brief Return the quadrature size from the eval mode, dimension, and number of components

  @param[in]  eval_mode       The basis evaluation mode
  @param[in]  dim             The basis dimension
  @param[in]  num_components  The basis number of components

  @return The quadrature size for the eval mode, or -1 if the eval mode is not supported
**/
static inline CeedInt GetCeedQuadratureSize(CeedEvalMode eval_mode, CeedInt dim, CeedInt num_components) {
  switch (eval_mode) {
    case CEED_EVAL_INTERP:
      return num_components;
    case CEED_EVAL_GRAD:
      // Gradients have one value per dimension per component
      return dim * num_components;
    default:
      // Other eval modes are not supported by this helper
      return -1;
  }
}
243 
/**
  @brief Convert from DMPolytopeType to CeedElemTopology

  @param[in]  cell_type  DMPolytopeType for the cell

  @return CeedElemTopology, or 0 if no equivalent CeedElemTopology was found
**/
static inline CeedElemTopology PolytopeTypePetscToCeed(DMPolytopeType cell_type) {
  switch (cell_type) {
    case DM_POLYTOPE_TRIANGLE:
      return CEED_TOPOLOGY_TRIANGLE;
    case DM_POLYTOPE_QUADRILATERAL:
      return CEED_TOPOLOGY_QUAD;
    case DM_POLYTOPE_TETRAHEDRON:
      return CEED_TOPOLOGY_TET;
    case DM_POLYTOPE_HEXAHEDRON:
      return CEED_TOPOLOGY_HEX;
    default:
      // 0 is not a valid CeedElemTopology value; used here as an "unsupported cell type" sentinel
      return 0;
  }
}
265 
266 /**
267   @brief Get default `MatType` for a given `VecType`
268 
269   Not collective across MPI processes.
270 
271   @param[in]   vec_type  `VecType` to check
272   @param[out]  mat_type  Default `MatType` for `VecType`
273 
274   @return An error code: 0 - success, otherwise - failure
275 **/
276 static inline PetscErrorCode DefaultMatTypeFromVecType(VecType vec_type, MatType *mat_type) {
277   PetscFunctionBeginUser;
278   if (strstr(vec_type, VECCUDA)) *mat_type = MATAIJCUSPARSE;
279   else if (strstr(vec_type, VECHIP)) *mat_type = MATAIJHIPSPARSE;
280   else if (strstr(vec_type, VECKOKKOS)) *mat_type = MATAIJKOKKOS;
281   else *mat_type = MATAIJ;
282   PetscFunctionReturn(PETSC_SUCCESS);
283 }
284 
285 /**
286   @brief Get the default `PetscMemType` for a `Mat`
287 
288   Not collective across MPI processes.
289 
290   @param[in]   mat       `Mat` to get `PetscMemType` for
291   @param[out]  mem_type  Current default `PetscMemType` for @a mat
292 
293   @return An error code: 0 - success, otherwise - failure
294 **/
295 static inline PetscErrorCode MatGetCurrentMemType(Mat mat, PetscMemType *mem_type) {
296   PetscBool bound;
297 
298   PetscFunctionBeginUser;
299   *mem_type = PETSC_MEMTYPE_HOST;
300   PetscCall(MatBoundToCPU(mat, &bound));
301   if (!bound) {
302     VecType vec_type;
303 
304     PetscCall(MatGetVecType(mat, &vec_type));
305     if (strstr(vec_type, VECCUDA)) *mem_type = PETSC_MEMTYPE_CUDA;
306     else if (strstr(vec_type, VECHIP)) *mem_type = PETSC_MEMTYPE_HIP;
307     else if (strstr(vec_type, VECKOKKOS)) *mem_type = PETSC_MEMTYPE_KOKKOS;
308   }
309   PetscFunctionReturn(PETSC_SUCCESS);
310 }
311