xref: /libCEED/backends/cuda-ref/ceed-cuda-ref-restriction.c (revision e03fef56705b317edc4a39dfee40c8366660a6d6)
1 // Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed.h>
9 #include <ceed/backend.h>
10 #include <ceed/jit-tools.h>
11 #include <cuda.h>
12 #include <cuda_runtime.h>
13 #include <stdbool.h>
14 #include <stddef.h>
15 #include <string.h>
16 
17 #include "../cuda/ceed-cuda-common.h"
18 #include "../cuda/ceed-cuda-compile.h"
19 #include "ceed-cuda-ref.h"
20 
21 //------------------------------------------------------------------------------
22 // Compile restriction kernels
23 //------------------------------------------------------------------------------
// Lazily JIT compile the restriction kernels for this restriction's type.
// Loads the matching jit-source header(s), compiles them into impl->module with
// the restriction sizes baked in as compile-time defines, and looks up the
// apply kernels (no-transpose / transpose, plus unsigned/unoriented variants
// where the restriction type has them).
static inline int CeedElemRestrictionSetupCompile_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  bool                      is_deterministic;
  char                     *restriction_kernel_source;
  const char               *restriction_kernel_path;
  CeedInt                   num_elem, num_comp, elem_size, comp_stride;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    // AtPoints restrictions are padded out to the max number of points in any element
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr, &elem_size));
  } else {
    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  }
  // Deterministic transpose kernels are selected when the transpose index arrays were built
  is_deterministic = impl->d_l_vec_indices != NULL;

  // Compile CUDA kernels
  switch (rstr_type) {
    case CEED_RESTRICTION_STRIDED: {
      bool    has_backend_strides;
      CeedInt strides[3] = {1, num_elem * elem_size, elem_size};

      // Default strides above are only used when the backend chooses the layout
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(rstr, strides));
      }

      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-strided.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_STRIDE_NODES", strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM",
                                       strides[2]));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_POINTS: {
      const char *offset_kernel_path;
      char      **file_paths     = NULL;
      CeedInt     num_file_paths = 0;

      // AtPoints kernels also need the offset kernel source appended to the buffer
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-at-points.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceAndInitializeBuffer(ceed, restriction_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "AtPointsTranspose", &impl->ApplyTranspose));
      // Cleanup
      CeedCallBackend(CeedFree(&offset_kernel_path));
      for (CeedInt i = 0; i < num_file_paths; i++) CeedCallBackend(CeedFree(&file_paths[i]));
      CeedCallBackend(CeedFree(&file_paths));
    } break;
    case CEED_RESTRICTION_STANDARD: {
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_ORIENTED: {
      const char *offset_kernel_path;
      char      **file_paths     = NULL;
      CeedInt     num_file_paths = 0;

      // Oriented restrictions also compile the plain offset kernels for the "unsigned" apply variants
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceAndInitializeBuffer(ceed, restriction_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnsignedTranspose));
      // Cleanup
      CeedCallBackend(CeedFree(&offset_kernel_path));
      for (CeedInt i = 0; i < num_file_paths; i++) CeedCallBackend(CeedFree(&file_paths[i]));
      CeedCallBackend(CeedFree(&file_paths));
    } break;
    case CEED_RESTRICTION_CURL_ORIENTED: {
      const char *offset_kernel_path;
      char      **file_paths     = NULL;
      CeedInt     num_file_paths = 0;

      // Curl-oriented restrictions get signed, unsigned, and unoriented (plain offset) kernel variants
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-curl-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceAndInitializeBuffer(ceed, restriction_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnorientedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnorientedTranspose));
      // Cleanup
      CeedCallBackend(CeedFree(&offset_kernel_path));
      for (CeedInt i = 0; i < num_file_paths; i++) CeedCallBackend(CeedFree(&file_paths[i]));
      CeedCallBackend(CeedFree(&file_paths));
    } break;
  }
  CeedCallBackend(CeedFree(&restriction_kernel_path));
  CeedCallBackend(CeedFree(&restriction_kernel_source));
  return CEED_ERROR_SUCCESS;
}
152 
153 //------------------------------------------------------------------------------
154 // Core apply restriction code
155 //------------------------------------------------------------------------------
// Apply the restriction (L-vector <-> E-vector) on the device.
// t_mode selects direction: CEED_NOTRANSPOSE scatters L-vector -> E-vector
// (overwrite), CEED_TRANSPOSE gathers E-vector -> L-vector (sum into).
// use_signs/use_orients select the signed/oriented kernel variants for
// oriented and curl-oriented restrictions.
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));

  // Assemble kernel if needed (kernels are compiled lazily on first apply)
  if (!impl->module) {
    CeedCallBackend(CeedElemRestrictionSetupCompile_Cuda(rstr));
  }

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    CeedInt elem_size;

    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    // Block size: elem_size clamped to [32, 1024] threads
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS:
      case CEED_RESTRICTION_STANDARD: {
        void *args[] = {&impl->d_offsets, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else {
          // Unsigned apply falls back to the plain offset kernel (no sign flips)
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else if (use_orients) {
          // Orientations without signs: unsigned curl-oriented kernel
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        } else {
          // Neither signs nor orientations: plain offset kernel
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedNoTranspose, grid, block_size, args));
        }
      } break;
    }
  } else {
    // E-vector -> L-vector
    // Deterministic kernels take the precomputed transpose index arrays;
    // non-deterministic kernels take the forward offsets (atomics in kernel --
    // TODO confirm against jit-source)
    const bool    is_deterministic = impl->d_l_vec_indices != NULL;
    const CeedInt block_size       = 32;
    const CeedInt grid             = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_offsets, &impl->d_points_per_elem, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_points_per_elem, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_STANDARD: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else if (use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          }
        }
      } break;
    }
  }

  // Mark request complete; kernel launches above are asynchronous
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}
331 
332 //------------------------------------------------------------------------------
333 // Apply restriction
334 //------------------------------------------------------------------------------
335 static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
336   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
337 }
338 
339 //------------------------------------------------------------------------------
340 // Apply unsigned restriction
341 //------------------------------------------------------------------------------
342 static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
343                                                  CeedRequest *request) {
344   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
345 }
346 
347 //------------------------------------------------------------------------------
348 // Apply unoriented restriction
349 //------------------------------------------------------------------------------
350 static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
351                                                    CeedRequest *request) {
352   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
353 }
354 
355 //------------------------------------------------------------------------------
356 // Get offsets
357 //------------------------------------------------------------------------------
358 static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
359   CeedElemRestriction_Cuda *impl;
360   CeedRestrictionType       rstr_type;
361 
362   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
363   CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
364   switch (mem_type) {
365     case CEED_MEM_HOST:
366       *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->h_offsets_at_points : impl->h_offsets;
367       break;
368     case CEED_MEM_DEVICE:
369       *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->d_offsets_at_points : impl->d_offsets;
370       break;
371   }
372   return CEED_ERROR_SUCCESS;
373 }
374 
375 //------------------------------------------------------------------------------
376 // Get orientations
377 //------------------------------------------------------------------------------
378 static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
379   CeedElemRestriction_Cuda *impl;
380   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
381 
382   switch (mem_type) {
383     case CEED_MEM_HOST:
384       *orients = impl->h_orients;
385       break;
386     case CEED_MEM_DEVICE:
387       *orients = impl->d_orients;
388       break;
389   }
390   return CEED_ERROR_SUCCESS;
391 }
392 
393 //------------------------------------------------------------------------------
394 // Get curl-conforming orientations
395 //------------------------------------------------------------------------------
396 static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
397   CeedElemRestriction_Cuda *impl;
398   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
399 
400   switch (mem_type) {
401     case CEED_MEM_HOST:
402       *curl_orients = impl->h_curl_orients;
403       break;
404     case CEED_MEM_DEVICE:
405       *curl_orients = impl->d_curl_orients;
406       break;
407   }
408   return CEED_ERROR_SUCCESS;
409 }
410 
411 //------------------------------------------------------------------------------
412 // Get offset for padded AtPoints E-layout
413 //------------------------------------------------------------------------------
414 static int CeedElemRestrictionGetAtPointsElementOffset_Cuda(CeedElemRestriction rstr, CeedInt elem, CeedSize *elem_offset) {
415   CeedInt layout[3];
416 
417   CeedCallBackend(CeedElemRestrictionGetELayout(rstr, layout));
418   *elem_offset = 0 * layout[0] + 0 * layout[1] + elem * layout[2];
419   return CEED_ERROR_SUCCESS;
420 }
421 
422 //------------------------------------------------------------------------------
423 // Destroy restriction
424 //------------------------------------------------------------------------------
// Destroy restriction backend data: unload the JIT module (if compiled) and
// release all owned host/device arrays. Only `*_owned` pointers are freed;
// borrowed arrays belong to the caller and are left alone.
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  // Module is only present if a kernel was ever applied (lazy compile)
  if (impl->module) {
    CeedCallCuda(ceed, cuModuleUnload(impl->module));
  }
  CeedCallBackend(CeedFree(&impl->h_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_owned));
  // Transpose index arrays (only allocated for deterministic restrictions)
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_l_vec_indices));
  CeedCallBackend(CeedFree(&impl->h_orients_owned));
  CeedCallCuda(ceed, cudaFree((bool *)impl->d_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_curl_orients_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt8 *)impl->d_curl_orients_owned));
  // AtPoints data
  CeedCallBackend(CeedFree(&impl->h_offsets_at_points_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_at_points_owned));
  CeedCallBackend(CeedFree(&impl->h_points_per_elem_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_points_per_elem_owned));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}
450 
451 //------------------------------------------------------------------------------
452 // Create transpose offsets and indices
453 //------------------------------------------------------------------------------
454 static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt elem_size, const CeedInt *indices) {
455   Ceed                      ceed;
456   bool                     *is_node;
457   CeedSize                  l_size;
458   CeedInt                   num_elem, num_comp, num_nodes = 0;
459   CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
460   CeedRestrictionType       rstr_type;
461   CeedElemRestriction_Cuda *impl;
462 
463   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
464   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
465   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
466   CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
467   CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
468   CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
469   const CeedInt size_indices = num_elem * elem_size;
470 
471   // Count num_nodes
472   CeedCallBackend(CeedCalloc(l_size, &is_node));
473 
474   for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
475   for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
476   impl->num_nodes = num_nodes;
477 
478   // L-vector offsets array
479   CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
480   CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
481   for (CeedInt i = 0, j = 0; i < l_size; i++) {
482     if (is_node[i]) {
483       l_vec_indices[j] = i;
484       ind_to_offset[i] = j++;
485     }
486   }
487   CeedCallBackend(CeedFree(&is_node));
488 
489   // Compute transpose offsets and indices
490   const CeedInt size_offsets = num_nodes + 1;
491 
492   CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
493   CeedCallBackend(CeedMalloc(size_indices, &t_indices));
494   // Count node multiplicity
495   for (CeedInt e = 0; e < num_elem; ++e) {
496     for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
497   }
498   // Convert to running sum
499   for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
500   // List all E-vec indices associated with L-vec node
501   for (CeedInt e = 0; e < num_elem; ++e) {
502     for (CeedInt i = 0; i < elem_size; ++i) {
503       const CeedInt lid = elem_size * e + i;
504       const CeedInt gid = indices[lid];
505 
506       t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
507     }
508   }
509   // Reset running sum
510   for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
511   t_offsets[0] = 0;
512 
513   // Copy data to device
514   // -- L-vector indices
515   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
516   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
517   // -- Transpose offsets
518   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
519   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
520   // -- Transpose indices
521   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
522   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));
523 
524   // Cleanup
525   CeedCallBackend(CeedFree(&ind_to_offset));
526   CeedCallBackend(CeedFree(&l_vec_indices));
527   CeedCallBackend(CeedFree(&t_offsets));
528   CeedCallBackend(CeedFree(&t_indices));
529   return CEED_ERROR_SUCCESS;
530 }
531 
532 //------------------------------------------------------------------------------
533 // Create restriction
534 //------------------------------------------------------------------------------
535 int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *offsets, const bool *orients,
536                                    const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
537   Ceed                      ceed, ceed_parent;
538   bool                      is_deterministic;
539   CeedInt                   num_elem, num_comp, elem_size;
540   CeedRestrictionType       rstr_type;
541   CeedElemRestriction_Cuda *impl;
542 
543   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
544   CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
545   CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
546   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
547   CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
548   CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
549   CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
550   // Use max number of points as elem size for AtPoints restrictions
551   if (rstr_type == CEED_RESTRICTION_POINTS) {
552     CeedInt max_points = 0;
553 
554     for (CeedInt i = 0; i < num_elem; i++) {
555       max_points = CeedIntMax(max_points, offsets[i + 1] - offsets[i]);
556     }
557     elem_size = max_points;
558   }
559   const CeedInt size = num_elem * elem_size;
560 
561   CeedCallBackend(CeedCalloc(1, &impl));
562   impl->num_nodes = size;
563   CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));
564 
565   // Set layouts
566   {
567     bool    has_backend_strides;
568     CeedInt layout[3] = {1, size, elem_size};
569 
570     CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));
571     if (rstr_type == CEED_RESTRICTION_STRIDED) {
572       CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
573       if (has_backend_strides) {
574         CeedCallBackend(CeedElemRestrictionSetLLayout(rstr, layout));
575       }
576     }
577   }
578 
579   // Pad AtPoints indices
580   if (rstr_type == CEED_RESTRICTION_POINTS) {
581     CeedSize offsets_len = elem_size * num_elem, at_points_size = num_elem + 1;
582     CeedInt  max_points = elem_size, *offsets_padded, *points_per_elem;
583 
584     CeedCheck(mem_type == CEED_MEM_HOST, ceed, CEED_ERROR_BACKEND, "only MemType Host supported when creating AtPoints restriction");
585     CeedCallBackend(CeedMalloc(offsets_len, &offsets_padded));
586     CeedCallBackend(CeedMalloc(num_elem, &points_per_elem));
587     for (CeedInt i = 0; i < num_elem; i++) {
588       CeedInt num_points = offsets[i + 1] - offsets[i];
589 
590       points_per_elem[i] = num_points;
591       at_points_size += num_points;
592       // -- Copy all points in element
593       for (CeedInt j = 0; j < num_points; j++) {
594         offsets_padded[i * max_points + j] = offsets[offsets[i] + j] * num_comp;
595       }
596       // -- Replicate out last point in element
597       for (CeedInt j = num_points; j < max_points; j++) {
598         offsets_padded[i * max_points + j] = offsets[offsets[i] + num_points - 1] * num_comp;
599       }
600     }
601     CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, at_points_size, &impl->h_offsets_at_points_owned, &impl->h_offsets_at_points_borrowed,
602                                             &impl->h_offsets_at_points));
603     CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_at_points_owned, at_points_size * sizeof(CeedInt)));
604     CeedCallCuda(ceed, cudaMemcpy((CeedInt **)impl->d_offsets_at_points_owned, impl->h_offsets_at_points, at_points_size * sizeof(CeedInt),
605                                   cudaMemcpyHostToDevice));
606     impl->d_offsets_at_points = (CeedInt *)impl->d_offsets_at_points_owned;
607 
608     // -- Use padded offsets for the rest of the setup
609     offsets   = (const CeedInt *)offsets_padded;
610     copy_mode = CEED_OWN_POINTER;
611     CeedCallBackend(CeedElemRestrictionSetAtPointsEVectorSize(rstr, elem_size * num_elem * num_comp));
612 
613     // -- Points per element
614     CeedCallBackend(CeedSetHostCeedIntArray(points_per_elem, CEED_OWN_POINTER, num_elem, &impl->h_points_per_elem_owned,
615                                             &impl->h_points_per_elem_borrowed, &impl->h_points_per_elem));
616     CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_points_per_elem_owned, num_elem * sizeof(CeedInt)));
617     CeedCallCuda(ceed,
618                  cudaMemcpy((CeedInt **)impl->d_points_per_elem_owned, impl->h_points_per_elem, num_elem * sizeof(CeedInt), cudaMemcpyHostToDevice));
619     impl->d_points_per_elem = (CeedInt *)impl->d_points_per_elem_owned;
620   }
621 
622   // Set up device offset/orientation arrays
623   if (rstr_type != CEED_RESTRICTION_STRIDED) {
624     switch (mem_type) {
625       case CEED_MEM_HOST: {
626         CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, size, &impl->h_offsets_owned, &impl->h_offsets_borrowed, &impl->h_offsets));
627         CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_owned, size * sizeof(CeedInt)));
628         CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_offsets_owned, impl->h_offsets, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
629         impl->d_offsets = (CeedInt *)impl->d_offsets_owned;
630         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
631       } break;
632       case CEED_MEM_DEVICE: {
633         CeedCallBackend(CeedSetDeviceCeedIntArray_Cuda(ceed, offsets, copy_mode, size, &impl->d_offsets_owned, &impl->d_offsets_borrowed,
634                                                        (const CeedInt **)&impl->d_offsets));
635         CeedCallBackend(CeedMalloc(size, &impl->h_offsets_owned));
636         CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->h_offsets_owned, impl->d_offsets, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
637         impl->h_offsets = impl->h_offsets_owned;
638         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
639       } break;
640     }
641 
642     // Orientation data
643     if (rstr_type == CEED_RESTRICTION_ORIENTED) {
644       switch (mem_type) {
645         case CEED_MEM_HOST: {
646           CeedCallBackend(CeedSetHostBoolArray(orients, copy_mode, size, &impl->h_orients_owned, &impl->h_orients_borrowed, &impl->h_orients));
647           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients_owned, size * sizeof(bool)));
648           CeedCallCuda(ceed, cudaMemcpy((bool *)impl->d_orients_owned, impl->h_orients, size * sizeof(bool), cudaMemcpyHostToDevice));
649           impl->d_orients = impl->d_orients_owned;
650         } break;
651         case CEED_MEM_DEVICE: {
652           CeedCallBackend(CeedSetDeviceBoolArray_Cuda(ceed, orients, copy_mode, size, &impl->d_orients_owned, &impl->d_orients_borrowed,
653                                                       (const bool **)&impl->d_orients));
654           CeedCallBackend(CeedMalloc(size, &impl->h_orients_owned));
655           CeedCallCuda(ceed, cudaMemcpy((bool *)impl->h_orients_owned, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
656           impl->h_orients = impl->h_orients_owned;
657         } break;
658       }
659     } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
660       switch (mem_type) {
661         case CEED_MEM_HOST: {
662           CeedCallBackend(CeedSetHostCeedInt8Array(curl_orients, copy_mode, 3 * size, &impl->h_curl_orients_owned, &impl->h_curl_orients_borrowed,
663                                                    &impl->h_curl_orients));
664           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients_owned, 3 * size * sizeof(CeedInt8)));
665           CeedCallCuda(ceed,
666                        cudaMemcpy((CeedInt8 *)impl->d_curl_orients_owned, impl->h_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
667           impl->d_curl_orients = impl->d_curl_orients_owned;
668         } break;
669         case CEED_MEM_DEVICE: {
670           CeedCallBackend(CeedSetDeviceCeedInt8Array_Cuda(ceed, curl_orients, copy_mode, 3 * size, &impl->d_curl_orients_owned,
671                                                           &impl->d_curl_orients_borrowed, (const CeedInt8 **)&impl->d_curl_orients));
672           CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_owned));
673           CeedCallCuda(ceed,
674                        cudaMemcpy((CeedInt8 *)impl->h_curl_orients_owned, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
675           impl->h_curl_orients = impl->h_curl_orients_owned;
676         } break;
677       }
678     }
679   }
680 
681   // Register backend functions
682   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
683   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
684   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
685   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
686   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
687   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
688   if (rstr_type == CEED_RESTRICTION_POINTS) {
689     CeedCallBackend(
690         CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetAtPointsElementOffset", CeedElemRestrictionGetAtPointsElementOffset_Cuda));
691   }
692   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
693   return CEED_ERROR_SUCCESS;
694 }
695 
696 //------------------------------------------------------------------------------
697