xref: /libCEED/backends/cuda-ref/ceed-cuda-ref-restriction.c (revision 081aa29d34bc3c9f1ddcf3f40577a15fe8f99266)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed.h>
9 #include <ceed/backend.h>
10 #include <ceed/jit-tools.h>
11 #include <cuda.h>
12 #include <cuda_runtime.h>
13 #include <stdbool.h>
14 #include <stddef.h>
15 #include <string.h>
16 
17 #include "../cuda/ceed-cuda-common.h"
18 #include "../cuda/ceed-cuda-compile.h"
19 #include "ceed-cuda-ref.h"
20 
21 //------------------------------------------------------------------------------
22 // Compile restriction kernels
23 //------------------------------------------------------------------------------
// Lazily JiT-compile the CUDA kernels for this restriction.
// Selects the kernel source matching the restriction type, compiles it into
// impl->module, and caches the Apply* kernel handles. Called from the apply
// path the first time the module is needed.
static inline int CeedElemRestrictionSetupCompile_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  bool                      is_deterministic;
  char                     *restriction_kernel_source;
  const char               *restriction_kernel_path;
  CeedInt                   num_elem, num_comp, elem_size, comp_stride;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  // Deterministic transpose data is only allocated when requested at creation,
  // so its presence selects the deterministic kernel variant
  is_deterministic = impl->d_l_vec_indices != NULL;

  // Compile CUDA kernels
  // Note: each CeedCompile_Cuda call passes exactly 6 name/value macro pairs;
  // the literal count argument must stay in sync with the pair list
  switch (rstr_type) {
    case CEED_RESTRICTION_STRIDED: {
      // Default strides match the backend E-vector layout {1, size, elem_size}
      CeedInt strides[3] = {1, num_elem * elem_size, elem_size};
      bool    has_backend_strides;

      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (!has_backend_strides) {
        // User-provided strides override the backend defaults
        CeedCallBackend(CeedElemRestrictionGetStrides(rstr, strides));
      }

      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-strided.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_STRIDE_NODES", strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM",
                                       strides[2]));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_STANDARD: {
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_ORIENTED: {
      const char *offset_kernel_path;

      // Append the plain offset kernels to the oriented source so the
      // unsigned apply path can use kernels from the same module
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedFree(&offset_kernel_path));
    } break;
    case CEED_RESTRICTION_CURL_ORIENTED: {
      const char *offset_kernel_path;

      // As above: offset kernels are appended so unsigned/unoriented applies
      // resolve against this same module
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-curl-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnorientedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnorientedTranspose));
      CeedCallBackend(CeedFree(&offset_kernel_path));
    } break;
    case CEED_RESTRICTION_POINTS: {
      // Early return is safe here: no path/source buffers allocated yet
      // LCOV_EXCL_START
      return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
      // LCOV_EXCL_STOP
    } break;
  }
  // Source strings are only needed for compilation; release them
  CeedCallBackend(CeedFree(&restriction_kernel_path));
  CeedCallBackend(CeedFree(&restriction_kernel_source));
  return CEED_ERROR_SUCCESS;
}
122 
123 //------------------------------------------------------------------------------
124 // Core apply restriction code
125 //------------------------------------------------------------------------------
// Shared apply implementation for all restriction flavors.
// t_mode selects L-vector -> E-vector (NOTRANSPOSE, overwrite) or
// E-vector -> L-vector (TRANSPOSE, sum into). use_signs/use_orients select
// which compiled kernel variant to launch for (curl-)oriented restrictions;
// the exact order of pointers in each args[] array is the kernel's ABI and
// must match the corresponding JiT kernel signature.
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));

  // Assemble kernel if needed
  if (!impl->module) {
    CeedCallBackend(CeedElemRestrictionSetupCompile_Cuda(rstr));
  }

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    CeedInt elem_size;

    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    // Block size tracks elem_size, clamped to [32, 1024]
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        void *args[] = {&impl->d_offsets, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else {
          // Unsigned apply falls back to the plain offset kernel
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else if (use_orients) {
          // Orientations without signs: unsigned kernel still needs curl_orients
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        } else {
          // Neither signs nor orientations: plain offset kernel
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // NOTE(review): this error return leaves the u/v device arrays checked
        // out. In practice it is unreachable because SetupCompile above errors
        // first for CEED_RESTRICTION_POINTS — confirm before relying on it
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  } else {
    // E-vector -> L-vector
    // Deterministic transpose data is only present when allocated at creation
    const bool    is_deterministic = impl->d_l_vec_indices != NULL;
    const CeedInt block_size       = 32;
    const CeedInt grid             = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        if (!is_deterministic) {
          // Non-deterministic path sums with atomics via offsets only
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          // Deterministic path uses precomputed transpose indices/offsets
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else if (use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // NOTE(review): same caveat as the no-transpose POINTS case above
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  }

  // Kernel launches are asynchronous; mark the request complete immediately
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}
299 
300 //------------------------------------------------------------------------------
301 // Apply restriction
302 //------------------------------------------------------------------------------
303 static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
304   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
305 }
306 
307 //------------------------------------------------------------------------------
308 // Apply unsigned restriction
309 //------------------------------------------------------------------------------
310 static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
311                                                  CeedRequest *request) {
312   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
313 }
314 
315 //------------------------------------------------------------------------------
316 // Apply unoriented restriction
317 //------------------------------------------------------------------------------
318 static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
319                                                    CeedRequest *request) {
320   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
321 }
322 
323 //------------------------------------------------------------------------------
324 // Get offsets
325 //------------------------------------------------------------------------------
326 static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
327   CeedElemRestriction_Cuda *impl;
328 
329   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
330   switch (mem_type) {
331     case CEED_MEM_HOST:
332       *offsets = impl->h_offsets;
333       break;
334     case CEED_MEM_DEVICE:
335       *offsets = impl->d_offsets;
336       break;
337   }
338   return CEED_ERROR_SUCCESS;
339 }
340 
341 //------------------------------------------------------------------------------
342 // Get orientations
343 //------------------------------------------------------------------------------
344 static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
345   CeedElemRestriction_Cuda *impl;
346   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
347 
348   switch (mem_type) {
349     case CEED_MEM_HOST:
350       *orients = impl->h_orients;
351       break;
352     case CEED_MEM_DEVICE:
353       *orients = impl->d_orients;
354       break;
355   }
356   return CEED_ERROR_SUCCESS;
357 }
358 
359 //------------------------------------------------------------------------------
360 // Get curl-conforming orientations
361 //------------------------------------------------------------------------------
362 static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
363   CeedElemRestriction_Cuda *impl;
364   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
365 
366   switch (mem_type) {
367     case CEED_MEM_HOST:
368       *curl_orients = impl->h_curl_orients;
369       break;
370     case CEED_MEM_DEVICE:
371       *curl_orients = impl->d_curl_orients;
372       break;
373   }
374   return CEED_ERROR_SUCCESS;
375 }
376 
377 //------------------------------------------------------------------------------
378 // Destroy restriction
379 //------------------------------------------------------------------------------
// Release all backend data for the restriction.
// Only *_owned arrays are freed; *_borrowed pointers remain the caller's
// responsibility. cudaFree(NULL) and CeedFree of NULL are no-ops, so fields
// that were never allocated (e.g. deterministic transpose data) are safe here.
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  // Module is compiled lazily, so it may never have been created
  if (impl->module) {
    CeedCallCuda(ceed, cuModuleUnload(impl->module));
  }
  // Casts drop const qualifiers from the struct fields for the free calls
  CeedCallBackend(CeedFree(&impl->h_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_l_vec_indices));
  CeedCallBackend(CeedFree(&impl->h_orients_owned));
  CeedCallCuda(ceed, cudaFree((bool *)impl->d_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_curl_orients_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt8 *)impl->d_curl_orients_owned));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}
401 
402 //------------------------------------------------------------------------------
403 // Create transpose offsets and indices
404 //------------------------------------------------------------------------------
405 static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt *indices) {
406   Ceed                      ceed;
407   bool                     *is_node;
408   CeedSize                  l_size;
409   CeedInt                   num_elem, elem_size, num_comp, num_nodes = 0;
410   CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
411   CeedElemRestriction_Cuda *impl;
412 
413   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
414   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
415   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
416   CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
417   CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
418   CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
419   const CeedInt size_indices = num_elem * elem_size;
420 
421   // Count num_nodes
422   CeedCallBackend(CeedCalloc(l_size, &is_node));
423 
424   for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
425   for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
426   impl->num_nodes = num_nodes;
427 
428   // L-vector offsets array
429   CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
430   CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
431   for (CeedInt i = 0, j = 0; i < l_size; i++) {
432     if (is_node[i]) {
433       l_vec_indices[j] = i;
434       ind_to_offset[i] = j++;
435     }
436   }
437   CeedCallBackend(CeedFree(&is_node));
438 
439   // Compute transpose offsets and indices
440   const CeedInt size_offsets = num_nodes + 1;
441 
442   CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
443   CeedCallBackend(CeedMalloc(size_indices, &t_indices));
444   // Count node multiplicity
445   for (CeedInt e = 0; e < num_elem; ++e) {
446     for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
447   }
448   // Convert to running sum
449   for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
450   // List all E-vec indices associated with L-vec node
451   for (CeedInt e = 0; e < num_elem; ++e) {
452     for (CeedInt i = 0; i < elem_size; ++i) {
453       const CeedInt lid = elem_size * e + i;
454       const CeedInt gid = indices[lid];
455 
456       t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
457     }
458   }
459   // Reset running sum
460   for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
461   t_offsets[0] = 0;
462 
463   // Copy data to device
464   // -- L-vector indices
465   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
466   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
467   // -- Transpose offsets
468   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
469   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
470   // -- Transpose indices
471   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
472   CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));
473 
474   // Cleanup
475   CeedCallBackend(CeedFree(&ind_to_offset));
476   CeedCallBackend(CeedFree(&l_vec_indices));
477   CeedCallBackend(CeedFree(&t_offsets));
478   CeedCallBackend(CeedFree(&t_indices));
479   return CEED_ERROR_SUCCESS;
480 }
481 
482 //------------------------------------------------------------------------------
483 // Create restriction
484 //------------------------------------------------------------------------------
485 int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *offsets, const bool *orients,
486                                    const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
487   Ceed                      ceed, ceed_parent;
488   bool                      is_deterministic;
489   CeedInt                   num_elem, elem_size;
490   CeedRestrictionType       rstr_type;
491   CeedElemRestriction_Cuda *impl;
492 
493   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
494   CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
495   CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
496   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
497   CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
498   CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
499   const CeedInt size = num_elem * elem_size;
500 
501   CeedCallBackend(CeedCalloc(1, &impl));
502   impl->num_nodes = size;
503   CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));
504 
505   // Set layouts
506   {
507     bool    has_backend_strides;
508     CeedInt layout[3] = {1, size, elem_size};
509 
510     CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));
511     if (rstr_type == CEED_RESTRICTION_STRIDED) {
512       CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
513       if (has_backend_strides) {
514         CeedCallBackend(CeedElemRestrictionSetLLayout(rstr, layout));
515       }
516     }
517   }
518 
519   // Set up device offset/orientation arrays
520   if (rstr_type != CEED_RESTRICTION_STRIDED) {
521     switch (mem_type) {
522       case CEED_MEM_HOST: {
523         CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, size, &impl->h_offsets_owned, &impl->h_offsets_borrowed, &impl->h_offsets));
524         CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_owned, size * sizeof(CeedInt)));
525         CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_offsets_owned, impl->h_offsets, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
526         impl->d_offsets = (CeedInt *)impl->d_offsets_owned;
527         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, offsets));
528       } break;
529       case CEED_MEM_DEVICE: {
530         CeedCallBackend(CeedSetDeviceCeedIntArray_Cuda(ceed, offsets, copy_mode, size, &impl->d_offsets_owned, &impl->d_offsets_borrowed,
531                                                        (const CeedInt **)&impl->d_offsets));
532         CeedCallBackend(CeedMalloc(size, &impl->h_offsets_owned));
533         CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->h_offsets_owned, impl->d_offsets, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
534         impl->h_offsets = impl->h_offsets_owned;
535         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, offsets));
536       } break;
537     }
538 
539     // Orientation data
540     if (rstr_type == CEED_RESTRICTION_ORIENTED) {
541       switch (mem_type) {
542         case CEED_MEM_HOST: {
543           CeedCallBackend(CeedSetHostBoolArray(orients, copy_mode, size, &impl->h_orients_owned, &impl->h_orients_borrowed, &impl->h_orients));
544           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients_owned, size * sizeof(bool)));
545           CeedCallCuda(ceed, cudaMemcpy((bool *)impl->d_orients_owned, impl->h_orients, size * sizeof(bool), cudaMemcpyHostToDevice));
546           impl->d_orients = impl->d_orients_owned;
547         } break;
548         case CEED_MEM_DEVICE: {
549           CeedCallBackend(CeedSetDeviceBoolArray_Cuda(ceed, orients, copy_mode, size, &impl->d_orients_owned, &impl->d_orients_borrowed,
550                                                       (const bool **)&impl->d_orients));
551           CeedCallBackend(CeedMalloc(size, &impl->h_orients_owned));
552           CeedCallCuda(ceed, cudaMemcpy((bool *)impl->h_orients_owned, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
553           impl->h_orients = impl->h_orients_owned;
554         } break;
555       }
556     } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
557       switch (mem_type) {
558         case CEED_MEM_HOST: {
559           CeedCallBackend(CeedSetHostCeedInt8Array(curl_orients, copy_mode, 3 * size, &impl->h_curl_orients_owned, &impl->h_curl_orients_borrowed,
560                                                    &impl->h_curl_orients));
561           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients_owned, 3 * size * sizeof(CeedInt8)));
562           CeedCallCuda(ceed,
563                        cudaMemcpy((CeedInt8 *)impl->d_curl_orients_owned, impl->h_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
564           impl->d_curl_orients = impl->d_curl_orients_owned;
565         } break;
566         case CEED_MEM_DEVICE: {
567           CeedCallBackend(CeedSetDeviceCeedInt8Array_Cuda(ceed, curl_orients, copy_mode, 3 * size, &impl->d_curl_orients_owned,
568                                                           &impl->d_curl_orients_borrowed, (const CeedInt8 **)&impl->d_curl_orients));
569           CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_owned));
570           CeedCallCuda(ceed,
571                        cudaMemcpy((CeedInt8 *)impl->h_curl_orients_owned, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
572           impl->h_curl_orients = impl->h_curl_orients_owned;
573         } break;
574       }
575     }
576   }
577 
578   // Register backend functions
579   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
580   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
581   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
582   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
583   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
584   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
585   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
586   return CEED_ERROR_SUCCESS;
587 }
588 
589 //------------------------------------------------------------------------------
590