xref: /libCEED/backends/cuda-ref/ceed-cuda-ref-restriction.c (revision 697283f1b4bcfd5a4fa41321a5e73a7d367713ae)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed.h>
9 #include <ceed/backend.h>
10 #include <ceed/jit-tools.h>
11 #include <cuda.h>
12 #include <cuda_runtime.h>
13 #include <stdbool.h>
14 #include <stddef.h>
15 #include <string.h>
16 
17 #include "../cuda/ceed-cuda-common.h"
18 #include "../cuda/ceed-cuda-compile.h"
19 #include "ceed-cuda-ref.h"
20 
21 //------------------------------------------------------------------------------
22 // Core apply restriction code
23 //------------------------------------------------------------------------------
// Shared implementation for all three apply entry points.
//
// Moves data between an L-vector and an E-vector by launching the restriction
// kernel matching (restriction type, transpose mode).  The `use_signs` /
// `use_orients` flags let the unsigned/unoriented entry points fall back to a
// cheaper kernel that ignores sign and/or curl-orientation data.
//
// Protocol: read-borrow u, write- or readwrite-borrow v, launch exactly one
// kernel, restore both arrays.  Kernel launches are asynchronous; completion
// ordering is left to the CUDA stream.
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedInt                   num_elem, elem_size;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;
  CUfunction                kernel;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  const CeedInt num_nodes = impl->num_nodes;

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    // Block size follows the element size, clamped to [32, 1024] (warp size to
    // CUDA's per-block thread limit).
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        // No index array needed; strides are baked into the kernel at JIT time
        kernel       = impl->StridedNoTranspose;
        void *args[] = {&num_elem, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        kernel       = impl->OffsetNoTranspose;
        void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          kernel       = impl->OrientedNoTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        } else {
          // Unsigned apply: drop the sign data, use the plain offset kernel
          kernel       = impl->OffsetNoTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          kernel       = impl->CurlOrientedNoTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        } else if (use_orients) {
          // Orientations without signs -> unsigned curl-oriented kernel
          kernel       = impl->CurlOrientedUnsignedNoTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        } else {
          // Neither signs nor orientations -> plain offset kernel
          kernel       = impl->OffsetNoTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // LCOV_EXCL_START
        // NOTE(review): this early return leaves the u/v array borrows
        // unrestored — TODO confirm acceptable on this unsupported-path error
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  } else {
    // E-vector -> L-vector
    const CeedInt block_size = 32;
    const CeedInt grid       = CeedDivUpInt(num_nodes, block_size);

    // Transpose kernels come in two flavors: atomic-based (*Transpose, faster,
    // nondeterministic summation order) and deterministic (*TransposeDet, uses
    // precomputed transpose offsets/indices).  Only one set is compiled per
    // restriction (see CeedElemRestrictionCreate_Cuda), so the non-NULL one is
    // selected here.
    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        kernel       = impl->StridedTranspose;
        void *args[] = {&num_elem, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        if (impl->OffsetTranspose) {
          kernel       = impl->OffsetTranspose;
          void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        } else {
          kernel       = impl->OffsetTransposeDet;
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (impl->OrientedTranspose) {
            kernel       = impl->OrientedTranspose;
            void *args[] = {&num_elem, &impl->d_ind, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          } else {
            kernel       = impl->OrientedTransposeDet;
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          }
        } else {
          // Unsigned apply: fall back to the offset kernels
          if (impl->OffsetTranspose) {
            kernel       = impl->OffsetTranspose;
            void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          } else {
            kernel       = impl->OffsetTransposeDet;
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (impl->CurlOrientedTranspose) {
            kernel       = impl->CurlOrientedTranspose;
            void *args[] = {&num_elem, &impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          } else {
            kernel       = impl->CurlOrientedTransposeDet;
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          }
        } else if (use_orients) {
          if (impl->CurlOrientedUnsignedTranspose) {
            kernel       = impl->CurlOrientedUnsignedTranspose;
            void *args[] = {&num_elem, &impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          } else {
            kernel       = impl->CurlOrientedUnsignedTransposeDet;
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          }
        } else {
          if (impl->OffsetTranspose) {
            kernel       = impl->OffsetTranspose;
            void *args[] = {&num_elem, &impl->d_ind, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          } else {
            kernel       = impl->OffsetTransposeDet;
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, kernel, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  }

  // Mark the (optional) request complete
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}
213 
214 //------------------------------------------------------------------------------
215 // Apply restriction
216 //------------------------------------------------------------------------------
217 static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
218   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
219 }
220 
221 //------------------------------------------------------------------------------
222 // Apply unsigned restriction
223 //------------------------------------------------------------------------------
224 static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
225                                                  CeedRequest *request) {
226   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
227 }
228 
229 //------------------------------------------------------------------------------
230 // Apply unoriented restriction
231 //------------------------------------------------------------------------------
232 static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
233                                                    CeedRequest *request) {
234   return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
235 }
236 
237 //------------------------------------------------------------------------------
238 // Get offsets
239 //------------------------------------------------------------------------------
240 static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
241   CeedElemRestriction_Cuda *impl;
242 
243   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
244   switch (mem_type) {
245     case CEED_MEM_HOST:
246       *offsets = impl->h_ind;
247       break;
248     case CEED_MEM_DEVICE:
249       *offsets = impl->d_ind;
250       break;
251   }
252   return CEED_ERROR_SUCCESS;
253 }
254 
255 //------------------------------------------------------------------------------
256 // Get orientations
257 //------------------------------------------------------------------------------
258 static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
259   CeedElemRestriction_Cuda *impl;
260   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
261 
262   switch (mem_type) {
263     case CEED_MEM_HOST:
264       *orients = impl->h_orients;
265       break;
266     case CEED_MEM_DEVICE:
267       *orients = impl->d_orients;
268       break;
269   }
270   return CEED_ERROR_SUCCESS;
271 }
272 
273 //------------------------------------------------------------------------------
274 // Get curl-conforming orientations
275 //------------------------------------------------------------------------------
276 static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
277   CeedElemRestriction_Cuda *impl;
278   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
279 
280   switch (mem_type) {
281     case CEED_MEM_HOST:
282       *curl_orients = impl->h_curl_orients;
283       break;
284     case CEED_MEM_DEVICE:
285       *curl_orients = impl->d_curl_orients;
286       break;
287   }
288   return CEED_ERROR_SUCCESS;
289 }
290 
291 //------------------------------------------------------------------------------
292 // Destroy restriction
293 //------------------------------------------------------------------------------
// Release all resources owned by the restriction: the JIT-compiled CUDA
// module, owned host copies (h_*_allocated), owned device copies
// (d_*_allocated), and the deterministic-transpose index arrays.
// Arrays that were never allocated are NULL; freeing NULL is a no-op for
// both CeedFree and cudaFree, so every free can be issued unconditionally.
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  // Unload the JIT module holding all restriction kernels
  CeedCallCuda(ceed, cuModuleUnload(impl->module));
  // Offsets (only the *_allocated pointers are owned; h_ind/d_ind may alias
  // caller memory under CEED_USE_POINTER)
  CeedCallBackend(CeedFree(&impl->h_ind_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_ind_allocated));
  // Deterministic-transpose arrays (built in CeedElemRestrictionOffset_Cuda)
  CeedCallCuda(ceed, cudaFree(impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree(impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree(impl->d_l_vec_indices));
  // Orientation data
  CeedCallBackend(CeedFree(&impl->h_orients_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_orients_allocated));
  // Curl-conforming orientation data
  CeedCallBackend(CeedFree(&impl->h_curl_orients_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_curl_orients_allocated));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}
313 
314 //------------------------------------------------------------------------------
315 // Create transpose offsets and indices
316 //------------------------------------------------------------------------------
317 static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt *indices) {
318   Ceed                      ceed;
319   bool                     *is_node;
320   CeedSize                  l_size;
321   CeedInt                   num_elem, elem_size, num_comp, num_nodes = 0;
322   CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
323   CeedElemRestriction_Cuda *impl;
324 
325   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
326   CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
327   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
328   CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
329   CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
330   CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
331   const CeedInt size_indices = num_elem * elem_size;
332 
333   // Count num_nodes
334   CeedCallBackend(CeedCalloc(l_size, &is_node));
335 
336   for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
337   for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
338   impl->num_nodes = num_nodes;
339 
340   // L-vector offsets array
341   CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
342   CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
343   for (CeedInt i = 0, j = 0; i < l_size; i++) {
344     if (is_node[i]) {
345       l_vec_indices[j] = i;
346       ind_to_offset[i] = j++;
347     }
348   }
349   CeedCallBackend(CeedFree(&is_node));
350 
351   // Compute transpose offsets and indices
352   const CeedInt size_offsets = num_nodes + 1;
353 
354   CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
355   CeedCallBackend(CeedMalloc(size_indices, &t_indices));
356   // Count node multiplicity
357   for (CeedInt e = 0; e < num_elem; ++e) {
358     for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
359   }
360   // Convert to running sum
361   for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
362   // List all E-vec indices associated with L-vec node
363   for (CeedInt e = 0; e < num_elem; ++e) {
364     for (CeedInt i = 0; i < elem_size; ++i) {
365       const CeedInt lid = elem_size * e + i;
366       const CeedInt gid = indices[lid];
367 
368       t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
369     }
370   }
371   // Reset running sum
372   for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
373   t_offsets[0] = 0;
374 
375   // Copy data to device
376   // -- L-vector indices
377   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
378   CeedCallCuda(ceed, cudaMemcpy(impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
379   // -- Transpose offsets
380   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
381   CeedCallCuda(ceed, cudaMemcpy(impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
382   // -- Transpose indices
383   CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
384   CeedCallCuda(ceed, cudaMemcpy(impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));
385 
386   // Cleanup
387   CeedCallBackend(CeedFree(&ind_to_offset));
388   CeedCallBackend(CeedFree(&l_vec_indices));
389   CeedCallBackend(CeedFree(&t_offsets));
390   CeedCallBackend(CeedFree(&t_indices));
391   return CEED_ERROR_SUCCESS;
392 }
393 
394 //------------------------------------------------------------------------------
395 // Create restriction
396 //------------------------------------------------------------------------------
397 int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *indices, const bool *orients,
398                                    const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
399   Ceed                      ceed, ceed_parent;
400   bool                      is_deterministic;
401   CeedInt                   num_elem, num_comp, elem_size, comp_stride = 1;
402   CeedRestrictionType       rstr_type;
403   char                     *restriction_kernel_path, *restriction_kernel_source;
404   CeedElemRestriction_Cuda *impl;
405 
406   CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
407   CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
408   CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
409   CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
410   CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
411   CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
412   const CeedInt size       = num_elem * elem_size;
413   CeedInt       strides[3] = {1, size, elem_size};
414   CeedInt       layout[3]  = {1, elem_size * num_elem, elem_size};
415 
416   // Stride data
417   CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
418   if (rstr_type == CEED_RESTRICTION_STRIDED) {
419     bool has_backend_strides;
420 
421     CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
422     if (!has_backend_strides) {
423       CeedCallBackend(CeedElemRestrictionGetStrides(rstr, &strides));
424     }
425   } else {
426     CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
427   }
428 
429   CeedCallBackend(CeedCalloc(1, &impl));
430   impl->num_nodes                = size;
431   impl->h_ind                    = NULL;
432   impl->h_ind_allocated          = NULL;
433   impl->d_ind                    = NULL;
434   impl->d_ind_allocated          = NULL;
435   impl->d_t_indices              = NULL;
436   impl->d_t_offsets              = NULL;
437   impl->h_orients                = NULL;
438   impl->h_orients_allocated      = NULL;
439   impl->d_orients                = NULL;
440   impl->d_orients_allocated      = NULL;
441   impl->h_curl_orients           = NULL;
442   impl->h_curl_orients_allocated = NULL;
443   impl->d_curl_orients           = NULL;
444   impl->d_curl_orients_allocated = NULL;
445   CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));
446   CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));
447 
448   // Set up device offset/orientation arrays
449   if (rstr_type != CEED_RESTRICTION_STRIDED) {
450     switch (mem_type) {
451       case CEED_MEM_HOST: {
452         switch (copy_mode) {
453           case CEED_OWN_POINTER:
454             impl->h_ind_allocated = (CeedInt *)indices;
455             impl->h_ind           = (CeedInt *)indices;
456             break;
457           case CEED_USE_POINTER:
458             impl->h_ind = (CeedInt *)indices;
459             break;
460           case CEED_COPY_VALUES:
461             CeedCallBackend(CeedMalloc(size, &impl->h_ind_allocated));
462             memcpy(impl->h_ind_allocated, indices, size * sizeof(CeedInt));
463             impl->h_ind = impl->h_ind_allocated;
464             break;
465         }
466         CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_ind, size * sizeof(CeedInt)));
467         impl->d_ind_allocated = impl->d_ind;  // We own the device memory
468         CeedCallCuda(ceed, cudaMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
469         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, indices));
470       } break;
471       case CEED_MEM_DEVICE: {
472         switch (copy_mode) {
473           case CEED_COPY_VALUES:
474             CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_ind, size * sizeof(CeedInt)));
475             impl->d_ind_allocated = impl->d_ind;  // We own the device memory
476             CeedCallCuda(ceed, cudaMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), cudaMemcpyDeviceToDevice));
477             break;
478           case CEED_OWN_POINTER:
479             impl->d_ind           = (CeedInt *)indices;
480             impl->d_ind_allocated = impl->d_ind;
481             break;
482           case CEED_USE_POINTER:
483             impl->d_ind = (CeedInt *)indices;
484             break;
485         }
486         CeedCallBackend(CeedMalloc(size, &impl->h_ind_allocated));
487         CeedCallCuda(ceed, cudaMemcpy(impl->h_ind_allocated, impl->d_ind, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
488         impl->h_ind = impl->h_ind_allocated;
489         if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, indices));
490       } break;
491     }
492 
493     // Orientation data
494     if (rstr_type == CEED_RESTRICTION_ORIENTED) {
495       switch (mem_type) {
496         case CEED_MEM_HOST: {
497           switch (copy_mode) {
498             case CEED_OWN_POINTER:
499               impl->h_orients_allocated = (bool *)orients;
500               impl->h_orients           = (bool *)orients;
501               break;
502             case CEED_USE_POINTER:
503               impl->h_orients = (bool *)orients;
504               break;
505             case CEED_COPY_VALUES:
506               CeedCallBackend(CeedMalloc(size, &impl->h_orients_allocated));
507               memcpy(impl->h_orients_allocated, orients, size * sizeof(bool));
508               impl->h_orients = impl->h_orients_allocated;
509               break;
510           }
511           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients, size * sizeof(bool)));
512           impl->d_orients_allocated = impl->d_orients;  // We own the device memory
513           CeedCallCuda(ceed, cudaMemcpy(impl->d_orients, orients, size * sizeof(bool), cudaMemcpyHostToDevice));
514         } break;
515         case CEED_MEM_DEVICE: {
516           switch (copy_mode) {
517             case CEED_COPY_VALUES:
518               CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients, size * sizeof(bool)));
519               impl->d_orients_allocated = impl->d_orients;  // We own the device memory
520               CeedCallCuda(ceed, cudaMemcpy(impl->d_orients, orients, size * sizeof(bool), cudaMemcpyDeviceToDevice));
521               break;
522             case CEED_OWN_POINTER:
523               impl->d_orients           = (bool *)orients;
524               impl->d_orients_allocated = impl->d_orients;
525               break;
526             case CEED_USE_POINTER:
527               impl->d_orients = (bool *)orients;
528               break;
529           }
530           CeedCallBackend(CeedMalloc(size, &impl->h_orients_allocated));
531           CeedCallCuda(ceed, cudaMemcpy(impl->h_orients_allocated, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
532           impl->h_orients = impl->h_orients_allocated;
533         } break;
534       }
535     } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
536       switch (mem_type) {
537         case CEED_MEM_HOST: {
538           switch (copy_mode) {
539             case CEED_OWN_POINTER:
540               impl->h_curl_orients_allocated = (CeedInt8 *)curl_orients;
541               impl->h_curl_orients           = (CeedInt8 *)curl_orients;
542               break;
543             case CEED_USE_POINTER:
544               impl->h_curl_orients = (CeedInt8 *)curl_orients;
545               break;
546             case CEED_COPY_VALUES:
547               CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_allocated));
548               memcpy(impl->h_curl_orients_allocated, curl_orients, 3 * size * sizeof(CeedInt8));
549               impl->h_curl_orients = impl->h_curl_orients_allocated;
550               break;
551           }
552           CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients, 3 * size * sizeof(CeedInt8)));
553           impl->d_curl_orients_allocated = impl->d_curl_orients;  // We own the device memory
554           CeedCallCuda(ceed, cudaMemcpy(impl->d_curl_orients, curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
555         } break;
556         case CEED_MEM_DEVICE: {
557           switch (copy_mode) {
558             case CEED_COPY_VALUES:
559               CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients, 3 * size * sizeof(CeedInt8)));
560               impl->d_curl_orients_allocated = impl->d_curl_orients;  // We own the device memory
561               CeedCallCuda(ceed, cudaMemcpy(impl->d_curl_orients, curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToDevice));
562               break;
563             case CEED_OWN_POINTER:
564               impl->d_curl_orients           = (CeedInt8 *)curl_orients;
565               impl->d_curl_orients_allocated = impl->d_curl_orients;
566               break;
567             case CEED_USE_POINTER:
568               impl->d_curl_orients = (CeedInt8 *)curl_orients;
569               break;
570           }
571           CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_allocated));
572           CeedCallCuda(ceed, cudaMemcpy(impl->h_curl_orients_allocated, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
573           impl->h_curl_orients = impl->h_curl_orients_allocated;
574         } break;
575       }
576     }
577   }
578 
579   // Compile CUDA kernels
580   CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction.h", &restriction_kernel_path));
581   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
582   CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
583   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
584   CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 8, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
585                                    "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride, "RSTR_STRIDE_NODES",
586                                    strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM", strides[2]));
587   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->StridedNoTranspose));
588   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->StridedTranspose));
589   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->OffsetNoTranspose));
590   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->OrientedNoTranspose));
591   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->CurlOrientedNoTranspose));
592   CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->CurlOrientedUnsignedNoTranspose));
593   if (!is_deterministic) {
594     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->OffsetTranspose));
595     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->OrientedTranspose));
596     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->CurlOrientedTranspose));
597     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->CurlOrientedUnsignedTranspose));
598   } else {
599     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTransposeDet", &impl->OffsetTransposeDet));
600     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTransposeDet", &impl->OrientedTransposeDet));
601     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTransposeDet", &impl->CurlOrientedTransposeDet));
602     CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTransposeDet", &impl->CurlOrientedUnsignedTransposeDet));
603   }
604   CeedCallBackend(CeedFree(&restriction_kernel_path));
605   CeedCallBackend(CeedFree(&restriction_kernel_source));
606 
607   // Register backend functions
608   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
609   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
610   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
611   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
612   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
613   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
614   CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
615   return CEED_ERROR_SUCCESS;
616 }
617 
618 //------------------------------------------------------------------------------
619