// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

/// @file
/// Internal header for HIP offset element restriction kernels

#include <ceed.h>
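
// Compile-time parameters, assumed to be supplied as preprocessor definitions
// when this source is JIT compiled by the backend:
//   RSTR_NUM_ELEM     - number of elements
//   RSTR_ELEM_SIZE    - number of nodes per element
//   RSTR_NUM_COMP     - number of field components
//   RSTR_COMP_STRIDE  - component stride in the L-vector
//   RSTR_NUM_NODES    - number of unique L-vector nodes (deterministic transpose only)
//   USE_DETERMINISTIC - nonzero selects the deterministic transpose kernel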

//------------------------------------------------------------------------------
// L-vector -> E-vector, standard (with offsets)
//------------------------------------------------------------------------------
extern "C" __global__ void OffsetNoTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
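  // Grid-stride loop: each thread gathers one (element, node) pair per iteration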
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

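    // E-vector layout: node index is fastest, then element; components are
    // separated by a stride of RSTR_ELEM_SIZE * RSTR_NUM_ELEM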
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      v[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE] = u[ind + comp * RSTR_COMP_STRIDE];
    }
  }
}

//------------------------------------------------------------------------------
// E-vector -> L-vector, standard (with offsets)
//------------------------------------------------------------------------------
#if !USE_DETERMINISTIC
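// Non-deterministic path: contributions to shared L-vector nodes are combined
// with atomics, so the order of summation can vary between runs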
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ indices, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; node < RSTR_NUM_ELEM * RSTR_ELEM_SIZE; node += blockDim.x * gridDim.x) {
    const CeedInt ind      = indices[node];
    const CeedInt loc_node = node % RSTR_ELEM_SIZE;
    const CeedInt elem     = node / RSTR_ELEM_SIZE;

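    // Several element nodes may map to the same L-vector entry, so accumulate atomically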
    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
      atomicAdd(&v[ind + comp * RSTR_COMP_STRIDE], u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE]);
    }
  }
}
#else
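// Deterministic path: each thread owns one L-vector node and sums its E-vector
// contributions in a fixed order via precomputed, CSR-like transpose maps
// (t_offsets gives the range in t_indices for each node)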
extern "C" __global__ void OffsetTranspose(const CeedInt *__restrict__ l_vec_indices, const CeedInt *__restrict__ t_indices,
                                           const CeedInt *__restrict__ t_offsets, const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) {
  CeedScalar value[RSTR_NUM_COMP];

  for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; i < RSTR_NUM_NODES; i += blockDim.x * gridDim.x) {
    const CeedInt ind     = l_vec_indices[i];
    const CeedInt range_1 = t_offsets[i];
    const CeedInt range_N = t_offsets[i + 1];

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) value[comp] = 0.0;

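    // Gather every E-vector entry that contributes to this L-vector node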
    for (CeedInt j = range_1; j < range_N; j++) {
      const CeedInt t_ind    = t_indices[j];
      const CeedInt loc_node = t_ind % RSTR_ELEM_SIZE;
      const CeedInt elem     = t_ind / RSTR_ELEM_SIZE;

      for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) {
        value[comp] += u[loc_node + comp * RSTR_ELEM_SIZE * RSTR_NUM_ELEM + elem * RSTR_ELEM_SIZE];
      }
    }

    for (CeedInt comp = 0; comp < RSTR_NUM_COMP; comp++) v[ind + comp * RSTR_COMP_STRIDE] += value[comp];
  }
}
#endif
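
//------------------------------------------------------------------------------
// Example JIT-time definitions (hypothetical values; the backend supplies the
// actual ones when this source is compiled):
//   -DRSTR_NUM_ELEM=512 -DRSTR_ELEM_SIZE=27 -DRSTR_NUM_COMP=3
//   -DRSTR_COMP_STRIDE=15000 -DRSTR_NUM_NODES=15000 -DUSE_DETERMINISTIC=0
//------------------------------------------------------------------------------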