xref: /libCEED/backends/sycl-shared/ceed-sycl-shared-basis.sycl.cpp (revision 397164e9a381afe5b03480c905b54f999517fbf7)
1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
3 //
4 // SPDX-License-Identifier: BSD-2-Clause
5 //
6 // This file is part of CEED:  http://github.com/ceed
7 
8 #include <ceed/backend.h>
9 #include <ceed/ceed.h>
10 #include <ceed/jit-tools.h>
11 
12 #include <map>
13 #include <string_view>
14 #include <sycl/sycl.hpp>
15 
16 #include "../sycl/ceed-sycl-compile.hpp"
17 #include "ceed-sycl-shared.hpp"
18 
19 //------------------------------------------------------------------------------
// Compute the local range for basis kernels
21 //------------------------------------------------------------------------------
22 static int ComputeLocalRange(Ceed ceed, CeedInt dim, CeedInt thread_1d, CeedInt *local_range, CeedInt max_group_size = 256) {
23   local_range[0]               = thread_1d;
24   local_range[1]               = (dim > 1) ? thread_1d : 1;
25   const CeedInt min_group_size = local_range[0] * local_range[1];
26 
27   CeedCheck(min_group_size <= max_group_size, ceed, CEED_ERROR_BACKEND, "Requested group size is smaller than the required minimum.");
28 
29   local_range[2] = max_group_size / min_group_size;  // elements per group
30   return CEED_ERROR_SUCCESS;
31 }
32 
33 //------------------------------------------------------------------------------
34 // Apply basis
35 //------------------------------------------------------------------------------
// Apply a tensor-product basis operation on the SYCL device.
//
// basis     - basis to apply
// num_elem  - number of elements the operation is applied to
// t_mode    - CEED_NOTRANSPOSE or CEED_TRANSPOSE; selects the transpose kernel variant
// eval_mode - CEED_EVAL_INTERP, CEED_EVAL_GRAD, and CEED_EVAL_WEIGHT are supported;
//             DIV, CURL, and NONE return a backend error
// u         - input vector (not accessed for CEED_EVAL_WEIGHT)
// v         - output vector
//
// NOTE(review): the DIV/CURL/NONE error paths below return before the
// "Restore vectors" section, so the device array accesses taken on u/v are
// not released on those paths — confirm this matches the other backends'
// error-path convention.
int CeedBasisApplyTensor_Sycl_shared(CeedBasis basis, const CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode eval_mode, CeedVector u,
                                     CeedVector v) {
  Ceed                   ceed;
  Ceed_Sycl             *ceed_Sycl;
  const CeedScalar      *d_u;
  CeedScalar            *d_v;
  CeedBasis_Sycl_shared *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &ceed_Sycl));
  CeedCallBackend(CeedBasisGetData(basis, &impl));

  // Read vectors
  // d_u is left uninitialized for CEED_EVAL_WEIGHT; the weight kernel takes no input vector
  if (eval_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  }
  CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

  // Apply basis operation
  switch (eval_mode) {
    case CEED_EVAL_INTERP: {
      // interp_local_range = {thread_1d, thread_1d or 1, elements per group}, set at basis creation
      CeedInt       *lrange         = impl->interp_local_range;
      const CeedInt &elem_per_group = lrange[2];
      // Ceiling division: enough groups to cover all elements
      const CeedInt  group_count    = (num_elem / elem_per_group) + !!(num_elem % elem_per_group);
      //-----------
      // The element dimension is placed first and the 1D thread dimension last;
      // this ordering must match the JIT'd kernel's indexing — see sycl-shared-basis-tensor.h
      sycl::range<3>    local_range(lrange[2], lrange[1], lrange[0]);
      sycl::range<3>    global_range(group_count * lrange[2], lrange[1], lrange[0]);
      sycl::nd_range<3> kernel_range(global_range, local_range);
      //-----------
      sycl::kernel *interp_kernel = (t_mode == CEED_TRANSPOSE) ? impl->interp_transpose_kernel : impl->interp_kernel;

      // Order queue: barrier makes this launch wait on all previously submitted work
      sycl::event e = ceed_Sycl->sycl_queue.ext_oneapi_submit_barrier();

      ceed_Sycl->sycl_queue.submit([&](sycl::handler &cgh) {
        cgh.depends_on(e);
        // Argument order must match the JIT'd kernel signature
        cgh.set_args(num_elem, impl->d_interp_1d, d_u, d_v);
        cgh.parallel_for(kernel_range, *interp_kernel);
      });

    } break;
    case CEED_EVAL_GRAD: {
      CeedInt       *lrange         = impl->grad_local_range;
      const CeedInt &elem_per_group = lrange[2];
      // Ceiling division: enough groups to cover all elements
      const CeedInt  group_count    = (num_elem / elem_per_group) + !!(num_elem % elem_per_group);
      //-----------
      sycl::range<3>    local_range(lrange[2], lrange[1], lrange[0]);
      sycl::range<3>    global_range(group_count * lrange[2], lrange[1], lrange[0]);
      sycl::nd_range<3> kernel_range(global_range, local_range);
      //-----------
      sycl::kernel     *grad_kernel = (t_mode == CEED_TRANSPOSE) ? impl->grad_transpose_kernel : impl->grad_kernel;
      // Prefer the collocated gradient matrix when it was built at creation (3D, Q_1d >= P_1d)
      const CeedScalar *d_grad_1d   = (impl->d_collo_grad_1d) ? impl->d_collo_grad_1d : impl->d_grad_1d;
      // Order queue
      sycl::event e = ceed_Sycl->sycl_queue.ext_oneapi_submit_barrier();

      ceed_Sycl->sycl_queue.submit([&](sycl::handler &cgh) {
        cgh.depends_on(e);
        cgh.set_args(num_elem, impl->d_interp_1d, d_grad_1d, d_u, d_v);
        cgh.parallel_for(kernel_range, *grad_kernel);
      });
    } break;
    case CEED_EVAL_WEIGHT: {
      CeedInt       *lrange         = impl->weight_local_range;
      const CeedInt &elem_per_group = lrange[2];
      // Ceiling division: enough groups to cover all elements
      const CeedInt  group_count    = (num_elem / elem_per_group) + !!(num_elem % elem_per_group);
      //-----------
      sycl::range<3>    local_range(lrange[2], lrange[1], lrange[0]);
      sycl::range<3>    global_range(group_count * lrange[2], lrange[1], lrange[0]);
      sycl::nd_range<3> kernel_range(global_range, local_range);
      //-----------
      // Order queue
      sycl::event e = ceed_Sycl->sycl_queue.ext_oneapi_submit_barrier();

      ceed_Sycl->sycl_queue.submit([&](sycl::handler &cgh) {
        cgh.depends_on(e);
        // Weight kernel reads only the 1D quadrature weights; no input vector
        cgh.set_args(num_elem, impl->d_q_weight_1d, d_v);
        cgh.parallel_for(kernel_range, *(impl->weight_kernel));
      });
    } break;
    // LCOV_EXCL_START
    // Evaluate the divergence to/from the quadrature points
    case CEED_EVAL_DIV:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_DIV not supported");
    // Evaluate the curl to/from the quadrature points
    case CEED_EVAL_CURL:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_CURL not supported");
    // Take no action, BasisApply should not have been called
    case CEED_EVAL_NONE:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_NONE does not make sense in this context");
      // LCOV_EXCL_STOP
  }

  // Restore vectors
  if (eval_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  }
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}
135 
136 //------------------------------------------------------------------------------
137 // Destroy basis
138 //------------------------------------------------------------------------------
139 static int CeedBasisDestroy_Sycl_shared(CeedBasis basis) {
140   Ceed                   ceed;
141   Ceed_Sycl             *data;
142   CeedBasis_Sycl_shared *impl;
143 
144   CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
145   CeedCallBackend(CeedBasisGetData(basis, &impl));
146   CeedCallBackend(CeedGetData(ceed, &data));
147   CeedCallSycl(ceed, data->sycl_queue.wait_and_throw());
148   CeedCallSycl(ceed, sycl::free(impl->d_q_weight_1d, data->sycl_context));
149   CeedCallSycl(ceed, sycl::free(impl->d_interp_1d, data->sycl_context));
150   CeedCallSycl(ceed, sycl::free(impl->d_grad_1d, data->sycl_context));
151   CeedCallSycl(ceed, sycl::free(impl->d_collo_grad_1d, data->sycl_context));
152 
153   delete impl->interp_kernel;
154   delete impl->interp_transpose_kernel;
155   delete impl->grad_kernel;
156   delete impl->grad_transpose_kernel;
157   delete impl->weight_kernel;
158   delete impl->sycl_module;
159 
160   CeedCallBackend(CeedFree(&impl));
161   return CEED_ERROR_SUCCESS;
162 }
163 
164 //------------------------------------------------------------------------------
165 // Create tensor basis
166 // TODO: Refactor
167 //------------------------------------------------------------------------------
168 int CeedBasisCreateTensorH1_Sycl_shared(CeedInt dim, CeedInt P_1d, CeedInt Q_1d, const CeedScalar *interp_1d, const CeedScalar *grad_1d,
169                                         const CeedScalar *q_ref_1d, const CeedScalar *q_weight_1d, CeedBasis basis) {
170   Ceed                   ceed;
171   Ceed_Sycl             *data;
172   char                  *basis_kernel_path, *basis_kernel_source;
173   CeedInt                num_comp;
174   CeedBasis_Sycl_shared *impl;
175 
176   CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
177   CeedCallBackend(CeedCalloc(1, &impl));
178   CeedCallBackend(CeedGetData(ceed, &data));
179   CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
180 
181   const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
182   const CeedInt num_nodes = CeedIntPow(P_1d, dim);
183   const CeedInt num_qpts  = CeedIntPow(Q_1d, dim);
184 
185   CeedInt *interp_lrange = impl->interp_local_range;
186 
187   CeedCallBackend(ComputeLocalRange(ceed, dim, thread_1d, interp_lrange));
188   const CeedInt interp_group_size = interp_lrange[0] * interp_lrange[1] * interp_lrange[2];
189 
190   CeedInt *grad_lrange = impl->grad_local_range;
191 
192   CeedCallBackend(ComputeLocalRange(ceed, dim, thread_1d, grad_lrange));
193   const CeedInt grad_group_size = grad_lrange[0] * grad_lrange[1] * grad_lrange[2];
194 
195   CeedCallBackend(ComputeLocalRange(ceed, dim, Q_1d, impl->weight_local_range));
196 
197   // Copy basis data to GPU
198   CeedCallSycl(ceed, impl->d_q_weight_1d = sycl::malloc_device<CeedScalar>(Q_1d, data->sycl_device, data->sycl_context));
199   sycl::event copy_weight = data->sycl_queue.copy<CeedScalar>(q_weight_1d, impl->d_q_weight_1d, Q_1d);
200 
201   const CeedInt interp_length = Q_1d * P_1d;
202   CeedCallSycl(ceed, impl->d_interp_1d = sycl::malloc_device<CeedScalar>(interp_length, data->sycl_device, data->sycl_context));
203   sycl::event copy_interp = data->sycl_queue.copy<CeedScalar>(interp_1d, impl->d_interp_1d, interp_length);
204 
205   CeedCallSycl(ceed, impl->d_grad_1d = sycl::malloc_device<CeedScalar>(interp_length, data->sycl_device, data->sycl_context));
206   sycl::event copy_grad = data->sycl_queue.copy<CeedScalar>(grad_1d, impl->d_grad_1d, interp_length);
207 
208   CeedCallSycl(ceed, sycl::event::wait_and_throw({copy_weight, copy_interp, copy_grad}));
209 
210   // Compute collocated gradient and copy to GPU
211   impl->d_collo_grad_1d          = NULL;
212   const bool has_collocated_grad = (dim == 3) && (Q_1d >= P_1d);
213 
214   if (has_collocated_grad) {
215     CeedScalar   *collo_grad_1d;
216     const CeedInt cgrad_length = Q_1d * Q_1d;
217 
218     CeedCallBackend(CeedMalloc(Q_1d * Q_1d, &collo_grad_1d));
219     CeedCallBackend(CeedBasisGetCollocatedGrad(basis, collo_grad_1d));
220     CeedCallSycl(ceed, impl->d_collo_grad_1d = sycl::malloc_device<CeedScalar>(cgrad_length, data->sycl_device, data->sycl_context));
221     CeedCallSycl(ceed, data->sycl_queue.copy<CeedScalar>(collo_grad_1d, impl->d_collo_grad_1d, cgrad_length).wait_and_throw());
222     CeedCallBackend(CeedFree(&collo_grad_1d));
223   }
224 
225   // ---[Refactor into separate function]------>
226   // Define compile-time constants
227   std::map<std::string, CeedInt> jit_constants;
228   jit_constants["BASIS_DIM"]                 = dim;
229   jit_constants["BASIS_Q_1D"]                = Q_1d;
230   jit_constants["BASIS_P_1D"]                = P_1d;
231   jit_constants["T_1D"]                      = thread_1d;
232   jit_constants["BASIS_NUM_COMP"]            = num_comp;
233   jit_constants["BASIS_NUM_NODES"]           = num_nodes;
234   jit_constants["BASIS_NUM_QPTS"]            = num_qpts;
235   jit_constants["BASIS_HAS_COLLOCATED_GRAD"] = has_collocated_grad;
236   jit_constants["BASIS_INTERP_SCRATCH_SIZE"] = interp_group_size;
237   jit_constants["BASIS_GRAD_SCRATCH_SIZE"]   = grad_group_size;
238 
239   // Load kernel source
240   CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/sycl/sycl-shared-basis-tensor.h", &basis_kernel_path));
241   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
242   CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
243   CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete -----\n");
244 
245   // Compile kernels into a kernel bundle
246   CeedCallBackend(CeedBuildModule_Sycl(ceed, basis_kernel_source, &impl->sycl_module, jit_constants));
247 
248   // Load kernel functions
249   CeedCallBackend(CeedGetKernel_Sycl(ceed, impl->sycl_module, "Interp", &impl->interp_kernel));
250   CeedCallBackend(CeedGetKernel_Sycl(ceed, impl->sycl_module, "InterpTranspose", &impl->interp_transpose_kernel));
251   CeedCallBackend(CeedGetKernel_Sycl(ceed, impl->sycl_module, "Grad", &impl->grad_kernel));
252   CeedCallBackend(CeedGetKernel_Sycl(ceed, impl->sycl_module, "GradTranspose", &impl->grad_transpose_kernel));
253   CeedCallBackend(CeedGetKernel_Sycl(ceed, impl->sycl_module, "Weight", &impl->weight_kernel));
254 
255   // Clean-up
256   CeedCallBackend(CeedFree(&basis_kernel_path));
257   CeedCallBackend(CeedFree(&basis_kernel_source));
258   // <---[Refactor into separate function]------
259 
260   CeedCallBackend(CeedBasisSetData(basis, impl));
261 
262   // Register backend functions
263   CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Basis", basis, "Apply", CeedBasisApplyTensor_Sycl_shared));
264   CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Basis", basis, "Destroy", CeedBasisDestroy_Sycl_shared));
265   return CEED_ERROR_SUCCESS;
266 }
267 
268 //------------------------------------------------------------------------------
269