// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda.h>
#include <cuda_runtime.h>

#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-ref.h"

//------------------------------------------------------------------------------
// Basis apply - tensor
//
// Applies a tensor-product basis action (interp/grad/weight) over num_elem
// elements by launching the JIT-compiled kernels held in the basis data.
// For CEED_EVAL_WEIGHT no input vector is needed, so u may be CEED_VECTOR_NONE.
// Transpose application accumulates into v, so v is zeroed first.
//------------------------------------------------------------------------------
int CeedBasisApply_Cuda(CeedBasis basis, const CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode eval_mode, CeedVector u, CeedVector v) {
  Ceed              ceed;
  CeedInt           Q_1d, dim;
  const CeedInt     transpose      = t_mode == CEED_TRANSPOSE;
  const int         max_block_size = 32;
  const CeedScalar *d_u;
  CeedScalar       *d_v;
  CeedBasis_Cuda   *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &data));

  // Read vectors; only CEED_EVAL_WEIGHT may omit the input vector
  if (u != CEED_VECTOR_NONE) CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  else CeedCheck(eval_mode == CEED_EVAL_WEIGHT, ceed, CEED_ERROR_BACKEND, "An input vector is required for this CeedEvalMode");
  CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

  // Clear v for transpose operation (transpose kernels accumulate into v)
  if (transpose) {
    CeedSize length;

    CeedCallBackend(CeedVectorGetLength(v, &length));
    CeedCallCuda(ceed, cudaMemset(d_v, 0, length * sizeof(CeedScalar)));
  }
  CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
  CeedCallBackend(CeedBasisGetDimension(basis, &dim));

  // Basis action
  switch (eval_mode) {
    case CEED_EVAL_INTERP: {
      void *interp_args[] = {(void *)&num_elem, (void *)&transpose, &data->d_interp_1d, &d_u, &d_v};
      // One thread per quadrature point, capped at max_block_size
      const CeedInt block_size = CeedIntMin(CeedIntPow(Q_1d, dim), max_block_size);

      CeedCallBackend(CeedRunKernel_Cuda(ceed, data->Interp, num_elem, block_size, interp_args));
    } break;
    case CEED_EVAL_GRAD: {
      void *grad_args[] = {(void *)&num_elem, (void *)&transpose, &data->d_interp_1d, &data->d_grad_1d, &d_u, &d_v};
      const CeedInt block_size = max_block_size;

      CeedCallBackend(CeedRunKernel_Cuda(ceed, data->Grad, num_elem, block_size, grad_args));
    } break;
    case CEED_EVAL_WEIGHT: {
      void *weight_args[] = {(void *)&num_elem, (void *)&data->d_q_weight_1d, &d_v};
      // 2D thread block over 1D quadrature points; y collapses to 1 in 1D
      const int block_size_x = Q_1d;
      const int block_size_y = dim >= 2 ? Q_1d : 1;

      CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, num_elem, block_size_x, block_size_y, 1, weight_args));
    } break;
    // LCOV_EXCL_START
    // Evaluate the divergence to/from the quadrature points
    case CEED_EVAL_DIV:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_DIV not supported");
    // Evaluate the curl to/from the quadrature points
    case CEED_EVAL_CURL:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_CURL not supported");
    // Take no action, BasisApply should not have been called
    case CEED_EVAL_NONE:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_NONE does not make sense in this context");
    // LCOV_EXCL_STOP
  }

  // Restore vectors; mirror the acquisition guard above so read access is
  // released exactly when it was taken (the previous eval_mode-based guard
  // leaked the read access when a non-NONE u was passed with CEED_EVAL_WEIGHT)
  if (u != CEED_VECTOR_NONE) {
    CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  }
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Basis apply - non-tensor
//
// Applies a non-tensor basis action (interp/grad/div/curl/weight) over
// num_elem elements. Div and curl reuse the Deriv/DerivTranspose kernels,
// which are compiled with the matching derivative operator data (see the
// create functions below). Transpose application accumulates into v, so v
// is zeroed first.
//------------------------------------------------------------------------------
int CeedBasisApplyNonTensor_Cuda(CeedBasis basis, const CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode eval_mode, CeedVector u,
                                 CeedVector v) {
  Ceed                     ceed;
  CeedInt                  num_nodes, num_qpts;
  const CeedInt            transpose       = t_mode == CEED_TRANSPOSE;
  const int                elems_per_block = 1;
  const int                grid            = CeedDivUpInt(num_elem, elems_per_block);
  const CeedScalar        *d_u;
  CeedScalar              *d_v;
  CeedBasisNonTensor_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &data));
  CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &num_qpts));
  CeedCallBackend(CeedBasisGetNumNodes(basis, &num_nodes));

  // Read vectors; d_u stays unset for CEED_EVAL_WEIGHT and is never used there
  if (eval_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  }
  CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

  // Clear v for transpose operation (transpose kernels accumulate into v)
  if (transpose) {
    CeedSize length;

    CeedCallBackend(CeedVectorGetLength(v, &length));
    CeedCallCuda(ceed, cudaMemset(d_v, 0, length * sizeof(CeedScalar)));
  }

  // Apply basis operation
  // Non-transpose kernels use one thread per quadrature point; transpose
  // kernels use one thread per node
  switch (eval_mode) {
    case CEED_EVAL_INTERP: {
      void *interp_args[] = {(void *)&num_elem, &data->d_interp, &d_u, &d_v};
      const int block_size_x = transpose ? num_nodes : num_qpts;

      if (transpose) {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->InterpTranspose, grid, block_size_x, 1, elems_per_block, interp_args));
      } else {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Interp, grid, block_size_x, 1, elems_per_block, interp_args));
      }
    } break;
    case CEED_EVAL_GRAD: {
      void *grad_args[] = {(void *)&num_elem, &data->d_grad, &d_u, &d_v};
      const int block_size_x = transpose ? num_nodes : num_qpts;

      if (transpose) {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->DerivTranspose, grid, block_size_x, 1, elems_per_block, grad_args));
      } else {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Deriv, grid, block_size_x, 1, elems_per_block, grad_args));
      }
    } break;
    case CEED_EVAL_DIV: {
      void *div_args[] = {(void *)&num_elem, &data->d_div, &d_u, &d_v};
      const int block_size_x = transpose ? num_nodes : num_qpts;

      if (transpose) {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->DerivTranspose, grid, block_size_x, 1, elems_per_block, div_args));
      } else {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Deriv, grid, block_size_x, 1, elems_per_block, div_args));
      }
    } break;
    case CEED_EVAL_CURL: {
      void *curl_args[] = {(void *)&num_elem, &data->d_curl, &d_u, &d_v};
      const int block_size_x = transpose ? num_nodes : num_qpts;

      if (transpose) {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->DerivTranspose, grid, block_size_x, 1, elems_per_block, curl_args));
      } else {
        CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Deriv, grid, block_size_x, 1, elems_per_block, curl_args));
      }
    } break;
    case CEED_EVAL_WEIGHT: {
      void *weight_args[] = {(void *)&num_elem, (void *)&data->d_q_weight, &d_v};

      CeedCallBackend(CeedRunKernelDim_Cuda(ceed, data->Weight, grid, num_qpts, 1, elems_per_block, weight_args));
    } break;
    // LCOV_EXCL_START
    // Take no action, BasisApply should not have been called
    case CEED_EVAL_NONE:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_NONE does not make sense in this context");
    // LCOV_EXCL_STOP
  }

  // Restore vectors; guard matches the acquisition guard above
  if (eval_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  }
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy tensor basis
//------------------------------------------------------------------------------
static int CeedBasisDestroy_Cuda(CeedBasis basis) {
  Ceed ceed;
  CeedBasis_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &data));
  // Unload the JIT-compiled module, then free the device copies of the 1D
  // quadrature weights and interp/grad matrices allocated in create
  CeedCallCuda(ceed, cuModuleUnload(data->module));
  CeedCallCuda(ceed, cudaFree(data->d_q_weight_1d));
  CeedCallCuda(ceed, cudaFree(data->d_interp_1d));
  CeedCallCuda(ceed, cudaFree(data->d_grad_1d));
  CeedCallBackend(CeedFree(&data));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy non-tensor basis
//------------------------------------------------------------------------------
static int CeedBasisDestroyNonTensor_Cuda(CeedBasis basis) {
  Ceed ceed;
  CeedBasisNonTensor_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &data));
  CeedCallCuda(ceed, cuModuleUnload(data->module));
  CeedCallCuda(ceed, cudaFree(data->d_q_weight));
  // d_grad/d_div/d_curl are only allocated by the create function matching the
  // basis family; the others presumably remain NULL from CeedCalloc (cudaFree
  // of NULL is a no-op) — TODO confirm CeedCalloc zero-initializes
  CeedCallCuda(ceed, cudaFree(data->d_interp));
  CeedCallCuda(ceed, cudaFree(data->d_grad));
  CeedCallCuda(ceed, cudaFree(data->d_div));
  CeedCallCuda(ceed, cudaFree(data->d_curl));
  CeedCallBackend(CeedFree(&data));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create tensor
//
// Copies the 1D quadrature weights and interp/grad matrices to the device,
// JIT-compiles the tensor basis kernels with the basis sizes baked in as
// macros, and registers the Apply/Destroy backend functions.
//------------------------------------------------------------------------------
int CeedBasisCreateTensorH1_Cuda(CeedInt dim, CeedInt P_1d, CeedInt Q_1d, const CeedScalar *interp_1d, const CeedScalar *grad_1d,
                                 const CeedScalar *q_ref_1d, const CeedScalar *q_weight_1d, CeedBasis basis) {
  Ceed ceed;
  char *basis_kernel_path, *basis_kernel_source;
  CeedInt num_comp;
  // NOTE(review): byte counts are held in CeedInt while sizeof() is size_t;
  // fine for typical basis sizes but could overflow for very large bases
  const CeedInt q_bytes = Q_1d * sizeof(CeedScalar);
  const CeedInt interp_bytes = q_bytes * P_1d;
  CeedBasis_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedCalloc(1, &data));

  // Copy data to GPU
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_q_weight_1d, q_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_q_weight_1d, q_weight_1d, q_bytes, cudaMemcpyHostToDevice));
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_interp_1d, interp_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_interp_1d, interp_1d, interp_bytes, cudaMemcpyHostToDevice));
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_grad_1d, interp_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_grad_1d, grad_1d, interp_bytes, cudaMemcpyHostToDevice));

  // Compile basis kernels
  // BASIS_BUF_LEN sizes the kernel scratch buffer: num_comp values at
  // max(P_1d, Q_1d)^dim points
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-basis-tensor.h", &basis_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->module, 7, "BASIS_Q_1D", Q_1d, "BASIS_P_1D", P_1d, "BASIS_BUF_LEN",
                                   num_comp * CeedIntPow(Q_1d > P_1d ? Q_1d : P_1d, dim), "BASIS_DIM", dim, "BASIS_NUM_COMP", num_comp,
                                   "BASIS_NUM_NODES", CeedIntPow(P_1d, dim), "BASIS_NUM_QPTS", CeedIntPow(Q_1d, dim)));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Interp", &data->Interp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Grad", &data->Grad));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Weight", &data->Weight));
  CeedCallBackend(CeedFree(&basis_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, data));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApply_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroy_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H^1
//
// Copies quadrature weights and the interp/grad matrices to the device and
// JIT-compiles the shared non-tensor kernel source; the gradient operator is
// bound to the generic BASIS_Q_COMP_DERIV / Deriv kernel slots, which the
// H(div) and H(curl) create functions below reuse for div and curl.
//------------------------------------------------------------------------------
int CeedBasisCreateH1_Cuda(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp, const CeedScalar *grad,
                           const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed;
  char *basis_kernel_path, *basis_kernel_source;
  CeedInt num_comp, q_comp_interp, q_comp_grad;
  const CeedInt q_bytes = num_qpts * sizeof(CeedScalar);
  CeedBasisNonTensor_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedCalloc(1, &data));

  // Copy basis data to GPU
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_GRAD, &q_comp_grad));
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_q_weight, q_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_q_weight, q_weight, q_bytes, cudaMemcpyHostToDevice));
  if (interp) {
    const CeedInt interp_bytes = q_bytes * num_nodes * q_comp_interp;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_interp, interp_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_interp, interp, interp_bytes, cudaMemcpyHostToDevice));
  }
  if (grad) {
    const CeedInt grad_bytes = q_bytes * num_nodes * q_comp_grad;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_grad, grad_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_grad, grad, grad_bytes, cudaMemcpyHostToDevice));
  }

  // Compile basis kernels
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-basis-nontensor.h", &basis_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->module, 5, "BASIS_Q", num_qpts, "BASIS_P", num_nodes, "BASIS_Q_COMP_INTERP",
                                   q_comp_interp, "BASIS_Q_COMP_DERIV", q_comp_grad, "BASIS_NUM_COMP", num_comp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Interp", &data->Interp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "InterpTranspose", &data->InterpTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Deriv", &data->Deriv));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "DerivTranspose", &data->DerivTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Weight", &data->Weight));
  CeedCallBackend(CeedFree(&basis_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, data));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H(div)
//
// Same structure as CeedBasisCreateH1_Cuda, but stores the divergence
// operator in d_div and compiles it into the BASIS_Q_COMP_DERIV / Deriv
// kernel slots of the shared non-tensor source.
//------------------------------------------------------------------------------
int CeedBasisCreateHdiv_Cuda(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp, const CeedScalar *div,
                             const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed;
  char *basis_kernel_path, *basis_kernel_source;
  CeedInt num_comp, q_comp_interp, q_comp_div;
  const CeedInt q_bytes = num_qpts * sizeof(CeedScalar);
  CeedBasisNonTensor_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedCalloc(1, &data));

  // Copy basis data to GPU
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_DIV, &q_comp_div));
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_q_weight, q_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_q_weight, q_weight, q_bytes, cudaMemcpyHostToDevice));
  if (interp) {
    const CeedInt interp_bytes = q_bytes * num_nodes * q_comp_interp;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_interp, interp_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_interp, interp, interp_bytes, cudaMemcpyHostToDevice));
  }
  if (div) {
    const CeedInt div_bytes = q_bytes * num_nodes * q_comp_div;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_div, div_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_div, div, div_bytes, cudaMemcpyHostToDevice));
  }

  // Compile basis kernels
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-basis-nontensor.h", &basis_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->module, 5, "BASIS_Q", num_qpts, "BASIS_P", num_nodes, "BASIS_Q_COMP_INTERP",
                                   q_comp_interp, "BASIS_Q_COMP_DERIV", q_comp_div, "BASIS_NUM_COMP", num_comp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Interp", &data->Interp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "InterpTranspose", &data->InterpTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Deriv", &data->Deriv));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "DerivTranspose", &data->DerivTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Weight", &data->Weight));
  CeedCallBackend(CeedFree(&basis_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, data));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H(curl)
//
// Same structure as CeedBasisCreateH1_Cuda, but stores the curl operator in
// d_curl and compiles it into the BASIS_Q_COMP_DERIV / Deriv kernel slots of
// the shared non-tensor source.
//------------------------------------------------------------------------------
int CeedBasisCreateHcurl_Cuda(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp,
                              const CeedScalar *curl, const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed;
  char *basis_kernel_path, *basis_kernel_source;
  CeedInt num_comp, q_comp_interp, q_comp_curl;
  const CeedInt q_bytes = num_qpts * sizeof(CeedScalar);
  CeedBasisNonTensor_Cuda *data;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedCalloc(1, &data));

  // Copy basis data to GPU
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_CURL, &q_comp_curl));
  CeedCallCuda(ceed, cudaMalloc((void **)&data->d_q_weight, q_bytes));
  CeedCallCuda(ceed, cudaMemcpy(data->d_q_weight, q_weight, q_bytes, cudaMemcpyHostToDevice));
  if (interp) {
    const CeedInt interp_bytes = q_bytes * num_nodes * q_comp_interp;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_interp, interp_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_interp, interp, interp_bytes, cudaMemcpyHostToDevice));
  }
  if (curl) {
    const CeedInt curl_bytes = q_bytes * num_nodes * q_comp_curl;

    CeedCallCuda(ceed, cudaMalloc((void **)&data->d_curl, curl_bytes));
    CeedCallCuda(ceed, cudaMemcpy(data->d_curl, curl, curl_bytes, cudaMemcpyHostToDevice));
  }

  // Compile basis kernels
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-basis-nontensor.h", &basis_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompile_Cuda(ceed, basis_kernel_source, &data->module, 5, "BASIS_Q", num_qpts, "BASIS_P", num_nodes, "BASIS_Q_COMP_INTERP",
                                   q_comp_interp, "BASIS_Q_COMP_DERIV", q_comp_curl, "BASIS_NUM_COMP", num_comp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Interp", &data->Interp));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "InterpTranspose", &data->InterpTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Deriv", &data->Deriv));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "DerivTranspose", &data->DerivTranspose));
  CeedCallBackend(CeedGetKernel_Cuda(ceed, data->module, "Weight", &data->Weight));
  CeedCallBackend(CeedFree(&basis_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, data));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------