// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <string.h>

#ifdef CEED_MAGMA_USE_HIP
#include "../hip/ceed-hip-common.h"
#include "../hip/ceed-hip-compile.h"
#else
#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#endif
#include "ceed-magma-common.h"
#include "ceed-magma.h"

#include "ceed-magma-gemm-nontensor.h"
#include "ceed-magma-gemm-selector.h"

//------------------------------------------------------------------------------
// Basis apply - tensor
//------------------------------------------------------------------------------
static int CeedBasisApply_Magma(CeedBasis basis, CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode e_mode, CeedVector u, CeedVector v) {
  Ceed ceed;
  Ceed_Magma *data;
  CeedInt dim, num_comp, num_nodes, P_1d, Q_1d, P, Q;
  const CeedScalar *d_u;
  CeedScalar *d_v;
  CeedBasis_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedBasisGetData(basis, &impl));
  CeedCallBackend(CeedBasisGetDimension(basis, &dim));
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedBasisGetNumNodes(basis, &num_nodes));
  CeedCallBackend(CeedBasisGetNumNodes1D(basis, &P_1d));
  CeedCallBackend(CeedBasisGetNumQuadraturePoints1D(basis, &Q_1d));
  P = P_1d;
  Q = Q_1d;
  if (t_mode == CEED_TRANSPOSE) {
    P = Q_1d;
    Q = P_1d;
  }

  // Read vectors
  if (u != CEED_VECTOR_NONE) CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  else CeedCheck(e_mode == CEED_EVAL_WEIGHT, ceed, CEED_ERROR_BACKEND, "An input vector is required for this CeedEvalMode");
  CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

  // Apply basis operation
  switch (e_mode) {
    case CEED_EVAL_INTERP: {
      // Define element sizes for dofs/quad
      CeedInt elem_qpts_size = CeedIntPow(Q_1d, dim);
      CeedInt elem_dofs_size = CeedIntPow(P_1d, dim);

      // E-vector ordering -------------- Q-vector ordering
      //  component                        component
      //    elem                             elem
      //      node                             node

      // --- Define strides for NOTRANSPOSE mode: ---
      // Input (d_u) is E-vector, output (d_v) is Q-vector

      // Element strides
      CeedInt u_elem_stride = elem_dofs_size;
      CeedInt v_elem_stride = elem_qpts_size;
      // Component strides
      CeedInt u_comp_stride = num_elem * elem_dofs_size;
      CeedInt v_comp_stride = num_elem * elem_qpts_size;
      if (t_mode == CEED_TRANSPOSE) {
        // Input (d_u) is Q-vector, output (d_v) is E-vector
        // Element strides
        v_elem_stride = elem_dofs_size;
        u_elem_stride = elem_qpts_size;
        // Component strides
        v_comp_stride = num_elem * elem_dofs_size;
        u_comp_stride = num_elem * elem_qpts_size;
      }
      CeedInt num_threads = 1;
      CeedInt num_t_col = 1;
      CeedInt shared_mem = 0;
      CeedInt max_P_Q = CeedIntMax(P, Q);

      switch (dim) {
        case 1:
          num_threads = max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_1D);
          shared_mem += sizeof(CeedScalar) * num_t_col * (num_comp * (1 * P + 1 * Q));
          shared_mem += sizeof(CeedScalar) * (P * Q);
          break;
        case 2:
          num_threads = max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_2D);
          shared_mem += P * Q * sizeof(CeedScalar);  // for sT
          // for reforming rU we need P x P, and for the intermediate output we need P x Q
          shared_mem += num_t_col * (P * max_P_Q * sizeof(CeedScalar));
          break;
        case 3:
          num_threads = max_P_Q * max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_3D);
          shared_mem += sizeof(CeedScalar) * (P * Q);  // for sT
          // rU needs P^2 x P, the intermediate output needs max(P^2 x Q, P x Q^2)
          shared_mem += sizeof(CeedScalar) * num_t_col * (CeedIntMax(P * P * max_P_Q, P * Q * Q));
          break;
      }
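      // Note: num_t_col elements are processed per thread block (shared_mem above is sized
      // accordingly), so the grid only needs ceil(num_elem / num_t_col) blocks.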
      CeedInt grid = CeedDivUpInt(num_elem, num_t_col);
      void *args[] = {&impl->d_interp_1d, &d_u, &u_elem_stride, &u_comp_stride, &d_v, &v_elem_stride, &v_comp_stride, &num_elem};

      if (t_mode == CEED_TRANSPOSE) {
        CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->InterpTranspose, grid, num_threads, num_t_col, 1, shared_mem, args));
      } else {
        CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->Interp, grid, num_threads, num_t_col, 1, shared_mem, args));
      }
    } break;
    case CEED_EVAL_GRAD: {
      // Define element sizes for dofs/quad
      CeedInt elem_qpts_size = CeedIntPow(Q_1d, dim);
      CeedInt elem_dofs_size = CeedIntPow(P_1d, dim);

      // In CEED_NOTRANSPOSE mode:
      // d_u is (P^dim x nc), column-major layout (nc = num_comp)
      // d_v is (Q^dim x nc x dim), column-major layout (nc = num_comp)
      // In CEED_TRANSPOSE mode, the sizes of d_u and d_v are switched.

      // E-vector ordering -------------- Q-vector ordering
      //                                  dim
      //  component                        component
      //    elem                             elem
      //      node                             node

      // --- Define strides for NOTRANSPOSE mode: ---
      // Input (d_u) is E-vector, output (d_v) is Q-vector

      // Element strides
      CeedInt u_elem_stride = elem_dofs_size;
      CeedInt v_elem_stride = elem_qpts_size;
      // Component strides
      CeedInt u_comp_stride = num_elem * elem_dofs_size;
      CeedInt v_comp_stride = num_elem * elem_qpts_size;
      // Dimension strides
      CeedInt u_dim_stride = 0;
      CeedInt v_dim_stride = num_elem * elem_qpts_size * num_comp;
      if (t_mode == CEED_TRANSPOSE) {
        // Input (d_u) is Q-vector, output (d_v) is E-vector
        // Element strides
        v_elem_stride = elem_dofs_size;
        u_elem_stride = elem_qpts_size;
        // Component strides
        v_comp_stride = num_elem * elem_dofs_size;
        u_comp_stride = num_elem * elem_qpts_size;
        // Dimension strides
        v_dim_stride = 0;
        u_dim_stride = num_elem * elem_qpts_size * num_comp;
      }
      CeedInt num_threads = 1;
      CeedInt num_t_col = 1;
      CeedInt shared_mem = 0;
      CeedInt max_P_Q = CeedIntMax(P, Q);

      switch (dim) {
        case 1:
          num_threads = max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_1D);
          shared_mem += sizeof(CeedScalar) * num_t_col * (num_comp * (1 * P + 1 * Q));
          shared_mem += sizeof(CeedScalar) * (P * Q);
          break;
        case 2:
          num_threads = max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_2D);
          shared_mem += sizeof(CeedScalar) * 2 * P * Q;  // for sTinterp and sTgrad
          // for reforming rU we need P x P, and for the intermediate output we need P x Q
          shared_mem += sizeof(CeedScalar) * num_t_col * (P * max_P_Q);
          break;
        case 3:
          num_threads = max_P_Q * max_P_Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_3D);
          shared_mem += sizeof(CeedScalar) * 2 * P * Q;  // for sTinterp and sTgrad
          // rU needs P^2 x P, the intermediate outputs need (P^2 x Q + P x Q^2)
          shared_mem += sizeof(CeedScalar) * num_t_col * CeedIntMax(P * P * P, (P * P * Q) + (P * Q * Q));
          break;
      }
      CeedInt grid = CeedDivUpInt(num_elem, num_t_col);
      void *args[] = {&impl->d_interp_1d, &impl->d_grad_1d, &d_u, &u_elem_stride, &u_comp_stride, &u_dim_stride, &d_v,
                      &v_elem_stride, &v_comp_stride, &v_dim_stride, &num_elem};

      if (t_mode == CEED_TRANSPOSE) {
        CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->GradTranspose, grid, num_threads, num_t_col, 1, shared_mem, args));
      } else {
        CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->Grad, grid, num_threads, num_t_col, 1, shared_mem, args));
      }
    } break;
    case CEED_EVAL_WEIGHT: {
      CeedCheck(t_mode != CEED_TRANSPOSE, ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT incompatible with CEED_TRANSPOSE");
      CeedInt elem_dofs_size = CeedIntPow(Q, dim);
      CeedInt num_threads = 1;
      CeedInt num_t_col = 1;
      CeedInt shared_mem = 0;

      switch (dim) {
        case 1:
          num_threads = Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_1D);
          shared_mem += sizeof(CeedScalar) * Q;              // for d_q_weight_1d
          shared_mem += sizeof(CeedScalar) * num_t_col * Q;  // for output
          break;
        case 2:
          num_threads = Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_2D);
          shared_mem += sizeof(CeedScalar) * Q;  // for d_q_weight_1d
          break;
        case 3:
          num_threads = Q * Q;
          num_t_col = MAGMA_BASIS_NTCOL(num_threads, MAGMA_MAXTHREADS_3D);
          shared_mem += sizeof(CeedScalar) * Q;  // for d_q_weight_1d
          break;
      }
      CeedInt grid = CeedDivUpInt(num_elem, num_t_col);
      void *args[] = {&impl->d_q_weight_1d, &d_v, &elem_dofs_size, &num_elem};

      CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->Weight, grid, num_threads, num_t_col, 1, shared_mem, args));
    } break;
    // LCOV_EXCL_START
    case CEED_EVAL_DIV:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_DIV not supported");
    case CEED_EVAL_CURL:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_CURL not supported");
    case CEED_EVAL_NONE:
      return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_NONE does not make sense in this context");
      // LCOV_EXCL_STOP
  }

  // Must sync to ensure completeness
  ceed_magma_queue_sync(data->queue);

  // Restore vectors
  if (e_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  }
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Basis apply - non-tensor
//------------------------------------------------------------------------------
static int CeedBasisApplyNonTensor_Magma(CeedBasis basis, CeedInt num_elem, CeedTransposeMode t_mode, CeedEvalMode e_mode, CeedVector u,
                                         CeedVector v) {
  Ceed ceed;
  Ceed_Magma *data;
  CeedInt num_comp, q_comp, num_nodes, num_qpts, P, Q, N;
  const CeedScalar *d_u;
  CeedScalar *d_v;
  CeedBasisNonTensor_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedBasisGetData(basis, &impl));
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, e_mode, &q_comp));
  CeedCallBackend(CeedBasisGetNumNodes(basis, &num_nodes));
  CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis, &num_qpts));
  P = num_nodes;
  Q = num_qpts;
  N = num_elem * num_comp;
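
  // For a non-tensor basis each eval mode is a dense matrix product: a (P x Q) basis matrix per
  // quadrature component applied to N = num_elem * num_comp columns. For small P and Q this uses
  // JIT-compiled kernels tuned for representative values of N; otherwise it falls back to MAGMA GEMM.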

  // Read vectors
  if (u != CEED_VECTOR_NONE) CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  else CeedCheck(e_mode == CEED_EVAL_WEIGHT, ceed, CEED_ERROR_BACKEND, "An input vector is required for this CeedEvalMode");
  CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));

  // Apply basis operation
  if (e_mode != CEED_EVAL_WEIGHT) {
    const CeedScalar *d_b = NULL;
    switch (e_mode) {
      case CEED_EVAL_INTERP:
        d_b = impl->d_interp;
        break;
      case CEED_EVAL_GRAD:
        d_b = impl->d_grad;
        break;
      case CEED_EVAL_DIV:
        d_b = impl->d_div;
        break;
      case CEED_EVAL_CURL:
        d_b = impl->d_curl;
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT:
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT does not make sense in this context");
      case CEED_EVAL_NONE:
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_NONE does not make sense in this context");
        // LCOV_EXCL_STOP
    }

    // Apply basis operation
    if (P <= MAGMA_NONTENSOR_CUSTOM_KERNEL_MAX_P && Q <= MAGMA_NONTENSOR_CUSTOM_KERNEL_MAX_Q) {
      CeedInt n_array[MAGMA_NONTENSOR_KERNEL_INSTANCES] = {MAGMA_NONTENSOR_KERNEL_N_VALUES};
      CeedInt iN = 0, diff = abs(n_array[iN] - N), idiff;
      CeedInt M = (t_mode == CEED_TRANSPOSE) ? P : Q, K = (t_mode == CEED_TRANSPOSE) ? Q : P;

      for (CeedInt in = iN + 1; in < MAGMA_NONTENSOR_KERNEL_INSTANCES; in++) {
        idiff = abs(n_array[in] - N);
        if (idiff < diff) {
          iN = in;
          diff = idiff;
        }
      }

      // Compile kernels for N as needed
      if (!impl->NB_interp[iN]) {
        CeedFESpace fe_space;
        CeedInt q_comp_interp, q_comp_deriv;
        Ceed ceed_delegate;
        char *basis_kernel_path, *basis_kernel_source;
        magma_int_t arch = magma_getdevice_arch();

        // Tuning parameters for NB
        CeedCallBackend(CeedBasisGetFESpace(basis, &fe_space));
        CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
        switch (fe_space) {
          case CEED_FE_SPACE_H1:
            CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_GRAD, &q_comp_deriv));
            break;
          case CEED_FE_SPACE_HDIV:
            CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_DIV, &q_comp_deriv));
            break;
          case CEED_FE_SPACE_HCURL:
            CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_CURL, &q_comp_deriv));
            break;
        }
        impl->NB_interp[iN] = nontensor_rtc_get_nb(arch, 'n', q_comp_interp, P, Q, n_array[iN]);
        impl->NB_interp_t[iN] = nontensor_rtc_get_nb(arch, 't', q_comp_interp, P, Q, n_array[iN]);
        impl->NB_deriv[iN] = nontensor_rtc_get_nb(arch, 'n', q_comp_deriv, P, Q, n_array[iN]);
        impl->NB_deriv_t[iN] = nontensor_rtc_get_nb(arch, 't', q_comp_deriv, P, Q, n_array[iN]);

        // The RTC compilation code expects a Ceed with the common Ceed_Cuda or Ceed_Hip data
        CeedCallBackend(CeedGetDelegate(ceed, &ceed_delegate));

        // Compile kernels
        CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/magma/magma-basis-interp-deriv-nontensor.h", &basis_kernel_path));
        CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
        CeedCallBackend(CeedLoadSourceToBuffer(ceed, basis_kernel_path, &basis_kernel_source));
        CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
        CeedCallBackend(CeedCompileMagma(ceed_delegate, basis_kernel_source, &impl->module_interp[iN], 8, "BASIS_Q_COMP_INTERP", q_comp_interp,
                                         "BASIS_Q_COMP_DERIV", q_comp_deriv, "BASIS_P", P, "BASIS_Q", Q, "BASIS_NB_INTERP_N", impl->NB_interp[iN],
                                         "BASIS_NB_INTERP_T", impl->NB_interp_t[iN], "BASIS_NB_DERIV_N", impl->NB_deriv[iN], "BASIS_NB_DERIV_T",
                                         impl->NB_deriv_t[iN]));
        CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_interp[iN], "magma_interp_nontensor_n", &impl->Interp[iN]));
        CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_interp[iN], "magma_interp_nontensor_t", &impl->InterpTranspose[iN]));
        CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_interp[iN], "magma_deriv_nontensor_n", &impl->Deriv[iN]));
        CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_interp[iN], "magma_deriv_nontensor_t", &impl->DerivTranspose[iN]));
        CeedCallBackend(CeedFree(&basis_kernel_path));
        CeedCallBackend(CeedFree(&basis_kernel_source));
      }
      CeedMagmaFunction Kernel;
      CeedInt NB;
      if (e_mode == CEED_EVAL_INTERP) {
        if (t_mode == CEED_TRANSPOSE) {
          Kernel = impl->InterpTranspose[iN];
          NB = impl->NB_interp_t[iN];
        } else {
          Kernel = impl->Interp[iN];
          NB = impl->NB_interp[iN];
        }
      } else {
        if (t_mode == CEED_TRANSPOSE) {
          Kernel = impl->DerivTranspose[iN];
          NB = impl->NB_deriv_t[iN];
        } else {
          Kernel = impl->Deriv[iN];
          NB = impl->NB_deriv[iN];
        }
      }
      CeedInt num_t_col = MAGMA_BASIS_NTCOL(M, MAGMA_MAXTHREADS_1D);
      CeedInt grid = CeedDivUpInt(N, num_t_col * NB);
      CeedInt shared_mem_A = P * Q * sizeof(CeedScalar);
      CeedInt shared_mem_B = num_t_col * K * NB * sizeof(CeedScalar);
      CeedInt shared_mem = (t_mode != CEED_TRANSPOSE && q_comp > 1) ? (shared_mem_A + shared_mem_B) : CeedIntMax(shared_mem_A, shared_mem_B);
      void *args[] = {&N, &d_b, &d_u, &d_v};

      CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, Kernel, grid, M, num_t_col, 1, shared_mem, args));
    } else {
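      // Fall back to MAGMA non-tensor GEMM, one product per quadrature component. In transpose
      // mode all components contribute to the same output, so results are accumulated into d_v
      // (beta = 1 after the first component).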
      for (CeedInt d = 0; d < q_comp; d++) {
        if (t_mode == CEED_TRANSPOSE) {
          const CeedScalar beta = (d > 0) ? 1.0 : 0.0;
          magma_gemm_nontensor(MagmaNoTrans, MagmaNoTrans, P, N, Q, 1.0, d_b + d * P * Q, P, d_u + d * N * Q, Q, beta, d_v, P, data->queue);
        } else {
          magma_gemm_nontensor(MagmaTrans, MagmaNoTrans, Q, N, P, 1.0, d_b + d * P * Q, P, d_u, P, 0.0, d_v + d * N * Q, Q, data->queue);
        }
      }
    }
  } else {
    CeedCheck(t_mode != CEED_TRANSPOSE, ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT incompatible with CEED_TRANSPOSE");
    CeedInt num_t_col = MAGMA_BASIS_NTCOL(Q, MAGMA_MAXTHREADS_1D);
    CeedInt grid = CeedDivUpInt(num_elem, num_t_col);
    CeedInt shared_mem = Q * sizeof(CeedScalar) + num_t_col * Q * sizeof(CeedScalar);
    void *args[] = {&num_elem, &impl->d_q_weight, &d_v};

    CeedCallBackend(CeedRunKernelDimSharedMagma(ceed, impl->Weight, grid, Q, num_t_col, 1, shared_mem, args));
  }

  // Must sync to ensure completeness
  ceed_magma_queue_sync(data->queue);

  // Restore vectors
  if (e_mode != CEED_EVAL_WEIGHT) {
    CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  }
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy tensor basis
//------------------------------------------------------------------------------
static int CeedBasisDestroy_Magma(CeedBasis basis) {
  Ceed ceed;
  CeedBasis_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &impl));
#ifdef CEED_MAGMA_USE_HIP
  CeedCallHip(ceed, hipModuleUnload(impl->module));
#else
  CeedCallCuda(ceed, cuModuleUnload(impl->module));
#endif
  CeedCallBackend(magma_free(impl->d_interp_1d));
  CeedCallBackend(magma_free(impl->d_grad_1d));
  CeedCallBackend(magma_free(impl->d_q_weight_1d));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy non-tensor basis
//------------------------------------------------------------------------------
static int CeedBasisDestroyNonTensor_Magma(CeedBasis basis) {
  Ceed ceed;
  CeedBasisNonTensor_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedBasisGetData(basis, &impl));
#ifdef CEED_MAGMA_USE_HIP
  CeedCallHip(ceed, hipModuleUnload(impl->module_weight));
#else
  CeedCallCuda(ceed, cuModuleUnload(impl->module_weight));
#endif
  for (CeedInt in = 0; in < MAGMA_NONTENSOR_KERNEL_INSTANCES; in++) {
    if (impl->module_interp[in]) {
#ifdef CEED_MAGMA_USE_HIP
      CeedCallHip(ceed, hipModuleUnload(impl->module_interp[in]));
#else
      CeedCallCuda(ceed, cuModuleUnload(impl->module_interp[in]));
#endif
    }
  }
  CeedCallBackend(magma_free(impl->d_interp));
  CeedCallBackend(magma_free(impl->d_grad));
  CeedCallBackend(magma_free(impl->d_div));
  CeedCallBackend(magma_free(impl->d_curl));
  CeedCallBackend(magma_free(impl->d_q_weight));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create tensor
//------------------------------------------------------------------------------
int CeedBasisCreateTensorH1_Magma(CeedInt dim, CeedInt P_1d, CeedInt Q_1d, const CeedScalar *interp_1d, const CeedScalar *grad_1d,
                                  const CeedScalar *q_ref_1d, const CeedScalar *q_weight_1d, CeedBasis basis) {
  Ceed ceed, ceed_delegate;
  Ceed_Magma *data;
  char *interp_kernel_path, *grad_kernel_path, *weight_kernel_path, *basis_kernel_source;
  CeedInt num_comp;
  CeedBasis_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedCalloc(1, &impl));

  // Copy basis data to GPU
  CeedCallBackend(magma_malloc((void **)&impl->d_q_weight_1d, Q_1d * sizeof(q_weight_1d[0])));
  magma_setvector(Q_1d, sizeof(q_weight_1d[0]), q_weight_1d, 1, impl->d_q_weight_1d, 1, data->queue);
  CeedCallBackend(magma_malloc((void **)&impl->d_interp_1d, Q_1d * P_1d * sizeof(interp_1d[0])));
  magma_setvector(Q_1d * P_1d, sizeof(interp_1d[0]), interp_1d, 1, impl->d_interp_1d, 1, data->queue);
  CeedCallBackend(magma_malloc((void **)&impl->d_grad_1d, Q_1d * P_1d * sizeof(grad_1d[0])));
  magma_setvector(Q_1d * P_1d, sizeof(grad_1d[0]), grad_1d, 1, impl->d_grad_1d, 1, data->queue);

  // The RTC compilation code expects a Ceed with the common Ceed_Cuda or Ceed_Hip data
  CeedCallBackend(CeedGetDelegate(ceed, &ceed_delegate));

  // Compile kernels
  CeedCallBackend(CeedBasisGetNumComponents(basis, &num_comp));
  {
    char *interp_kernel_name_base = "ceed/jit-source/magma/magma-basis-interp";
    CeedInt interp_kernel_name_len = strlen(interp_kernel_name_base) + 6;
    char interp_kernel_name[interp_kernel_name_len];

    snprintf(interp_kernel_name, interp_kernel_name_len, "%s-%" CeedInt_FMT "d.h", interp_kernel_name_base, dim);
    CeedCallBackend(CeedGetJitAbsolutePath(ceed, interp_kernel_name, &interp_kernel_path));
  }
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, interp_kernel_path, &basis_kernel_source));
  {
    char *grad_kernel_name_base = "ceed/jit-source/magma/magma-basis-grad";
    CeedInt grad_kernel_name_len = strlen(grad_kernel_name_base) + 6;
    char grad_kernel_name[grad_kernel_name_len];

    snprintf(grad_kernel_name, grad_kernel_name_len, "%s-%" CeedInt_FMT "d.h", grad_kernel_name_base, dim);
    CeedCallBackend(CeedGetJitAbsolutePath(ceed, grad_kernel_name, &grad_kernel_path));
  }
  CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, grad_kernel_path, &basis_kernel_source));
  {
    char *weight_kernel_name_base = "ceed/jit-source/magma/magma-basis-weight";
    CeedInt weight_kernel_name_len = strlen(weight_kernel_name_base) + 6;
    char weight_kernel_name[weight_kernel_name_len];

    snprintf(weight_kernel_name, weight_kernel_name_len, "%s-%" CeedInt_FMT "d.h", weight_kernel_name_base, dim);
    CeedCallBackend(CeedGetJitAbsolutePath(ceed, weight_kernel_name, &weight_kernel_path));
  }
  CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, weight_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
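  // P, Q, the dimension, and the number of components are baked into the JIT-compiled module as
  // compile-time constants, so a single module serves all eval modes of this basis.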
  CeedCallBackend(CeedCompileMagma(ceed_delegate, basis_kernel_source, &impl->module, 5, "BASIS_DIM", dim, "BASIS_NUM_COMP", num_comp, "BASIS_P",
                                   P_1d, "BASIS_Q", Q_1d, "BASIS_MAX_P_Q", CeedIntMax(P_1d, Q_1d)));
  switch (dim) {
    case 1:
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpn_1d_kernel", &impl->Interp));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpt_1d_kernel", &impl->InterpTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradn_1d_kernel", &impl->Grad));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradt_1d_kernel", &impl->GradTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_weight_1d_kernel", &impl->Weight));
      break;
    case 2:
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpn_2d_kernel", &impl->Interp));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpt_2d_kernel", &impl->InterpTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradn_2d_kernel", &impl->Grad));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradt_2d_kernel", &impl->GradTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_weight_2d_kernel", &impl->Weight));
      break;
    case 3:
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpn_3d_kernel", &impl->Interp));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_interpt_3d_kernel", &impl->InterpTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradn_3d_kernel", &impl->Grad));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_gradt_3d_kernel", &impl->GradTranspose));
      CeedCallBackend(CeedGetKernelMagma(ceed, impl->module, "magma_weight_3d_kernel", &impl->Weight));
      break;
  }
  CeedCallBackend(CeedFree(&interp_kernel_path));
  CeedCallBackend(CeedFree(&grad_kernel_path));
  CeedCallBackend(CeedFree(&weight_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, impl));

  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApply_Magma));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroy_Magma));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H^1
//------------------------------------------------------------------------------
int CeedBasisCreateH1_Magma(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp, const CeedScalar *grad,
                            const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed, ceed_delegate;
  Ceed_Magma *data;
  char *weight_kernel_path, *basis_kernel_source;
  CeedBasisNonTensor_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedCalloc(1, &impl));

  // Copy basis data to GPU
  CeedCallBackend(magma_malloc((void **)&impl->d_q_weight, num_qpts * sizeof(q_weight[0])));
  magma_setvector(num_qpts, sizeof(q_weight[0]), q_weight, 1, impl->d_q_weight, 1, data->queue);
  if (interp) {
    CeedInt q_comp_interp;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
    CeedCallBackend(magma_malloc((void **)&impl->d_interp, num_qpts * num_nodes * q_comp_interp * sizeof(interp[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_interp, sizeof(interp[0]), interp, 1, impl->d_interp, 1, data->queue);
  }
  if (grad) {
    CeedInt q_comp_grad;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_GRAD, &q_comp_grad));
    CeedCallBackend(magma_malloc((void **)&impl->d_grad, num_qpts * num_nodes * q_comp_grad * sizeof(grad[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_grad, sizeof(grad[0]), grad, 1, impl->d_grad, 1, data->queue);
  }

  // The RTC compilation code expects a Ceed with the common Ceed_Cuda or Ceed_Hip data
  CeedCallBackend(CeedGetDelegate(ceed, &ceed_delegate));

  // Compile weight kernel (the remainder of kernel compilation happens at first call to CeedBasisApply)
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/magma/magma-basis-weight-nontensor.h", &weight_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, weight_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompileMagma(ceed_delegate, basis_kernel_source, &impl->module_weight, 1, "BASIS_Q", num_qpts));
  CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_weight, "magma_weight_nontensor", &impl->Weight));
  CeedCallBackend(CeedFree(&weight_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, impl));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Magma));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Magma));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H(div)
//------------------------------------------------------------------------------
int CeedBasisCreateHdiv_Magma(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp,
                              const CeedScalar *div, const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed, ceed_delegate;
  Ceed_Magma *data;
  char *weight_kernel_path, *basis_kernel_source;
  CeedBasisNonTensor_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedCalloc(1, &impl));

  // Copy basis data to GPU
  CeedCallBackend(magma_malloc((void **)&impl->d_q_weight, num_qpts * sizeof(q_weight[0])));
  magma_setvector(num_qpts, sizeof(q_weight[0]), q_weight, 1, impl->d_q_weight, 1, data->queue);
  if (interp) {
    CeedInt q_comp_interp;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
    CeedCallBackend(magma_malloc((void **)&impl->d_interp, num_qpts * num_nodes * q_comp_interp * sizeof(interp[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_interp, sizeof(interp[0]), interp, 1, impl->d_interp, 1, data->queue);
  }
  if (div) {
    CeedInt q_comp_div;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_DIV, &q_comp_div));
    CeedCallBackend(magma_malloc((void **)&impl->d_div, num_qpts * num_nodes * q_comp_div * sizeof(div[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_div, sizeof(div[0]), div, 1, impl->d_div, 1, data->queue);
  }

  // The RTC compilation code expects a Ceed with the common Ceed_Cuda or Ceed_Hip data
  CeedCallBackend(CeedGetDelegate(ceed, &ceed_delegate));

  // Compile weight kernel (the remainder of kernel compilation happens at first call to CeedBasisApply)
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/magma/magma-basis-weight-nontensor.h", &weight_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, weight_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompileMagma(ceed_delegate, basis_kernel_source, &impl->module_weight, 1, "BASIS_Q", num_qpts));
  CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_weight, "magma_weight_nontensor", &impl->Weight));
  CeedCallBackend(CeedFree(&weight_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, impl));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Magma));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Magma));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create non-tensor H(curl)
//------------------------------------------------------------------------------
int CeedBasisCreateHcurl_Magma(CeedElemTopology topo, CeedInt dim, CeedInt num_nodes, CeedInt num_qpts, const CeedScalar *interp,
                               const CeedScalar *curl, const CeedScalar *q_ref, const CeedScalar *q_weight, CeedBasis basis) {
  Ceed ceed, ceed_delegate;
  Ceed_Magma *data;
  char *weight_kernel_path, *basis_kernel_source;
  CeedBasisNonTensor_Magma *impl;

  CeedCallBackend(CeedBasisGetCeed(basis, &ceed));
  CeedCallBackend(CeedGetData(ceed, &data));
  CeedCallBackend(CeedCalloc(1, &impl));

  // Copy basis data to GPU
  CeedCallBackend(magma_malloc((void **)&impl->d_q_weight, num_qpts * sizeof(q_weight[0])));
  magma_setvector(num_qpts, sizeof(q_weight[0]), q_weight, 1, impl->d_q_weight, 1, data->queue);
  if (interp) {
    CeedInt q_comp_interp;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_INTERP, &q_comp_interp));
    CeedCallBackend(magma_malloc((void **)&impl->d_interp, num_qpts * num_nodes * q_comp_interp * sizeof(interp[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_interp, sizeof(interp[0]), interp, 1, impl->d_interp, 1, data->queue);
  }
  if (curl) {
    CeedInt q_comp_curl;

    CeedCallBackend(CeedBasisGetNumQuadratureComponents(basis, CEED_EVAL_CURL, &q_comp_curl));
    CeedCallBackend(magma_malloc((void **)&impl->d_curl, num_qpts * num_nodes * q_comp_curl * sizeof(curl[0])));
    magma_setvector(num_qpts * num_nodes * q_comp_curl, sizeof(curl[0]), curl, 1, impl->d_curl, 1, data->queue);
  }

  // The RTC compilation code expects a Ceed with the common Ceed_Cuda or Ceed_Hip data
  CeedCallBackend(CeedGetDelegate(ceed, &ceed_delegate));

  // Compile weight kernel (the remainder of kernel compilation happens at first call to CeedBasisApply)
  CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/magma/magma-basis-weight-nontensor.h", &weight_kernel_path));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source -----\n");
  CeedCallBackend(CeedLoadSourceToBuffer(ceed, weight_kernel_path, &basis_kernel_source));
  CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Basis Kernel Source Complete! -----\n");
  CeedCallBackend(CeedCompileMagma(ceed_delegate, basis_kernel_source, &impl->module_weight, 1, "BASIS_Q", num_qpts));
  CeedCallBackend(CeedGetKernelMagma(ceed, impl->module_weight, "magma_weight_nontensor", &impl->Weight));
  CeedCallBackend(CeedFree(&weight_kernel_path));
  CeedCallBackend(CeedFree(&basis_kernel_source));

  CeedCallBackend(CeedBasisSetData(basis, impl));

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Apply", CeedBasisApplyNonTensor_Magma));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Basis", basis, "Destroy", CeedBasisDestroyNonTensor_Magma));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------