// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-ref.h"

//------------------------------------------------------------------------------
// Compile restriction kernels
//
// Lazily JIT-compiles the CUDA kernels for this restriction and caches the
// module and kernel handles on the backend data struct.  The kernel source
// file and the macro definitions passed to the JIT depend on the restriction
// type (strided / standard or at-points offsets / oriented / curl-oriented).
// Called from the apply path the first time impl->module is NULL.
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionSetupCompile_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  bool                      is_deterministic;
  char                     *restriction_kernel_source;
  const char               *restriction_kernel_path;
  CeedInt                   num_elem, num_comp, elem_size, comp_stride;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    // AtPoints restrictions use the max points per element as the element size
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr, &elem_size));
  } else {
    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  }
  // Deterministic transpose kernels are used iff the transpose index arrays
  // were built at creation time (see CeedElemRestrictionOffset_Cuda)
  is_deterministic = impl->d_l_vec_indices != NULL;

  // Compile CUDA kernels
  switch (rstr_type) {
    case CEED_RESTRICTION_STRIDED: {
      bool    has_backend_strides;
      // Default backend strides: node fastest, then component, then element
      CeedInt strides[3] = {1, num_elem * elem_size, elem_size};

      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (!has_backend_strides) {
        // User-provided strides override the backend defaults
        CeedCallBackend(CeedElemRestrictionGetStrides(rstr, strides));
      }

      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-strided.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      // Strides are baked into the kernel as compile-time macros
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_STRIDE_NODES", strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM",
                                       strides[2]));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_POINTS:
    case CEED_RESTRICTION_STANDARD: {
      // Both standard and at-points restrictions use the offset kernels;
      // at-points offsets were padded to max points per element at creation
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_ORIENTED: {
      const char *offset_kernel_path;
      char      **file_paths     = NULL;
      CeedInt     num_file_paths = 0;

      // Oriented kernels also need the plain offset kernels (unsigned apply),
      // so both source files are concatenated into one JIT buffer
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceAndInitializeBuffer(ceed, restriction_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnsignedTranspose));
      // Cleanup
      CeedCallBackend(CeedFree(&offset_kernel_path));
      for (CeedInt i = 0; i < num_file_paths; i++) CeedCall(CeedFree(&file_paths[i]));
      CeedCall(CeedFree(&file_paths));
    } break;
    case CEED_RESTRICTION_CURL_ORIENTED: {
      const char *offset_kernel_path;
      char      **file_paths     = NULL;
      CeedInt     num_file_paths = 0;

      // Curl-oriented kernels likewise bundle the offset kernels for the
      // unoriented apply variants
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-curl-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceAndInitializeBuffer(ceed, restriction_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &num_file_paths, &file_paths, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnorientedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnorientedTranspose));
      // Cleanup
      CeedCallBackend(CeedFree(&offset_kernel_path));
      for (CeedInt i = 0; i < num_file_paths; i++) CeedCall(CeedFree(&file_paths[i]));
      CeedCall(CeedFree(&file_paths));
    } break;
  }
  CeedCallBackend(CeedFree(&restriction_kernel_path));
  CeedCallBackend(CeedFree(&restriction_kernel_source));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Core apply restriction code
//
// Shared implementation behind Apply/ApplyUnsigned/ApplyUnoriented.
// use_signs/use_orients select which JIT'd kernel variant runs for oriented
// and curl-oriented restrictions.  CEED_NOTRANSPOSE maps l-vector -> e-vector
// (overwrite); CEED_TRANSPOSE maps e-vector -> l-vector (sum into).
// NOTE(review): each args[] array must match the parameter order of the
// corresponding JIT'd kernel — do not reorder.
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));

  // Assemble kernel if needed
  if (!impl->module) {
    CeedCallBackend(CeedElemRestrictionSetupCompile_Cuda(rstr));
  }

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    CeedInt elem_size;

    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    // Block size: elem_size clamped to [32, 1024]
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS:
      case CEED_RESTRICTION_STANDARD: {
        void *args[] = {&impl->d_offsets, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else {
          // Unsigned apply ignores the orientation signs (plain offset kernel)
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else if (use_orients) {
          // Unsigned variant still consumes the curl orientation data
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        } else {
          // Unoriented variant is the plain offset kernel
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedNoTranspose, grid, block_size, args));
        }
      } break;
    }
  } else {
    // E-vector -> L-vector
    // Deterministic path uses precomputed transpose indices (no atomics order
    // dependence); kernels were compiled with USE_DETERMINISTIC accordingly
    const bool    is_deterministic = impl->d_l_vec_indices != NULL;
    const CeedInt block_size       = 32;
    const CeedInt grid             = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS:
      case CEED_RESTRICTION_STANDARD: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else if (use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          }
        }
      } break;
    }
  }

  // Kernel launches are asynchronous; mark the request complete immediately
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply restriction
//------------------------------------------------------------------------------
static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
  // Full apply: use both orientation signs and (curl) orientations
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unsigned restriction
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                 CeedRequest *request) {
  // Ignore orientation signs but keep (curl) orientations
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unoriented restriction
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                   CeedRequest *request) {
  // Ignore all orientation data
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
}

//------------------------------------------------------------------------------
// Get offsets
//
// Returns the host or device offsets array; AtPoints restrictions return the
// original (unpadded) at-points offsets, all others the standard offsets.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
  CeedElemRestriction_Cuda *impl;
  CeedRestrictionType       rstr_type;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  switch (mem_type) {
    case CEED_MEM_HOST:
      *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->h_offsets_at_points : impl->h_offsets;
      break;
    case CEED_MEM_DEVICE:
      *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->d_offsets_at_points : impl->d_offsets;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get orientations
//
// Returns the host or device orientation (sign) array for oriented
// restrictions; pointers may be NULL for other restriction types.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *orients = impl->h_orients;
      break;
    case CEED_MEM_DEVICE:
      *orients = impl->d_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get curl-conforming orientations
//
// Returns the host or device curl-orientation array for curl-oriented
// restrictions; pointers may be NULL for other restriction types.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *curl_orients = impl->h_curl_orients;
      break;
    case CEED_MEM_DEVICE:
      *curl_orients = impl->d_curl_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get offset for padded AtPoints E-layout
//
// Computes the E-vector offset of element `elem` from the E-layout strides
// (node index and component fixed at 0).
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetAtPointsElementOffset_Cuda(CeedElemRestriction rstr, CeedInt elem, CeedSize *elem_offset) {
  CeedInt layout[3];

  CeedCallBackend(CeedElemRestrictionGetELayout(rstr, layout));
  // layout = {node, component, element} strides; 0*... terms kept for clarity
  *elem_offset = 0 * layout[0] + 0 * layout[1] + elem * layout[2];
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy restriction
//
// Unloads the JIT module (if compiled) and releases all owned host and device
// arrays.  Borrowed arrays are not freed; the casts only discard const for
// cudaFree.
//------------------------------------------------------------------------------
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  if (impl->module) {
    CeedCallCuda(ceed, cuModuleUnload(impl->module));
  }
  CeedCallBackend(CeedFree(&impl->h_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_l_vec_indices));
  CeedCallBackend(CeedFree(&impl->h_orients_owned));
  CeedCallCuda(ceed, cudaFree((bool *)impl->d_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_curl_orients_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt8 *)impl->d_curl_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_offsets_at_points_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_at_points_owned));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create transpose offsets and indices
//
// Builds the data for the deterministic transpose apply: a compressed list of
// the l-vector nodes actually referenced (l_vec_indices), plus a CSR-style
// (t_offsets, t_indices) map from each l-vector node to all e-vector entries
// that contribute to it.  All three arrays are copied to the device.
//------------------------------------------------------------------------------
static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt elem_size, const CeedInt *indices) {
  Ceed                      ceed;
  bool                     *is_node;
  CeedSize                  l_size;
  CeedInt                   num_elem, num_comp, num_nodes = 0;
  CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  const CeedInt size_indices = num_elem * elem_size;

  // Count num_nodes
  // Mark every l-vector node that appears in the offsets, then count them
  CeedCallBackend(CeedCalloc(l_size, &is_node));

  for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
  for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
  impl->num_nodes = num_nodes;

  // L-vector offsets array
  // ind_to_offset maps an l-vector node to its rank among referenced nodes;
  // l_vec_indices is the inverse (rank -> l-vector node)
  CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
  CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
  for (CeedInt i = 0, j = 0; i < l_size; i++) {
    if (is_node[i]) {
      l_vec_indices[j] = i;
      ind_to_offset[i] = j++;
    }
  }
  CeedCallBackend(CeedFree(&is_node));

  // Compute transpose offsets and indices
  const CeedInt size_offsets = num_nodes + 1;

  CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
  CeedCallBackend(CeedMalloc(size_indices, &t_indices));
  // Count node multiplicity
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
  }
  // Convert to running sum
  for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
  // List all E-vec indices associated with L-vec node
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) {
      const CeedInt lid = elem_size * e + i;
      const CeedInt gid = indices[lid];

      t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
    }
  }
  // Reset running sum
  // The fill pass above advanced each offset past its bucket; shift back
  for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
  t_offsets[0] = 0;

  // Copy data to device
  // -- L-vector indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose offsets
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));

  // Cleanup
  CeedCallBackend(CeedFree(&ind_to_offset));
  CeedCallBackend(CeedFree(&l_vec_indices));
  CeedCallBackend(CeedFree(&t_offsets));
  CeedCallBackend(CeedFree(&t_indices));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create restriction
//
// Backend constructor: allocates the backend data, sets the E/L layouts,
// mirrors offsets and orientation data between host and device (whichever
// side the caller provided), pads AtPoints offsets to the max points per
// element, optionally builds the deterministic transpose data, and registers
// the backend functions.
//------------------------------------------------------------------------------
int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *offsets, const bool *orients,
                                   const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
  Ceed                      ceed, ceed_parent;
  bool                      is_deterministic;
  CeedInt                   num_elem, num_comp, elem_size;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
  CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  // Use max number of points as elem size for AtPoints restrictions
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedInt max_points = 0;

    // offsets[0..num_elem] is the per-element header of the at-points format
    for (CeedInt i = 0; i < num_elem; i++) {
      max_points = CeedIntMax(max_points, offsets[i + 1] - offsets[i]);
    }
    elem_size = max_points;
  }
  const CeedInt size = num_elem * elem_size;

  CeedCallBackend(CeedCalloc(1, &impl));
  impl->num_nodes = size;
  CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));

  // Set layouts
  {
    bool    has_backend_strides;
    CeedInt layout[3] = {1, size, elem_size};

    CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));
    if (rstr_type == CEED_RESTRICTION_STRIDED) {
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (has_backend_strides) {
        // With backend strides the L-vector shares the E-vector layout
        CeedCallBackend(CeedElemRestrictionSetLLayout(rstr, layout));
      }
    }
  }

  // Pad AtPoints indices
  // Each element's point list is padded to max_points by replicating the last
  // point, so the offset kernels see a rectangular num_elem x max_points array
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedSize offsets_len = elem_size * num_elem, at_points_size = num_elem + 1;
    CeedInt  max_points = elem_size, *offsets_padded;

    CeedCheck(mem_type == CEED_MEM_HOST, ceed, CEED_ERROR_BACKEND, "only MemType Host supported when creating AtPoints restriction");
    CeedCallBackend(CeedMalloc(offsets_len, &offsets_padded));
    for (CeedInt i = 0; i < num_elem; i++) {
      CeedInt num_points = offsets[i + 1] - offsets[i];

      at_points_size += num_points;
      // -- Copy all points in element
      for (CeedInt j = 0; j < num_points; j++) {
        offsets_padded[i * max_points + j] = offsets[offsets[i] + j] * num_comp;
      }
      // -- Replicate out last point in element
      for (CeedInt j = num_points; j < max_points; j++) {
        offsets_padded[i * max_points + j] = offsets[offsets[i] + num_points - 1] * num_comp;
      }
    }
    // Keep the original (unpadded) at-points offsets on host and device for GetOffsets
    CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, at_points_size, &impl->h_offsets_at_points_owned, &impl->h_offsets_at_points_borrowed,
                                            &impl->h_offsets_at_points));
    CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_at_points_owned, at_points_size * sizeof(CeedInt)));
    CeedCallCuda(ceed, cudaMemcpy((CeedInt **)impl->d_offsets_at_points_owned, impl->h_offsets_at_points, at_points_size * sizeof(CeedInt),
                                  cudaMemcpyHostToDevice));
    impl->d_offsets_at_points = (CeedInt *)impl->d_offsets_at_points_owned;

    // -- Use padded offsets for the rest of the setup
    offsets   = (const CeedInt *)offsets_padded;
    copy_mode = CEED_OWN_POINTER;
    CeedCallBackend(CeedElemRestrictionSetAtPointsEVectorSize(rstr, at_points_size * num_comp));
  }

  // Set up device offset/orientation arrays
  // For each array the host side gets copy_mode semantics and the other side
  // gets an owned mirror copy
  if (rstr_type != CEED_RESTRICTION_STRIDED) {
    switch (mem_type) {
      case CEED_MEM_HOST: {
        CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, size, &impl->h_offsets_owned, &impl->h_offsets_borrowed, &impl->h_offsets));
        CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_owned, size * sizeof(CeedInt)));
        CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_offsets_owned, impl->h_offsets, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
        impl->d_offsets = (CeedInt *)impl->d_offsets_owned;
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
      } break;
      case CEED_MEM_DEVICE: {
        CeedCallBackend(CeedSetDeviceCeedIntArray_Cuda(ceed, offsets, copy_mode, size, &impl->d_offsets_owned, &impl->d_offsets_borrowed,
                                                       (const CeedInt **)&impl->d_offsets));
        CeedCallBackend(CeedMalloc(size, &impl->h_offsets_owned));
        CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->h_offsets_owned, impl->d_offsets, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
        impl->h_offsets = impl->h_offsets_owned;
        // NOTE(review): here `offsets` is the caller's device pointer; the
        // deterministic setup reads it on the host — assumes it is also
        // host-accessible or that this path is unreachable; confirm upstream
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
      } break;
    }

    // Orientation data
    if (rstr_type == CEED_RESTRICTION_ORIENTED) {
      switch (mem_type) {
        case CEED_MEM_HOST: {
          CeedCallBackend(CeedSetHostBoolArray(orients, copy_mode, size, &impl->h_orients_owned, &impl->h_orients_borrowed, &impl->h_orients));
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients_owned, size * sizeof(bool)));
          CeedCallCuda(ceed, cudaMemcpy((bool *)impl->d_orients_owned, impl->h_orients, size * sizeof(bool), cudaMemcpyHostToDevice));
          impl->d_orients = impl->d_orients_owned;
        } break;
        case CEED_MEM_DEVICE: {
          CeedCallBackend(CeedSetDeviceBoolArray_Cuda(ceed, orients, copy_mode, size, &impl->d_orients_owned, &impl->d_orients_borrowed,
                                                      (const bool **)&impl->d_orients));
          CeedCallBackend(CeedMalloc(size, &impl->h_orients_owned));
          CeedCallCuda(ceed, cudaMemcpy((bool *)impl->h_orients_owned, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
          impl->h_orients = impl->h_orients_owned;
        } break;
      }
    } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
      // Curl orientations store 3 entries per node (tridiagonal transform)
      switch (mem_type) {
        case CEED_MEM_HOST: {
          CeedCallBackend(CeedSetHostCeedInt8Array(curl_orients, copy_mode, 3 * size, &impl->h_curl_orients_owned, &impl->h_curl_orients_borrowed,
                                                   &impl->h_curl_orients));
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients_owned, 3 * size * sizeof(CeedInt8)));
          CeedCallCuda(ceed,
                       cudaMemcpy((CeedInt8 *)impl->d_curl_orients_owned, impl->h_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
          impl->d_curl_orients = impl->d_curl_orients_owned;
        } break;
        case CEED_MEM_DEVICE: {
          CeedCallBackend(CeedSetDeviceCeedInt8Array_Cuda(ceed, curl_orients, copy_mode, 3 * size, &impl->d_curl_orients_owned,
                                                          &impl->d_curl_orients_borrowed, (const CeedInt8 **)&impl->d_curl_orients));
          CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_owned));
          CeedCallCuda(ceed,
                       cudaMemcpy((CeedInt8 *)impl->h_curl_orients_owned, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
          impl->h_curl_orients = impl->h_curl_orients_owned;
        } break;
      }
    }
  }

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedCallBackend(
        CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetAtPointsElementOffset", CeedElemRestrictionGetAtPointsElementOffset_Cuda));
  }
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------