// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-ref.h"

//------------------------------------------------------------------------------
// Compile restriction kernels
//
// Lazily JIT-compiles the apply kernels for this restriction into
// impl->module and resolves the kernel entry points into the impl->Apply*
// function handles. Which source file is compiled and which kernels are
// resolved depends on the restriction type. The deterministic-transpose
// variant is selected purely by whether impl->d_l_vec_indices was populated
// (done in CeedElemRestrictionOffset_Cuda at creation time).
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionSetupCompile_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  bool                      is_deterministic;
  CeedInt                   num_elem, num_comp, elem_size, comp_stride;
  CeedRestrictionType       rstr_type;
  char                     *restriction_kernel_path, *restriction_kernel_source;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  // Deterministic transpose data exists iff the l-vector index arrays were built
  is_deterministic = impl->d_l_vec_indices != NULL;

  // Compile CUDA kernels
  switch (rstr_type) {
    case CEED_RESTRICTION_STRIDED: {
      // Default strides match the backend e-vector layout {1, num_elem*elem_size, elem_size};
      // user-provided strides override them when the restriction has its own
      CeedInt strides[3] = {1, num_elem * elem_size, elem_size};
      bool    has_backend_strides;

      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(rstr, &strides));
      }

      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-strided.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      // Sizes and strides are baked into the module as compile-time defines
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_STRIDE_NODES", strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM",
                                       strides[2]));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_STANDARD: {
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_ORIENTED: {
      char *offset_kernel_path;

      // Oriented restrictions also need the plain offset kernels for the
      // "unsigned" apply variants, so both sources are concatenated into one buffer
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->ApplyNoTranspose));
      // Unsigned variants fall back to the sign-free offset kernels
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedFree(&offset_kernel_path));
    } break;
    case CEED_RESTRICTION_CURL_ORIENTED: {
      char *offset_kernel_path;

      // Curl-oriented restrictions need three kernel families: signed+oriented,
      // unsigned-but-oriented, and fully unoriented (plain offset) variants
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-curl-oriented.h", &restriction_kernel_path));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source -----\n");
      CeedCallBackend(CeedLoadSourceToBuffer(ceed, restriction_kernel_path, &restriction_kernel_source));
      CeedCallBackend(CeedGetJitAbsolutePath(ceed, "ceed/jit-source/cuda/cuda-ref-restriction-offset.h", &offset_kernel_path));
      CeedCallBackend(CeedLoadSourceToInitializedBuffer(ceed, offset_kernel_path, &restriction_kernel_source));
      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "----- Loading Restriction Kernel Source Complete! -----\n");
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnorientedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnorientedTranspose));
      CeedCallBackend(CeedFree(&offset_kernel_path));
    } break;
    case CEED_RESTRICTION_POINTS: {
      // LCOV_EXCL_START
      return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
      // LCOV_EXCL_STOP
    } break;
  }
  CeedCallBackend(CeedFree(&restriction_kernel_path));
  CeedCallBackend(CeedFree(&restriction_kernel_source));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Core apply restriction code
//
// Shared implementation behind Apply/ApplyUnsigned/ApplyUnoriented.
//   t_mode == CEED_NOTRANSPOSE : l-vector -> e-vector, output overwritten
//   t_mode == CEED_TRANSPOSE   : e-vector -> l-vector, output summed into
// use_signs/use_orients select which of the precompiled kernel variants is
// launched for oriented and curl-oriented restrictions. The transpose path
// additionally switches on whether the deterministic (precomputed transpose
// index) data is available (impl->d_l_vec_indices != NULL); the
// non-deterministic kernels take the raw offsets instead.
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));

  // Assemble kernel if needed (first apply triggers the JIT compile)
  if (!impl->module) {
    CeedCallBackend(CeedElemRestrictionSetupCompile_Cuda(rstr));
  }

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    CeedInt elem_size;

    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    // Block size tracks the element size, clamped to [32, 1024]
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        void *args[] = {&impl->d_ind, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          void *args[] = {&impl->d_ind, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          void *args[] = {&impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else if (use_orients) {
          // Orientations applied but signs dropped
          void *args[] = {&impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  } else {
    // E-vector -> L-vector
    // The deterministic path uses the precomputed transpose map (one thread per
    // unique l-vector node); the other path uses the raw offsets
    const bool    is_deterministic = impl->d_l_vec_indices != NULL;
    const CeedInt block_size       = 32;
    const CeedInt grid             = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_STANDARD: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_ind, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_ind, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_ind, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else if (use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_ind, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_ind, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_POINTS: {
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Backend does not implement restriction CeedElemRestrictionAtPoints");
        // LCOV_EXCL_STOP
      } break;
    }
  }

  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply restriction
//
// Full apply: signs and orientations both honored.
//------------------------------------------------------------------------------
static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unsigned restriction
//
// Signs dropped; orientations (curl-oriented transformations) still applied.
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                 CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unoriented restriction
//
// Both signs and orientations dropped; plain offset gather/scatter.
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                   CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
}

//------------------------------------------------------------------------------
// Get offsets
//
// Returns the stored host or device offsets array; no copy is made.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  switch (mem_type) {
    case CEED_MEM_HOST:
      *offsets = impl->h_ind;
      break;
    case CEED_MEM_DEVICE:
      *offsets = impl->d_ind;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get orientations
//
// Returns the stored host or device orientation (sign) array; no copy is made.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *orients = impl->h_orients;
      break;
    case CEED_MEM_DEVICE:
      *orients = impl->d_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get curl-conforming orientations
//
// Returns the stored host or device curl-orientation array; no copy is made.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *curl_orients = impl->h_curl_orients;
      break;
    case CEED_MEM_DEVICE:
      *curl_orients = impl->d_curl_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy restriction
//
// Unloads the JIT module (if compiled) and releases all owned host/device
// allocations. Only the *_allocated pointers are freed, so USE_POINTER data
// owned by the caller is left untouched.
//------------------------------------------------------------------------------
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  if (impl->module) {
    CeedCallCuda(ceed, cuModuleUnload(impl->module));
  }
  CeedCallBackend(CeedFree(&impl->h_ind_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_ind_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree(impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree(impl->d_l_vec_indices));
  CeedCallBackend(CeedFree(&impl->h_orients_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_orients_allocated));
  CeedCallBackend(CeedFree(&impl->h_curl_orients_allocated));
  CeedCallCuda(ceed, cudaFree(impl->d_curl_orients_allocated));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create transpose offsets and indices
//
// Builds the data for the deterministic transpose apply: a compressed list of
// the unique l-vector nodes touched by this restriction (l_vec_indices) and a
// CSR-style map (t_offsets, t_indices) from each unique node to every
// e-vector location that references it. All three arrays are copied to the
// device and stored on impl; their final sizes are impl->num_nodes,
// num_nodes + 1, and num_elem * elem_size respectively.
//------------------------------------------------------------------------------
static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt *indices) {
  Ceed                      ceed;
  bool                     *is_node;
  CeedSize                  l_size;
  CeedInt                   num_elem, elem_size, num_comp, num_nodes = 0;
  CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  const CeedInt size_indices = num_elem * elem_size;

  // Count num_nodes (number of distinct l-vector entries referenced)
  CeedCallBackend(CeedCalloc(l_size, &is_node));

  for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
  // NOTE(review): loop index is CeedInt while l_size is CeedSize — assumes
  // l_size fits in 32 bits; confirm for very large l-vectors
  for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
  impl->num_nodes = num_nodes;

  // L-vector offsets array: ind_to_offset maps an l-vector index to its rank
  // among the referenced nodes; l_vec_indices is the inverse (compressed) list
  CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
  CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
  for (CeedInt i = 0, j = 0; i < l_size; i++) {
    if (is_node[i]) {
      l_vec_indices[j] = i;
      ind_to_offset[i] = j++;
    }
  }
  CeedCallBackend(CeedFree(&is_node));

  // Compute transpose offsets and indices
  const CeedInt size_offsets = num_nodes + 1;

  CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
  CeedCallBackend(CeedMalloc(size_indices, &t_indices));
  // Count node multiplicity
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
  }
  // Convert to running sum
  for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
  // List all E-vec indices associated with L-vec node
  // (t_offsets entries are advanced in place while filling t_indices)
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) {
      const CeedInt lid = elem_size * e + i;
      const CeedInt gid = indices[lid];

      t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
    }
  }
  // Reset running sum (shift back the offsets consumed during the fill)
  for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
  t_offsets[0] = 0;

  // Copy data to device
  // -- L-vector indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy(impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose offsets
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy(impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy(impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));

  // Cleanup
  CeedCallBackend(CeedFree(&ind_to_offset));
  CeedCallBackend(CeedFree(&l_vec_indices));
  CeedCallBackend(CeedFree(&t_offsets));
  CeedCallBackend(CeedFree(&t_indices));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create restriction
//
// Sets up backend data for a new restriction: stores (or copies) the offsets
// and any orientation data so that both host and device copies exist,
// honoring the requested mem_type/copy_mode ownership semantics. For
// deterministic parent backends the transpose map is also built here. Kernel
// compilation itself is deferred to first apply.
//------------------------------------------------------------------------------
int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *indices, const bool *orients,
                                   const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
  Ceed                      ceed, ceed_parent;
  bool                      is_deterministic;
  CeedInt                   num_elem, elem_size;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
  // Determinism is a property of the parent backend, not this restriction
  CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  const CeedInt size      = num_elem * elem_size;
  CeedInt       layout[3] = {1, size, elem_size};

  CeedCallBackend(CeedCalloc(1, &impl));
  // num_nodes defaults to the e-vector size; overwritten by
  // CeedElemRestrictionOffset_Cuda when the deterministic map is built
  impl->num_nodes                = size;
  impl->h_ind                    = NULL;
  impl->h_ind_allocated          = NULL;
  impl->d_ind                    = NULL;
  impl->d_ind_allocated          = NULL;
  impl->d_t_indices              = NULL;
  impl->d_t_offsets              = NULL;
  impl->h_orients                = NULL;
  impl->h_orients_allocated      = NULL;
  impl->d_orients                = NULL;
  impl->d_orients_allocated      = NULL;
  impl->h_curl_orients           = NULL;
  impl->h_curl_orients_allocated = NULL;
  impl->d_curl_orients           = NULL;
  impl->d_curl_orients_allocated = NULL;
  CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));
  CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));

  // Set up device offset/orientation arrays (strided restrictions carry none)
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  if (rstr_type != CEED_RESTRICTION_STRIDED) {
    switch (mem_type) {
      case CEED_MEM_HOST: {
        switch (copy_mode) {
          case CEED_OWN_POINTER:
            impl->h_ind_allocated = (CeedInt *)indices;
            impl->h_ind           = (CeedInt *)indices;
            break;
          case CEED_USE_POINTER:
            impl->h_ind = (CeedInt *)indices;
            break;
          case CEED_COPY_VALUES:
            CeedCallBackend(CeedMalloc(size, &impl->h_ind_allocated));
            memcpy(impl->h_ind_allocated, indices, size * sizeof(CeedInt));
            impl->h_ind = impl->h_ind_allocated;
            break;
        }
        // Always mirror host indices onto the device
        CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_ind, size * sizeof(CeedInt)));
        impl->d_ind_allocated = impl->d_ind;  // We own the device memory
        CeedCallCuda(ceed, cudaMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, indices));
      } break;
      case CEED_MEM_DEVICE: {
        switch (copy_mode) {
          case CEED_COPY_VALUES:
            CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_ind, size * sizeof(CeedInt)));
            impl->d_ind_allocated = impl->d_ind;  // We own the device memory
            CeedCallCuda(ceed, cudaMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), cudaMemcpyDeviceToDevice));
            break;
          case CEED_OWN_POINTER:
            impl->d_ind           = (CeedInt *)indices;
            impl->d_ind_allocated = impl->d_ind;
            break;
          case CEED_USE_POINTER:
            impl->d_ind = (CeedInt *)indices;
            break;
        }
        // Always mirror device indices back onto the host
        CeedCallBackend(CeedMalloc(size, &impl->h_ind_allocated));
        CeedCallCuda(ceed, cudaMemcpy(impl->h_ind_allocated, impl->d_ind, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
        impl->h_ind = impl->h_ind_allocated;
        // NOTE(review): here `indices` is a device pointer but
        // CeedElemRestrictionOffset_Cuda reads it with host loops — presumably
        // relies on the host mirror path upstream; confirm against callers
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, indices));
      } break;
    }

    // Orientation data (same mem_type/copy_mode handling as the offsets)
    if (rstr_type == CEED_RESTRICTION_ORIENTED) {
      switch (mem_type) {
        case CEED_MEM_HOST: {
          switch (copy_mode) {
            case CEED_OWN_POINTER:
              impl->h_orients_allocated = (bool *)orients;
              impl->h_orients           = (bool *)orients;
              break;
            case CEED_USE_POINTER:
              impl->h_orients = (bool *)orients;
              break;
            case CEED_COPY_VALUES:
              CeedCallBackend(CeedMalloc(size, &impl->h_orients_allocated));
              memcpy(impl->h_orients_allocated, orients, size * sizeof(bool));
              impl->h_orients = impl->h_orients_allocated;
              break;
          }
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients, size * sizeof(bool)));
          impl->d_orients_allocated = impl->d_orients;  // We own the device memory
          CeedCallCuda(ceed, cudaMemcpy(impl->d_orients, orients, size * sizeof(bool), cudaMemcpyHostToDevice));
        } break;
        case CEED_MEM_DEVICE: {
          switch (copy_mode) {
            case CEED_COPY_VALUES:
              CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients, size * sizeof(bool)));
              impl->d_orients_allocated = impl->d_orients;  // We own the device memory
              CeedCallCuda(ceed, cudaMemcpy(impl->d_orients, orients, size * sizeof(bool), cudaMemcpyDeviceToDevice));
              break;
            case CEED_OWN_POINTER:
              impl->d_orients           = (bool *)orients;
              impl->d_orients_allocated = impl->d_orients;
              break;
            case CEED_USE_POINTER:
              impl->d_orients = (bool *)orients;
              break;
          }
          CeedCallBackend(CeedMalloc(size, &impl->h_orients_allocated));
          CeedCallCuda(ceed, cudaMemcpy(impl->h_orients_allocated, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
          impl->h_orients = impl->h_orients_allocated;
        } break;
      }
    } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
      // Curl orientations use 3 entries (a tridiagonal transformation) per
      // e-vector location, hence the 3 * size allocations
      switch (mem_type) {
        case CEED_MEM_HOST: {
          switch (copy_mode) {
            case CEED_OWN_POINTER:
              impl->h_curl_orients_allocated = (CeedInt8 *)curl_orients;
              impl->h_curl_orients           = (CeedInt8 *)curl_orients;
              break;
            case CEED_USE_POINTER:
              impl->h_curl_orients = (CeedInt8 *)curl_orients;
              break;
            case CEED_COPY_VALUES:
              CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_allocated));
              memcpy(impl->h_curl_orients_allocated, curl_orients, 3 * size * sizeof(CeedInt8));
              impl->h_curl_orients = impl->h_curl_orients_allocated;
              break;
          }
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients, 3 * size * sizeof(CeedInt8)));
          impl->d_curl_orients_allocated = impl->d_curl_orients;  // We own the device memory
          CeedCallCuda(ceed, cudaMemcpy(impl->d_curl_orients, curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
        } break;
        case CEED_MEM_DEVICE: {
          switch (copy_mode) {
            case CEED_COPY_VALUES:
              CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients, 3 * size * sizeof(CeedInt8)));
              impl->d_curl_orients_allocated = impl->d_curl_orients;  // We own the device memory
              CeedCallCuda(ceed, cudaMemcpy(impl->d_curl_orients, curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToDevice));
              break;
            case CEED_OWN_POINTER:
              impl->d_curl_orients           = (CeedInt8 *)curl_orients;
              impl->d_curl_orients_allocated = impl->d_curl_orients;
              break;
            case CEED_USE_POINTER:
              impl->d_curl_orients = (CeedInt8 *)curl_orients;
              break;
          }
          CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_allocated));
          CeedCallCuda(ceed, cudaMemcpy(impl->h_curl_orients_allocated, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
          impl->h_curl_orients = impl->h_curl_orients_allocated;
        } break;
      }
    }
  }

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------