// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-tools.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-ref.h"

//------------------------------------------------------------------------------
// Compile restriction kernels
//
// Lazily JIT-compiles the CUDA kernels for this restriction into impl->module
// and caches the kernel handles (Apply*Transpose function pointers) on impl.
// The kernel source is selected by restriction type; sizes and strides are
// baked in as JIT defines (the define list passed to CeedCompile_Cuda is
// positional name/value pairs).
//
// Note: is_deterministic is inferred from whether the transpose index arrays
// were built (impl->d_l_vec_indices set in CeedElemRestrictionOffset_Cuda),
// and selects the deterministic transpose code path via USE_DETERMINISTIC.
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionSetupCompile_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  bool                      is_deterministic;
  CeedInt                   num_elem, num_comp, elem_size, comp_stride;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    // AtPoints restrictions use the max points per element as the padded "element size"
    CeedCallBackend(CeedElemRestrictionGetMaxPointsInElement(rstr, &elem_size));
  } else {
    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  }
  is_deterministic = impl->d_l_vec_indices != NULL;

  // Compile CUDA kernels
  switch (rstr_type) {
    case CEED_RESTRICTION_STRIDED: {
      const char restriction_kernel_source[] = "// Strided restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-strided.h>\n";
      bool       has_backend_strides;
      CeedInt    strides[3] = {1, num_elem * elem_size, elem_size};

      // Default (backend) strides above; replace with user strides when provided
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (!has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionGetStrides(rstr, strides));
      }
      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_STRIDE_NODES", strides[0], "RSTR_STRIDE_COMP", strides[1], "RSTR_STRIDE_ELEM",
                                       strides[2]));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "StridedTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_STANDARD: {
      const char restriction_kernel_source[] = "// Standard restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-offset.h>\n";

      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_POINTS: {
      // AtPoints shares the standard offset kernel for the no-transpose direction
      const char restriction_kernel_source[] =
          "// AtPoints restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-at-points.h>\n\n"
          "// Standard restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-offset.h>\n";

      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "AtPointsTranspose", &impl->ApplyTranspose));
    } break;
    case CEED_RESTRICTION_ORIENTED: {
      // Oriented restrictions also compile the unsigned (offset-only) kernels
      // so ApplyUnsigned can skip the sign flips
      const char restriction_kernel_source[] =
          "// Oriented restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-oriented.h>\n\n"
          "// Standard restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-offset.h>\n";

      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnsignedTranspose));
    } break;
    case CEED_RESTRICTION_CURL_ORIENTED: {
      // Curl-oriented restrictions carry three kernel flavors: signed+oriented,
      // unsigned (orientation magnitude only), and unoriented (plain offsets)
      const char restriction_kernel_source[] =
          "// Curl oriented restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-curl-oriented.h>\n\n"
          "// Standard restriction source\n#include <ceed/jit-source/cuda/cuda-ref-restriction-offset.h>\n";

      CeedCallBackend(CeedCompile_Cuda(ceed, restriction_kernel_source, &impl->module, 6, "RSTR_ELEM_SIZE", elem_size, "RSTR_NUM_ELEM", num_elem,
                                       "RSTR_NUM_COMP", num_comp, "RSTR_NUM_NODES", impl->num_nodes, "RSTR_COMP_STRIDE", comp_stride,
                                       "USE_DETERMINISTIC", is_deterministic ? 1 : 0));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedNoTranspose", &impl->ApplyNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedNoTranspose", &impl->ApplyUnsignedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetNoTranspose", &impl->ApplyUnorientedNoTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedTranspose", &impl->ApplyTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "CurlOrientedUnsignedTranspose", &impl->ApplyUnsignedTranspose));
      CeedCallBackend(CeedGetKernel_Cuda(ceed, impl->module, "OffsetTranspose", &impl->ApplyUnorientedTranspose));
    } break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Core apply restriction code
//
// Shared implementation for Apply / ApplyUnsigned / ApplyUnoriented.
// - t_mode == CEED_NOTRANSPOSE: L-vector -> E-vector (v is overwritten)
// - t_mode == CEED_TRANSPOSE:   E-vector -> L-vector (sums into v)
// use_signs / use_orients select which cached kernel handle is launched for
// oriented / curl-oriented restrictions.
//
// NOTE: each void *args[] array below must match the parameter order of the
// corresponding JIT kernel in cuda-ref-restriction-*.h exactly — keep them in
// sync with the JIT source when editing.
//------------------------------------------------------------------------------
static inline int CeedElemRestrictionApply_Cuda_Core(CeedElemRestriction rstr, CeedTransposeMode t_mode, bool use_signs, bool use_orients,
                                                     CeedVector u, CeedVector v, CeedRequest *request) {
  Ceed                      ceed;
  CeedRestrictionType       rstr_type;
  const CeedScalar         *d_u;
  CeedScalar               *d_v;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));

  // Assemble kernel if needed (JIT compile is deferred to first apply)
  if (!impl->module) {
    CeedCallBackend(CeedElemRestrictionSetupCompile_Cuda(rstr));
  }

  // Get vectors
  CeedCallBackend(CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u));
  if (t_mode == CEED_TRANSPOSE) {
    // Sum into for transpose mode, e-vec to l-vec
    CeedCallBackend(CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v));
  } else {
    // Overwrite for notranspose mode, l-vec to e-vec
    CeedCallBackend(CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v));
  }

  // Restrict
  if (t_mode == CEED_NOTRANSPOSE) {
    // L-vector -> E-vector
    CeedInt elem_size;

    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    // Block size clamped to [32, 1024] around elem_size
    const CeedInt block_size = elem_size < 1024 ? (elem_size > 32 ? elem_size : 32) : 1024;
    const CeedInt grid       = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS:
      case CEED_RESTRICTION_STANDARD: {
        void *args[] = {&impl->d_offsets, &d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else {
          // Unsigned apply falls back to the plain offset kernel
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyNoTranspose, grid, block_size, args));
        } else if (use_orients) {
          // Unsigned but still oriented — same args, different kernel
          void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedNoTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedNoTranspose, grid, block_size, args));
        }
      } break;
    }
  } else {
    // E-vector -> L-vector
    // Deterministic path uses precomputed transpose indices (built in
    // CeedElemRestrictionOffset_Cuda); non-deterministic path relies on the
    // kernel's own accumulation over raw offsets.
    const bool    is_deterministic = impl->d_l_vec_indices != NULL;
    const CeedInt block_size       = 32;
    const CeedInt grid             = CeedDivUpInt(impl->num_nodes, block_size);

    switch (rstr_type) {
      case CEED_RESTRICTION_STRIDED: {
        void *args[] = {&d_u, &d_v};

        CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
      } break;
      case CEED_RESTRICTION_POINTS: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_offsets, &impl->d_points_per_elem, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_points_per_elem, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_STANDARD: {
        if (!is_deterministic) {
          void *args[] = {&impl->d_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        } else {
          void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

          CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
        }
      } break;
      case CEED_RESTRICTION_ORIENTED: {
        if (use_signs) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        }
      } break;
      case CEED_RESTRICTION_CURL_ORIENTED: {
        if (use_signs && use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyTranspose, grid, block_size, args));
          }
        } else if (use_orients) {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &impl->d_curl_orients, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnsignedTranspose, grid, block_size, args));
          }
        } else {
          if (!is_deterministic) {
            void *args[] = {&impl->d_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          } else {
            void *args[] = {&impl->d_l_vec_indices, &impl->d_t_indices, &impl->d_t_offsets, &d_u, &d_v};

            CeedCallBackend(CeedRunKernel_Cuda(ceed, impl->ApplyUnorientedTranspose, grid, block_size, args));
          }
        }
      } break;
    }
  }

  // Kernel launches are stream-ordered; mark the request complete immediately
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) *request = NULL;

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArrayRead(u, &d_u));
  CeedCallBackend(CeedVectorRestoreArray(v, &d_v));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply restriction (signed and oriented)
//------------------------------------------------------------------------------
static int CeedElemRestrictionApply_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v, CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, true, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unsigned restriction (orientation signs ignored)
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnsigned_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                 CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, true, u, v, request);
}

//------------------------------------------------------------------------------
// Apply unoriented restriction (signs and curl orientations ignored)
//------------------------------------------------------------------------------
static int CeedElemRestrictionApplyUnoriented_Cuda(CeedElemRestriction rstr, CeedTransposeMode t_mode, CeedVector u, CeedVector v,
                                                   CeedRequest *request) {
  return CeedElemRestrictionApply_Cuda_Core(rstr, t_mode, false, false, u, v, request);
}

//------------------------------------------------------------------------------
// Get offsets
//
// AtPoints restrictions return the original (unpadded) offsets array; all
// other types return the standard offsets for the requested memory space.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOffsets_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt **offsets) {
  CeedElemRestriction_Cuda *impl;
  CeedRestrictionType       rstr_type;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  switch (mem_type) {
    case CEED_MEM_HOST:
      *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->h_offsets_at_points : impl->h_offsets;
      break;
    case CEED_MEM_DEVICE:
      *offsets = rstr_type == CEED_RESTRICTION_POINTS ? impl->d_offsets_at_points : impl->d_offsets;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get orientations
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const bool **orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *orients = impl->h_orients;
      break;
    case CEED_MEM_DEVICE:
      *orients = impl->d_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get curl-conforming orientations
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetCurlOrientations_Cuda(CeedElemRestriction rstr, CeedMemType mem_type, const CeedInt8 **curl_orients) {
  CeedElemRestriction_Cuda *impl;
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));

  switch (mem_type) {
    case CEED_MEM_HOST:
      *curl_orients = impl->h_curl_orients;
      break;
    case CEED_MEM_DEVICE:
      *curl_orients = impl->d_curl_orients;
      break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Get offset for padded AtPoints E-layout
//
// Only the element stride (layout[2]) contributes; node and component indices
// are taken at 0 for the start of the element.
//------------------------------------------------------------------------------
static int CeedElemRestrictionGetAtPointsElementOffset_Cuda(CeedElemRestriction rstr, CeedInt elem, CeedSize *elem_offset) {
  CeedInt layout[3];

  CeedCallBackend(CeedElemRestrictionGetELayout(rstr, layout));
  *elem_offset = 0 * layout[0] + 0 * layout[1] + elem * layout[2];
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy restriction
//
// Unloads the JIT module (if compiled) and releases all owned host and device
// arrays. Borrowed pointers are not freed — only the *_owned fields are.
//------------------------------------------------------------------------------
static int CeedElemRestrictionDestroy_Cuda(CeedElemRestriction rstr) {
  Ceed                      ceed;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  if (impl->module) {
    CeedCallCuda(ceed, cuModuleUnload(impl->module));
  }
  CeedCallBackend(CeedFree(&impl->h_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_offsets));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_t_indices));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_l_vec_indices));
  CeedCallBackend(CeedFree(&impl->h_orients_owned));
  CeedCallCuda(ceed, cudaFree((bool *)impl->d_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_curl_orients_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt8 *)impl->d_curl_orients_owned));
  CeedCallBackend(CeedFree(&impl->h_offsets_at_points_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_offsets_at_points_owned));
  CeedCallBackend(CeedFree(&impl->h_points_per_elem_owned));
  CeedCallCuda(ceed, cudaFree((CeedInt *)impl->d_points_per_elem_owned));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create transpose offsets and indices
//
// Builds the CSR-like structures used by the deterministic transpose kernels:
//   - l_vec_indices: the unique L-vector node for each compressed node id
//   - t_offsets:     running-sum offsets (size num_nodes + 1) into t_indices
//   - t_indices:     all E-vector locations touching each L-vector node
// All three are uploaded to device memory on impl. Also sets impl->num_nodes
// to the number of unique nodes referenced by `indices`.
//------------------------------------------------------------------------------
static int CeedElemRestrictionOffset_Cuda(const CeedElemRestriction rstr, const CeedInt elem_size, const CeedInt *indices) {
  Ceed                      ceed;
  bool                     *is_node;
  CeedSize                  l_size;
  CeedInt                   num_elem, num_comp, num_nodes = 0;
  CeedInt                  *ind_to_offset, *l_vec_indices, *t_offsets, *t_indices;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetData(rstr, &impl));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  // NOTE(review): rstr_type and num_comp are fetched but never used in this
  // function — candidates for removal; verify against other backends first
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  const CeedInt size_indices = num_elem * elem_size;

  // Count num_nodes
  CeedCallBackend(CeedCalloc(l_size, &is_node));

  for (CeedInt i = 0; i < size_indices; i++) is_node[indices[i]] = 1;
  // NOTE(review): loop index is CeedInt while l_size is CeedSize — assumes
  // l_size fits in CeedInt; TODO confirm for very large L-vectors
  for (CeedInt i = 0; i < l_size; i++) num_nodes += is_node[i];
  impl->num_nodes = num_nodes;

  // L-vector offsets array
  CeedCallBackend(CeedCalloc(l_size, &ind_to_offset));
  CeedCallBackend(CeedCalloc(num_nodes, &l_vec_indices));
  for (CeedInt i = 0, j = 0; i < l_size; i++) {
    if (is_node[i]) {
      l_vec_indices[j] = i;
      ind_to_offset[i] = j++;
    }
  }
  CeedCallBackend(CeedFree(&is_node));

  // Compute transpose offsets and indices
  const CeedInt size_offsets = num_nodes + 1;

  CeedCallBackend(CeedCalloc(size_offsets, &t_offsets));
  CeedCallBackend(CeedMalloc(size_indices, &t_indices));
  // Count node multiplicity
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) ++t_offsets[ind_to_offset[indices[elem_size * e + i]] + 1];
  }
  // Convert to running sum
  for (CeedInt i = 1; i < size_offsets; ++i) t_offsets[i] += t_offsets[i - 1];
  // List all E-vec indices associated with L-vec node
  for (CeedInt e = 0; e < num_elem; ++e) {
    for (CeedInt i = 0; i < elem_size; ++i) {
      const CeedInt lid = elem_size * e + i;
      const CeedInt gid = indices[lid];

      t_indices[t_offsets[ind_to_offset[gid]]++] = lid;
    }
  }
  // Reset running sum (the fill loop above advanced each offset by its count)
  for (int i = size_offsets - 1; i > 0; --i) t_offsets[i] = t_offsets[i - 1];
  t_offsets[0] = 0;

  // Copy data to device
  // -- L-vector indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_l_vec_indices, num_nodes * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_l_vec_indices, l_vec_indices, num_nodes * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose offsets
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_offsets, size_offsets * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_offsets, t_offsets, size_offsets * sizeof(CeedInt), cudaMemcpyHostToDevice));
  // -- Transpose indices
  CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_t_indices, size_indices * sizeof(CeedInt)));
  CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_t_indices, t_indices, size_indices * sizeof(CeedInt), cudaMemcpyHostToDevice));

  // Cleanup
  CeedCallBackend(CeedFree(&ind_to_offset));
  CeedCallBackend(CeedFree(&l_vec_indices));
  CeedCallBackend(CeedFree(&t_offsets));
  CeedCallBackend(CeedFree(&t_indices));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create restriction
//
// Backend constructor: stores/copies the offset and orientation arrays for
// host and device, pads AtPoints offsets to a rectangular E-layout, builds the
// deterministic transpose data when the parent Ceed is deterministic, and
// registers the backend function table. Kernel compilation itself is deferred
// to the first apply (see CeedElemRestrictionSetupCompile_Cuda).
//------------------------------------------------------------------------------
int CeedElemRestrictionCreate_Cuda(CeedMemType mem_type, CeedCopyMode copy_mode, const CeedInt *offsets, const bool *orients,
                                   const CeedInt8 *curl_orients, CeedElemRestriction rstr) {
  Ceed                      ceed, ceed_parent;
  bool                      is_deterministic;
  CeedInt                   num_elem, num_comp, elem_size;
  CeedRestrictionType       rstr_type;
  CeedElemRestriction_Cuda *impl;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedGetParent(ceed, &ceed_parent));
  CeedCallBackend(CeedIsDeterministic(ceed_parent, &is_deterministic));
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetType(rstr, &rstr_type));
  // Use max number of points as elem size for AtPoints restrictions
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedInt max_points = 0;

    // For AtPoints, the first num_elem + 1 entries of offsets are element
    // start offsets, so offsets[i + 1] - offsets[i] is points in element i
    for (CeedInt i = 0; i < num_elem; i++) {
      max_points = CeedIntMax(max_points, offsets[i + 1] - offsets[i]);
    }
    elem_size = max_points;
  }
  const CeedInt size = num_elem * elem_size;

  CeedCallBackend(CeedCalloc(1, &impl));
  // Default num_nodes to the E-vector size; overwritten with the unique node
  // count if the deterministic transpose data is built below
  impl->num_nodes = size;
  CeedCallBackend(CeedElemRestrictionSetData(rstr, impl));

  // Set layouts
  {
    bool    has_backend_strides;
    CeedInt layout[3] = {1, size, elem_size};

    CeedCallBackend(CeedElemRestrictionSetELayout(rstr, layout));
    if (rstr_type == CEED_RESTRICTION_STRIDED) {
      CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &has_backend_strides));
      if (has_backend_strides) {
        CeedCallBackend(CeedElemRestrictionSetLLayout(rstr, layout));
      }
    }
  }

  // Pad AtPoints indices
  // Every element is padded out to max_points entries; the original (unpadded)
  // offsets are kept on impl->{h,d}_offsets_at_points for GetOffsets queries.
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedSize offsets_len = elem_size * num_elem, at_points_size = num_elem + 1;
    CeedInt  max_points = elem_size, *offsets_padded, *points_per_elem;

    CeedCheck(mem_type == CEED_MEM_HOST, ceed, CEED_ERROR_BACKEND, "only MemType Host supported when creating AtPoints restriction");
    CeedCallBackend(CeedMalloc(offsets_len, &offsets_padded));
    CeedCallBackend(CeedMalloc(num_elem, &points_per_elem));
    for (CeedInt i = 0; i < num_elem; i++) {
      CeedInt num_points = offsets[i + 1] - offsets[i];

      points_per_elem[i] = num_points;
      at_points_size += num_points;
      // -- Copy all points in element
      for (CeedInt j = 0; j < num_points; j++) {
        offsets_padded[i * max_points + j] = offsets[offsets[i] + j] * num_comp;
      }
      // -- Replicate out last point in element
      for (CeedInt j = num_points; j < max_points; j++) {
        offsets_padded[i * max_points + j] = offsets[offsets[i] + num_points - 1] * num_comp;
      }
    }
    CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, at_points_size, &impl->h_offsets_at_points_owned, &impl->h_offsets_at_points_borrowed,
                                            &impl->h_offsets_at_points));
    CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_at_points_owned, at_points_size * sizeof(CeedInt)));
    CeedCallCuda(ceed, cudaMemcpy((CeedInt **)impl->d_offsets_at_points_owned, impl->h_offsets_at_points, at_points_size * sizeof(CeedInt),
                                  cudaMemcpyHostToDevice));
    impl->d_offsets_at_points = (CeedInt *)impl->d_offsets_at_points_owned;

    // -- Use padded offsets for the rest of the setup
    offsets   = (const CeedInt *)offsets_padded;
    copy_mode = CEED_OWN_POINTER;
    CeedCallBackend(CeedElemRestrictionSetAtPointsEVectorSize(rstr, elem_size * num_elem * num_comp));

    // -- Points per element
    CeedCallBackend(CeedSetHostCeedIntArray(points_per_elem, CEED_OWN_POINTER, num_elem, &impl->h_points_per_elem_owned,
                                            &impl->h_points_per_elem_borrowed, &impl->h_points_per_elem));
    CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_points_per_elem_owned, num_elem * sizeof(CeedInt)));
    CeedCallCuda(ceed,
                 cudaMemcpy((CeedInt **)impl->d_points_per_elem_owned, impl->h_points_per_elem, num_elem * sizeof(CeedInt), cudaMemcpyHostToDevice));
    impl->d_points_per_elem = (CeedInt *)impl->d_points_per_elem_owned;
  }

  // Set up device offset/orientation arrays
  // Whichever memory space the user supplied, mirror the data into the other
  // so both host and device copies are always available.
  if (rstr_type != CEED_RESTRICTION_STRIDED) {
    switch (mem_type) {
      case CEED_MEM_HOST: {
        CeedCallBackend(CeedSetHostCeedIntArray(offsets, copy_mode, size, &impl->h_offsets_owned, &impl->h_offsets_borrowed, &impl->h_offsets));
        CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_offsets_owned, size * sizeof(CeedInt)));
        CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->d_offsets_owned, impl->h_offsets, size * sizeof(CeedInt), cudaMemcpyHostToDevice));
        impl->d_offsets = (CeedInt *)impl->d_offsets_owned;
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
      } break;
      case CEED_MEM_DEVICE: {
        CeedCallBackend(CeedSetDeviceCeedIntArray_Cuda(ceed, offsets, copy_mode, size, &impl->d_offsets_owned, &impl->d_offsets_borrowed,
                                                       (const CeedInt **)&impl->d_offsets));
        CeedCallBackend(CeedMalloc(size, &impl->h_offsets_owned));
        CeedCallCuda(ceed, cudaMemcpy((CeedInt *)impl->h_offsets_owned, impl->d_offsets, size * sizeof(CeedInt), cudaMemcpyDeviceToHost));
        impl->h_offsets = impl->h_offsets_owned;
        if (is_deterministic) CeedCallBackend(CeedElemRestrictionOffset_Cuda(rstr, elem_size, offsets));
      } break;
    }

    // Orientation data
    if (rstr_type == CEED_RESTRICTION_ORIENTED) {
      switch (mem_type) {
        case CEED_MEM_HOST: {
          CeedCallBackend(CeedSetHostBoolArray(orients, copy_mode, size, &impl->h_orients_owned, &impl->h_orients_borrowed, &impl->h_orients));
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_orients_owned, size * sizeof(bool)));
          CeedCallCuda(ceed, cudaMemcpy((bool *)impl->d_orients_owned, impl->h_orients, size * sizeof(bool), cudaMemcpyHostToDevice));
          impl->d_orients = impl->d_orients_owned;
        } break;
        case CEED_MEM_DEVICE: {
          CeedCallBackend(CeedSetDeviceBoolArray_Cuda(ceed, orients, copy_mode, size, &impl->d_orients_owned, &impl->d_orients_borrowed,
                                                      (const bool **)&impl->d_orients));
          CeedCallBackend(CeedMalloc(size, &impl->h_orients_owned));
          CeedCallCuda(ceed, cudaMemcpy((bool *)impl->h_orients_owned, impl->d_orients, size * sizeof(bool), cudaMemcpyDeviceToHost));
          impl->h_orients = impl->h_orients_owned;
        } break;
      }
    } else if (rstr_type == CEED_RESTRICTION_CURL_ORIENTED) {
      // Curl orientations are stored as 3 CeedInt8 values per E-vector entry
      switch (mem_type) {
        case CEED_MEM_HOST: {
          CeedCallBackend(CeedSetHostCeedInt8Array(curl_orients, copy_mode, 3 * size, &impl->h_curl_orients_owned, &impl->h_curl_orients_borrowed,
                                                   &impl->h_curl_orients));
          CeedCallCuda(ceed, cudaMalloc((void **)&impl->d_curl_orients_owned, 3 * size * sizeof(CeedInt8)));
          CeedCallCuda(ceed,
                       cudaMemcpy((CeedInt8 *)impl->d_curl_orients_owned, impl->h_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyHostToDevice));
          impl->d_curl_orients = impl->d_curl_orients_owned;
        } break;
        case CEED_MEM_DEVICE: {
          CeedCallBackend(CeedSetDeviceCeedInt8Array_Cuda(ceed, curl_orients, copy_mode, 3 * size, &impl->d_curl_orients_owned,
                                                          &impl->d_curl_orients_borrowed, (const CeedInt8 **)&impl->d_curl_orients));
          CeedCallBackend(CeedMalloc(3 * size, &impl->h_curl_orients_owned));
          CeedCallCuda(ceed,
                       cudaMemcpy((CeedInt8 *)impl->h_curl_orients_owned, impl->d_curl_orients, 3 * size * sizeof(CeedInt8), cudaMemcpyDeviceToHost));
          impl->h_curl_orients = impl->h_curl_orients_owned;
        } break;
      }
    }
  }

  // Register backend functions
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Apply", CeedElemRestrictionApply_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnsigned", CeedElemRestrictionApplyUnsigned_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "ApplyUnoriented", CeedElemRestrictionApplyUnoriented_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOffsets", CeedElemRestrictionGetOffsets_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetOrientations", CeedElemRestrictionGetOrientations_Cuda));
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetCurlOrientations", CeedElemRestrictionGetCurlOrientations_Cuda));
  if (rstr_type == CEED_RESTRICTION_POINTS) {
    CeedCallBackend(
        CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "GetAtPointsElementOffset", CeedElemRestrictionGetAtPointsElementOffset_Cuda));
  }
  CeedCallBackend(CeedSetBackendFunction(ceed, "ElemRestriction", rstr, "Destroy", CeedElemRestrictionDestroy_Cuda));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------