1 // Copyright (c) 2017-2025, Lawrence Livermore National Security, LLC and other 2 // CEED contributors. All Rights Reserved. See the top-level LICENSE and NOTICE 3 // files for details. 4 // 5 // SPDX-License-Identifier: BSD-2-Clause 6 // 7 // This file is part of CEED: http://github.com/ceed 8 9 #include <ceed/backend.h> 10 #include <ceed/ceed.h> 11 12 #include <cassert> 13 #include <string> 14 #include <sycl/sycl.hpp> 15 16 #include "../sycl/ceed-sycl-compile.hpp" 17 #include "ceed-sycl-ref.hpp" 18 19 class CeedOperatorSyclLinearDiagonal; 20 class CeedOperatorSyclLinearAssemble; 21 class CeedOperatorSyclLinearAssembleFallback; 22 23 //------------------------------------------------------------------------------ 24 // Get Basis Emode Pointer 25 //------------------------------------------------------------------------------ 26 void CeedOperatorGetBasisPointer_Sycl(const CeedScalar **basis_ptr, CeedEvalMode eval_mode, const CeedScalar *identity, const CeedScalar *interp, 27 const CeedScalar *grad) { 28 switch (eval_mode) { 29 case CEED_EVAL_NONE: 30 *basis_ptr = identity; 31 break; 32 case CEED_EVAL_INTERP: 33 *basis_ptr = interp; 34 break; 35 case CEED_EVAL_GRAD: 36 *basis_ptr = grad; 37 break; 38 case CEED_EVAL_WEIGHT: 39 case CEED_EVAL_DIV: 40 case CEED_EVAL_CURL: 41 break; // Caught by QF Assembly 42 } 43 } 44 45 //------------------------------------------------------------------------------ 46 // Destroy operator 47 //------------------------------------------------------------------------------ 48 static int CeedOperatorDestroy_Sycl(CeedOperator op) { 49 Ceed ceed; 50 Ceed_Sycl *sycl_data; 51 CeedOperator_Sycl *impl; 52 53 CeedCallBackend(CeedOperatorGetData(op, &impl)); 54 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 55 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 56 57 // Apply data 58 for (CeedInt i = 0; i < impl->num_e_in + impl->num_e_out; i++) { 59 CeedCallBackend(CeedVectorDestroy(&impl->e_vecs[i])); 60 } 61 
CeedCallBackend(CeedFree(&impl->e_vecs)); 62 63 for (CeedInt i = 0; i < impl->num_e_in; i++) { 64 CeedCallBackend(CeedVectorDestroy(&impl->q_vecs_in[i])); 65 } 66 CeedCallBackend(CeedFree(&impl->q_vecs_in)); 67 68 for (CeedInt i = 0; i < impl->num_e_out; i++) { 69 CeedCallBackend(CeedVectorDestroy(&impl->q_vecs_out[i])); 70 } 71 CeedCallBackend(CeedFree(&impl->q_vecs_out)); 72 73 // QFunction assembly data 74 for (CeedInt i = 0; i < impl->num_active_in; i++) { 75 CeedCallBackend(CeedVectorDestroy(&impl->qf_active_in[i])); 76 } 77 CeedCallBackend(CeedFree(&impl->qf_active_in)); 78 79 // Diag data 80 if (impl->diag) { 81 CeedCallBackend(CeedFree(&impl->diag->h_eval_mode_in)); 82 CeedCallBackend(CeedFree(&impl->diag->h_eval_mode_out)); 83 84 CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw()); 85 CeedCallSycl(ceed, sycl::free(impl->diag->d_eval_mode_in, sycl_data->sycl_context)); 86 CeedCallSycl(ceed, sycl::free(impl->diag->d_eval_mode_out, sycl_data->sycl_context)); 87 CeedCallSycl(ceed, sycl::free(impl->diag->d_identity, sycl_data->sycl_context)); 88 CeedCallSycl(ceed, sycl::free(impl->diag->d_interp_in, sycl_data->sycl_context)); 89 CeedCallSycl(ceed, sycl::free(impl->diag->d_interp_out, sycl_data->sycl_context)); 90 CeedCallSycl(ceed, sycl::free(impl->diag->d_grad_in, sycl_data->sycl_context)); 91 CeedCallSycl(ceed, sycl::free(impl->diag->d_grad_out, sycl_data->sycl_context)); 92 93 CeedCallBackend(CeedVectorDestroy(&impl->diag->elem_diag)); 94 CeedCallBackend(CeedVectorDestroy(&impl->diag->point_block_elem_diag)); 95 CeedCallBackend(CeedElemRestrictionDestroy(&impl->diag->diag_rstr)); 96 CeedCallBackend(CeedElemRestrictionDestroy(&impl->diag->point_block_diag_rstr)); 97 CeedCallBackend(CeedBasisDestroy(&impl->diag->basis_in)); 98 CeedCallBackend(CeedBasisDestroy(&impl->diag->basis_out)); 99 } 100 CeedCallBackend(CeedFree(&impl->diag)); 101 102 if (impl->asmb) { 103 CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw()); 104 CeedCallSycl(ceed, 
sycl::free(impl->asmb->d_B_in, sycl_data->sycl_context)); 105 CeedCallSycl(ceed, sycl::free(impl->asmb->d_B_out, sycl_data->sycl_context)); 106 } 107 CeedCallBackend(CeedFree(&impl->asmb)); 108 109 CeedCallBackend(CeedFree(&impl)); 110 CeedCallBackend(CeedDestroy(&ceed)); 111 return CEED_ERROR_SUCCESS; 112 } 113 114 //------------------------------------------------------------------------------ 115 // Setup infields or outfields 116 //------------------------------------------------------------------------------ 117 static int CeedOperatorSetupFields_Sycl(CeedQFunction qf, CeedOperator op, bool is_input, CeedVector *e_vecs, CeedVector *q_vecs, CeedInt start_e, 118 CeedInt num_fields, CeedInt Q, CeedInt num_elem) { 119 Ceed ceed; 120 CeedSize q_size; 121 bool is_strided, skip_restriction; 122 CeedInt size; 123 CeedOperatorField *op_fields; 124 CeedQFunctionField *qf_fields; 125 126 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 127 if (is_input) { 128 CeedCallBackend(CeedOperatorGetFields(op, NULL, &op_fields, NULL, NULL)); 129 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL)); 130 } else { 131 CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &op_fields)); 132 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 133 } 134 135 // Loop over fields 136 for (CeedInt i = 0; i < num_fields; i++) { 137 CeedEvalMode eval_mode; 138 CeedVector vec; 139 CeedElemRestriction elem_rstr; 140 141 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 142 143 is_strided = false; 144 skip_restriction = false; 145 if (eval_mode != CEED_EVAL_WEIGHT) { 146 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &elem_rstr)); 147 148 // Check whether this field can skip the element restriction: 149 // must be passive input, with eval_mode NONE, and have a strided restriction with CEED_STRIDES_BACKEND. 
150 151 // First, check whether the field is input or output: 152 if (is_input) { 153 // Check for passive input: 154 CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec)); 155 if (vec != CEED_VECTOR_ACTIVE) { 156 // Check eval_mode 157 if (eval_mode == CEED_EVAL_NONE) { 158 // Check for is_strided restriction 159 CeedCallBackend(CeedElemRestrictionIsStrided(elem_rstr, &is_strided)); 160 if (is_strided) { 161 // Check if vector is already in preferred backend ordering 162 CeedCallBackend(CeedElemRestrictionHasBackendStrides(elem_rstr, &skip_restriction)); 163 } 164 } 165 } 166 CeedCallBackend(CeedVectorDestroy(&vec)); 167 } 168 if (skip_restriction) { 169 // We do not need an E-Vector, but will use the input field vector's data directly in the operator application 170 e_vecs[i + start_e] = NULL; 171 } else { 172 CeedCallBackend(CeedElemRestrictionCreateVector(elem_rstr, NULL, &e_vecs[i + start_e])); 173 } 174 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 175 } 176 177 switch (eval_mode) { 178 case CEED_EVAL_NONE: 179 CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size)); 180 q_size = (CeedSize)num_elem * Q * size; 181 CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i])); 182 break; 183 case CEED_EVAL_INTERP: 184 CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size)); 185 q_size = (CeedSize)num_elem * Q * size; 186 CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i])); 187 break; 188 case CEED_EVAL_GRAD: 189 CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size)); 190 q_size = (CeedSize)num_elem * Q * size; 191 CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i])); 192 break; 193 case CEED_EVAL_WEIGHT: { 194 CeedBasis basis; 195 196 // Note: only on input fields 197 q_size = (CeedSize)num_elem * Q; 198 CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i])); 199 CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis)); 200 CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_NOTRANSPOSE, 
CEED_EVAL_WEIGHT, CEED_VECTOR_NONE, q_vecs[i])); 201 CeedCallBackend(CeedBasisDestroy(&basis)); 202 break; 203 } 204 case CEED_EVAL_DIV: 205 break; // TODO: Not implemented 206 case CEED_EVAL_CURL: 207 break; // TODO: Not implemented 208 } 209 } 210 CeedCallBackend(CeedDestroy(&ceed)); 211 return CEED_ERROR_SUCCESS; 212 } 213 214 //------------------------------------------------------------------------------ 215 // CeedOperator needs to connect all the named fields (be they active or 216 // passive) to the named inputs and outputs of its CeedQFunction. 217 //------------------------------------------------------------------------------ 218 static int CeedOperatorSetup_Sycl(CeedOperator op) { 219 bool is_setup_done; 220 CeedInt Q, num_elem, num_input_fields, num_output_fields; 221 CeedQFunctionField *qf_input_fields, *qf_output_fields; 222 CeedQFunction qf; 223 CeedOperatorField *op_input_fields, *op_output_fields; 224 CeedOperator_Sycl *impl; 225 226 CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done)); 227 if (is_setup_done) return CEED_ERROR_SUCCESS; 228 229 CeedCallBackend(CeedOperatorGetData(op, &impl)); 230 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 231 CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q)); 232 CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem)); 233 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 234 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 235 236 // Allocate 237 CeedCallBackend(CeedCalloc(num_input_fields + num_output_fields, &impl->e_vecs)); 238 239 CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->q_vecs_in)); 240 CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->q_vecs_out)); 241 242 impl->num_e_in = num_input_fields; 243 impl->num_e_out = num_output_fields; 244 245 // Set up infield and outfield e_vecs and q_vecs 246 // Infields 247 CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, 
op, true, impl->e_vecs, impl->q_vecs_in, 0, num_input_fields, Q, num_elem)); 248 // Outfields 249 CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, op, false, impl->e_vecs, impl->q_vecs_out, num_input_fields, num_output_fields, Q, num_elem)); 250 251 CeedCallBackend(CeedOperatorSetSetupDone(op)); 252 CeedCallBackend(CeedQFunctionDestroy(&qf)); 253 return CEED_ERROR_SUCCESS; 254 } 255 256 //------------------------------------------------------------------------------ 257 // Setup Operator Inputs 258 //------------------------------------------------------------------------------ 259 static inline int CeedOperatorSetupInputs_Sycl(CeedInt num_input_fields, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields, 260 CeedVector in_vec, const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX], 261 CeedOperator_Sycl *impl, CeedRequest *request) { 262 for (CeedInt i = 0; i < num_input_fields; i++) { 263 bool is_active; 264 CeedEvalMode eval_mode; 265 CeedVector vec; 266 267 // Get input vector 268 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec)); 269 is_active = vec == CEED_VECTOR_ACTIVE; 270 if (is_active) { 271 if (skip_active) continue; 272 else vec = in_vec; 273 } 274 275 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 276 if (eval_mode == CEED_EVAL_WEIGHT) { // Skip 277 } else { 278 // Restrict, if necessary 279 if (!impl->e_vecs[i]) { 280 // No restriction for this field; read data directly from vec. 
281 CeedCallBackend(CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, (const CeedScalar **)&e_data[i])); 282 } else { 283 CeedElemRestriction elem_rstr; 284 285 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &elem_rstr)); 286 CeedCallBackend(CeedElemRestrictionApply(elem_rstr, CEED_NOTRANSPOSE, vec, impl->e_vecs[i], request)); 287 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 288 CeedCallBackend(CeedVectorGetArrayRead(impl->e_vecs[i], CEED_MEM_DEVICE, (const CeedScalar **)&e_data[i])); 289 } 290 } 291 if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec)); 292 } 293 return CEED_ERROR_SUCCESS; 294 } 295 296 //------------------------------------------------------------------------------ 297 // Input Basis Action 298 //------------------------------------------------------------------------------ 299 static inline int CeedOperatorInputBasis_Sycl(CeedInt num_elem, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields, 300 CeedInt num_input_fields, const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX], 301 CeedOperator_Sycl *impl) { 302 for (CeedInt i = 0; i < num_input_fields; i++) { 303 CeedEvalMode eval_mode; 304 305 // Skip active input 306 if (skip_active) { 307 bool is_active; 308 CeedVector vec; 309 310 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec)); 311 is_active = vec == CEED_VECTOR_ACTIVE; 312 CeedCallBackend(CeedVectorDestroy(&vec)); 313 if (is_active) continue; 314 } 315 // Basis action 316 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 317 switch (eval_mode) { 318 case CEED_EVAL_NONE: 319 CeedCallBackend(CeedVectorSetArray(impl->q_vecs_in[i], CEED_MEM_DEVICE, CEED_USE_POINTER, e_data[i])); 320 break; 321 case CEED_EVAL_INTERP: 322 case CEED_EVAL_GRAD: { 323 CeedBasis basis; 324 325 CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis)); 326 CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_NOTRANSPOSE, eval_mode, 
impl->e_vecs[i], impl->q_vecs_in[i])); 327 CeedCallBackend(CeedBasisDestroy(&basis)); 328 break; 329 } 330 case CEED_EVAL_WEIGHT: 331 break; // No action 332 case CEED_EVAL_DIV: 333 break; // TODO: Not implemented 334 case CEED_EVAL_CURL: 335 break; // TODO: Not implemented 336 } 337 } 338 return CEED_ERROR_SUCCESS; 339 } 340 341 //------------------------------------------------------------------------------ 342 // Restore Input Vectors 343 //------------------------------------------------------------------------------ 344 static inline int CeedOperatorRestoreInputs_Sycl(CeedInt num_input_fields, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields, 345 const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX], CeedOperator_Sycl *impl) { 346 for (CeedInt i = 0; i < num_input_fields; i++) { 347 bool is_active; 348 CeedEvalMode eval_mode; 349 CeedVector vec; 350 351 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec)); 352 is_active = vec == CEED_VECTOR_ACTIVE; 353 // Skip active input 354 if (skip_active) { 355 if (is_active) continue; 356 } 357 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode)); 358 if (eval_mode == CEED_EVAL_WEIGHT) { // Skip 359 } else { 360 if (!impl->e_vecs[i]) { // This was a skip_restriction case 361 CeedCallBackend(CeedVectorRestoreArrayRead(vec, (const CeedScalar **)&e_data[i])); 362 } else { 363 CeedCallBackend(CeedVectorRestoreArrayRead(impl->e_vecs[i], (const CeedScalar **)&e_data[i])); 364 } 365 } 366 if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec)); 367 } 368 return CEED_ERROR_SUCCESS; 369 } 370 371 //------------------------------------------------------------------------------ 372 // Apply and add to output 373 //------------------------------------------------------------------------------ 374 static int CeedOperatorApplyAdd_Sycl(CeedOperator op, CeedVector in_vec, CeedVector out_vec, CeedRequest *request) { 375 CeedInt Q, num_elem, elem_size, 
num_input_fields, num_output_fields, size; 376 CeedEvalMode eval_mode; 377 CeedScalar *e_data[2 * CEED_FIELD_MAX] = {0}; 378 CeedQFunctionField *qf_input_fields, *qf_output_fields; 379 CeedQFunction qf; 380 CeedOperatorField *op_input_fields, *op_output_fields; 381 CeedOperator_Sycl *impl; 382 383 CeedCallBackend(CeedOperatorGetData(op, &impl)); 384 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 385 CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q)); 386 CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem)); 387 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 388 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 389 390 // Setup 391 CeedCallBackend(CeedOperatorSetup_Sycl(op)); 392 393 // Input Evecs and Restriction 394 CeedCallBackend(CeedOperatorSetupInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, in_vec, false, e_data, impl, request)); 395 396 // Input basis apply if needed 397 CeedCallBackend(CeedOperatorInputBasis_Sycl(num_elem, qf_input_fields, op_input_fields, num_input_fields, false, e_data, impl)); 398 399 // Output pointers, as necessary 400 for (CeedInt i = 0; i < num_output_fields; i++) { 401 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 402 if (eval_mode == CEED_EVAL_NONE) { 403 // Set the output Q-Vector to use the E-Vector data directly 404 CeedCallBackend(CeedVectorGetArrayWrite(impl->e_vecs[i + impl->num_e_in], CEED_MEM_DEVICE, &e_data[i + num_input_fields])); 405 CeedCallBackend(CeedVectorSetArray(impl->q_vecs_out[i], CEED_MEM_DEVICE, CEED_USE_POINTER, e_data[i + num_input_fields])); 406 } 407 } 408 409 // Q function 410 CeedCallBackend(CeedQFunctionApply(qf, num_elem * Q, impl->q_vecs_in, impl->q_vecs_out)); 411 412 // Output basis apply if needed 413 for (CeedInt i = 0; i < num_output_fields; i++) { 414 CeedElemRestriction elem_rstr; 415 416 // Get elem_size, 
eval_mode, size 417 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 418 CeedCallBackend(CeedElemRestrictionGetElementSize(elem_rstr, &elem_size)); 419 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 420 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 421 CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &size)); 422 // Basis action 423 switch (eval_mode) { 424 case CEED_EVAL_NONE: 425 break; 426 case CEED_EVAL_INTERP: 427 case CEED_EVAL_GRAD: { 428 CeedBasis basis; 429 430 CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis)); 431 CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_TRANSPOSE, eval_mode, impl->q_vecs_out[i], impl->e_vecs[i + impl->num_e_in])); 432 CeedCallBackend(CeedBasisDestroy(&basis)); 433 break; 434 } 435 // LCOV_EXCL_START 436 case CEED_EVAL_WEIGHT: 437 return CeedError(CeedOperatorReturnCeed(op), CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode"); 438 break; // Should not occur 439 case CEED_EVAL_DIV: 440 case CEED_EVAL_CURL: 441 return CeedError(CeedOperatorReturnCeed(op), CEED_ERROR_BACKEND, "%s not supported", CeedEvalModes[eval_mode]); 442 break; // Should not occur 443 // LCOV_EXCL_STOP 444 } 445 } 446 447 // Output restriction 448 for (CeedInt i = 0; i < num_output_fields; i++) { 449 bool is_active; 450 CeedEvalMode eval_mode; 451 CeedVector vec; 452 CeedElemRestriction elem_rstr; 453 454 // Restore evec 455 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode)); 456 if (eval_mode == CEED_EVAL_NONE) { 457 CeedCallBackend(CeedVectorRestoreArray(impl->e_vecs[i + impl->num_e_in], &e_data[i + num_input_fields])); 458 } 459 // Restrict 460 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec)); 461 is_active = vec == CEED_VECTOR_ACTIVE; 462 if (is_active) vec = out_vec; 463 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &elem_rstr)); 464 
CeedCallBackend(CeedElemRestrictionApply(elem_rstr, CEED_TRANSPOSE, impl->e_vecs[i + impl->num_e_in], vec, request)); 465 if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec)); 466 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 467 } 468 469 // Restore input arrays 470 CeedCallBackend(CeedOperatorRestoreInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, false, e_data, impl)); 471 CeedCallBackend(CeedQFunctionDestroy(&qf)); 472 return CEED_ERROR_SUCCESS; 473 } 474 475 //------------------------------------------------------------------------------ 476 // Core code for assembling linear QFunction 477 //------------------------------------------------------------------------------ 478 static inline int CeedOperatorLinearAssembleQFunctionCore_Sycl(CeedOperator op, bool build_objects, CeedVector *assembled, 479 CeedElemRestriction *elem_rstr, CeedRequest *request) { 480 Ceed ceed_parent; 481 CeedSize q_size; 482 CeedInt num_active_in, num_active_out, Q, num_elem, num_input_fields, num_output_fields, size; 483 CeedScalar *assembled_array, *e_data[2 * CEED_FIELD_MAX] = {NULL}; 484 CeedVector *active_in; 485 CeedQFunctionField *qf_input_fields, *qf_output_fields; 486 CeedQFunction qf; 487 CeedOperatorField *op_input_fields, *op_output_fields; 488 CeedOperator_Sycl *impl; 489 490 CeedCallBackend(CeedOperatorGetFallbackParentCeed(op, &ceed_parent)); 491 CeedCallBackend(CeedOperatorGetData(op, &impl)); 492 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 493 CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q)); 494 CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem)); 495 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields)); 496 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields)); 497 active_in = impl->qf_active_in; 498 num_active_in = impl->num_active_in, num_active_out = impl->num_active_out; 499 500 // Setup 501 
CeedCallBackend(CeedOperatorSetup_Sycl(op)); 502 503 // Input Evecs and Restriction 504 CeedCallBackend(CeedOperatorSetupInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, NULL, true, e_data, impl, request)); 505 506 // Count number of active input fields 507 if (!num_active_in) { 508 for (CeedInt i = 0; i < num_input_fields; i++) { 509 CeedScalar *q_vec_array; 510 CeedVector vec; 511 512 // Check if active input 513 CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec)); 514 if (vec == CEED_VECTOR_ACTIVE) { 515 CeedCallBackend(CeedQFunctionFieldGetSize(qf_input_fields[i], &size)); 516 CeedCallBackend(CeedVectorSetValue(impl->q_vecs_in[i], 0.0)); 517 CeedCallBackend(CeedVectorGetArray(impl->q_vecs_in[i], CEED_MEM_DEVICE, &q_vec_array)); 518 CeedCallBackend(CeedRealloc(num_active_in + size, &active_in)); 519 for (CeedInt field = 0; field < size; field++) { 520 q_size = (CeedSize)Q * num_elem; 521 CeedCallBackend(CeedVectorCreate(ceed_parent, q_size, &active_in[num_active_in + field])); 522 CeedCallBackend(CeedVectorSetArray(active_in[num_active_in + field], CEED_MEM_DEVICE, CEED_USE_POINTER, 523 &q_vec_array[field * Q * num_elem])); 524 } 525 num_active_in += size; 526 CeedCallBackend(CeedVectorRestoreArray(impl->q_vecs_in[i], &q_vec_array)); 527 } 528 CeedCallBackend(CeedVectorDestroy(&vec)); 529 } 530 impl->num_active_in = num_active_in; 531 impl->qf_active_in = active_in; 532 } 533 534 // Count number of active output fields 535 if (!num_active_out) { 536 for (CeedInt i = 0; i < num_output_fields; i++) { 537 CeedVector vec; 538 539 // Check if active output 540 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec)); 541 if (vec == CEED_VECTOR_ACTIVE) { 542 CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &size)); 543 num_active_out += size; 544 } 545 CeedCallBackend(CeedVectorDestroy(&vec)); 546 } 547 impl->num_active_out = num_active_out; 548 } 549 550 // Check sizes 551 CeedCheck(num_active_in > 0 && 
num_active_out > 0, CeedOperatorReturnCeed(op), CEED_ERROR_BACKEND, 552 "Cannot assemble QFunction without active inputs and outputs"); 553 554 // Build objects if needed 555 if (build_objects) { 556 CeedSize l_size = (CeedSize)num_elem * Q * num_active_in * num_active_out; 557 CeedInt strides[3] = {1, num_elem * Q, Q}; /* *NOPAD* */ 558 559 // Create output restriction 560 CeedCallBackend(CeedElemRestrictionCreateStrided(ceed_parent, num_elem, Q, num_active_in * num_active_out, l_size, strides, elem_rstr)); 561 // Create assembled vector 562 CeedCallBackend(CeedVectorCreate(ceed_parent, l_size, assembled)); 563 } 564 CeedCallBackend(CeedVectorSetValue(*assembled, 0.0)); 565 CeedCallBackend(CeedVectorGetArray(*assembled, CEED_MEM_DEVICE, &assembled_array)); 566 567 // Input basis apply 568 CeedCallBackend(CeedOperatorInputBasis_Sycl(num_elem, qf_input_fields, op_input_fields, num_input_fields, true, e_data, impl)); 569 570 // Assemble QFunction 571 for (CeedInt in = 0; in < num_active_in; in++) { 572 // Set Inputs 573 CeedCallBackend(CeedVectorSetValue(active_in[in], 1.0)); 574 if (num_active_in > 1) { 575 CeedCallBackend(CeedVectorSetValue(active_in[(in + num_active_in - 1) % num_active_in], 0.0)); 576 } 577 // Set Outputs 578 for (CeedInt out = 0; out < num_output_fields; out++) { 579 CeedVector vec; 580 581 // Check if active output 582 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[out], &vec)); 583 if (vec == CEED_VECTOR_ACTIVE) { 584 CeedCallBackend(CeedVectorSetArray(impl->q_vecs_out[out], CEED_MEM_DEVICE, CEED_USE_POINTER, assembled_array)); 585 CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[out], &size)); 586 assembled_array += size * Q * num_elem; // Advance the pointer by the size of the output 587 } 588 CeedCallBackend(CeedVectorDestroy(&vec)); 589 } 590 // Apply QFunction 591 CeedCallBackend(CeedQFunctionApply(qf, Q * num_elem, impl->q_vecs_in, impl->q_vecs_out)); 592 } 593 594 // Un-set output Qvecs to prevent accidental 
overwrite of Assembled 595 for (CeedInt out = 0; out < num_output_fields; out++) { 596 CeedVector vec; 597 598 // Check if active output 599 CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[out], &vec)); 600 if (vec == CEED_VECTOR_ACTIVE) { 601 CeedCallBackend(CeedVectorTakeArray(impl->q_vecs_out[out], CEED_MEM_DEVICE, NULL)); 602 } 603 CeedCallBackend(CeedVectorDestroy(&vec)); 604 } 605 606 // Restore input arrays 607 CeedCallBackend(CeedOperatorRestoreInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, true, e_data, impl)); 608 609 // Restore output 610 CeedCallBackend(CeedVectorRestoreArray(*assembled, &assembled_array)); 611 CeedCallBackend(CeedDestroy(&ceed_parent)); 612 CeedCallBackend(CeedQFunctionDestroy(&qf)); 613 return CEED_ERROR_SUCCESS; 614 } 615 616 //------------------------------------------------------------------------------ 617 // Assemble Linear QFunction 618 //------------------------------------------------------------------------------ 619 static int CeedOperatorLinearAssembleQFunction_Sycl(CeedOperator op, CeedVector *assembled, CeedElemRestriction *elem_rstr, CeedRequest *request) { 620 return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, true, assembled, elem_rstr, request); 621 } 622 623 //------------------------------------------------------------------------------ 624 // Update Assembled Linear QFunction 625 //------------------------------------------------------------------------------ 626 static int CeedOperatorLinearAssembleQFunctionUpdate_Sycl(CeedOperator op, CeedVector assembled, CeedElemRestriction elem_rstr, 627 CeedRequest *request) { 628 return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, false, &assembled, &elem_rstr, request); 629 } 630 631 //------------------------------------------------------------------------------ 632 // Assemble diagonal setup 633 //------------------------------------------------------------------------------ 634 static inline int 
CeedOperatorAssembleDiagonalSetup_Sycl(CeedOperator op) { 635 Ceed ceed; 636 Ceed_Sycl *sycl_data; 637 CeedInt num_input_fields, num_output_fields, num_eval_mode_in = 0, num_comp = 0, dim = 1, num_eval_mode_out = 0; 638 CeedEvalMode *eval_mode_in = NULL, *eval_mode_out = NULL; 639 CeedElemRestriction rstr_in = NULL, rstr_out = NULL; 640 CeedBasis basis_in = NULL, basis_out = NULL; 641 CeedQFunctionField *qf_fields; 642 CeedQFunction qf; 643 CeedOperatorField *op_fields; 644 CeedOperator_Sycl *impl; 645 646 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 647 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 648 CeedCallBackend(CeedQFunctionGetNumArgs(qf, &num_input_fields, &num_output_fields)); 649 650 // Determine active input basis 651 CeedCallBackend(CeedOperatorGetFields(op, NULL, &op_fields, NULL, NULL)); 652 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL)); 653 for (CeedInt i = 0; i < num_input_fields; i++) { 654 CeedVector vec; 655 656 CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec)); 657 if (vec == CEED_VECTOR_ACTIVE) { 658 CeedEvalMode eval_mode; 659 CeedElemRestriction elem_rstr; 660 CeedBasis basis; 661 662 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &elem_rstr)); 663 if (!rstr_in) CeedCallBackend(CeedElemRestrictionReferenceCopy(elem_rstr, &rstr_in)); 664 CeedCheck(rstr_in == elem_rstr, ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly"); 665 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 666 CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis)); 667 if (!basis_in) CeedCallBackend(CeedBasisReferenceCopy(basis, &basis_in)); 668 CeedCheck(basis_in == basis, ceed, CEED_ERROR_BACKEND, "Backend does not implement operator diagonal assembly with multiple active bases"); 669 CeedCallBackend(CeedBasisDestroy(&basis)); 670 CeedCallBackend(CeedBasisGetDimension(basis_in, &dim)); 671 
CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 672 switch (eval_mode) { 673 case CEED_EVAL_NONE: 674 case CEED_EVAL_INTERP: 675 CeedCallBackend(CeedRealloc(num_eval_mode_in + 1, &eval_mode_in)); 676 eval_mode_in[num_eval_mode_in] = eval_mode; 677 num_eval_mode_in += 1; 678 break; 679 case CEED_EVAL_GRAD: 680 CeedCallBackend(CeedRealloc(num_eval_mode_in + dim, &eval_mode_in)); 681 for (CeedInt d = 0; d < dim; d++) eval_mode_in[num_eval_mode_in + d] = eval_mode; 682 num_eval_mode_in += dim; 683 break; 684 case CEED_EVAL_WEIGHT: 685 case CEED_EVAL_DIV: 686 case CEED_EVAL_CURL: 687 break; // Caught by QF Assembly 688 } 689 } 690 CeedCallBackend(CeedVectorDestroy(&vec)); 691 } 692 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_in)); 693 694 // Determine active output basis 695 CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &op_fields)); 696 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 697 for (CeedInt i = 0; i < num_output_fields; i++) { 698 CeedVector vec; 699 700 CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec)); 701 if (vec == CEED_VECTOR_ACTIVE) { 702 CeedEvalMode eval_mode; 703 CeedElemRestriction elem_rstr; 704 CeedBasis basis; 705 706 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &elem_rstr)); 707 if (!rstr_out) CeedCallBackend(CeedElemRestrictionReferenceCopy(elem_rstr, &rstr_out)); 708 CeedCheck(rstr_out == elem_rstr, ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly"); 709 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 710 CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis)); 711 if (!basis_out) CeedCallBackend(CeedBasisReferenceCopy(basis, &basis_out)); 712 CeedCheck(basis_out == basis, ceed, CEED_ERROR_BACKEND, "Backend does not implement operator diagonal assembly with multiple active bases"); 713 CeedCallBackend(CeedBasisDestroy(&basis)); 714 
CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 715 switch (eval_mode) { 716 case CEED_EVAL_NONE: 717 case CEED_EVAL_INTERP: 718 CeedCallBackend(CeedRealloc(num_eval_mode_out + 1, &eval_mode_out)); 719 eval_mode_out[num_eval_mode_out] = eval_mode; 720 num_eval_mode_out += 1; 721 break; 722 case CEED_EVAL_GRAD: 723 CeedCallBackend(CeedRealloc(num_eval_mode_out + dim, &eval_mode_out)); 724 for (CeedInt d = 0; d < dim; d++) eval_mode_out[num_eval_mode_out + d] = eval_mode; 725 num_eval_mode_out += dim; 726 break; 727 case CEED_EVAL_WEIGHT: 728 case CEED_EVAL_DIV: 729 case CEED_EVAL_CURL: 730 break; // Caught by QF Assembly 731 } 732 } 733 CeedCallBackend(CeedVectorDestroy(&vec)); 734 } 735 736 // Operator data struct 737 CeedCallBackend(CeedOperatorGetData(op, &impl)); 738 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 739 CeedCallBackend(CeedCalloc(1, &impl->diag)); 740 CeedOperatorDiag_Sycl *diag = impl->diag; 741 742 CeedCallBackend(CeedBasisReferenceCopy(basis_in, &diag->basis_in)); 743 CeedCallBackend(CeedBasisReferenceCopy(basis_out, &diag->basis_out)); 744 diag->h_eval_mode_in = eval_mode_in; 745 diag->h_eval_mode_out = eval_mode_out; 746 diag->num_eval_mode_in = num_eval_mode_in; 747 diag->num_eval_mode_out = num_eval_mode_out; 748 749 // Kernel parameters 750 CeedInt num_nodes, num_qpts; 751 CeedCallBackend(CeedBasisGetNumNodes(basis_in, &num_nodes)); 752 CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis_in, &num_qpts)); 753 CeedCallBackend(CeedBasisGetNumComponents(basis_in, &num_comp)); 754 diag->num_nodes = num_nodes; 755 diag->num_qpts = num_qpts; 756 diag->num_comp = num_comp; 757 758 // Basis matrices 759 const CeedInt i_len = num_qpts * num_nodes; 760 const CeedInt g_len = num_qpts * num_nodes * dim; 761 const CeedScalar *interp_in, *interp_out, *grad_in, *grad_out; 762 763 // CEED_EVAL_NONE 764 CeedScalar *identity = NULL; 765 bool has_eval_none = false; 766 for (CeedInt i = 0; i < num_eval_mode_in; i++) has_eval_none = 
has_eval_none || (eval_mode_in[i] == CEED_EVAL_NONE); 767 for (CeedInt i = 0; i < num_eval_mode_out; i++) has_eval_none = has_eval_none || (eval_mode_out[i] == CEED_EVAL_NONE); 768 769 std::vector<sycl::event> e; 770 771 if (!sycl_data->sycl_queue.is_in_order()) e = {sycl_data->sycl_queue.ext_oneapi_submit_barrier()}; 772 773 std::vector<sycl::event> copy_events; 774 775 if (has_eval_none) { 776 CeedCallBackend(CeedCalloc(num_qpts * num_nodes, &identity)); 777 for (CeedSize i = 0; i < (num_nodes < num_qpts ? num_nodes : num_qpts); i++) identity[i * num_nodes + i] = 1.0; 778 CeedCallSycl(ceed, diag->d_identity = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 779 sycl::event identity_copy = sycl_data->sycl_queue.copy<CeedScalar>(identity, diag->d_identity, i_len, e); 780 copy_events.push_back(identity_copy); 781 } 782 783 // CEED_EVAL_INTERP 784 CeedCallBackend(CeedBasisGetInterp(basis_in, &interp_in)); 785 CeedCallSycl(ceed, diag->d_interp_in = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 786 sycl::event interp_in_copy = sycl_data->sycl_queue.copy<CeedScalar>(interp_in, diag->d_interp_in, i_len, e); 787 copy_events.push_back(interp_in_copy); 788 789 CeedCallBackend(CeedBasisGetInterp(basis_out, &interp_out)); 790 CeedCallSycl(ceed, diag->d_interp_out = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 791 sycl::event interp_out_copy = sycl_data->sycl_queue.copy<CeedScalar>(interp_out, diag->d_interp_out, i_len, e); 792 copy_events.push_back(interp_out_copy); 793 794 // CEED_EVAL_GRAD 795 CeedCallBackend(CeedBasisGetGrad(basis_in, &grad_in)); 796 CeedCallSycl(ceed, diag->d_grad_in = sycl::malloc_device<CeedScalar>(g_len, sycl_data->sycl_device, sycl_data->sycl_context)); 797 sycl::event grad_in_copy = sycl_data->sycl_queue.copy<CeedScalar>(grad_in, diag->d_grad_in, g_len, e); 798 copy_events.push_back(grad_in_copy); 799 800 
CeedCallBackend(CeedBasisGetGrad(basis_out, &grad_out)); 801 CeedCallSycl(ceed, diag->d_grad_out = sycl::malloc_device<CeedScalar>(g_len, sycl_data->sycl_device, sycl_data->sycl_context)); 802 sycl::event grad_out_copy = sycl_data->sycl_queue.copy<CeedScalar>(grad_out, diag->d_grad_out, g_len, e); 803 copy_events.push_back(grad_out_copy); 804 805 // Arrays of eval_modes 806 CeedCallSycl(ceed, diag->d_eval_mode_in = sycl::malloc_device<CeedEvalMode>(num_eval_mode_in, sycl_data->sycl_device, sycl_data->sycl_context)); 807 sycl::event eval_mode_in_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(eval_mode_in, diag->d_eval_mode_in, num_eval_mode_in, e); 808 copy_events.push_back(eval_mode_in_copy); 809 810 CeedCallSycl(ceed, diag->d_eval_mode_out = sycl::malloc_device<CeedEvalMode>(num_eval_mode_out, sycl_data->sycl_device, sycl_data->sycl_context)); 811 sycl::event eval_mode_out_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(eval_mode_out, diag->d_eval_mode_out, num_eval_mode_out, e); 812 copy_events.push_back(eval_mode_out_copy); 813 814 // Restriction 815 CeedCallBackend(CeedElemRestrictionReferenceCopy(rstr_out, &diag->diag_rstr)); 816 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_out)); 817 818 // Wait for all copies to complete and handle exceptions 819 CeedCallSycl(ceed, sycl::event::wait_and_throw(copy_events)); 820 821 // Cleanup 822 CeedCallBackend(CeedDestroy(&ceed)); 823 CeedCallBackend(CeedBasisDestroy(&basis_in)); 824 CeedCallBackend(CeedBasisDestroy(&basis_out)); 825 CeedCallBackend(CeedQFunctionDestroy(&qf)); 826 return CEED_ERROR_SUCCESS; 827 } 828 829 //------------------------------------------------------------------------------ 830 // Kernel for diagonal assembly 831 //------------------------------------------------------------------------------ 832 static int CeedOperatorLinearDiagonal_Sycl(sycl::queue &sycl_queue, const bool is_point_block, const CeedInt num_elem, 833 const CeedOperatorDiag_Sycl *diag, const CeedScalar *assembled_qf_array, 
CeedScalar *elem_diag_array) { 834 const CeedSize num_nodes = diag->num_nodes; 835 const CeedSize num_qpts = diag->num_qpts; 836 const CeedSize num_comp = diag->num_comp; 837 const CeedSize num_eval_mode_in = diag->num_eval_mode_in; 838 const CeedSize num_eval_mode_out = diag->num_eval_mode_out; 839 const CeedScalar *identity = diag->d_identity; 840 const CeedScalar *interp_in = diag->d_interp_in; 841 const CeedScalar *grad_in = diag->d_grad_in; 842 const CeedScalar *interp_out = diag->d_interp_out; 843 const CeedScalar *grad_out = diag->d_grad_out; 844 const CeedEvalMode *eval_mode_in = diag->d_eval_mode_in; 845 const CeedEvalMode *eval_mode_out = diag->d_eval_mode_out; 846 847 sycl::range<1> kernel_range(num_elem * num_nodes); 848 849 std::vector<sycl::event> e; 850 851 if (!sycl_queue.is_in_order()) e = {sycl_queue.ext_oneapi_submit_barrier()}; 852 853 sycl_queue.parallel_for<CeedOperatorSyclLinearDiagonal>(kernel_range, e, [=](sycl::id<1> idx) { 854 const CeedInt tid = idx % num_nodes; 855 const CeedInt e = idx / num_nodes; 856 857 // Compute the diagonal of B^T D B 858 // Each element 859 CeedInt d_out = -1; 860 // Each basis eval mode pair 861 for (CeedSize e_out = 0; e_out < num_eval_mode_out; e_out++) { 862 const CeedScalar *bt = NULL; 863 864 if (eval_mode_out[e_out] == CEED_EVAL_GRAD) ++d_out; 865 CeedOperatorGetBasisPointer_Sycl(&bt, eval_mode_out[e_out], identity, interp_out, &grad_out[d_out * num_qpts * num_nodes]); 866 CeedInt d_in = -1; 867 868 for (CeedSize e_in = 0; e_in < num_eval_mode_in; e_in++) { 869 const CeedScalar *b = NULL; 870 871 if (eval_mode_in[e_in] == CEED_EVAL_GRAD) ++d_in; 872 CeedOperatorGetBasisPointer_Sycl(&b, eval_mode_in[e_in], identity, interp_in, &grad_in[d_in * num_qpts * num_nodes]); 873 // Each component 874 for (CeedSize comp_out = 0; comp_out < num_comp; comp_out++) { 875 // Each qpoint/node pair 876 if (is_point_block) { 877 // Point Block Diagonal 878 for (CeedInt comp_in = 0; comp_in < num_comp; comp_in++) { 879 
CeedScalar e_value = 0.0; 880 881 for (CeedSize q = 0; q < num_qpts; q++) { 882 const CeedScalar qf_value = 883 assembled_qf_array[((((e_in * num_comp + comp_in) * num_eval_mode_out + e_out) * num_comp + comp_out) * num_elem + e) * num_qpts + 884 q]; 885 886 e_value += bt[q * num_nodes + tid] * qf_value * b[q * num_nodes + tid]; 887 } 888 elem_diag_array[((comp_out * num_comp + comp_in) * num_elem + e) * num_nodes + tid] += e_value; 889 } 890 } else { 891 // Diagonal Only 892 CeedScalar e_value = 0.0; 893 894 for (CeedSize q = 0; q < num_qpts; q++) { 895 const CeedScalar qf_value = 896 assembled_qf_array[((((e_in * num_comp + comp_out) * num_eval_mode_out + e_out) * num_comp + comp_out) * num_elem + e) * num_qpts + 897 q]; 898 e_value += bt[q * num_nodes + tid] * qf_value * b[q * num_nodes + tid]; 899 } 900 elem_diag_array[(comp_out * num_elem + e) * num_nodes + tid] += e_value; 901 } 902 } 903 } 904 } 905 }); 906 return CEED_ERROR_SUCCESS; 907 } 908 909 //------------------------------------------------------------------------------ 910 // Assemble diagonal common code 911 //------------------------------------------------------------------------------ 912 static inline int CeedOperatorAssembleDiagonalCore_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request, const bool is_point_block) { 913 Ceed ceed; 914 Ceed_Sycl *sycl_data; 915 CeedInt num_elem; 916 CeedScalar *elem_diag_array; 917 const CeedScalar *assembled_qf_array; 918 CeedVector assembled_qf = NULL; 919 CeedOperator_Sycl *impl; 920 921 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 922 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 923 CeedCallBackend(CeedDestroy(&ceed)); 924 CeedCallBackend(CeedOperatorGetData(op, &impl)); 925 926 // Assemble QFunction 927 { 928 CeedElemRestriction elem_rstr = NULL; 929 930 CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembled_qf, &elem_rstr, request)); 931 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 932 } 933 934 // 
Setup 935 if (!impl->diag) { 936 CeedCallBackend(CeedOperatorAssembleDiagonalSetup_Sycl(op)); 937 } 938 CeedOperatorDiag_Sycl *diag = impl->diag; 939 940 assert(diag != NULL); 941 942 // Restriction 943 if (is_point_block && !diag->point_block_diag_rstr) { 944 CeedCallBackend(CeedOperatorCreateActivePointBlockRestriction(diag->diag_rstr, &diag->point_block_diag_rstr)); 945 } 946 CeedElemRestriction diag_rstr = is_point_block ? diag->point_block_diag_rstr : diag->diag_rstr; 947 948 // Create diagonal vector 949 CeedVector elem_diag = is_point_block ? diag->point_block_elem_diag : diag->elem_diag; 950 951 if (!elem_diag) { 952 CeedCallBackend(CeedElemRestrictionCreateVector(diag_rstr, NULL, &elem_diag)); 953 if (is_point_block) diag->point_block_elem_diag = elem_diag; 954 else diag->elem_diag = elem_diag; 955 } 956 CeedCallBackend(CeedVectorSetValue(elem_diag, 0.0)); 957 958 // Assemble element operator diagonals 959 CeedCallBackend(CeedVectorGetArray(elem_diag, CEED_MEM_DEVICE, &elem_diag_array)); 960 CeedCallBackend(CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &assembled_qf_array)); 961 CeedCallBackend(CeedElemRestrictionGetNumElements(diag_rstr, &num_elem)); 962 963 // Compute the diagonal of B^T D B 964 CeedCallBackend(CeedOperatorLinearDiagonal_Sycl(sycl_data->sycl_queue, is_point_block, num_elem, diag, assembled_qf_array, elem_diag_array)); 965 966 // Wait for queue to complete and handle exceptions 967 sycl_data->sycl_queue.wait_and_throw(); 968 969 // Restore arrays 970 CeedCallBackend(CeedVectorRestoreArray(elem_diag, &elem_diag_array)); 971 CeedCallBackend(CeedVectorRestoreArrayRead(assembled_qf, &assembled_qf_array)); 972 973 // Assemble local operator diagonal 974 CeedCallBackend(CeedElemRestrictionApply(diag_rstr, CEED_TRANSPOSE, elem_diag, assembled, request)); 975 976 // Cleanup 977 CeedCallBackend(CeedVectorDestroy(&assembled_qf)); 978 return CEED_ERROR_SUCCESS; 979 } 980 981 
//------------------------------------------------------------------------------ 982 // Assemble Linear Diagonal 983 //------------------------------------------------------------------------------ 984 static int CeedOperatorLinearAssembleAddDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) { 985 CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, false)); 986 return CEED_ERROR_SUCCESS; 987 } 988 989 //------------------------------------------------------------------------------ 990 // Assemble Linear Point Block Diagonal 991 //------------------------------------------------------------------------------ 992 static int CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) { 993 CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, true)); 994 return CEED_ERROR_SUCCESS; 995 } 996 997 //------------------------------------------------------------------------------ 998 // Single operator assembly setup 999 //------------------------------------------------------------------------------ 1000 static int CeedOperatorAssembleSingleSetup_Sycl(CeedOperator op) { 1001 Ceed ceed; 1002 CeedInt num_input_fields, num_output_fields, num_eval_mode_in = 0, dim = 1, num_B_in_mats_to_load = 0, size_B_in = 0, num_eval_mode_out = 0, 1003 num_B_out_mats_to_load = 0, size_B_out = 0, num_qpts = 0, elem_size = 0, num_elem, num_comp, 1004 mat_start = 0; 1005 CeedEvalMode *eval_mode_in = NULL, *eval_mode_out = NULL; 1006 const CeedScalar *interp_in, *grad_in; 1007 CeedElemRestriction rstr_in = NULL, rstr_out = NULL; 1008 CeedBasis basis_in = NULL, basis_out = NULL; 1009 CeedQFunctionField *qf_fields; 1010 CeedQFunction qf; 1011 CeedOperatorField *input_fields, *output_fields; 1012 CeedOperator_Sycl *impl; 1013 1014 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1015 CeedCallBackend(CeedOperatorGetData(op, &impl)); 1016 1017 // Get input and output fields 
1018 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &input_fields, &num_output_fields, &output_fields)); 1019 1020 // Determine active input basis eval mode 1021 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 1022 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL)); 1023 // Note that the kernel will treat each dimension of a gradient action separately; 1024 // i.e., when an active input has a CEED_EVAL_GRAD mode, num_ eval_mode_in will increment by dim. 1025 // However, for the purposes of loading the B matrices, it will be treated as one mode, and we will load/copy the entire gradient matrix at once, 1026 // so num_B_in_mats_to_load will be incremented by 1. 1027 for (CeedInt i = 0; i < num_input_fields; i++) { 1028 CeedVector vec; 1029 1030 CeedCallBackend(CeedOperatorFieldGetVector(input_fields[i], &vec)); 1031 if (vec == CEED_VECTOR_ACTIVE) { 1032 CeedEvalMode eval_mode; 1033 CeedElemRestriction elem_rstr; 1034 CeedBasis basis; 1035 1036 CeedCallBackend(CeedOperatorFieldGetElemRestriction(input_fields[i], &elem_rstr)); 1037 if (!rstr_in) CeedCallBackend(CeedElemRestrictionReferenceCopy(elem_rstr, &rstr_in)); 1038 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1039 CeedCallBackend(CeedElemRestrictionGetElementSize(rstr_in, &elem_size)); 1040 CeedCallBackend(CeedOperatorFieldGetBasis(input_fields[i], &basis)); 1041 if (!basis_in) CeedCallBackend(CeedBasisReferenceCopy(basis, &basis_in)); 1042 CeedCheck(basis_in == basis, ceed, CEED_ERROR_BACKEND, "Backend does not implement operator assembly with multiple active bases"); 1043 CeedCallBackend(CeedBasisDestroy(&basis)); 1044 CeedCallBackend(CeedBasisGetDimension(basis_in, &dim)); 1045 CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis_in, &num_qpts)); 1046 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1047 if (eval_mode != CEED_EVAL_NONE) { 1048 CeedCallBackend(CeedRealloc(num_B_in_mats_to_load + 1, &eval_mode_in)); 1049 
eval_mode_in[num_B_in_mats_to_load] = eval_mode; 1050 num_B_in_mats_to_load += 1; 1051 if (eval_mode == CEED_EVAL_GRAD) { 1052 num_eval_mode_in += dim; 1053 size_B_in += dim * elem_size * num_qpts; 1054 } else { 1055 num_eval_mode_in += 1; 1056 size_B_in += elem_size * num_qpts; 1057 } 1058 } 1059 } 1060 CeedCallBackend(CeedVectorDestroy(&vec)); 1061 } 1062 1063 // Determine active output basis; basis_out and rstr_out only used if same as input, TODO 1064 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 1065 for (CeedInt i = 0; i < num_output_fields; i++) { 1066 CeedVector vec; 1067 1068 CeedCallBackend(CeedOperatorFieldGetVector(output_fields[i], &vec)); 1069 if (vec == CEED_VECTOR_ACTIVE) { 1070 CeedEvalMode eval_mode; 1071 CeedElemRestriction elem_rstr; 1072 CeedBasis basis; 1073 1074 CeedCallBackend(CeedOperatorFieldGetElemRestriction(output_fields[i], &elem_rstr)); 1075 if (!rstr_out) CeedCallBackend(CeedElemRestrictionReferenceCopy(elem_rstr, &rstr_out)); 1076 CeedCheck(rstr_out == rstr_in, ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator assembly"); 1077 CeedCallBackend(CeedElemRestrictionDestroy(&elem_rstr)); 1078 CeedCallBackend(CeedOperatorFieldGetBasis(output_fields[i], &basis)); 1079 if (!basis_out) CeedCallBackend(CeedBasisReferenceCopy(basis, &basis_out)); 1080 CeedCheck(basis_out == basis, ceed, CEED_ERROR_BACKEND, "Backend does not implement operator assembly with multiple active bases"); 1081 CeedCallBackend(CeedBasisDestroy(&basis)); 1082 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1083 if (eval_mode != CEED_EVAL_NONE) { 1084 CeedCallBackend(CeedRealloc(num_B_out_mats_to_load + 1, &eval_mode_out)); 1085 eval_mode_out[num_B_out_mats_to_load] = eval_mode; 1086 num_B_out_mats_to_load += 1; 1087 if (eval_mode == CEED_EVAL_GRAD) { 1088 num_eval_mode_out += dim; 1089 size_B_out += dim * elem_size * num_qpts; 1090 } else { 1091 num_eval_mode_out += 1; 1092 
size_B_out += elem_size * num_qpts; 1093 } 1094 } 1095 } 1096 CeedCallBackend(CeedVectorDestroy(&vec)); 1097 } 1098 CeedCheck(num_eval_mode_in > 0 && num_eval_mode_out > 0, ceed, CEED_ERROR_UNSUPPORTED, "Cannot assemble operator without inputs/outputs"); 1099 1100 CeedCallBackend(CeedElemRestrictionGetNumElements(rstr_in, &num_elem)); 1101 CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr_in, &num_comp)); 1102 1103 CeedCallBackend(CeedCalloc(1, &impl->asmb)); 1104 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1105 asmb->num_elem = num_elem; 1106 1107 Ceed_Sycl *sycl_data; 1108 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 1109 1110 // Kernel setup 1111 int elems_per_block = 1; 1112 asmb->elems_per_block = elems_per_block; 1113 asmb->block_size_x = elem_size; 1114 asmb->block_size_y = elem_size; 1115 asmb->num_eval_mode_in = num_eval_mode_in; 1116 asmb->num_eval_mode_out = num_eval_mode_out; 1117 asmb->num_qpts = num_qpts; 1118 asmb->num_nodes = elem_size; 1119 asmb->block_size = elem_size * elem_size * elems_per_block; 1120 asmb->num_comp = num_comp; 1121 1122 // Build 'full' B matrices (not 1D arrays used for tensor-product matrices 1123 CeedCallBackend(CeedBasisGetInterp(basis_in, &interp_in)); 1124 CeedCallBackend(CeedBasisGetGrad(basis_in, &grad_in)); 1125 1126 // Load into B_in, in order that they will be used in eval_mode 1127 CeedCallSycl(ceed, asmb->d_B_in = sycl::malloc_device<CeedScalar>(size_B_in, sycl_data->sycl_device, sycl_data->sycl_context)); 1128 for (int i = 0; i < num_B_in_mats_to_load; i++) { 1129 CeedEvalMode eval_mode = eval_mode_in[i]; 1130 1131 if (eval_mode == CEED_EVAL_INTERP) { 1132 std::vector<sycl::event> e; 1133 1134 if (!sycl_data->sycl_queue.is_in_order()) e = {sycl_data->sycl_queue.ext_oneapi_submit_barrier()}; 1135 sycl_data->sycl_queue.copy<CeedScalar>(interp_in, &asmb->d_B_in[mat_start], elem_size * num_qpts, e); 1136 mat_start += elem_size * num_qpts; 1137 } else if (eval_mode == CEED_EVAL_GRAD) { 1138 
std::vector<sycl::event> e; 1139 1140 if (!sycl_data->sycl_queue.is_in_order()) e = {sycl_data->sycl_queue.ext_oneapi_submit_barrier()}; 1141 sycl_data->sycl_queue.copy<CeedScalar>(grad_in, &asmb->d_B_in[mat_start], dim * elem_size * num_qpts, e); 1142 mat_start += dim * elem_size * num_qpts; 1143 } 1144 } 1145 1146 const CeedScalar *interp_out, *grad_out; 1147 // Note that this function currently assumes 1 basis, so this should always be true 1148 // for now 1149 if (basis_out == basis_in) { 1150 interp_out = interp_in; 1151 grad_out = grad_in; 1152 } else { 1153 CeedCallBackend(CeedBasisGetInterp(basis_out, &interp_out)); 1154 CeedCallBackend(CeedBasisGetGrad(basis_out, &grad_out)); 1155 } 1156 1157 // Load into B_out, in order that they will be used in eval_mode 1158 mat_start = 0; 1159 CeedCallSycl(ceed, asmb->d_B_out = sycl::malloc_device<CeedScalar>(size_B_out, sycl_data->sycl_device, sycl_data->sycl_context)); 1160 for (int i = 0; i < num_B_out_mats_to_load; i++) { 1161 CeedEvalMode eval_mode = eval_mode_out[i]; 1162 1163 if (eval_mode == CEED_EVAL_INTERP) { 1164 std::vector<sycl::event> e; 1165 1166 if (!sycl_data->sycl_queue.is_in_order()) e = {sycl_data->sycl_queue.ext_oneapi_submit_barrier()}; 1167 sycl_data->sycl_queue.copy<CeedScalar>(interp_out, &asmb->d_B_out[mat_start], elem_size * num_qpts, e); 1168 mat_start += elem_size * num_qpts; 1169 } else if (eval_mode == CEED_EVAL_GRAD) { 1170 std::vector<sycl::event> e; 1171 1172 if (!sycl_data->sycl_queue.is_in_order()) e = {sycl_data->sycl_queue.ext_oneapi_submit_barrier()}; 1173 sycl_data->sycl_queue.copy<CeedScalar>(grad_out, &asmb->d_B_out[mat_start], dim * elem_size * num_qpts, e); 1174 mat_start += dim * elem_size * num_qpts; 1175 } 1176 } 1177 CeedCallBackend(CeedDestroy(&ceed)); 1178 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_in)); 1179 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_out)); 1180 CeedCallBackend(CeedBasisDestroy(&basis_in)); 1181 
CeedCallBackend(CeedBasisDestroy(&basis_out)); 1182 CeedCallBackend(CeedQFunctionDestroy(&qf)); 1183 return CEED_ERROR_SUCCESS; 1184 } 1185 1186 //------------------------------------------------------------------------------ 1187 // Matrix assembly kernel for low-order elements (3D thread block) 1188 //------------------------------------------------------------------------------ 1189 static int CeedOperatorLinearAssemble_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array, 1190 CeedScalar *values_array) { 1191 // This kernels assumes B_in and B_out have the same number of quadrature points and basis points. 1192 // TODO: expand to more general cases 1193 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1194 const CeedInt num_elem = asmb->num_elem; 1195 const CeedSize num_nodes = asmb->num_nodes; 1196 const CeedSize num_comp = asmb->num_comp; 1197 const CeedSize num_qpts = asmb->num_qpts; 1198 const CeedSize num_eval_mode_in = asmb->num_eval_mode_in; 1199 const CeedSize num_eval_mode_out = asmb->num_eval_mode_out; 1200 1201 // Strides for final output ordering, determined by the reference (inference) implementation of the symbolic assembly, slowest --> fastest: element, 1202 // comp_in, comp_out, node_row, node_col 1203 const CeedSize comp_out_stride = num_nodes * num_nodes; 1204 const CeedSize comp_in_stride = comp_out_stride * num_comp; 1205 const CeedSize e_stride = comp_in_stride * num_comp; 1206 // Strides for QF array, slowest --> fastest: eval_mode_in, comp_in, eval_mode_out, comp_out, elem, qpt 1207 const CeedSize q_e_stride = num_qpts; 1208 const CeedSize q_comp_out_stride = num_elem * q_e_stride; 1209 const CeedSize q_eval_mode_out_stride = q_comp_out_stride * num_comp; 1210 const CeedSize q_comp_in_stride = q_eval_mode_out_stride * num_eval_mode_out; 1211 const CeedSize q_eval_mode_in_stride = q_comp_in_stride * num_comp; 1212 1213 CeedScalar *B_in, *B_out; 1214 B_in = asmb->d_B_in; 1215 B_out = asmb->d_B_out; 1216 const 
CeedInt block_size_x = asmb->block_size_x; 1217 const CeedInt block_size_y = asmb->block_size_y; 1218 1219 sycl::range<3> kernel_range(num_elem, block_size_y, block_size_x); 1220 1221 std::vector<sycl::event> e; 1222 1223 if (!sycl_queue.is_in_order()) e = {sycl_queue.ext_oneapi_submit_barrier()}; 1224 sycl_queue.parallel_for<CeedOperatorSyclLinearAssemble>(kernel_range, e, [=](sycl::id<3> idx) { 1225 const int e = idx.get(0); // Element index 1226 const int l = idx.get(1); // The output column index of each B^TDB operation 1227 const int i = idx.get(2); // The output row index of each B^TDB operation 1228 // such that we have (Bout^T)_ij D_jk Bin_kl = C_il 1229 for (CeedSize comp_in = 0; comp_in < num_comp; comp_in++) { 1230 for (CeedSize comp_out = 0; comp_out < num_comp; comp_out++) { 1231 CeedScalar result = 0.0; 1232 CeedSize qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e; 1233 1234 for (CeedSize eval_mode_in = 0; eval_mode_in < num_eval_mode_in; eval_mode_in++) { 1235 CeedSize b_in_index = eval_mode_in * num_qpts * num_nodes; 1236 1237 for (CeedSize eval_mode_out = 0; eval_mode_out < num_eval_mode_out; eval_mode_out++) { 1238 CeedSize b_out_index = eval_mode_out * num_qpts * num_nodes; 1239 CeedSize qf_index = qf_index_comp + q_eval_mode_out_stride * eval_mode_out + q_eval_mode_in_stride * eval_mode_in; 1240 1241 // Perform the B^T D B operation for this 'chunk' of D (the qf_array) 1242 for (CeedSize j = 0; j < num_qpts; j++) { 1243 result += B_out[b_out_index + j * num_nodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * num_nodes + l]; 1244 } 1245 } // end of eval_mode_out 1246 } // end of eval_mode_in 1247 CeedSize val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + num_nodes * i + l; 1248 1249 values_array[val_index] = result; 1250 } // end of out component 1251 } // end of in component 1252 }); 1253 return CEED_ERROR_SUCCESS; 1254 } 1255 1256 
//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//
// NOTE(review): this fallback is currently disabled (commented out) and is not
// registered anywhere below; kept for reference until an nd_range variant is
// needed for large element sizes.
//------------------------------------------------------------------------------
/*
static int CeedOperatorLinearAssembleFallback_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array,
                                                   CeedScalar *values_array) {
  // This kernel assumes B_in and B_out have the same number of quadrature points and basis points.
  // TODO: expand to more general cases
  CeedOperatorAssemble_Sycl *asmb = impl->asmb;
  const CeedInt num_elem = asmb->num_elem;
  const CeedInt num_nodes = asmb->num_nodes;
  const CeedInt num_comp = asmb->num_comp;
  const CeedInt num_qpts = asmb->num_qpts;
  const CeedInt num_eval_mode_in = asmb->num_eval_mode_in;
  const CeedInt num_eval_mode_out = asmb->num_eval_mode_out;

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: elememt,
  // comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = num_nodes * num_nodes;
  const CeedInt comp_in_stride = comp_out_stride * num_comp;
  const CeedInt e_stride = comp_in_stride * num_comp;
  // Strides for QF array, slowest --> fastest: eval_mode_in, comp_in, eval_mode_out, comp_out, elem, qpt
  const CeedInt q_e_stride = num_qpts;
  const CeedInt q_comp_out_stride = num_elem * q_e_stride;
  const CeedInt q_eval_mode_out_stride = q_comp_out_stride * num_comp;
  const CeedInt q_comp_in_stride = q_eval_mode_out_stride * num_eval_mode_out;
  const CeedInt q_eval_mode_in_stride = q_comp_in_stride * num_comp;

  CeedScalar *B_in, *B_out;
  B_in = asmb->d_B_in;
  B_out = asmb->d_B_out;
  const CeedInt elems_per_block = asmb->elems_per_block;
  const CeedInt block_size_x = asmb->block_size_x;
  const CeedInt block_size_y = asmb->block_size_y;  // This will be 1 for the fallback kernel

  const CeedInt grid = num_elem / elems_per_block + ((num_elem / elems_per_block * elems_per_block < num_elem) ? 1 : 0);
  sycl::range<3> local_range(block_size_x, block_size_y, elems_per_block);
  sycl::range<3> global_range(grid * block_size_x, block_size_y, elems_per_block);
  sycl::nd_range<3> kernel_range(global_range, local_range);

  sycl_queue.parallel_for<CeedOperatorSyclLinearAssembleFallback>(kernel_range, [=](sycl::nd_item<3> work_item) {
    const CeedInt blockIdx = work_item.get_group(0);
    const CeedInt gridDimx = work_item.get_group_range(0);
    const CeedInt threadIdx = work_item.get_local_id(0);
    const CeedInt threadIdz = work_item.get_local_id(2);
    const CeedInt blockDimz = work_item.get_local_range(2);

    const int l = threadIdx;  // The output column index of each B^TDB operation
    // such that we have (Bout^T)_ij D_jk Bin_kl = C_il
    for (CeedInt e = blockIdx * blockDimz + threadIdz; e < num_elem; e += gridDimx * blockDimz) {
      for (CeedInt comp_in = 0; comp_in < num_comp; comp_in++) {
        for (CeedInt comp_out = 0; comp_out < num_comp; comp_out++) {
          for (CeedInt i = 0; i < num_nodes; i++) {
            CeedScalar result = 0.0;
            CeedInt qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;
            for (CeedInt eval_mode_in = 0; eval_mode_in < num_eval_mode_in; eval_mode_in++) {
              CeedInt b_in_index = eval_mode_in * num_qpts * num_nodes;
              for (CeedInt eval_mode_out = 0; eval_mode_out < num_eval_mode_out; eval_mode_out++) {
                CeedInt b_out_index = eval_mode_out * num_qpts * num_nodes;
                CeedInt qf_index = qf_index_comp + q_eval_mode_out_stride * eval_mode_out + q_eval_mode_in_stride * eval_mode_in;
                // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
                for (CeedInt j = 0; j < num_qpts; j++) {
                  result += B_out[b_out_index + j * num_nodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * num_nodes + l];
                }
              }  // end of eval_mode_out
            }  // end of eval_mode_in
            CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + num_nodes * i + l;
            values_array[val_index] = result;
          }  // end of loop over element node index, i
        }  // end of out component
      }  // end of in component
    }  // end of element loop
  });
  return CEED_ERROR_SUCCESS;
}*/

//------------------------------------------------------------------------------
// Assemble matrix data for COO matrix of assembled operator.
// The sparsity pattern is set by CeedOperatorLinearAssembleSymbolic.
//
// Note that this (and other assembly routines) currently assume only one active
// input restriction/basis per operator (could have multiple basis eval modes).
// TODO: allow multiple active input restrictions/basis objects
//------------------------------------------------------------------------------
static int CeedOperatorAssembleSingle_Sycl(CeedOperator op, CeedInt offset, CeedVector values) {
  Ceed                ceed;
  Ceed_Sycl          *sycl_data;
  CeedScalar         *values_array;
  const CeedScalar   *qf_array;
  CeedVector          assembled_qf = NULL;
  CeedElemRestriction rstr_q       = NULL;
  CeedOperator_Sycl  *impl;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedGetData(ceed, &sycl_data));
  // Drop the local Ceed reference immediately; sycl_data stays valid as long as op holds the Ceed
  CeedCallBackend(CeedDestroy(&ceed));
  CeedCallBackend(CeedOperatorGetData(op, &impl));

  // Setup (lazily build the assembly data on first use)
  if (!impl->asmb) {
    CeedCallBackend(CeedOperatorAssembleSingleSetup_Sycl(op));
    assert(impl->asmb != NULL);
  }

  // Assemble QFunction
  CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembled_qf, &rstr_q, CEED_REQUEST_IMMEDIATE));
  CeedCallBackend(CeedElemRestrictionDestroy(&rstr_q));
  CeedCallBackend(CeedVectorGetArrayWrite(values, CEED_MEM_DEVICE, &values_array));
  // Write this operator's entries at its offset within the shared COO values array
  // NOTE(review): values_array is restored below while still offset; this presumes
  // CeedVectorRestoreArray does not validate the pointer value -- confirm against libCEED core
  values_array += offset;
  CeedCallBackend(CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &qf_array));

  // Compute B^T D B
  CeedCallBackend(CeedOperatorLinearAssemble_Sycl(sycl_data->sycl_queue, impl, qf_array, values_array));

  // Wait for kernels to be completed
  // Kris: Review if this is necessary -- enqueing an async barrier may be sufficient
  sycl_data->sycl_queue.wait_and_throw();

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArray(values, &values_array));
  CeedCallBackend(CeedVectorRestoreArrayRead(assembled_qf, &qf_array));

  // Cleanup
  CeedCallBackend(CeedVectorDestroy(&assembled_qf));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create operator
//
// Allocates the backend data for a new sycl-ref operator and registers the
// backend implementations of the CeedOperator interface functions.
//------------------------------------------------------------------------------
int CeedOperatorCreate_Sycl(CeedOperator op) {
  Ceed               ceed;
  CeedOperator_Sycl *impl;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));

  CeedCallBackend(CeedCalloc(1, &impl));
  CeedCallBackend(CeedOperatorSetData(op, impl));

  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunction", CeedOperatorLinearAssembleQFunction_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunctionUpdate", CeedOperatorLinearAssembleQFunctionUpdate_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddDiagonal", CeedOperatorLinearAssembleAddDiagonal_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddPointBlockDiagonal",
                                            CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleSingle", CeedOperatorAssembleSingle_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "ApplyAdd", CeedOperatorApplyAdd_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "Destroy", CeedOperatorDestroy_Sycl));
  CeedCallBackend(CeedDestroy(&ceed));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------