// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other
// CEED contributors. All Rights Reserved. See the top-level LICENSE and NOTICE
// files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed/backend.h>
#include <ceed/ceed.h>

#include <cassert>
#include <string>
#include <sycl/sycl.hpp>

#include "../sycl/ceed-sycl-compile.hpp"
#include "ceed-sycl-ref.hpp"

// Forward declarations; presumably used as SYCL kernel name tags for the
// assembly/diagonal kernels defined elsewhere in this backend — TODO confirm.
class CeedOperatorSyclLinearDiagonal;
class CeedOperatorSyclLinearAssemble;
class CeedOperatorSyclLinearAssembleFallback;

//------------------------------------------------------------------------------
// Get Basis Emode Pointer
//
// Selects the basis matrix pointer matching an eval mode:
//   CEED_EVAL_NONE   -> identity
//   CEED_EVAL_INTERP -> interp
//   CEED_EVAL_GRAD   -> grad
// The remaining modes leave *basis_ptr untouched; they are rejected earlier,
// during QFunction assembly.
//------------------------------------------------------------------------------
void CeedOperatorGetBasisPointer_Sycl(const CeedScalar **basis_ptr, CeedEvalMode e_mode, const CeedScalar *identity, const CeedScalar *interp,
                                      const CeedScalar *grad) {
  switch (e_mode) {
    case CEED_EVAL_NONE:
      *basis_ptr = identity;
      break;
    case CEED_EVAL_INTERP:
      *basis_ptr = interp;
      break;
    case CEED_EVAL_GRAD:
      *basis_ptr = grad;
      break;
    case CEED_EVAL_WEIGHT:
    case CEED_EVAL_DIV:
    case CEED_EVAL_CURL:
      break;  // Caught by QF Assembly
  }
}

//------------------------------------------------------------------------------
// Destroy operator
//
// Releases all backend data attached to the operator: the E-vectors and
// Q-vectors created during setup, the QFunction-assembly active-input vectors,
// and (when allocated) the diagonal- and full-assembly scratch structs together
// with the SYCL device allocations they own.
//------------------------------------------------------------------------------
static int CeedOperatorDestroy_Sycl(CeedOperator op) {
  Ceed               ceed;
  Ceed_Sycl         *sycl_data;
  CeedOperator_Sycl *impl;

  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedGetData(ceed, &sycl_data));

  // Apply data: e_vecs stores the input E-vectors followed by the output ones,
  // so a single loop of length num_e_in + num_e_out covers both
  for (CeedInt i = 0; i < impl->num_e_in + impl->num_e_out; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->e_vecs[i]));
  }
  CeedCallBackend(CeedFree(&impl->e_vecs));

  for (CeedInt i = 0; i < impl->num_e_in; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->q_vecs_in[i]));
  }
  CeedCallBackend(CeedFree(&impl->q_vecs_in));

  for (CeedInt i = 0; i < impl->num_e_out; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->q_vecs_out[i]));
  }
  CeedCallBackend(CeedFree(&impl->q_vecs_out));

  // QFunction assembly data
  for (CeedInt i = 0; i < impl->num_active_in; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->qf_active_in[i]));
  }
  CeedCallBackend(CeedFree(&impl->qf_active_in));

  // Diag data (allocated lazily by diagonal assembly; may be NULL)
  if (impl->diag) {
    CeedCallBackend(CeedFree(&impl->diag->h_e_mode_in));
    CeedCallBackend(CeedFree(&impl->diag->h_e_mode_out));

    // Drain the queue before freeing device buffers that in-flight kernels may
    // still be reading
    CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw());
    CeedCallSycl(ceed, sycl::free(impl->diag->d_e_mode_in, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_e_mode_out, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_identity, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_interp_in, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_interp_out, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_grad_in, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_grad_out, sycl_data->sycl_context));
    CeedCallBackend(CeedElemRestrictionDestroy(&impl->diag->point_block_diag_rstr));

    CeedCallBackend(CeedVectorDestroy(&impl->diag->elem_diag));
    CeedCallBackend(CeedVectorDestroy(&impl->diag->point_block_elem_diag));
  }
  CeedCallBackend(CeedFree(&impl->diag));

  // Full assembly data (allocated lazily; may be NULL)
  if (impl->asmb) {
    CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw());
    CeedCallSycl(ceed, sycl::free(impl->asmb->d_B_in, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->asmb->d_B_out, sycl_data->sycl_context));
  }
  CeedCallBackend(CeedFree(&impl->asmb));

  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup infields or outfields
//
// Creates the per-field E-vectors and Q-vectors for either the input fields
// (is_input == true) or the output fields of the operator. e_vecs is shared
// between inputs and outputs: this call writes entries [start_e, start_e +
// num_fields). A passive CEED_EVAL_NONE input whose restriction is strided
// with backend strides needs no E-vector (the L-vector data is used directly);
// its e_vecs entry is left NULL.
//------------------------------------------------------------------------------
static int CeedOperatorSetupFields_Sycl(CeedQFunction qf, CeedOperator op, bool is_input, CeedVector *e_vecs, CeedVector *q_vecs, CeedInt start_e,
                                        CeedInt num_fields, CeedInt Q, CeedInt num_elem) {
  Ceed                ceed;
  CeedSize            q_size;
  bool                is_strided, skip_restriction;
  CeedInt             dim, size;
  CeedOperatorField  *op_fields;
  CeedQFunctionField *qf_fields;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  if (is_input) {
    CeedCallBackend(CeedOperatorGetFields(op, NULL, &op_fields, NULL, NULL));
    CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL));
  } else {
    CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &op_fields));
    CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields));
  }

  // Loop over fields
  for (CeedInt i = 0; i < num_fields; i++) {
    CeedEvalMode        e_mode;
    CeedVector          vec;
    CeedElemRestriction rstr;
    CeedBasis           basis;

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &e_mode));

    is_strided       = false;
    skip_restriction = false;
    if (e_mode != CEED_EVAL_WEIGHT) {
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &rstr));

      // Check whether this field can skip the element restriction:
      // must be passive input, with e_mode NONE, and have a strided restriction with CEED_STRIDES_BACKEND.
      // First, check whether the field is input or output:
      if (is_input) {
        // Check for passive input:
        CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec));
        if (vec != CEED_VECTOR_ACTIVE) {
          // Check e_mode
          if (e_mode == CEED_EVAL_NONE) {
            // Check for is_strided restriction
            CeedCallBackend(CeedElemRestrictionIsStrided(rstr, &is_strided));
            if (is_strided) {
              // Check if vector is already in preferred backend ordering
              CeedCallBackend(CeedElemRestrictionHasBackendStrides(rstr, &skip_restriction));
            }
          }
        }
      }
      if (skip_restriction) {
        // We do not need an E-Vector, but will use the input field vector's data directly in the operator application
        e_vecs[i + start_e] = NULL;
      } else {
        CeedCallBackend(CeedElemRestrictionCreateVector(rstr, NULL, &e_vecs[i + start_e]));
      }
    }

    // Create the Q-vector sized for this field's eval mode
    switch (e_mode) {
      case CEED_EVAL_NONE:
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size));
        q_size = (CeedSize)num_elem * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i]));
        break;
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size));
        q_size = (CeedSize)num_elem * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis));
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_fields[i], &size));
        // NOTE(review): dim is queried but not used in q_size below — the
        // QFunction field size presumably already includes the dim factor for
        // GRAD; confirm against the core library convention.
        CeedCallBackend(CeedBasisGetDimension(basis, &dim));
        q_size = (CeedSize)num_elem * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i]));
        break;
      case CEED_EVAL_WEIGHT:  // Only on input fields
        CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis));
        q_size = (CeedSize)num_elem * Q;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &q_vecs[i]));
        // Quadrature weights never change, so they are computed once here at
        // setup rather than on every operator application
        CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_NOTRANSPOSE, CEED_EVAL_WEIGHT, NULL, q_vecs[i]));
        break;
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// CeedOperator needs to connect all the named fields (be they active or
// passive) to the named inputs and outputs of its CeedQFunction.
//
// Runs once per operator (guarded by the setup-done flag): allocates the
// e_vecs/q_vecs arrays and fills them via CeedOperatorSetupFields_Sycl for the
// input and then the output fields.
//------------------------------------------------------------------------------
static int CeedOperatorSetup_Sycl(CeedOperator op) {
  Ceed                ceed;
  bool                is_setup_done;
  CeedInt             Q, num_elem, num_input_fields, num_output_fields;
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction       qf;
  CeedOperatorField  *op_input_fields, *op_output_fields;
  CeedOperator_Sycl  *impl;

  CeedCallBackend(CeedOperatorIsSetupDone(op, &is_setup_done));
  if (is_setup_done) return CEED_ERROR_SUCCESS;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Allocate: one shared E-vector array (inputs then outputs)
  CeedCallBackend(CeedCalloc(num_input_fields + num_output_fields, &impl->e_vecs));

  CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->q_vecs_in));
  CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->q_vecs_out));

  impl->num_e_in  = num_input_fields;
  impl->num_e_out = num_output_fields;

  // Set up infield and outfield e_vecs and q_vecs
  // Infields
  CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, op, true, impl->e_vecs, impl->q_vecs_in, 0, num_input_fields, Q, num_elem));
  // Outfields (stored in e_vecs after the input entries, hence start_e = num_input_fields)
  CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, op, false, impl->e_vecs, impl->q_vecs_out, num_input_fields, num_output_fields, Q, num_elem));

  CeedCallBackend(CeedOperatorSetSetupDone(op));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup Operator Inputs
//
// For each (non-weight) input field: applies the element restriction into the
// field's E-vector and records a device read pointer in e_data[i]. Fields
// whose e_vecs entry is NULL (restriction skipped at setup) read directly from
// the L-vector instead. Active inputs use in_vec, or are skipped entirely when
// skip_active is set (QFunction assembly drives them separately).
//------------------------------------------------------------------------------
static inline int CeedOperatorSetupInputs_Sycl(CeedInt num_input_fields, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields,
                                               CeedVector in_vec, const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX],
                                               CeedOperator_Sycl *impl, CeedRequest *request) {
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode        e_mode;
    CeedVector          vec;
    CeedElemRestriction rstr;

    // Get input vector
    CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
    if (vec == CEED_VECTOR_ACTIVE) {
      if (skip_active) continue;
      else vec = in_vec;
    }

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &e_mode));
    if (e_mode == CEED_EVAL_WEIGHT) {  // Skip — weights were precomputed at setup
    } else {
      // Get input vector (re-fetched; the earlier active substitution is redone below)
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      // Get input element restriction
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr));
      if (vec == CEED_VECTOR_ACTIVE) vec = in_vec;
      // Restrict, if necessary
      if (!impl->e_vecs[i]) {
        // No restriction for this field; read data directly from vec.
        CeedCallBackend(CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, (const CeedScalar **)&e_data[i]));
      } else {
        CeedCallBackend(CeedElemRestrictionApply(rstr, CEED_NOTRANSPOSE, vec, impl->e_vecs[i], request));
        // Get evec
        CeedCallBackend(CeedVectorGetArrayRead(impl->e_vecs[i], CEED_MEM_DEVICE, (const CeedScalar **)&e_data[i]));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Input Basis Action
//
// Applies each input field's basis to its E-vector, producing the Q-vector the
// QFunction will read. CEED_EVAL_NONE fields simply alias their E-data into
// the Q-vector; CEED_EVAL_WEIGHT needs no action (precomputed at setup).
// Active inputs are skipped when skip_active is set.
//------------------------------------------------------------------------------
static inline int CeedOperatorInputBasis_Sycl(CeedInt num_elem, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields,
                                              CeedInt num_input_fields, const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX],
                                              CeedOperator_Sycl *impl) {
  for (CeedInt i = 0; i < num_input_fields; i++) {
    // NOTE(review): elem_size and size are fetched below but unused in this
    // function's visible branches
    CeedInt             elem_size, size;
    CeedElemRestriction rstr;
    CeedEvalMode        e_mode;
    CeedBasis           basis;

    // Skip active input
    if (skip_active) {
      CeedVector vec;

      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      if (vec == CEED_VECTOR_ACTIVE) continue;
    }
    // Get elem_size, e_mode, size
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_input_fields[i], &rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &e_mode));
    CeedCallBackend(CeedQFunctionFieldGetSize(qf_input_fields[i], &size));
    // Basis action
    switch (e_mode) {
      case CEED_EVAL_NONE:
        // Q-vector aliases the E-vector data directly; no basis application
        CeedCallBackend(CeedVectorSetArray(impl->q_vecs_in[i], CEED_MEM_DEVICE, CEED_USE_POINTER, e_data[i]));
        break;
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_NOTRANSPOSE, CEED_EVAL_INTERP, impl->e_vecs[i], impl->q_vecs_in[i]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(op_input_fields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_NOTRANSPOSE, CEED_EVAL_GRAD, impl->e_vecs[i], impl->q_vecs_in[i]));
        break;
      case CEED_EVAL_WEIGHT:
        break;  // No action — weights were computed during setup
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Restore Input Vectors
//
// Releases the device read pointers taken by CeedOperatorSetupInputs_Sycl,
// mirroring its branches: a NULL e_vecs entry means the pointer was taken from
// the field's L-vector directly, otherwise from the E-vector. Weight fields
// and (when skip_active) active fields took no pointer and are skipped.
//------------------------------------------------------------------------------
static inline int CeedOperatorRestoreInputs_Sycl(CeedInt num_input_fields, CeedQFunctionField *qf_input_fields, CeedOperatorField *op_input_fields,
                                                 const bool skip_active, CeedScalar *e_data[2 * CEED_FIELD_MAX], CeedOperator_Sycl *impl) {
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedEvalMode e_mode;
    CeedVector   vec;

    // Skip active input
    if (skip_active) {
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      if (vec == CEED_VECTOR_ACTIVE) continue;
    }
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &e_mode));
    if (e_mode == CEED_EVAL_WEIGHT) {  // Skip
    } else {
      if (!impl->e_vecs[i]) {  // This was a skip_restriction case
        CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
        CeedCallBackend(CeedVectorRestoreArrayRead(vec, (const CeedScalar **)&e_data[i]));
      } else {
        CeedCallBackend(CeedVectorRestoreArrayRead(impl->e_vecs[i], (const CeedScalar **)&e_data[i]));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply and add to output
//
// Full operator application: restrict inputs to E-vectors, apply input bases
// to get Q-vectors, run the QFunction, apply output bases in transpose mode,
// and apply the transpose restrictions into out_vec (adding into it). Results
// are accumulated; out_vec is not zeroed here.
//------------------------------------------------------------------------------
static int CeedOperatorApplyAdd_Sycl(CeedOperator op, CeedVector in_vec, CeedVector out_vec, CeedRequest *request) {
  CeedInt             Q, num_elem, elem_size, num_input_fields, num_output_fields, size;
  CeedEvalMode        e_mode;
  CeedScalar         *e_data[2 * CEED_FIELD_MAX] = {0};
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction       qf;
  CeedOperatorField  *op_input_fields, *op_output_fields;
  CeedOperator_Sycl  *impl;

  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Setup (no-op after the first call)
  CeedCallBackend(CeedOperatorSetup_Sycl(op));

  // Input Evecs and Restriction
  CeedCallBackend(CeedOperatorSetupInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, in_vec, false, e_data, impl, request));

  // Input basis apply if needed
  CeedCallBackend(CeedOperatorInputBasis_Sycl(num_elem, qf_input_fields, op_input_fields, num_input_fields, false, e_data, impl));

  // Output pointers, as necessary
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &e_mode));
    if (e_mode == CEED_EVAL_NONE) {
      // Set the output Q-Vector to use the E-Vector data directly
      CeedCallBackend(CeedVectorGetArrayWrite(impl->e_vecs[i + impl->num_e_in], CEED_MEM_DEVICE, &e_data[i + num_input_fields]));
      CeedCallBackend(CeedVectorSetArray(impl->q_vecs_out[i], CEED_MEM_DEVICE, CEED_USE_POINTER, e_data[i + num_input_fields]));
    }
  }

  // Q function
  CeedCallBackend(CeedQFunctionApply(qf, num_elem * Q, impl->q_vecs_in, impl->q_vecs_out));

  // Output basis apply if needed
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedElemRestriction rstr;
    CeedBasis           basis;

    // Get elem_size, e_mode, size
    // NOTE(review): elem_size and size are fetched but not used by the
    // branches below
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &rstr));
    CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &e_mode));
    CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &size));
    // Basis action (transpose mode: quadrature points back to nodes)
    switch (e_mode) {
      case CEED_EVAL_NONE:
        break;  // Q-vector already aliases the E-vector
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_TRANSPOSE, CEED_EVAL_INTERP, impl->q_vecs_out[i], impl->e_vecs[i + impl->num_e_in]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(op_output_fields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, num_elem, CEED_TRANSPOSE, CEED_EVAL_GRAD, impl->q_vecs_out[i], impl->e_vecs[i + impl->num_e_in]));
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT:
        Ceed ceed;
        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }

  // Output restriction
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedVector          vec;
    CeedElemRestriction rstr;

    // Restore evec
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &e_mode));
    if (e_mode == CEED_EVAL_NONE) {
      CeedCallBackend(CeedVectorRestoreArray(impl->e_vecs[i + impl->num_e_in], &e_data[i + num_input_fields]));
    }
    // Get output vector
    CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
    // Restrict
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_output_fields[i], &rstr));
    // Active
    if (vec == CEED_VECTOR_ACTIVE) vec = out_vec;

    // Transpose restriction accumulates the E-vector into the L-vector
    CeedCallBackend(CeedElemRestrictionApply(rstr, CEED_TRANSPOSE, impl->e_vecs[i + impl->num_e_in], vec, request));
  }

  // Restore input arrays
  CeedCallBackend(CeedOperatorRestoreInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, false, e_data, impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Core code for assembling linear QFunction
//
// Shared implementation for assemble (build_objects == true, which also
// creates *assembled and *rstr) and update (build_objects == false, reusing
// the caller's objects). Drives the QFunction once per active-input component
// with that component set to one and the rest zero, writing each pass's active
// outputs into consecutive slices of the assembled array.
//------------------------------------------------------------------------------
static inline int CeedOperatorLinearAssembleQFunctionCore_Sycl(CeedOperator op, bool build_objects, CeedVector *assembled, CeedElemRestriction *rstr,
                                                               CeedRequest *request) {
  Ceed                ceed, ceed_parent;
  CeedSize            q_size;
  CeedInt             num_active_in, num_active_out, Q, num_elem, num_input_fields, num_output_fields, size;
  CeedScalar         *assembled_array, *e_data[2 * CEED_FIELD_MAX] = {NULL};
  CeedVector         *active_in;
  CeedQFunctionField *qf_input_fields, *qf_output_fields;
  CeedQFunction       qf;
  CeedOperatorField  *op_input_fields, *op_output_fields;
  CeedOperator_Sycl  *impl;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetFallbackParentCeed(op, &ceed_parent));
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  // Active-input bookkeeping is cached on impl after the first assembly
  active_in     = impl->qf_active_in;
  num_active_in = impl->num_active_in, num_active_out = impl->num_active_out;

  // Setup (no-op after the first call)
  CeedCallBackend(CeedOperatorSetup_Sycl(op));

  // Check for identity
  // Identity QFunctions cannot be assembled by this path
  bool is_identity_qf;
  CeedCallBackend(CeedQFunctionIsIdentity(qf, &is_identity_qf));
  if (is_identity_qf) {
    // LCOV_EXCL_START
    return CeedError(ceed, CEED_ERROR_BACKEND, "Assembling identity QFunctions not supported");
    // LCOV_EXCL_STOP
  }

  // Input Evecs and Restriction (passive inputs only; active are driven below)
  CeedCallBackend(CeedOperatorSetupInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, NULL, true, e_data, impl, request));

  // Count number of active input fields (first call only; cached on impl).
  // Each active input component gets its own single-component CeedVector that
  // aliases one slice of the field's Q-vector, so it can be set to 0/1
  // independently during the assembly sweep below.
  if (!num_active_in) {
    for (CeedInt i = 0; i < num_input_fields; i++) {
      CeedScalar *q_vec_array;
      CeedVector  vec;

      // Get input vector
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      // Check if active input
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_input_fields[i], &size));
        CeedCallBackend(CeedVectorSetValue(impl->q_vecs_in[i], 0.0));
        CeedCallBackend(CeedVectorGetArray(impl->q_vecs_in[i], CEED_MEM_DEVICE, &q_vec_array));
        CeedCallBackend(CeedRealloc(num_active_in + size, &active_in));
        for (CeedInt field = 0; field < size; field++) {
          q_size = (CeedSize)Q * num_elem;
          CeedCallBackend(CeedVectorCreate(ceed, q_size, &active_in[num_active_in + field]));
          CeedCallBackend(
              CeedVectorSetArray(active_in[num_active_in + field], CEED_MEM_DEVICE, CEED_USE_POINTER, &q_vec_array[field * Q * num_elem]));
        }
        num_active_in += size;
        CeedCallBackend(CeedVectorRestoreArray(impl->q_vecs_in[i], &q_vec_array));
      }
    }
    impl->num_active_in = num_active_in;
    impl->qf_active_in  = active_in;
  }

  // Count number of active output fields (first call only; cached on impl)
  if (!num_active_out) {
    for (CeedInt i = 0; i < num_output_fields; i++) {
      CeedVector vec;

      // Get output vector
      CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
      // Check if active output
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[i], &size));
        num_active_out += size;
      }
    }
    impl->num_active_out = num_active_out;
  }

  // Check sizes
  if (!num_active_in || !num_active_out) {
    // LCOV_EXCL_START
    return CeedError(ceed, CEED_ERROR_BACKEND, "Cannot assemble QFunction without active inputs and outputs");
    // LCOV_EXCL_STOP
  }

  // Build objects if needed (assemble path only; the update path reuses them)
  if (build_objects) {
    CeedSize l_size     = (CeedSize)num_elem * Q * num_active_in * num_active_out;
    CeedInt  strides[3] = {1, num_elem * Q, Q}; /* *NOPAD* */

    // Create output restriction
    CeedCallBackend(CeedElemRestrictionCreateStrided(ceed_parent, num_elem, Q, num_active_in * num_active_out,
                                                     num_active_in * num_active_out * num_elem * Q, strides, rstr));
    // Create assembled vector
    CeedCallBackend(CeedVectorCreate(ceed_parent, l_size, assembled));
  }
  // Zero the assembled vector on every call (both assemble and update)
  CeedCallBackend(CeedVectorSetValue(*assembled, 0.0));
  CeedCallBackend(CeedVectorGetArray(*assembled, CEED_MEM_DEVICE, &assembled_array));

  // Input basis apply (passive inputs only)
  CeedCallBackend(CeedOperatorInputBasis_Sycl(num_elem, qf_input_fields, op_input_fields, num_input_fields, true, e_data, impl));

  // Assemble QFunction: one sweep per active input component
  for (CeedInt in = 0; in < num_active_in; in++) {
    // Set Inputs: current component to 1, previous sweep's component back to 0
    CeedCallBackend(CeedVectorSetValue(active_in[in], 1.0));
    if (num_active_in > 1) {
      CeedCallBackend(CeedVectorSetValue(active_in[(in + num_active_in - 1) % num_active_in], 0.0));
    }
    // Set Outputs: point each active output Q-vector at the next slice of the
    // assembled array
    for (CeedInt out = 0; out < num_output_fields; out++) {
      CeedVector vec;

      // Get output vector
      CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[out], &vec));
      // Check if active output
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedVectorSetArray(impl->q_vecs_out[out], CEED_MEM_DEVICE, CEED_USE_POINTER, assembled_array));
        CeedCallBackend(CeedQFunctionFieldGetSize(qf_output_fields[out], &size));
        assembled_array += size * Q * num_elem;  // Advance the pointer by the size of the output
      }
    }
    // Apply QFunction
    CeedCallBackend(CeedQFunctionApply(qf, Q * num_elem, impl->q_vecs_in, impl->q_vecs_out));
  }

  // Un-set output Qvecs to prevent accidental overwrite of Assembled
  for (CeedInt out = 0; out < num_output_fields; out++) {
    CeedVector vec;

    // Get output vector
    CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[out], &vec));
    // Check if active output
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedCallBackend(CeedVectorTakeArray(impl->q_vecs_out[out], CEED_MEM_DEVICE, NULL));
    }
  }

  // Restore input arrays
  CeedCallBackend(CeedOperatorRestoreInputs_Sycl(num_input_fields, qf_input_fields, op_input_fields, true, e_data, impl));

  // Restore output (note: assembled_array was advanced above; RestoreArray
  // only releases access, so the moved pointer value is not written back)
  CeedCallBackend(CeedVectorRestoreArray(*assembled, &assembled_array));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble Linear QFunction
//
// Public entry: builds the assembled vector and restriction, then assembles.
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleQFunction_Sycl(CeedOperator op, CeedVector *assembled, CeedElemRestriction *rstr, CeedRequest *request) {
  return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, true, assembled, rstr, request);
}

//------------------------------------------------------------------------------
// Update Assembled Linear QFunction
//
// Public entry: re-assembles into caller-provided objects without rebuilding.
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleQFunctionUpdate_Sycl(CeedOperator op, CeedVector assembled, CeedElemRestriction rstr, CeedRequest *request) {
  return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, false, &assembled, &rstr, request);
}

//------------------------------------------------------------------------------
// Create point block restriction
//------------------------------------------------------------------------------
// Builds a point-block variant of rstr for diagonal assembly: each node gets
// num_comp * num_comp components (a dense block per node) instead of num_comp.
// Offsets are scaled by num_comp (or num_comp^2 when comp_stride != 1) so the
// expanded blocks do not overlap in the larger L-vector of size
// l_size * num_comp.
//------------------------------------------------------------------------------
static int CreatePBRestriction(CeedElemRestriction rstr, CeedElemRestriction *point_block_rstr) {
  Ceed           ceed;
  CeedSize       l_size;
  CeedInt        num_elem, num_comp, elem_size, comp_stride, *point_block_offsets;
  const CeedInt *offsets;

  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  CeedCallBackend(CeedElemRestrictionGetOffsets(rstr, CEED_MEM_HOST, &offsets));

  // Expand offsets
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &num_elem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &num_comp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elem_size));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &comp_stride));
  CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
  CeedInt shift = num_comp;

  if (comp_stride != 1) shift *= num_comp;
  CeedCallBackend(CeedCalloc(num_elem * elem_size, &point_block_offsets));
  for (CeedInt i = 0; i < num_elem * elem_size; i++) {
    point_block_offsets[i] = offsets[i] * shift;
  }

  // Create new restriction; the new restriction owns point_block_offsets
  // (CEED_OWN_POINTER), so no free is needed here
  CeedCallBackend(CeedElemRestrictionCreate(ceed, num_elem, elem_size, num_comp * num_comp, 1, l_size * num_comp, CEED_MEM_HOST, CEED_OWN_POINTER,
                                            point_block_offsets, point_block_rstr));

  // Cleanup
  CeedCallBackend(CeedElemRestrictionRestoreOffsets(rstr, &offsets));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble diagonal setup
//------------------------------------------------------------------------------
static inline int CeedOperatorAssembleDiagonalSetup_Sycl(CeedOperator op, const bool is_point_block) {
  Ceed                 ceed;
  Ceed_Sycl           *sycl_data;
  CeedInt              num_input_fields, num_output_fields, num_e_mode_in = 0, num_comp = 0, dim = 1, num_e_mode_out = 0;
  CeedEvalMode        *e_mode_in = NULL,
*e_mode_out = NULL; 676 CeedBasis basis_in = NULL, basis_out = NULL; 677 CeedElemRestriction rstr_in = NULL, rstr_out = NULL; 678 CeedQFunctionField *qf_fields; 679 CeedQFunction qf; 680 CeedOperatorField *op_fields; 681 CeedOperator_Sycl *impl; 682 683 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 684 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 685 CeedCallBackend(CeedQFunctionGetNumArgs(qf, &num_input_fields, &num_output_fields)); 686 687 // Determine active input basis 688 CeedCallBackend(CeedOperatorGetFields(op, NULL, &op_fields, NULL, NULL)); 689 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL)); 690 for (CeedInt i = 0; i < num_input_fields; i++) { 691 CeedVector vec; 692 693 CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec)); 694 if (vec == CEED_VECTOR_ACTIVE) { 695 CeedEvalMode e_mode; 696 CeedElemRestriction rstr; 697 698 CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis_in)); 699 CeedCallBackend(CeedBasisGetNumComponents(basis_in, &num_comp)); 700 CeedCallBackend(CeedBasisGetDimension(basis_in, &dim)); 701 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &rstr)); 702 if (rstr_in && rstr_in != rstr) { 703 // LCOV_EXCL_START 704 return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly"); 705 // LCOV_EXCL_STOP 706 } 707 rstr_in = rstr; 708 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &e_mode)); 709 switch (e_mode) { 710 case CEED_EVAL_NONE: 711 case CEED_EVAL_INTERP: 712 CeedCallBackend(CeedRealloc(num_e_mode_in + 1, &e_mode_in)); 713 e_mode_in[num_e_mode_in] = e_mode; 714 num_e_mode_in += 1; 715 break; 716 case CEED_EVAL_GRAD: 717 CeedCallBackend(CeedRealloc(num_e_mode_in + dim, &e_mode_in)); 718 for (CeedInt d = 0; d < dim; d++) e_mode_in[num_e_mode_in + d] = e_mode; 719 num_e_mode_in += dim; 720 break; 721 case CEED_EVAL_WEIGHT: 722 case CEED_EVAL_DIV: 723 case CEED_EVAL_CURL: 724 break; // Caught 
by QF Assembly 725 } 726 } 727 } 728 729 // Determine active output basis 730 CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &op_fields)); 731 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 732 for (CeedInt i = 0; i < num_output_fields; i++) { 733 CeedVector vec; 734 735 CeedCallBackend(CeedOperatorFieldGetVector(op_fields[i], &vec)); 736 if (vec == CEED_VECTOR_ACTIVE) { 737 CeedEvalMode e_mode; 738 CeedElemRestriction rstr; 739 740 CeedCallBackend(CeedOperatorFieldGetBasis(op_fields[i], &basis_out)); 741 CeedCallBackend(CeedOperatorFieldGetElemRestriction(op_fields[i], &rstr)); 742 if (rstr_out && rstr_out != rstr) { 743 // LCOV_EXCL_START 744 return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly"); 745 // LCOV_EXCL_STOP 746 } 747 rstr_out = rstr; 748 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &e_mode)); 749 switch (e_mode) { 750 case CEED_EVAL_NONE: 751 case CEED_EVAL_INTERP: 752 CeedCallBackend(CeedRealloc(num_e_mode_out + 1, &e_mode_out)); 753 e_mode_out[num_e_mode_out] = e_mode; 754 num_e_mode_out += 1; 755 break; 756 case CEED_EVAL_GRAD: 757 CeedCallBackend(CeedRealloc(num_e_mode_out + dim, &e_mode_out)); 758 for (CeedInt d = 0; d < dim; d++) e_mode_out[num_e_mode_out + d] = e_mode; 759 num_e_mode_out += dim; 760 break; 761 case CEED_EVAL_WEIGHT: 762 case CEED_EVAL_DIV: 763 case CEED_EVAL_CURL: 764 break; // Caught by QF Assembly 765 } 766 } 767 } 768 769 // Operator data struct 770 CeedCallBackend(CeedOperatorGetData(op, &impl)); 771 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 772 CeedCallBackend(CeedCalloc(1, &impl->diag)); 773 CeedOperatorDiag_Sycl *diag = impl->diag; 774 775 diag->basis_in = basis_in; 776 diag->basis_out = basis_out; 777 diag->h_e_mode_in = e_mode_in; 778 diag->h_e_mode_out = e_mode_out; 779 diag->num_e_mode_in = num_e_mode_in; 780 diag->num_e_mode_out = num_e_mode_out; 781 782 // Kernel parameters 783 
CeedInt num_nodes, num_qpts; 784 CeedCallBackend(CeedBasisGetNumNodes(basis_in, &num_nodes)); 785 CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis_in, &num_qpts)); 786 diag->num_nodes = num_nodes; 787 diag->num_qpts = num_qpts; 788 diag->num_comp = num_comp; 789 790 // Basis matrices 791 const CeedInt i_len = num_qpts * num_nodes; 792 const CeedInt g_len = num_qpts * num_nodes * dim; 793 const CeedScalar *interp_in, *interp_out, *grad_in, *grad_out; 794 795 // CEED_EVAL_NONE 796 CeedScalar *identity = NULL; 797 bool has_eval_none = false; 798 for (CeedInt i = 0; i < num_e_mode_in; i++) has_eval_none = has_eval_none || (e_mode_in[i] == CEED_EVAL_NONE); 799 for (CeedInt i = 0; i < num_e_mode_out; i++) has_eval_none = has_eval_none || (e_mode_out[i] == CEED_EVAL_NONE); 800 801 // Order queue 802 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 803 804 std::vector<sycl::event> copy_events; 805 if (has_eval_none) { 806 CeedCallBackend(CeedCalloc(num_qpts * num_nodes, &identity)); 807 for (CeedSize i = 0; i < (num_nodes < num_qpts ? 
num_nodes : num_qpts); i++) identity[i * num_nodes + i] = 1.0; 808 CeedCallSycl(ceed, diag->d_identity = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 809 sycl::event identity_copy = sycl_data->sycl_queue.copy<CeedScalar>(identity, diag->d_identity, i_len, {e}); 810 copy_events.push_back(identity_copy); 811 } 812 813 // CEED_EVAL_INTERP 814 CeedCallBackend(CeedBasisGetInterp(basis_in, &interp_in)); 815 CeedCallSycl(ceed, diag->d_interp_in = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 816 sycl::event interp_in_copy = sycl_data->sycl_queue.copy<CeedScalar>(interp_in, diag->d_interp_in, i_len, {e}); 817 copy_events.push_back(interp_in_copy); 818 819 CeedCallBackend(CeedBasisGetInterp(basis_out, &interp_out)); 820 CeedCallSycl(ceed, diag->d_interp_out = sycl::malloc_device<CeedScalar>(i_len, sycl_data->sycl_device, sycl_data->sycl_context)); 821 sycl::event interp_out_copy = sycl_data->sycl_queue.copy<CeedScalar>(interp_out, diag->d_interp_out, i_len, {e}); 822 copy_events.push_back(interp_out_copy); 823 824 // CEED_EVAL_GRAD 825 CeedCallBackend(CeedBasisGetGrad(basis_in, &grad_in)); 826 CeedCallSycl(ceed, diag->d_grad_in = sycl::malloc_device<CeedScalar>(g_len, sycl_data->sycl_device, sycl_data->sycl_context)); 827 sycl::event grad_in_copy = sycl_data->sycl_queue.copy<CeedScalar>(grad_in, diag->d_grad_in, g_len, {e}); 828 copy_events.push_back(grad_in_copy); 829 830 CeedCallBackend(CeedBasisGetGrad(basis_out, &grad_out)); 831 CeedCallSycl(ceed, diag->d_grad_out = sycl::malloc_device<CeedScalar>(g_len, sycl_data->sycl_device, sycl_data->sycl_context)); 832 sycl::event grad_out_copy = sycl_data->sycl_queue.copy<CeedScalar>(grad_out, diag->d_grad_out, g_len, {e}); 833 copy_events.push_back(grad_out_copy); 834 835 // Arrays of e_modes 836 CeedCallSycl(ceed, diag->d_e_mode_in = sycl::malloc_device<CeedEvalMode>(num_e_mode_in, sycl_data->sycl_device, sycl_data->sycl_context)); 837 
  // Stage the per-field eval-mode tags on the device; the kernel walks these to pick interp/grad/identity matrices
  sycl::event e_mode_in_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(e_mode_in, diag->d_e_mode_in, num_e_mode_in, {e});
  copy_events.push_back(e_mode_in_copy);

  CeedCallSycl(ceed, diag->d_e_mode_out = sycl::malloc_device<CeedEvalMode>(num_e_mode_out, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event e_mode_out_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(e_mode_out, diag->d_e_mode_out, num_e_mode_out, {e});
  copy_events.push_back(e_mode_out_copy);

  // Restriction
  diag->diag_rstr = rstr_out;

  // Wait for all copies to complete and handle exceptions
  CeedCallSycl(ceed, sycl::event::wait_and_throw(copy_events));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Kernel for diagonal assembly
//
// Launches one work-item per (element, node) pair and accumulates the
// diagonal (or, when is_point_block is true, the num_comp x num_comp point
// block diagonal) of B^T D B into elem_diag_array, where D is the assembled
// QFunction data in assembled_qf_array.
//------------------------------------------------------------------------------
static int CeedOperatorLinearDiagonal_Sycl(sycl::queue &sycl_queue, const bool is_point_block, const CeedInt num_elem,
                                           const CeedOperatorDiag_Sycl *diag, const CeedScalar *assembled_qf_array, CeedScalar *elem_diag_array) {
  // Copy scalar sizes and device pointers into locals so the kernel lambda
  // captures plain values rather than the host-side diag struct pointer
  const CeedSize num_nodes = diag->num_nodes;
  const CeedSize num_qpts = diag->num_qpts;
  const CeedSize num_comp = diag->num_comp;
  const CeedSize num_e_mode_in = diag->num_e_mode_in;
  const CeedSize num_e_mode_out = diag->num_e_mode_out;
  const CeedScalar *identity = diag->d_identity;
  const CeedScalar *interp_in = diag->d_interp_in;
  const CeedScalar *grad_in = diag->d_grad_in;
  const CeedScalar *interp_out = diag->d_interp_out;
  const CeedScalar *grad_out = diag->d_grad_out;
  const CeedEvalMode *e_mode_in = diag->d_e_mode_in;
  const CeedEvalMode *e_mode_out = diag->d_e_mode_out;

  // One work-item per (element, node) pair
  sycl::range<1> kernel_range(num_elem * num_nodes);

  // Order queue
  sycl::event e = sycl_queue.ext_oneapi_submit_barrier();
  sycl_queue.parallel_for<CeedOperatorSyclLinearDiagonal>(kernel_range, {e}, [=](sycl::id<1> idx) {
    const CeedInt tid = idx % num_nodes;  // node index within the element
    const CeedInt e = idx / num_nodes;    // element index (shadows the host-side sycl::event `e` above)

    // Compute the diagonal of B^T D B
    // Each element
    CeedInt d_out = -1;
    // Each basis eval mode pair
    for (CeedSize e_out = 0; e_out < num_e_mode_out; e_out++) {
      const CeedScalar *bt = NULL;

      // d_out starts at -1 so the first CEED_EVAL_GRAD mode selects dimension 0.
      // For non-GRAD modes the grad pointer argument is computed but never
      // dereferenced (see CeedOperatorGetBasisPointer_Sycl).
      if (e_mode_out[e_out] == CEED_EVAL_GRAD) ++d_out;
      CeedOperatorGetBasisPointer_Sycl(&bt, e_mode_out[e_out], identity, interp_out, &grad_out[d_out * num_qpts * num_nodes]);
      CeedInt d_in = -1;

      for (CeedSize e_in = 0; e_in < num_e_mode_in; e_in++) {
        const CeedScalar *b = NULL;

        if (e_mode_in[e_in] == CEED_EVAL_GRAD) ++d_in;
        CeedOperatorGetBasisPointer_Sycl(&b, e_mode_in[e_in], identity, interp_in, &grad_in[d_in * num_qpts * num_nodes]);
        // Each component
        for (CeedSize comp_out = 0; comp_out < num_comp; comp_out++) {
          // Each qpoint/node pair
          if (is_point_block) {
            // Point Block Diagonal: full num_comp x num_comp block per node
            for (CeedInt comp_in = 0; comp_in < num_comp; comp_in++) {
              CeedScalar e_value = 0.0;

              // QF data layout (slowest -> fastest): e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
              for (CeedSize q = 0; q < num_qpts; q++) {
                const CeedScalar qf_value =
                    assembled_qf_array[((((e_in * num_comp + comp_in) * num_e_mode_out + e_out) * num_comp + comp_out) * num_elem + e) * num_qpts +
                                       q];

                e_value += bt[q * num_nodes + tid] * qf_value * b[q * num_nodes + tid];
              }
              elem_diag_array[((comp_out * num_comp + comp_in) * num_elem + e) * num_nodes + tid] += e_value;
            }
          } else {
            // Diagonal Only: comp_in == comp_out entries
            CeedScalar e_value = 0.0;

            for (CeedSize q = 0; q < num_qpts; q++) {
              const CeedScalar qf_value =
                  assembled_qf_array[((((e_in * num_comp + comp_out) * num_e_mode_out + e_out) * num_comp + comp_out) * num_elem + e) * num_qpts + q];
              e_value += bt[q * num_nodes + tid] * qf_value * b[q * num_nodes + tid];
            }
            elem_diag_array[(comp_out * num_elem + e) * num_nodes + tid] += e_value;
          }
        }
      }
    }
  });
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------ 930 // Assemble diagonal common code 931 //------------------------------------------------------------------------------ 932 static inline int CeedOperatorAssembleDiagonalCore_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request, const bool is_point_block) { 933 Ceed ceed; 934 Ceed_Sycl *sycl_data; 935 CeedInt num_elem; 936 CeedScalar *elem_diag_array; 937 const CeedScalar *assembled_qf_array; 938 CeedVector assembled_qf = NULL; 939 CeedElemRestriction rstr = NULL; 940 CeedOperator_Sycl *impl; 941 942 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 943 CeedCallBackend(CeedOperatorGetData(op, &impl)); 944 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 945 946 // Assemble QFunction 947 CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembled_qf, &rstr, request)); 948 CeedCallBackend(CeedElemRestrictionDestroy(&rstr)); 949 950 // Setup 951 if (!impl->diag) { 952 CeedCallBackend(CeedOperatorAssembleDiagonalSetup_Sycl(op, is_point_block)); 953 } 954 CeedOperatorDiag_Sycl *diag = impl->diag; 955 956 assert(diag != NULL); 957 958 // Restriction 959 if (is_point_block && !diag->point_block_diag_rstr) { 960 CeedElemRestriction point_block_diag_rstr; 961 CeedCallBackend(CreatePBRestriction(diag->diag_rstr, &point_block_diag_rstr)); 962 diag->point_block_diag_rstr = point_block_diag_rstr; 963 } 964 CeedElemRestriction diag_rstr = is_point_block ? diag->point_block_diag_rstr : diag->diag_rstr; 965 966 // Create diagonal vector 967 CeedVector elem_diag = is_point_block ? 
diag->point_block_elem_diag : diag->elem_diag; 968 969 if (!elem_diag) { 970 CeedCallBackend(CeedElemRestrictionCreateVector(diag_rstr, NULL, &elem_diag)); 971 if (is_point_block) diag->point_block_elem_diag = elem_diag; 972 else diag->elem_diag = elem_diag; 973 } 974 CeedCallBackend(CeedVectorSetValue(elem_diag, 0.0)); 975 976 // Assemble element operator diagonals 977 CeedCallBackend(CeedVectorGetArray(elem_diag, CEED_MEM_DEVICE, &elem_diag_array)); 978 CeedCallBackend(CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &assembled_qf_array)); 979 CeedCallBackend(CeedElemRestrictionGetNumElements(diag_rstr, &num_elem)); 980 981 // Compute the diagonal of B^T D B 982 // Umesh: This needs to be reviewed later 983 // if (is_point_block) { 984 // CeedCallBackend(CeedOperatorLinearPointBlockDiagonal_Sycl(sycl_data->sycl_queue, num_elem, diag, assembled_qf_array, elem_diag_array)); 985 //} else { 986 CeedCallBackend(CeedOperatorLinearDiagonal_Sycl(sycl_data->sycl_queue, is_point_block, num_elem, diag, assembled_qf_array, elem_diag_array)); 987 // } 988 989 // Wait for queue to complete and handle exceptions 990 sycl_data->sycl_queue.wait_and_throw(); 991 992 // Restore arrays 993 CeedCallBackend(CeedVectorRestoreArray(elem_diag, &elem_diag_array)); 994 CeedCallBackend(CeedVectorRestoreArrayRead(assembled_qf, &assembled_qf_array)); 995 996 // Assemble local operator diagonal 997 CeedCallBackend(CeedElemRestrictionApply(diag_rstr, CEED_TRANSPOSE, elem_diag, assembled, request)); 998 999 // Cleanup 1000 CeedCallBackend(CeedVectorDestroy(&assembled_qf)); 1001 return CEED_ERROR_SUCCESS; 1002 } 1003 1004 //------------------------------------------------------------------------------ 1005 // Assemble Linear Diagonal 1006 //------------------------------------------------------------------------------ 1007 static int CeedOperatorLinearAssembleAddDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) { 1008 
CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, false)); 1009 return CEED_ERROR_SUCCESS; 1010 } 1011 1012 //------------------------------------------------------------------------------ 1013 // Assemble Linear Point Block Diagonal 1014 //------------------------------------------------------------------------------ 1015 static int CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) { 1016 CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, true)); 1017 return CEED_ERROR_SUCCESS; 1018 } 1019 1020 //------------------------------------------------------------------------------ 1021 // Single operator assembly setup 1022 //------------------------------------------------------------------------------ 1023 static int CeedSingleOperatorAssembleSetup_Sycl(CeedOperator op) { 1024 Ceed ceed; 1025 CeedInt num_input_fields, num_output_fields, num_e_mode_in = 0, dim = 1, num_B_in_mats_to_load = 0, size_B_in = 0, num_e_mode_out = 0, 1026 num_B_out_mats_to_load = 0, size_B_out = 0, num_qpts = 0, elem_size = 0, num_elem, num_comp, 1027 mat_start = 0; 1028 CeedEvalMode *eval_mode_in = NULL, *eval_mode_out = NULL; 1029 const CeedScalar *interp_in, *grad_in; 1030 CeedElemRestriction rstr_in = NULL, rstr_out = NULL; 1031 CeedBasis basis_in = NULL, basis_out = NULL; 1032 CeedQFunctionField *qf_fields; 1033 CeedQFunction qf; 1034 CeedOperatorField *input_fields, *output_fields; 1035 CeedOperator_Sycl *impl; 1036 1037 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1038 CeedCallBackend(CeedOperatorGetData(op, &impl)); 1039 1040 // Get input and output fields 1041 CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &input_fields, &num_output_fields, &output_fields)); 1042 1043 // Determine active input basis eval mode 1044 CeedCallBackend(CeedOperatorGetQFunction(op, &qf)); 1045 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL)); 1046 // 
Note that the kernel will treat each dimension of a gradient action separately; 1047 // i.e., when an active input has a CEED_EVAL_GRAD mode, num_ e_mode_in will increment by dim. 1048 // However, for the purposes of load_ing the B matrices, it will be treated as one mode, and we will load/copy the entire gradient matrix at once, 1049 // so num_B_in_mats_to_load will be incremented by 1. 1050 for (CeedInt i = 0; i < num_input_fields; i++) { 1051 CeedEvalMode eval_mode; 1052 CeedVector vec; 1053 1054 CeedCallBackend(CeedOperatorFieldGetVector(input_fields[i], &vec)); 1055 if (vec == CEED_VECTOR_ACTIVE) { 1056 CeedCallBackend(CeedOperatorFieldGetBasis(input_fields[i], &basis_in)); 1057 CeedCallBackend(CeedBasisGetDimension(basis_in, &dim)); 1058 CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis_in, &num_qpts)); 1059 CeedCallBackend(CeedOperatorFieldGetElemRestriction(input_fields[i], &rstr_in)); 1060 CeedCallBackend(CeedElemRestrictionGetElementSize(rstr_in, &elem_size)); 1061 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1062 if (eval_mode != CEED_EVAL_NONE) { 1063 CeedCallBackend(CeedRealloc(num_B_in_mats_to_load + 1, &eval_mode_in)); 1064 eval_mode_in[num_B_in_mats_to_load] = eval_mode; 1065 num_B_in_mats_to_load += 1; 1066 if (eval_mode == CEED_EVAL_GRAD) { 1067 num_e_mode_in += dim; 1068 size_B_in += dim * elem_size * num_qpts; 1069 } else { 1070 num_e_mode_in += 1; 1071 size_B_in += elem_size * num_qpts; 1072 } 1073 } 1074 } 1075 } 1076 1077 // Determine active output basis; basis_out and rstr_out only used if same as input, TODO 1078 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 1079 for (CeedInt i = 0; i < num_output_fields; i++) { 1080 CeedEvalMode eval_mode; 1081 CeedVector vec; 1082 1083 CeedCallBackend(CeedOperatorFieldGetVector(output_fields[i], &vec)); 1084 if (vec == CEED_VECTOR_ACTIVE) { 1085 CeedCallBackend(CeedOperatorFieldGetBasis(output_fields[i], &basis_out)); 1086 
CeedCallBackend(CeedOperatorFieldGetElemRestriction(output_fields[i], &rstr_out)); 1087 if (rstr_out && rstr_out != rstr_in) { 1088 // LCOV_EXCL_START 1089 return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator assembly"); 1090 // LCOV_EXCL_STOP 1091 } 1092 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1093 if (eval_mode != CEED_EVAL_NONE) { 1094 CeedCallBackend(CeedRealloc(num_B_out_mats_to_load + 1, &eval_mode_out)); 1095 eval_mode_out[num_B_out_mats_to_load] = eval_mode; 1096 num_B_out_mats_to_load += 1; 1097 if (eval_mode == CEED_EVAL_GRAD) { 1098 num_e_mode_out += dim; 1099 size_B_out += dim * elem_size * num_qpts; 1100 } else { 1101 num_e_mode_out += 1; 1102 size_B_out += elem_size * num_qpts; 1103 } 1104 } 1105 } 1106 } 1107 1108 if (num_e_mode_in == 0 || num_e_mode_out == 0) { 1109 // LCOV_EXCL_START 1110 return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Cannot assemble operator without inputs/outputs"); 1111 // LCOV_EXCL_STOP 1112 } 1113 1114 CeedCallBackend(CeedElemRestrictionGetNumElements(rstr_in, &num_elem)); 1115 CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr_in, &num_comp)); 1116 1117 CeedCallBackend(CeedCalloc(1, &impl->asmb)); 1118 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1119 asmb->num_elem = num_elem; 1120 1121 Ceed_Sycl *sycl_data; 1122 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 1123 1124 // Kernel setup 1125 int elem_per_block = 1; 1126 asmb->elem_per_block = elem_per_block; 1127 CeedInt block_size = elem_size * elem_size * elem_per_block; 1128 1129 /* CeedInt maxThreadsPerBlock = sycl_data->sycl_device.get_info<sycl::info::device::max_work_group_size>(); 1130 bool fallback = block_size > maxThreadsPerBlock; 1131 asmb->fallback = fallback; 1132 if (fallback) { 1133 // Use fallback kernel with 1D threadblock 1134 block_size = elem_size * elem_per_block; 1135 asmb->block_size_x = elem_size; 1136 asmb->block_size_y = 1; 1137 } else { // Use kernel with 
2D threadblock 1138 asmb->block_size_x = elem_size; 1139 asmb->block_size_y = elem_size; 1140 }*/ 1141 asmb->block_size_x = elem_size; 1142 asmb->block_size_y = elem_size; 1143 asmb->num_e_mode_in = num_e_mode_in; 1144 asmb->num_e_mode_out = num_e_mode_out; 1145 asmb->num_qpts = num_qpts; 1146 asmb->num_nodes = elem_size; 1147 asmb->block_size = block_size; 1148 asmb->num_comp = num_comp; 1149 1150 // Build 'full' B matrices (not 1D arrays used for tensor-product matrices 1151 CeedCallBackend(CeedBasisGetInterp(basis_in, &interp_in)); 1152 CeedCallBackend(CeedBasisGetGrad(basis_in, &grad_in)); 1153 1154 // Load into B_in, in order that they will be used in eval_mode 1155 CeedCallSycl(ceed, asmb->d_B_in = sycl::malloc_device<CeedScalar>(size_B_in, sycl_data->sycl_device, sycl_data->sycl_context)); 1156 for (int i = 0; i < num_B_in_mats_to_load; i++) { 1157 CeedEvalMode eval_mode = eval_mode_in[i]; 1158 1159 if (eval_mode == CEED_EVAL_INTERP) { 1160 // Order queue 1161 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1162 sycl_data->sycl_queue.copy<CeedScalar>(interp_in, &asmb->d_B_in[mat_start], elem_size * num_qpts, {e}); 1163 mat_start += elem_size * num_qpts; 1164 } else if (eval_mode == CEED_EVAL_GRAD) { 1165 // Order queue 1166 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1167 sycl_data->sycl_queue.copy<CeedScalar>(grad_in, &asmb->d_B_in[mat_start], dim * elem_size * num_qpts, {e}); 1168 mat_start += dim * elem_size * num_qpts; 1169 } 1170 } 1171 1172 const CeedScalar *interp_out, *grad_out; 1173 // Note that this function currently assumes 1 basis, so this should always be true 1174 // for now 1175 if (basis_out == basis_in) { 1176 interp_out = interp_in; 1177 grad_out = grad_in; 1178 } else { 1179 CeedCallBackend(CeedBasisGetInterp(basis_out, &interp_out)); 1180 CeedCallBackend(CeedBasisGetGrad(basis_out, &grad_out)); 1181 } 1182 1183 // Load into B_out, in order that they will be used in eval_mode 1184 mat_start = 0; 
1185 CeedCallSycl(ceed, asmb->d_B_out = sycl::malloc_device<CeedScalar>(size_B_out, sycl_data->sycl_device, sycl_data->sycl_context)); 1186 for (int i = 0; i < num_B_out_mats_to_load; i++) { 1187 CeedEvalMode eval_mode = eval_mode_out[i]; 1188 1189 if (eval_mode == CEED_EVAL_INTERP) { 1190 // Order queue 1191 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1192 sycl_data->sycl_queue.copy<CeedScalar>(interp_out, &asmb->d_B_out[mat_start], elem_size * num_qpts, {e}); 1193 mat_start += elem_size * num_qpts; 1194 } else if (eval_mode == CEED_EVAL_GRAD) { 1195 // Order queue 1196 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1197 sycl_data->sycl_queue.copy<CeedScalar>(grad_out, &asmb->d_B_out[mat_start], dim * elem_size * num_qpts, {e}); 1198 mat_start += dim * elem_size * num_qpts; 1199 } 1200 } 1201 return CEED_ERROR_SUCCESS; 1202 } 1203 1204 //------------------------------------------------------------------------------ 1205 // Matrix assembly kernel for low-order elements (3D thread block) 1206 //------------------------------------------------------------------------------ 1207 static int CeedOperatorLinearAssemble_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array, 1208 CeedScalar *values_array) { 1209 // This kernels assumes B_in and B_out have the same number of quadrature points and basis points. 
  // TODO: expand to more general cases
  CeedOperatorAssemble_Sycl *asmb = impl->asmb;
  const CeedInt num_elem = asmb->num_elem;
  const CeedSize num_nodes = asmb->num_nodes;
  const CeedSize num_comp = asmb->num_comp;
  const CeedSize num_qpts = asmb->num_qpts;
  const CeedSize num_e_mode_in = asmb->num_e_mode_in;
  const CeedSize num_e_mode_out = asmb->num_e_mode_out;

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: element,
  // comp_in, comp_out, node_row, node_col
  const CeedSize comp_out_stride = num_nodes * num_nodes;
  const CeedSize comp_in_stride = comp_out_stride * num_comp;
  const CeedSize e_stride = comp_in_stride * num_comp;
  // Strides for QF array, slowest --> fastest: e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt
  const CeedSize q_e_stride = num_qpts;
  const CeedSize q_comp_out_stride = num_elem * q_e_stride;
  const CeedSize q_e_mode_out_stride = q_comp_out_stride * num_comp;
  const CeedSize q_comp_in_stride = q_e_mode_out_stride * num_e_mode_out;
  const CeedSize q_e_mode_in_stride = q_comp_in_stride * num_comp;

  // Device copies of the full B matrices, laid out per eval mode (see setup)
  CeedScalar *B_in, *B_out;
  B_in = asmb->d_B_in;
  B_out = asmb->d_B_out;
  const CeedInt block_size_x = asmb->block_size_x;
  const CeedInt block_size_y = asmb->block_size_y;

  // One work-item per (element, output column, output row) triple
  sycl::range<3> kernel_range(num_elem, block_size_y, block_size_x);

  // Order queue
  sycl::event e = sycl_queue.ext_oneapi_submit_barrier();
  sycl_queue.parallel_for<CeedOperatorSyclLinearAssemble>(kernel_range, {e}, [=](sycl::id<3> idx) {
    const int e = idx.get(0);  // Element index
    const int l = idx.get(1);  // The output column index of each B^TDB operation
    const int i = idx.get(2);  // The output row index of each B^TDB operation
    // such that we have (Bout^T)_ij D_jk Bin_kl = C_il
    for (CeedSize comp_in = 0; comp_in < num_comp; comp_in++) {
      for (CeedSize comp_out = 0; comp_out < num_comp; comp_out++) {
        CeedScalar result = 0.0;
        CeedSize qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e;

        for (CeedSize e_mode_in = 0; e_mode_in < num_e_mode_in; e_mode_in++) {
          CeedSize b_in_index = e_mode_in * num_qpts * num_nodes;

          for (CeedSize e_mode_out = 0; e_mode_out < num_e_mode_out; e_mode_out++) {
            CeedSize b_out_index = e_mode_out * num_qpts * num_nodes;
            CeedSize qf_index = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in;

            // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
            for (CeedSize j = 0; j < num_qpts; j++) {
              result += B_out[b_out_index + j * num_nodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * num_nodes + l];
            }
          }  // end of e_mode_out
        }    // end of e_mode_in
        CeedSize val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + num_nodes * i + l;

        values_array[val_index] = result;
      }  // end of out component
    }    // end of in component
  });
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//------------------------------------------------------------------------------
/*
static int CeedOperatorLinearAssembleFallback_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array,
                                                   CeedScalar *values_array) {
  // This kernel assumes B_in and B_out have the same number of quadrature points and basis points.
1280 // TODO: expand to more general cases 1281 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1282 const CeedInt num_elem = asmb->num_elem; 1283 const CeedInt num_nodes = asmb->num_nodes; 1284 const CeedInt num_comp = asmb->num_comp; 1285 const CeedInt num_qpts = asmb->num_qpts; 1286 const CeedInt num_e_mode_in = asmb->num_e_mode_in; 1287 const CeedInt num_e_mode_out = asmb->num_e_mode_out; 1288 1289 // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: elememt, 1290 // comp_in, comp_out, node_row, node_col 1291 const CeedInt comp_out_stride = num_nodes * num_nodes; 1292 const CeedInt comp_in_stride = comp_out_stride * num_comp; 1293 const CeedInt e_stride = comp_in_stride * num_comp; 1294 // Strides for QF array, slowest --> fastest: e_mode_in, comp_in, e_mode_out, comp_out, elem, qpt 1295 const CeedInt q_e_stride = num_qpts; 1296 const CeedInt q_comp_out_stride = num_elem * q_e_stride; 1297 const CeedInt q_e_mode_out_stride = q_comp_out_stride * num_comp; 1298 const CeedInt q_comp_in_stride = q_e_mode_out_stride * num_e_mode_out; 1299 const CeedInt q_e_mode_in_stride = q_comp_in_stride * num_comp; 1300 1301 CeedScalar *B_in, *B_out; 1302 B_in = asmb->d_B_in; 1303 B_out = asmb->d_B_out; 1304 const CeedInt elem_per_block = asmb->elem_per_block; 1305 const CeedInt block_size_x = asmb->block_size_x; 1306 const CeedInt block_size_y = asmb->block_size_y; // This will be 1 for the fallback kernel 1307 1308 const CeedInt grid = num_elem / elem_per_block + ((num_elem / elem_per_block * elem_per_block < num_elem) ? 
1 : 0); 1309 sycl::range<3> local_range(block_size_x, block_size_y, elem_per_block); 1310 sycl::range<3> global_range(grid * block_size_x, block_size_y, elem_per_block); 1311 sycl::nd_range<3> kernel_range(global_range, local_range); 1312 1313 sycl_queue.parallel_for<CeedOperatorSyclLinearAssembleFallback>(kernel_range, [=](sycl::nd_item<3> work_item) { 1314 const CeedInt blockIdx = work_item.get_group(0); 1315 const CeedInt gridDimx = work_item.get_group_range(0); 1316 const CeedInt threadIdx = work_item.get_local_id(0); 1317 const CeedInt threadIdz = work_item.get_local_id(2); 1318 const CeedInt blockDimz = work_item.get_local_range(2); 1319 1320 const int l = threadIdx; // The output column index of each B^TDB operation 1321 // such that we have (Bout^T)_ij D_jk Bin_kl = C_il 1322 for (CeedInt e = blockIdx * blockDimz + threadIdz; e < num_elem; e += gridDimx * blockDimz) { 1323 for (CeedInt comp_in = 0; comp_in < num_comp; comp_in++) { 1324 for (CeedInt comp_out = 0; comp_out < num_comp; comp_out++) { 1325 for (CeedInt i = 0; i < num_nodes; i++) { 1326 CeedScalar result = 0.0; 1327 CeedInt qf_index_comp = q_comp_in_stride * comp_in + q_comp_out_stride * comp_out + q_e_stride * e; 1328 for (CeedInt e_mode_in = 0; e_mode_in < num_e_mode_in; e_mode_in++) { 1329 CeedInt b_in_index = e_mode_in * num_qpts * num_nodes; 1330 for (CeedInt e_mode_out = 0; e_mode_out < num_e_mode_out; e_mode_out++) { 1331 CeedInt b_out_index = e_mode_out * num_qpts * num_nodes; 1332 CeedInt qf_index = qf_index_comp + q_e_mode_out_stride * e_mode_out + q_e_mode_in_stride * e_mode_in; 1333 // Perform the B^T D B operation for this 'chunk' of D (the qf_array) 1334 for (CeedInt j = 0; j < num_qpts; j++) { 1335 result += B_out[b_out_index + j * num_nodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * num_nodes + l]; 1336 } 1337 } // end of e_mode_out 1338 } // end of e_mode_in 1339 CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + num_nodes * i + 
l; 1340 values_array[val_index] = result; 1341 } // end of loop over element node index, i 1342 } // end of out component 1343 } // end of in component 1344 } // end of element loop 1345 }); 1346 return CEED_ERROR_SUCCESS; 1347 }*/ 1348 1349 //------------------------------------------------------------------------------ 1350 // Assemble matrix data for COO matrix of assembled operator. 1351 // The sparsity pattern is set by CeedOperatorLinearAssembleSymbolic. 1352 // 1353 // Note that this (and other assembly routines) currently assume only one active 1354 // input restriction/basis per operator (could have multiple basis eval modes). 1355 // TODO: allow multiple active input restrictions/basis objects 1356 //------------------------------------------------------------------------------ 1357 static int CeedSingleOperatorAssemble_Sycl(CeedOperator op, CeedInt offset, CeedVector values) { 1358 Ceed ceed; 1359 Ceed_Sycl *sycl_data; 1360 CeedScalar *values_array; 1361 const CeedScalar *qf_array; 1362 CeedVector assembled_qf = NULL; 1363 CeedElemRestriction rstr_q = NULL; 1364 CeedOperator_Sycl *impl; 1365 1366 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1367 CeedCallBackend(CeedOperatorGetData(op, &impl)); 1368 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 1369 1370 // Setup 1371 if (!impl->asmb) { 1372 CeedCallBackend(CeedSingleOperatorAssembleSetup_Sycl(op)); 1373 assert(impl->asmb != NULL); 1374 } 1375 1376 // Assemble QFunction 1377 CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembled_qf, &rstr_q, CEED_REQUEST_IMMEDIATE)); 1378 CeedCallBackend(CeedElemRestrictionDestroy(&rstr_q)); 1379 CeedCallBackend(CeedVectorGetArrayWrite(values, CEED_MEM_DEVICE, &values_array)); 1380 values_array += offset; 1381 CeedCallBackend(CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &qf_array)); 1382 1383 // Compute B^T D B 1384 CeedCallBackend(CeedOperatorLinearAssemble_Sycl(sycl_data->sycl_queue, impl, qf_array, values_array)); 1385 1386 // 
Wait for kernels to be completed 1387 // Kris: Review if this is necessary -- enqueing an async barrier may be sufficient 1388 sycl_data->sycl_queue.wait_and_throw(); 1389 1390 // Restore arrays 1391 CeedCallBackend(CeedVectorRestoreArray(values, &values_array)); 1392 CeedCallBackend(CeedVectorRestoreArrayRead(assembled_qf, &qf_array)); 1393 1394 // Cleanup 1395 CeedCallBackend(CeedVectorDestroy(&assembled_qf)); 1396 return CEED_ERROR_SUCCESS; 1397 } 1398 1399 //------------------------------------------------------------------------------ 1400 // Create operator 1401 //------------------------------------------------------------------------------ 1402 int CeedOperatorCreate_Sycl(CeedOperator op) { 1403 Ceed ceed; 1404 CeedOperator_Sycl *impl; 1405 1406 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 1407 1408 CeedCallBackend(CeedCalloc(1, &impl)); 1409 CeedCallBackend(CeedOperatorSetData(op, impl)); 1410 1411 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunction", CeedOperatorLinearAssembleQFunction_Sycl)); 1412 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunctionUpdate", CeedOperatorLinearAssembleQFunctionUpdate_Sycl)); 1413 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddDiagonal", CeedOperatorLinearAssembleAddDiagonal_Sycl)); 1414 CeedCallBackend( 1415 CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddPointBlockDiagonal", CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl)); 1416 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleSingle", CeedSingleOperatorAssemble_Sycl)); 1417 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "ApplyAdd", CeedOperatorApplyAdd_Sycl)); 1418 CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "Destroy", CeedOperatorDestroy_Sycl)); 1419 return CEED_ERROR_SUCCESS; 1420 } 1421 1422 
//------------------------------------------------------------------------------ 1423