// Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other
// CEED contributors. All Rights Reserved. See the top-level LICENSE and NOTICE
// files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED: http://github.com/ceed

#include <ceed/backend.h>
#include <ceed/ceed.h>

#include <cassert>
#include <string>
#include <sycl/sycl.hpp>

#include "../sycl/ceed-sycl-compile.hpp"
#include "ceed-sycl-ref.hpp"

// Forward declarations used as SYCL kernel name tags elsewhere in this backend
// (presumably referenced by the diagonal/assembly kernels — TODO confirm).
class CeedOperatorSyclLinearDiagonal;
class CeedOperatorSyclLinearAssemble;
class CeedOperatorSyclLinearAssembleFallback;

//------------------------------------------------------------------------------
// Get Basis Emode Pointer
//
// Selects which basis matrix pointer corresponds to a given evaluation mode:
// identity for CEED_EVAL_NONE, the interpolation matrix for CEED_EVAL_INTERP,
// and the gradient matrix for CEED_EVAL_GRAD. WEIGHT/DIV/CURL leave *basisptr
// untouched (those modes are rejected earlier, during QFunction assembly).
//------------------------------------------------------------------------------
void CeedOperatorGetBasisPointer_Sycl(const CeedScalar **basisptr, CeedEvalMode emode, const CeedScalar *identity, const CeedScalar *interp,
                                      const CeedScalar *grad) {
  switch (emode) {
    case CEED_EVAL_NONE:
      *basisptr = identity;
      break;
    case CEED_EVAL_INTERP:
      *basisptr = interp;
      break;
    case CEED_EVAL_GRAD:
      *basisptr = grad;
      break;
    case CEED_EVAL_WEIGHT:
    case CEED_EVAL_DIV:
    case CEED_EVAL_CURL:
      break;  // Caught by QF Assembly
  }
}

//------------------------------------------------------------------------------
// Destroy operator
//
// Releases everything CeedOperatorSetup_Sycl and the assembly helpers attached
// to the operator's backend data: E-vectors, Q-vectors, QFunction-assembly
// vectors, the diagonal-assembly scratch (host arrays, device USM buffers,
// point-block restriction, element-diagonal vectors), and the linear-assembly
// basis buffers. The queue is drained (wait_and_throw) before any sycl::free
// so no in-flight kernel can still reference the buffers being freed.
//------------------------------------------------------------------------------
static int CeedOperatorDestroy_Sycl(CeedOperator op) {
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  Ceed_Sycl *sycl_data;
  CeedCallBackend(CeedGetData(ceed, &sycl_data));

  // Apply data
  // E-vectors for inputs and outputs live in one contiguous array.
  for (CeedInt i = 0; i < impl->numein + impl->numeout; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->evecs[i]));
  }
  CeedCallBackend(CeedFree(&impl->evecs));

  for (CeedInt i = 0; i < impl->numein; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->qvecsin[i]));
  }
  CeedCallBackend(CeedFree(&impl->qvecsin));

  for (CeedInt i = 0; i < impl->numeout; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->qvecsout[i]));
  }
  CeedCallBackend(CeedFree(&impl->qvecsout));

  // QFunction assembly data
  for (CeedInt i = 0; i < impl->qfnumactivein; i++) {
    CeedCallBackend(CeedVectorDestroy(&impl->qfactivein[i]));
  }
  CeedCallBackend(CeedFree(&impl->qfactivein));

  // Diag data
  if (impl->diag) {
    CeedCallBackend(CeedFree(&impl->diag->h_emodein));
    CeedCallBackend(CeedFree(&impl->diag->h_emodeout));

    // Ensure no kernel still uses the device buffers before freeing them
    CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw());
    CeedCallSycl(ceed, sycl::free(impl->diag->d_emodein, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_emodeout, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_identity, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_interpin, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_interpout, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_gradin, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->diag->d_gradout, sycl_data->sycl_context));
    CeedCallBackend(CeedElemRestrictionDestroy(&impl->diag->pbdiagrstr));

    CeedCallBackend(CeedVectorDestroy(&impl->diag->elemdiag));
    CeedCallBackend(CeedVectorDestroy(&impl->diag->pbelemdiag));
  }
  CeedCallBackend(CeedFree(&impl->diag));

  if (impl->asmb) {
    CeedCallSycl(ceed, sycl_data->sycl_queue.wait_and_throw());
    CeedCallSycl(ceed, sycl::free(impl->asmb->d_B_in, sycl_data->sycl_context));
    CeedCallSycl(ceed, sycl::free(impl->asmb->d_B_out, sycl_data->sycl_context));
  }
  CeedCallBackend(CeedFree(&impl->asmb));

  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup infields or outfields
//
// For each of the numfields fields of the QFunction (inputs when isinput is
// true, outputs otherwise) this creates:
//  - an E-vector (evecs[starte + i]) sized by the field's element restriction,
//    unless the field can read the L-vector directly (see skiprestrict below);
//  - a Q-vector (qvecs[i]) sized numelements * Q * field-size (or
//    numelements * Q for CEED_EVAL_WEIGHT, which is also precomputed here by
//    applying the basis weight operation once).
//------------------------------------------------------------------------------
static int CeedOperatorSetupFields_Sycl(CeedQFunction qf, CeedOperator op, bool isinput, CeedVector *evecs, CeedVector *qvecs, CeedInt starte,
                                        CeedInt numfields, CeedInt Q, CeedInt numelements) {
  CeedInt dim, size;
  CeedSize q_size;
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedBasis basis;
  CeedElemRestriction Erestrict;
  CeedOperatorField *opfields;
  CeedQFunctionField *qffields;
  CeedVector fieldvec;
  bool strided;
  bool skiprestrict;

  if (isinput) {
    CeedCallBackend(CeedOperatorGetFields(op, NULL, &opfields, NULL, NULL));
    CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qffields, NULL, NULL));
  } else {
    CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &opfields));
    CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qffields));
  }

  // Loop over fields
  for (CeedInt i = 0; i < numfields; i++) {
    CeedEvalMode emode;
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qffields[i], &emode));

    strided      = false;
    skiprestrict = false;
    if (emode != CEED_EVAL_WEIGHT) {
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(opfields[i], &Erestrict));

      // Check whether this field can skip the element restriction:
      // must be passive input, with emode NONE, and have a strided restriction with CEED_STRIDES_BACKEND.

      // First, check whether the field is input or output:
      if (isinput) {
        // Check for passive input:
        CeedCallBackend(CeedOperatorFieldGetVector(opfields[i], &fieldvec));
        if (fieldvec != CEED_VECTOR_ACTIVE) {
          // Check emode
          if (emode == CEED_EVAL_NONE) {
            // Check for strided restriction
            CeedCallBackend(CeedElemRestrictionIsStrided(Erestrict, &strided));
            if (strided) {
              // Check if vector is already in preferred backend ordering
              CeedCallBackend(CeedElemRestrictionHasBackendStrides(Erestrict, &skiprestrict));
            }
          }
        }
      }
      if (skiprestrict) {
        // We do not need an E-Vector, but will use the input field vector's data directly in the operator application
        // (a NULL evec entry is the marker the apply path checks for).
        evecs[i + starte] = NULL;
      } else {
        CeedCallBackend(CeedElemRestrictionCreateVector(Erestrict, NULL, &evecs[i + starte]));
      }
    }

    switch (emode) {
      case CEED_EVAL_NONE:
        CeedCallBackend(CeedQFunctionFieldGetSize(qffields[i], &size));
        q_size = (CeedSize)numelements * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &qvecs[i]));
        break;
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedQFunctionFieldGetSize(qffields[i], &size));
        q_size = (CeedSize)numelements * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &qvecs[i]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(opfields[i], &basis));
        CeedCallBackend(CeedQFunctionFieldGetSize(qffields[i], &size));
        CeedCallBackend(CeedBasisGetDimension(basis, &dim));
        // NOTE(review): size here already includes the dim factor for GRAD
        // fields (dim is fetched but not used in q_size) — TODO confirm.
        q_size = (CeedSize)numelements * Q * size;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &qvecs[i]));
        break;
      case CEED_EVAL_WEIGHT:  // Only on input fields
        CeedCallBackend(CeedOperatorFieldGetBasis(opfields[i], &basis));
        q_size = (CeedSize)numelements * Q;
        CeedCallBackend(CeedVectorCreate(ceed, q_size, &qvecs[i]));
        // Weights are constant for the operator's lifetime, so compute once here.
        CeedCallBackend(CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, CEED_EVAL_WEIGHT, NULL, qvecs[i]));
        break;
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// CeedOperator needs to connect all the named fields (be they active or
// passive) to the named inputs and outputs of its CeedQFunction.
//
// Idempotent: returns immediately if setup was already done. Allocates the
// shared E-vector array (inputs followed by outputs) and the input/output
// Q-vector arrays, then delegates per-field setup to
// CeedOperatorSetupFields_Sycl.
//------------------------------------------------------------------------------
static int CeedOperatorSetup_Sycl(CeedOperator op) {
  bool setupdone;
  CeedCallBackend(CeedOperatorIsSetupDone(op, &setupdone));
  if (setupdone) return CEED_ERROR_SUCCESS;

  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedQFunction qf;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedInt Q, numelements, numinputfields, numoutputfields;
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &numelements));
  CeedOperatorField *opinputfields, *opoutputfields;
  CeedCallBackend(CeedOperatorGetFields(op, &numinputfields, &opinputfields, &numoutputfields, &opoutputfields));
  CeedQFunctionField *qfinputfields, *qfoutputfields;
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields));

  // Allocate
  CeedCallBackend(CeedCalloc(numinputfields + numoutputfields, &impl->evecs));

  CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->qvecsin));
  CeedCallBackend(CeedCalloc(CEED_FIELD_MAX, &impl->qvecsout));

  impl->numein  = numinputfields;
  impl->numeout = numoutputfields;

  // Set up infield and outfield evecs and qvecs
  // Infields
  CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, op, true, impl->evecs, impl->qvecsin, 0, numinputfields, Q, numelements));

  // Outfields (E-vectors are offset by numinputfields in the shared array)
  CeedCallBackend(CeedOperatorSetupFields_Sycl(qf, op, false, impl->evecs, impl->qvecsout, numinputfields, numoutputfields, Q, numelements));

  CeedCallBackend(CeedOperatorSetSetupDone(op));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Setup Operator Inputs
//
// For every input field (optionally skipping active ones), applies the element
// restriction into the field's E-vector and exposes a read-only device pointer
// in edata[i]. Fields marked skiprestrict (evecs[i] == NULL) read the L-vector
// data directly. Every pointer taken here must be released by
// CeedOperatorRestoreInputs_Sycl with matching skipactive.
//------------------------------------------------------------------------------
static inline int CeedOperatorSetupInputs_Sycl(CeedInt numinputfields, CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields,
                                               CeedVector invec, const bool skipactive, CeedScalar *edata[2 * CEED_FIELD_MAX],
                                               CeedOperator_Sycl *impl, CeedRequest *request) {
  CeedEvalMode emode;
  CeedVector vec;
  CeedElemRestriction Erestrict;

  for (CeedInt i = 0; i < numinputfields; i++) {
    // Get input vector
    CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
    if (vec == CEED_VECTOR_ACTIVE) {
      if (skipactive) continue;
      else vec = invec;
    }

    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode));
    if (emode == CEED_EVAL_WEIGHT) {  // Skip — weights were precomputed during setup
    } else {
      // Get input vector
      CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
      // Get input element restriction
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict));
      if (vec == CEED_VECTOR_ACTIVE) vec = invec;
      // Restrict, if necessary
      if (!impl->evecs[i]) {
        // No restriction for this field; read data directly from vec.
        CeedCallBackend(CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, (const CeedScalar **)&edata[i]));
      } else {
        CeedCallBackend(CeedElemRestrictionApply(Erestrict, CEED_NOTRANSPOSE, vec, impl->evecs[i], request));
        // Get evec
        CeedCallBackend(CeedVectorGetArrayRead(impl->evecs[i], CEED_MEM_DEVICE, (const CeedScalar **)&edata[i]));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Input Basis Action
//
// Applies each input field's basis action to fill its Q-vector. CEED_EVAL_NONE
// aliases the E-vector data directly (CEED_USE_POINTER, no copy); INTERP/GRAD
// run the basis; WEIGHT needs no action here (precomputed at setup).
//------------------------------------------------------------------------------
static inline int CeedOperatorInputBasis_Sycl(CeedInt numelements, CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields,
                                              CeedInt numinputfields, const bool skipactive, CeedScalar *edata[2 * CEED_FIELD_MAX],
                                              CeedOperator_Sycl *impl) {
  CeedInt elemsize, size;
  CeedElemRestriction Erestrict;
  CeedEvalMode emode;
  CeedBasis basis;

  for (CeedInt i = 0; i < numinputfields; i++) {
    // Skip active input
    if (skipactive) {
      CeedVector vec;
      CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
      if (vec == CEED_VECTOR_ACTIVE) continue;
    }
    // Get elemsize, emode, size
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict));
    CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elemsize));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode));
    CeedCallBackend(CeedQFunctionFieldGetSize(qfinputfields[i], &size));
    // Basis action
    switch (emode) {
      case CEED_EVAL_NONE:
        CeedCallBackend(CeedVectorSetArray(impl->qvecsin[i], CEED_MEM_DEVICE, CEED_USE_POINTER, edata[i]));
        break;
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedOperatorFieldGetBasis(opinputfields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, CEED_EVAL_INTERP, impl->evecs[i], impl->qvecsin[i]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(opinputfields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, CEED_EVAL_GRAD, impl->evecs[i], impl->qvecsin[i]));
        break;
      case CEED_EVAL_WEIGHT:
        break;  // No action
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Restore Input Vectors
//
// Releases the read-only array pointers taken by CeedOperatorSetupInputs_Sycl.
// Must be called with the same skipactive flag so the same set of fields is
// restored.
//------------------------------------------------------------------------------
static inline int CeedOperatorRestoreInputs_Sycl(CeedInt numinputfields, CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields,
                                                 const bool skipactive, CeedScalar *edata[2 * CEED_FIELD_MAX], CeedOperator_Sycl *impl) {
  CeedEvalMode emode;
  CeedVector vec;

  for (CeedInt i = 0; i < numinputfields; i++) {
    // Skip active input
    if (skipactive) {
      CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
      if (vec == CEED_VECTOR_ACTIVE) continue;
    }
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode));
    if (emode == CEED_EVAL_WEIGHT) {  // Skip
    } else {
      if (!impl->evecs[i]) {  // This was a skiprestrict case
        CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
        CeedCallBackend(CeedVectorRestoreArrayRead(vec, (const CeedScalar **)&edata[i]));
      } else {
        CeedCallBackend(CeedVectorRestoreArrayRead(impl->evecs[i], (const CeedScalar **)&edata[i]));
      }
    }
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Apply and add to output
//
// Full operator application: restrict inputs to E-vectors, apply input bases
// to Q-vectors, run the QFunction, apply transposed output bases back to
// E-vectors, and apply transposed restrictions to accumulate into the output
// L-vector. edata holds input pointers in [0, numinputfields) and output
// pointers at [numinputfields + i].
//------------------------------------------------------------------------------
static int CeedOperatorApplyAdd_Sycl(CeedOperator op, CeedVector invec, CeedVector outvec, CeedRequest *request) {
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedQFunction qf;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedInt Q, numelements, elemsize, numinputfields, numoutputfields, size;
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &numelements));
  CeedOperatorField *opinputfields, *opoutputfields;
  CeedCallBackend(CeedOperatorGetFields(op, &numinputfields, &opinputfields, &numoutputfields, &opoutputfields));
  CeedQFunctionField *qfinputfields, *qfoutputfields;
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields));
  CeedEvalMode emode;
  CeedVector vec;
  CeedBasis basis;
  CeedElemRestriction Erestrict;
  CeedScalar *edata[2 * CEED_FIELD_MAX] = {0};

  // Setup
  CeedCallBackend(CeedOperatorSetup_Sycl(op));

  // Input Evecs and Restriction
  CeedCallBackend(CeedOperatorSetupInputs_Sycl(numinputfields, qfinputfields, opinputfields, invec, false, edata, impl, request));

  // Input basis apply if needed
  CeedCallBackend(CeedOperatorInputBasis_Sycl(numelements, qfinputfields, opinputfields, numinputfields, false, edata, impl));

  // Output pointers, as necessary
  for (CeedInt i = 0; i < numoutputfields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode));
    if (emode == CEED_EVAL_NONE) {
      // Set the output Q-Vector to use the E-Vector data directly
      CeedCallBackend(CeedVectorGetArrayWrite(impl->evecs[i + impl->numein], CEED_MEM_DEVICE, &edata[i + numinputfields]));
      CeedCallBackend(CeedVectorSetArray(impl->qvecsout[i], CEED_MEM_DEVICE, CEED_USE_POINTER, edata[i + numinputfields]));
    }
  }

  // Q function
  CeedCallBackend(CeedQFunctionApply(qf, numelements * Q, impl->qvecsin, impl->qvecsout));

  // Output basis apply if needed
  for (CeedInt i = 0; i < numoutputfields; i++) {
    // Get elemsize, emode, size
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict));
    CeedCallBackend(CeedElemRestrictionGetElementSize(Erestrict, &elemsize));
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode));
    CeedCallBackend(CeedQFunctionFieldGetSize(qfoutputfields[i], &size));
    // Basis action
    switch (emode) {
      case CEED_EVAL_NONE:
        break;  // E-vector was written in place via the Q-vector alias above
      case CEED_EVAL_INTERP:
        CeedCallBackend(CeedOperatorFieldGetBasis(opoutputfields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, numelements, CEED_TRANSPOSE, CEED_EVAL_INTERP, impl->qvecsout[i], impl->evecs[i + impl->numein]));
        break;
      case CEED_EVAL_GRAD:
        CeedCallBackend(CeedOperatorFieldGetBasis(opoutputfields[i], &basis));
        CeedCallBackend(CeedBasisApply(basis, numelements, CEED_TRANSPOSE, CEED_EVAL_GRAD, impl->qvecsout[i], impl->evecs[i + impl->numein]));
        break;
      // LCOV_EXCL_START
      case CEED_EVAL_WEIGHT:
        Ceed ceed;
        CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
        return CeedError(ceed, CEED_ERROR_BACKEND, "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
        break;  // Should not occur
      case CEED_EVAL_DIV:
        break;  // TODO: Not implemented
      case CEED_EVAL_CURL:
        break;  // TODO: Not implemented
        // LCOV_EXCL_STOP
    }
  }

  // Output restriction
  for (CeedInt i = 0; i < numoutputfields; i++) {
    // Restore evec
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode));
    if (emode == CEED_EVAL_NONE) {
      CeedCallBackend(CeedVectorRestoreArray(impl->evecs[i + impl->numein], &edata[i + numinputfields]));
    }
    // Get output vector
    CeedCallBackend(CeedOperatorFieldGetVector(opoutputfields[i], &vec));
    // Restrict
    CeedCallBackend(CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict));
    // Active
    if (vec == CEED_VECTOR_ACTIVE) vec = outvec;

    // Transposed restriction accumulates (adds) into the L-vector
    CeedCallBackend(CeedElemRestrictionApply(Erestrict, CEED_TRANSPOSE, impl->evecs[i + impl->numein], vec, request));
  }

  // Restore input arrays
  CeedCallBackend(CeedOperatorRestoreInputs_Sycl(numinputfields, qfinputfields, opinputfields, false, edata, impl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Core code for assembling linear QFunction
//
// Assembles the QFunction's action as a dense (numactivein x numactiveout)
// block per quadrature point by applying the QFunction once per active input
// component with that component set to 1 and all others 0 (a column-at-a-time
// "identity probing" of the linearized QFunction). When build_objects is true
// the output strided restriction and assembled vector are created; otherwise
// the caller-provided ones are reused (update path). The per-field active
// input Q-vectors are cached on impl across calls.
//------------------------------------------------------------------------------
static inline int CeedOperatorLinearAssembleQFunctionCore_Sycl(CeedOperator op, bool build_objects, CeedVector *assembled, CeedElemRestriction *rstr,
                                                               CeedRequest *request) {
  Ceed ceed, ceedparent;
  CeedOperator_Sycl *impl;
  CeedQFunction qf;
  CeedQFunctionField *qfinputfields, *qfoutputfields;
  CeedOperatorField *opinputfields, *opoutputfields;
  CeedVector vec, *activein;
  CeedInt numactivein, numactiveout, Q, numelements, numinputfields, numoutputfields, size;
  CeedSize q_size;
  CeedScalar *a, *tmp, *edata[2 * CEED_FIELD_MAX] = {NULL};
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedOperatorGetFallbackParentCeed(op, &ceedparent));
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  activein    = impl->qfactivein;
  numactivein = impl->qfnumactivein, numactiveout = impl->qfnumactiveout;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedOperatorGetNumQuadraturePoints(op, &Q));
  CeedCallBackend(CeedOperatorGetNumElements(op, &numelements));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields));
  CeedCallBackend(CeedOperatorGetFields(op, &numinputfields, &opinputfields, &numoutputfields, &opoutputfields));

  // Setup
  CeedCallBackend(CeedOperatorSetup_Sycl(op));

  // Check for identity
  bool identityqf;
  CeedCallBackend(CeedQFunctionIsIdentity(qf, &identityqf));
  if (identityqf) {
    // LCOV_EXCL_START
    return CeedError(ceed, CEED_ERROR_BACKEND, "Assembling identity QFunctions not supported");
    // LCOV_EXCL_STOP
  }

  // Input Evecs and Restriction (passive inputs only; active inputs are probed below)
  CeedCallBackend(CeedOperatorSetupInputs_Sycl(numinputfields, qfinputfields, opinputfields, NULL, true, edata, impl, request));

  // Count number of active input fields (first call only; cached afterwards)
  if (!numactivein) {
    for (CeedInt i = 0; i < numinputfields; i++) {
      // Get input vector
      CeedCallBackend(CeedOperatorFieldGetVector(opinputfields[i], &vec));
      // Check if active input
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedQFunctionFieldGetSize(qfinputfields[i], &size));
        CeedCallBackend(CeedVectorSetValue(impl->qvecsin[i], 0.0));
        CeedCallBackend(CeedVectorGetArray(impl->qvecsin[i], CEED_MEM_DEVICE, &tmp));
        CeedCallBackend(CeedRealloc(numactivein + size, &activein));
        // One sub-vector per component, aliasing a slice of the Q-vector
        for (CeedInt field = 0; field < size; field++) {
          q_size = (CeedSize)Q * numelements;
          CeedCallBackend(CeedVectorCreate(ceed, q_size, &activein[numactivein + field]));
          CeedCallBackend(CeedVectorSetArray(activein[numactivein + field], CEED_MEM_DEVICE, CEED_USE_POINTER, &tmp[field * Q * numelements]));
        }
        numactivein += size;
        CeedCallBackend(CeedVectorRestoreArray(impl->qvecsin[i], &tmp));
      }
    }
    impl->qfnumactivein = numactivein;
    impl->qfactivein    = activein;
  }

  // Count number of active output fields (first call only; cached afterwards)
  if (!numactiveout) {
    for (CeedInt i = 0; i < numoutputfields; i++) {
      // Get output vector
      CeedCallBackend(CeedOperatorFieldGetVector(opoutputfields[i], &vec));
      // Check if active output
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedQFunctionFieldGetSize(qfoutputfields[i], &size));
        numactiveout += size;
      }
    }
    impl->qfnumactiveout = numactiveout;
  }

  // Check sizes
  if (!numactivein || !numactiveout) {
    // LCOV_EXCL_START
    return CeedError(ceed, CEED_ERROR_BACKEND, "Cannot assemble QFunction without active inputs and outputs");
    // LCOV_EXCL_STOP
  }

  // Build objects if needed
  if (build_objects) {
    // Create output restriction
    CeedInt strides[3] = {1, numelements * Q, Q}; /* *NOPAD* */
    CeedCallBackend(CeedElemRestrictionCreateStrided(ceedparent, numelements, Q, numactivein * numactiveout,
                                                     numactivein * numactiveout * numelements * Q, strides, rstr));
    // Create assembled vector
    CeedSize l_size = (CeedSize)numelements * Q * numactivein * numactiveout;
    CeedCallBackend(CeedVectorCreate(ceedparent, l_size, assembled));
  }
  CeedCallBackend(CeedVectorSetValue(*assembled, 0.0));
  CeedCallBackend(CeedVectorGetArray(*assembled, CEED_MEM_DEVICE, &a));

  // Input basis apply (passive inputs only)
  CeedCallBackend(CeedOperatorInputBasis_Sycl(numelements, qfinputfields, opinputfields, numinputfields, true, edata, impl));

  // Assemble QFunction
  for (CeedInt in = 0; in < numactivein; in++) {
    // Set Inputs: unit vector on component `in`, zero on the previous one
    CeedCallBackend(CeedVectorSetValue(activein[in], 1.0));
    if (numactivein > 1) {
      CeedCallBackend(CeedVectorSetValue(activein[(in + numactivein - 1) % numactivein], 0.0));
    }
    // Set Outputs
    for (CeedInt out = 0; out < numoutputfields; out++) {
      // Get output vector
      CeedCallBackend(CeedOperatorFieldGetVector(opoutputfields[out], &vec));
      // Check if active output
      if (vec == CEED_VECTOR_ACTIVE) {
        CeedCallBackend(CeedVectorSetArray(impl->qvecsout[out], CEED_MEM_DEVICE, CEED_USE_POINTER, a));
        CeedCallBackend(CeedQFunctionFieldGetSize(qfoutputfields[out], &size));
        a += size * Q * numelements;  // Advance the pointer by the size of the output
      }
    }
    // Apply QFunction
    CeedCallBackend(CeedQFunctionApply(qf, Q * numelements, impl->qvecsin, impl->qvecsout));
  }

  // Un-set output Qvecs to prevent accidental overwrite of Assembled
  for (CeedInt out = 0; out < numoutputfields; out++) {
    // Get output vector
    CeedCallBackend(CeedOperatorFieldGetVector(opoutputfields[out], &vec));
    // Check if active output
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedCallBackend(CeedVectorTakeArray(impl->qvecsout[out], CEED_MEM_DEVICE, NULL));
    }
  }

  // Restore input arrays
  CeedCallBackend(CeedOperatorRestoreInputs_Sycl(numinputfields, qfinputfields, opinputfields, true, edata, impl));

  // Restore output
  CeedCallBackend(CeedVectorRestoreArray(*assembled, &a));

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble Linear QFunction
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleQFunction_Sycl(CeedOperator op, CeedVector *assembled, CeedElemRestriction *rstr, CeedRequest *request) {
  return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, true, assembled, rstr, request);
}

//------------------------------------------------------------------------------
// Update Assembled Linear QFunction
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleQFunctionUpdate_Sycl(CeedOperator op, CeedVector assembled, CeedElemRestriction rstr, CeedRequest *request) {
  return CeedOperatorLinearAssembleQFunctionCore_Sycl(op, false, &assembled, &rstr, request);
}

//------------------------------------------------------------------------------
// Create point block restriction
//
// Builds a restriction with ncomp*ncomp components from an existing one by
// scaling its offsets, so point-block (per-node ncomp x ncomp) diagonal blocks
// can be scattered with the same connectivity. The offset shift is ncomp for
// interlaced components (compstride == 1) and ncomp*ncomp otherwise.
//------------------------------------------------------------------------------
static int CreatePBRestriction(CeedElemRestriction rstr, CeedElemRestriction *pbRstr) {
  Ceed ceed;
  CeedCallBackend(CeedElemRestrictionGetCeed(rstr, &ceed));
  const CeedInt *offsets;
  CeedCallBackend(CeedElemRestrictionGetOffsets(rstr, CEED_MEM_HOST, &offsets));

  // Expand offsets
  CeedInt nelem, ncomp, elemsize, compstride, *pbOffsets;
  CeedSize l_size;
  CeedCallBackend(CeedElemRestrictionGetNumElements(rstr, &nelem));
  CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr, &ncomp));
  CeedCallBackend(CeedElemRestrictionGetElementSize(rstr, &elemsize));
  CeedCallBackend(CeedElemRestrictionGetCompStride(rstr, &compstride));
  CeedCallBackend(CeedElemRestrictionGetLVectorSize(rstr, &l_size));
  CeedInt shift = ncomp;
  if (compstride != 1) shift *= ncomp;
  CeedCallBackend(CeedCalloc(nelem * elemsize, &pbOffsets));
  for (CeedInt i = 0; i < nelem * elemsize; i++) {
    pbOffsets[i] = offsets[i] * shift;
  }

  // Create new restriction (takes ownership of pbOffsets via CEED_OWN_POINTER)
  CeedCallBackend(
      CeedElemRestrictionCreate(ceed, nelem, elemsize, ncomp * ncomp, 1, l_size * ncomp, CEED_MEM_HOST, CEED_OWN_POINTER, pbOffsets, pbRstr));

  // Cleanup
  CeedCallBackend(CeedElemRestrictionRestoreOffsets(rstr, &offsets));

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble diagonal setup
//
// One-time setup for diagonal assembly: determines the (single) active input
// and output bases and their per-component eval-mode lists (GRAD contributes
// `dim` entries), then uploads the basis matrices (identity for EVAL_NONE,
// interp, grad) and eval-mode arrays to device USM. All host-to-device copies
// are ordered after a queue barrier and awaited together before returning.
// Errors out for multi-field (distinct restriction) operators.
//------------------------------------------------------------------------------
static inline int CeedOperatorAssembleDiagonalSetup_Sycl(CeedOperator op, const bool pointBlock) {
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedQFunction qf;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedInt numinputfields, numoutputfields;
  CeedCallBackend(CeedQFunctionGetNumArgs(qf, &numinputfields, &numoutputfields));

  // Determine active input basis
  CeedOperatorField *opfields;
  CeedQFunctionField *qffields;
  CeedCallBackend(CeedOperatorGetFields(op, NULL, &opfields, NULL, NULL));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qffields, NULL, NULL));
  CeedInt numemodein = 0, ncomp = 0, dim = 1;
  CeedEvalMode *emodein = NULL;
  CeedBasis basisin = NULL;
  CeedElemRestriction rstrin = NULL;
  for (CeedInt i = 0; i < numinputfields; i++) {
    CeedVector vec;
    CeedCallBackend(CeedOperatorFieldGetVector(opfields[i], &vec));
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedElemRestriction rstr;
      CeedCallBackend(CeedOperatorFieldGetBasis(opfields[i], &basisin));
      CeedCallBackend(CeedBasisGetNumComponents(basisin, &ncomp));
      CeedCallBackend(CeedBasisGetDimension(basisin, &dim));
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(opfields[i], &rstr));
      if (rstrin && rstrin != rstr) {
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly");
        // LCOV_EXCL_STOP
      }
      rstrin = rstr;
      CeedEvalMode emode;
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qffields[i], &emode));
      switch (emode) {
        case CEED_EVAL_NONE:
        case CEED_EVAL_INTERP:
          CeedCallBackend(CeedRealloc(numemodein + 1, &emodein));
          emodein[numemodein] = emode;
          numemodein += 1;
          break;
        case CEED_EVAL_GRAD:
          // GRAD contributes one eval-mode entry per dimension
          CeedCallBackend(CeedRealloc(numemodein + dim, &emodein));
          for (CeedInt d = 0; d < dim; d++) emodein[numemodein + d] = emode;
          numemodein += dim;
          break;
        case CEED_EVAL_WEIGHT:
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // Caught by QF Assembly
      }
    }
  }

  // Determine active output basis
  CeedCallBackend(CeedOperatorGetFields(op, NULL, NULL, NULL, &opfields));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qffields));
  CeedInt numemodeout = 0;
  CeedEvalMode *emodeout = NULL;
  CeedBasis basisout = NULL;
  CeedElemRestriction rstrout = NULL;
  for (CeedInt i = 0; i < numoutputfields; i++) {
    CeedVector vec;
    CeedCallBackend(CeedOperatorFieldGetVector(opfields[i], &vec));
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedElemRestriction rstr;
      CeedCallBackend(CeedOperatorFieldGetBasis(opfields[i], &basisout));
      CeedCallBackend(CeedOperatorFieldGetElemRestriction(opfields[i], &rstr));
      if (rstrout && rstrout != rstr) {
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator diagonal assembly");
        // LCOV_EXCL_STOP
      }
      rstrout = rstr;
      CeedEvalMode emode;
      CeedCallBackend(CeedQFunctionFieldGetEvalMode(qffields[i], &emode));
      switch (emode) {
        case CEED_EVAL_NONE:
        case CEED_EVAL_INTERP:
          CeedCallBackend(CeedRealloc(numemodeout + 1, &emodeout));
          emodeout[numemodeout] = emode;
          numemodeout += 1;
          break;
        case CEED_EVAL_GRAD:
          // NOTE(review): uses `dim` from the active *input* basis — assumes
          // input and output bases share a dimension; TODO confirm.
          CeedCallBackend(CeedRealloc(numemodeout + dim, &emodeout));
          for (CeedInt d = 0; d < dim; d++) emodeout[numemodeout + d] = emode;
          numemodeout += dim;
          break;
        case CEED_EVAL_WEIGHT:
        case CEED_EVAL_DIV:
        case CEED_EVAL_CURL:
          break;  // Caught by QF Assembly
      }
    }
  }

  // Operator data struct
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  Ceed_Sycl *sycl_data;
  CeedCallBackend(CeedGetData(ceed, &sycl_data));
  CeedCallBackend(CeedCalloc(1, &impl->diag));
  CeedOperatorDiag_Sycl *diag = impl->diag;
  diag->basisin     = basisin;
  diag->basisout    = basisout;
  diag->h_emodein   = emodein;
  diag->h_emodeout  = emodeout;
  diag->numemodein  = numemodein;
  diag->numemodeout = numemodeout;

  // Kernel parameters
  CeedInt nnodes, nqpts;
  CeedCallBackend(CeedBasisGetNumNodes(basisin, &nnodes));
  CeedCallBackend(CeedBasisGetNumQuadraturePoints(basisin, &nqpts));
  diag->nnodes = nnodes;
  diag->nqpts  = nqpts;
  diag->ncomp  = ncomp;

  // Basis matrices
  const CeedInt iLen = nqpts * nnodes;
  const CeedInt gLen = nqpts * nnodes * dim;
  const CeedScalar *interpin, *interpout, *gradin, *gradout;

  // CEED_EVAL_NONE
  CeedScalar *identity = NULL;
  bool evalNone = false;
  for (CeedInt i = 0; i < numemodein; i++) evalNone = evalNone || (emodein[i] == CEED_EVAL_NONE);
  for (CeedInt i = 0; i < numemodeout; i++) evalNone = evalNone || (emodeout[i] == CEED_EVAL_NONE);

  // Order queue: all copies below depend on this barrier event
  sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier();

  std::vector<sycl::event> copy_events;
  if (evalNone) {
    // Rectangular identity stands in for the basis matrix when emode is NONE
    CeedCallBackend(CeedCalloc(nqpts * nnodes, &identity));
    for (CeedSize i = 0; i < (nnodes < nqpts ? nnodes : nqpts); i++) identity[i * nnodes + i] = 1.0;
    CeedCallSycl(ceed, diag->d_identity = sycl::malloc_device<CeedScalar>(iLen, sycl_data->sycl_device, sycl_data->sycl_context));
    sycl::event identity_copy = sycl_data->sycl_queue.copy<CeedScalar>(identity, diag->d_identity, iLen, {e});
    copy_events.push_back(identity_copy);
  }

  // CEED_EVAL_INTERP
  CeedCallBackend(CeedBasisGetInterp(basisin, &interpin));
  CeedCallSycl(ceed, diag->d_interpin = sycl::malloc_device<CeedScalar>(iLen, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event interpin_copy = sycl_data->sycl_queue.copy<CeedScalar>(interpin, diag->d_interpin, iLen, {e});
  copy_events.push_back(interpin_copy);

  CeedCallBackend(CeedBasisGetInterp(basisout, &interpout));
  CeedCallSycl(ceed, diag->d_interpout = sycl::malloc_device<CeedScalar>(iLen, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event interpout_copy = sycl_data->sycl_queue.copy<CeedScalar>(interpout, diag->d_interpout, iLen, {e});
  copy_events.push_back(interpout_copy);

  // CEED_EVAL_GRAD
  CeedCallBackend(CeedBasisGetGrad(basisin, &gradin));
  CeedCallSycl(ceed, diag->d_gradin = sycl::malloc_device<CeedScalar>(gLen, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event gradin_copy = sycl_data->sycl_queue.copy<CeedScalar>(gradin, diag->d_gradin, gLen, {e});
  copy_events.push_back(gradin_copy);

  CeedCallBackend(CeedBasisGetGrad(basisout, &gradout));
  CeedCallSycl(ceed, diag->d_gradout = sycl::malloc_device<CeedScalar>(gLen, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event gradout_copy = sycl_data->sycl_queue.copy<CeedScalar>(gradout, diag->d_gradout, gLen, {e});
  copy_events.push_back(gradout_copy);

  // Arrays of emodes
  CeedCallSycl(ceed, diag->d_emodein = sycl::malloc_device<CeedEvalMode>(numemodein, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event emodein_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(emodein, diag->d_emodein, numemodein, {e});
  copy_events.push_back(emodein_copy);

  CeedCallSycl(ceed, diag->d_emodeout = sycl::malloc_device<CeedEvalMode>(numemodeout, sycl_data->sycl_device, sycl_data->sycl_context));
  sycl::event emodeout_copy = sycl_data->sycl_queue.copy<CeedEvalMode>(emodeout, diag->d_emodeout, numemodeout, {e});
  copy_events.push_back(emodeout_copy);

  // Restriction
  diag->diagrstr = rstrout;

  // Wait for all copies to complete and handle exceptions
  CeedCallSycl(ceed, sycl::event::wait_and_throw(copy_events));

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Kernel for diagonal assembly
//------------------------------------------------------------------------------
static int CeedOperatorLinearDiagonal_Sycl(sycl::queue &sycl_queue, const bool pointBlock, const CeedInt nelem, const CeedOperatorDiag_Sycl *diag,
                                           const CeedScalar *assembledqfarray, CeedScalar *elemdiagarray) {
  // Pull kernel parameters and device pointers out of the diag struct so the
  // kernel lambda below captures plain values/pointers rather than the struct.
  const CeedSize nnodes      = diag->nnodes;
  const CeedSize nqpts       = diag->nqpts;
  const CeedSize ncomp       = diag->ncomp;
  const CeedSize numemodein  = diag->numemodein;
  const CeedSize numemodeout = diag->numemodeout;

  const CeedScalar *identity  = diag->d_identity;
  const CeedScalar *interpin  = diag->d_interpin;
  const CeedScalar *gradin    = diag->d_gradin;
  const CeedScalar *interpout = diag->d_interpout;
  const CeedScalar *gradout   = diag->d_gradout;
  const CeedEvalMode *emodein = diag->d_emodein;
  const CeedEvalMode *emodeout = diag->d_emodeout;

sycl::range<1> kernel_range(nelem * nnodes); 854 855 // Order queue 856 sycl::event e = sycl_queue.ext_oneapi_submit_barrier(); 857 sycl_queue.parallel_for<CeedOperatorSyclLinearDiagonal>(kernel_range, {e}, [=](sycl::id<1> idx) { 858 const CeedInt tid = idx % nnodes; 859 const CeedInt e = idx / nnodes; 860 861 // Compute the diagonal of B^T D B 862 // Each element 863 CeedInt dout = -1; 864 // Each basis eval mode pair 865 for (CeedSize eout = 0; eout < numemodeout; eout++) { 866 const CeedScalar *bt = NULL; 867 if (emodeout[eout] == CEED_EVAL_GRAD) ++dout; 868 CeedOperatorGetBasisPointer_Sycl(&bt, emodeout[eout], identity, interpout, &gradout[dout * nqpts * nnodes]); 869 CeedInt din = -1; 870 for (CeedSize ein = 0; ein < numemodein; ein++) { 871 const CeedScalar *b = NULL; 872 if (emodein[ein] == CEED_EVAL_GRAD) ++din; 873 CeedOperatorGetBasisPointer_Sycl(&b, emodein[ein], identity, interpin, &gradin[din * nqpts * nnodes]); 874 // Each component 875 for (CeedSize compOut = 0; compOut < ncomp; compOut++) { 876 // Each qpoint/node pair 877 if (pointBlock) { 878 // Point Block Diagonal 879 for (CeedInt compIn = 0; compIn < ncomp; compIn++) { 880 CeedScalar evalue = 0.0; 881 for (CeedSize q = 0; q < nqpts; q++) { 882 const CeedScalar qfvalue = 883 assembledqfarray[((((ein * ncomp + compIn) * numemodeout + eout) * ncomp + compOut) * nelem + e) * nqpts + q]; 884 evalue += bt[q * nnodes + tid] * qfvalue * b[q * nnodes + tid]; 885 } 886 elemdiagarray[((compOut * ncomp + compIn) * nelem + e) * nnodes + tid] += evalue; 887 } 888 } else { 889 // Diagonal Only 890 CeedScalar evalue = 0.0; 891 for (CeedSize q = 0; q < nqpts; q++) { 892 const CeedScalar qfvalue = 893 assembledqfarray[((((ein * ncomp + compOut) * numemodeout + eout) * ncomp + compOut) * nelem + e) * nqpts + q]; 894 evalue += bt[q * nnodes + tid] * qfvalue * b[q * nnodes + tid]; 895 } 896 elemdiagarray[(compOut * nelem + e) * nnodes + tid] += evalue; 897 } 898 } 899 } 900 } 901 }); 902 return CEED_ERROR_SUCCESS; 
903 } 904 905 //------------------------------------------------------------------------------ 906 // Assemble diagonal common code 907 //------------------------------------------------------------------------------ 908 static inline int CeedOperatorAssembleDiagonalCore_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request, const bool pointBlock) { 909 Ceed ceed; 910 CeedCallBackend(CeedOperatorGetCeed(op, &ceed)); 911 CeedOperator_Sycl *impl; 912 CeedCallBackend(CeedOperatorGetData(op, &impl)); 913 Ceed_Sycl *sycl_data; 914 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 915 916 // Assemble QFunction 917 CeedVector assembledqf = NULL; 918 CeedElemRestriction rstr = NULL; 919 CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembledqf, &rstr, request)); 920 CeedCallBackend(CeedElemRestrictionDestroy(&rstr)); 921 922 // Setup 923 if (!impl->diag) { 924 CeedCallBackend(CeedOperatorAssembleDiagonalSetup_Sycl(op, pointBlock)); 925 } 926 CeedOperatorDiag_Sycl *diag = impl->diag; 927 assert(diag != NULL); 928 929 // Restriction 930 if (pointBlock && !diag->pbdiagrstr) { 931 CeedElemRestriction pbdiagrstr; 932 CeedCallBackend(CreatePBRestriction(diag->diagrstr, &pbdiagrstr)); 933 diag->pbdiagrstr = pbdiagrstr; 934 } 935 CeedElemRestriction diagrstr = pointBlock ? diag->pbdiagrstr : diag->diagrstr; 936 937 // Create diagonal vector 938 CeedVector elemdiag = pointBlock ? 
diag->pbelemdiag : diag->elemdiag;
  if (!elemdiag) {
    // Lazily create and cache the element-diagonal work vector on diag
    CeedCallBackend(CeedElemRestrictionCreateVector(diagrstr, NULL, &elemdiag));
    if (pointBlock) diag->pbelemdiag = elemdiag;
    else diag->elemdiag = elemdiag;
  }
  CeedCallBackend(CeedVectorSetValue(elemdiag, 0.0));

  // Assemble element operator diagonals
  CeedScalar *elemdiagarray;
  const CeedScalar *assembledqfarray;
  CeedCallBackend(CeedVectorGetArray(elemdiag, CEED_MEM_DEVICE, &elemdiagarray));
  CeedCallBackend(CeedVectorGetArrayRead(assembledqf, CEED_MEM_DEVICE, &assembledqfarray));
  CeedInt nelem;
  CeedCallBackend(CeedElemRestrictionGetNumElements(diagrstr, &nelem));

  // Compute the diagonal of B^T D B
  // Umesh: This needs to be reviewed later
  // if (pointBlock) {
  //   CeedCallBackend(CeedOperatorLinearPointBlockDiagonal_Sycl(sycl_data->sycl_queue, nelem, diag, assembledqfarray, elemdiagarray));
  // } else {
  CeedCallBackend(CeedOperatorLinearDiagonal_Sycl(sycl_data->sycl_queue, pointBlock, nelem, diag, assembledqfarray, elemdiagarray));
  // }

  // Wait for queue to complete and handle exceptions
  sycl_data->sycl_queue.wait_and_throw();

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArray(elemdiag, &elemdiagarray));
  CeedCallBackend(CeedVectorRestoreArrayRead(assembledqf, &assembledqfarray));

  // Assemble local operator diagonal
  CeedCallBackend(CeedElemRestrictionApply(diagrstr, CEED_TRANSPOSE, elemdiag, assembled, request));

  // Cleanup
  CeedCallBackend(CeedVectorDestroy(&assembledqf));

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble Linear Diagonal
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleAddDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) {
  // Thin wrapper: pointBlock = false (scalar diagonal)
  CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, false));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble Linear Point Block Diagonal
//------------------------------------------------------------------------------
static int CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl(CeedOperator op, CeedVector assembled, CeedRequest *request) {
  // Thin wrapper: pointBlock = true (ncomp x ncomp blocks)
  CeedCallBackend(CeedOperatorAssembleDiagonalCore_Sycl(op, assembled, request, true));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Single operator assembly setup
//------------------------------------------------------------------------------
static int CeedSingleOperatorAssembleSetup_Sycl(CeedOperator op) {
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));

  // Get input and output fields
  CeedInt num_input_fields, num_output_fields;
  CeedOperatorField *input_fields;
  CeedOperatorField *output_fields;
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &input_fields, &num_output_fields, &output_fields));

  // Determine active input basis eval mode
  CeedQFunction qf;
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedQFunctionField *qf_fields;
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL));
  // Note that the kernel will treat each dimension of a gradient action separately;
  // i.e., when an active input has a CEED_EVAL_GRAD mode, num_emode_in will increment by dim.
  // However, for the purposes of loading the B matrices, it will be treated as one mode, and we will load/copy the entire gradient matrix at once, so
  // num_B_in_mats_to_load will be incremented by 1.
1018 CeedInt num_emode_in = 0, dim = 1, num_B_in_mats_to_load = 0, size_B_in = 0; 1019 CeedEvalMode *eval_mode_in = NULL; // will be of size num_B_in_mats_load 1020 CeedBasis basis_in = NULL; 1021 CeedInt nqpts = 0, esize = 0; 1022 CeedElemRestriction rstr_in = NULL; 1023 for (CeedInt i = 0; i < num_input_fields; i++) { 1024 CeedVector vec; 1025 CeedCallBackend(CeedOperatorFieldGetVector(input_fields[i], &vec)); 1026 if (vec == CEED_VECTOR_ACTIVE) { 1027 CeedCallBackend(CeedOperatorFieldGetBasis(input_fields[i], &basis_in)); 1028 CeedCallBackend(CeedBasisGetDimension(basis_in, &dim)); 1029 CeedCallBackend(CeedBasisGetNumQuadraturePoints(basis_in, &nqpts)); 1030 CeedCallBackend(CeedOperatorFieldGetElemRestriction(input_fields[i], &rstr_in)); 1031 CeedCallBackend(CeedElemRestrictionGetElementSize(rstr_in, &esize)); 1032 CeedEvalMode eval_mode; 1033 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1034 if (eval_mode != CEED_EVAL_NONE) { 1035 CeedCallBackend(CeedRealloc(num_B_in_mats_to_load + 1, &eval_mode_in)); 1036 eval_mode_in[num_B_in_mats_to_load] = eval_mode; 1037 num_B_in_mats_to_load += 1; 1038 if (eval_mode == CEED_EVAL_GRAD) { 1039 num_emode_in += dim; 1040 size_B_in += dim * esize * nqpts; 1041 } else { 1042 num_emode_in += 1; 1043 size_B_in += esize * nqpts; 1044 } 1045 } 1046 } 1047 } 1048 1049 // Determine active output basis; basis_out and rstr_out only used if same as input, TODO 1050 CeedCallBackend(CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields)); 1051 CeedInt num_emode_out = 0, num_B_out_mats_to_load = 0, size_B_out = 0; 1052 CeedEvalMode *eval_mode_out = NULL; 1053 CeedBasis basis_out = NULL; 1054 CeedElemRestriction rstr_out = NULL; 1055 for (CeedInt i = 0; i < num_output_fields; i++) { 1056 CeedVector vec; 1057 CeedCallBackend(CeedOperatorFieldGetVector(output_fields[i], &vec)); 1058 if (vec == CEED_VECTOR_ACTIVE) { 1059 CeedCallBackend(CeedOperatorFieldGetBasis(output_fields[i], &basis_out)); 1060 
CeedCallBackend(CeedOperatorFieldGetElemRestriction(output_fields[i], &rstr_out)); 1061 if (rstr_out && rstr_out != rstr_in) { 1062 // LCOV_EXCL_START 1063 return CeedError(ceed, CEED_ERROR_BACKEND, "Backend does not implement multi-field non-composite operator assembly"); 1064 // LCOV_EXCL_STOP 1065 } 1066 CeedEvalMode eval_mode; 1067 CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode)); 1068 if (eval_mode != CEED_EVAL_NONE) { 1069 CeedCallBackend(CeedRealloc(num_B_out_mats_to_load + 1, &eval_mode_out)); 1070 eval_mode_out[num_B_out_mats_to_load] = eval_mode; 1071 num_B_out_mats_to_load += 1; 1072 if (eval_mode == CEED_EVAL_GRAD) { 1073 num_emode_out += dim; 1074 size_B_out += dim * esize * nqpts; 1075 } else { 1076 num_emode_out += 1; 1077 size_B_out += esize * nqpts; 1078 } 1079 } 1080 } 1081 } 1082 1083 if (num_emode_in == 0 || num_emode_out == 0) { 1084 // LCOV_EXCL_START 1085 return CeedError(ceed, CEED_ERROR_UNSUPPORTED, "Cannot assemble operator without inputs/outputs"); 1086 // LCOV_EXCL_STOP 1087 } 1088 1089 CeedInt nelem, ncomp; 1090 CeedCallBackend(CeedElemRestrictionGetNumElements(rstr_in, &nelem)); 1091 CeedCallBackend(CeedElemRestrictionGetNumComponents(rstr_in, &ncomp)); 1092 1093 CeedCallBackend(CeedCalloc(1, &impl->asmb)); 1094 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1095 asmb->nelem = nelem; 1096 1097 Ceed_Sycl *sycl_data; 1098 CeedCallBackend(CeedGetData(ceed, &sycl_data)); 1099 1100 // Kernel setup 1101 int elemsPerBlock = 1; 1102 asmb->elemsPerBlock = elemsPerBlock; 1103 CeedInt block_size = esize * esize * elemsPerBlock; 1104 /* CeedInt maxThreadsPerBlock = sycl_data->sycl_device.get_info<sycl::info::device::max_work_group_size>(); 1105 bool fallback = block_size > maxThreadsPerBlock; 1106 asmb->fallback = fallback; 1107 if (fallback) { 1108 // Use fallback kernel with 1D threadblock 1109 block_size = esize * elemsPerBlock; 1110 asmb->block_size_x = esize; 1111 asmb->block_size_y = 1; 1112 } else { // Use kernel with 
2D threadblock 1113 asmb->block_size_x = esize; 1114 asmb->block_size_y = esize; 1115 }*/ 1116 asmb->block_size_x = esize; 1117 asmb->block_size_y = esize; 1118 asmb->numemodein = num_emode_in; 1119 asmb->numemodeout = num_emode_out; 1120 asmb->nqpts = nqpts; 1121 asmb->nnodes = esize; 1122 asmb->block_size = block_size; 1123 asmb->ncomp = ncomp; 1124 1125 // Build 'full' B matrices (not 1D arrays used for tensor-product matrices 1126 const CeedScalar *interp_in, *grad_in; 1127 CeedCallBackend(CeedBasisGetInterp(basis_in, &interp_in)); 1128 CeedCallBackend(CeedBasisGetGrad(basis_in, &grad_in)); 1129 1130 // Load into B_in, in order that they will be used in eval_mode 1131 CeedInt mat_start = 0; 1132 CeedCallSycl(ceed, asmb->d_B_in = sycl::malloc_device<CeedScalar>(size_B_in, sycl_data->sycl_device, sycl_data->sycl_context)); 1133 for (int i = 0; i < num_B_in_mats_to_load; i++) { 1134 CeedEvalMode eval_mode = eval_mode_in[i]; 1135 if (eval_mode == CEED_EVAL_INTERP) { 1136 // Order queue 1137 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1138 sycl_data->sycl_queue.copy<CeedScalar>(interp_in, &asmb->d_B_in[mat_start], esize * nqpts, {e}); 1139 mat_start += esize * nqpts; 1140 } else if (eval_mode == CEED_EVAL_GRAD) { 1141 // Order queue 1142 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1143 sycl_data->sycl_queue.copy<CeedScalar>(grad_in, &asmb->d_B_in[mat_start], dim * esize * nqpts, {e}); 1144 mat_start += dim * esize * nqpts; 1145 } 1146 } 1147 1148 const CeedScalar *interp_out, *grad_out; 1149 // Note that this function currently assumes 1 basis, so this should always be true 1150 // for now 1151 if (basis_out == basis_in) { 1152 interp_out = interp_in; 1153 grad_out = grad_in; 1154 } else { 1155 CeedCallBackend(CeedBasisGetInterp(basis_out, &interp_out)); 1156 CeedCallBackend(CeedBasisGetGrad(basis_out, &grad_out)); 1157 } 1158 1159 // Load into B_out, in order that they will be used in eval_mode 1160 mat_start = 0; 1161 
CeedCallSycl(ceed, asmb->d_B_out = sycl::malloc_device<CeedScalar>(size_B_out, sycl_data->sycl_device, sycl_data->sycl_context)); 1162 for (int i = 0; i < num_B_out_mats_to_load; i++) { 1163 CeedEvalMode eval_mode = eval_mode_out[i]; 1164 if (eval_mode == CEED_EVAL_INTERP) { 1165 // Order queue 1166 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1167 sycl_data->sycl_queue.copy<CeedScalar>(interp_out, &asmb->d_B_out[mat_start], esize * nqpts, {e}); 1168 mat_start += esize * nqpts; 1169 } else if (eval_mode == CEED_EVAL_GRAD) { 1170 // Order queue 1171 sycl::event e = sycl_data->sycl_queue.ext_oneapi_submit_barrier(); 1172 sycl_data->sycl_queue.copy<CeedScalar>(grad_out, &asmb->d_B_out[mat_start], dim * esize * nqpts, {e}); 1173 mat_start += dim * esize * nqpts; 1174 } 1175 } 1176 return CEED_ERROR_SUCCESS; 1177 } 1178 1179 //------------------------------------------------------------------------------ 1180 // Matrix assembly kernel for low-order elements (3D thread block) 1181 //------------------------------------------------------------------------------ 1182 static int CeedOperatorLinearAssemble_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array, 1183 CeedScalar *values_array) { 1184 // This kernels assumes B_in and B_out have the same number of quadrature points and basis points. 
1185 // TODO: expand to more general cases 1186 CeedOperatorAssemble_Sycl *asmb = impl->asmb; 1187 const CeedInt nelem = asmb->nelem; 1188 const CeedSize nnodes = asmb->nnodes; 1189 const CeedSize ncomp = asmb->ncomp; 1190 const CeedSize nqpts = asmb->nqpts; 1191 const CeedSize numemodein = asmb->numemodein; 1192 const CeedSize numemodeout = asmb->numemodeout; 1193 1194 // Strides for final output ordering, determined by the reference (inference) implementation of the symbolic assembly, slowest --> fastest: element, 1195 // comp_in, comp_out, node_row, node_col 1196 const CeedSize comp_out_stride = nnodes * nnodes; 1197 const CeedSize comp_in_stride = comp_out_stride * ncomp; 1198 const CeedSize e_stride = comp_in_stride * ncomp; 1199 // Strides for QF array, slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt 1200 const CeedSize qe_stride = nqpts; 1201 const CeedSize qcomp_out_stride = nelem * qe_stride; 1202 const CeedSize qemode_out_stride = qcomp_out_stride * ncomp; 1203 const CeedSize qcomp_in_stride = qemode_out_stride * numemodeout; 1204 const CeedSize qemode_in_stride = qcomp_in_stride * ncomp; 1205 1206 CeedScalar *B_in, *B_out; 1207 B_in = asmb->d_B_in; 1208 B_out = asmb->d_B_out; 1209 const CeedInt block_size_x = asmb->block_size_x; 1210 const CeedInt block_size_y = asmb->block_size_y; 1211 1212 sycl::range<3> kernel_range(nelem, block_size_y, block_size_x); 1213 1214 // Order queue 1215 sycl::event e = sycl_queue.ext_oneapi_submit_barrier(); 1216 sycl_queue.parallel_for<CeedOperatorSyclLinearAssemble>(kernel_range, {e}, [=](sycl::id<3> idx) { 1217 const int e = idx.get(0); // Element index 1218 const int l = idx.get(1); // The output column index of each B^TDB operation 1219 const int i = idx.get(2); // The output row index of each B^TDB operation 1220 // such that we have (Bout^T)_ij D_jk Bin_kl = C_il 1221 for (CeedSize comp_in = 0; comp_in < ncomp; comp_in++) { 1222 for (CeedSize comp_out = 0; comp_out < ncomp; comp_out++) { 1223 
CeedScalar result = 0.0; 1224 CeedSize qf_index_comp = qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e; 1225 for (CeedSize emode_in = 0; emode_in < numemodein; emode_in++) { 1226 CeedSize b_in_index = emode_in * nqpts * nnodes; 1227 for (CeedSize emode_out = 0; emode_out < numemodeout; emode_out++) { 1228 CeedSize b_out_index = emode_out * nqpts * nnodes; 1229 CeedSize qf_index = qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in; 1230 // Perform the B^T D B operation for this 'chunk' of D (the qf_array) 1231 for (CeedSize j = 0; j < nqpts; j++) { 1232 result += B_out[b_out_index + j * nnodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * nnodes + l]; 1233 } 1234 } // end of emode_out 1235 } // end of emode_in 1236 CeedSize val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + nnodes * i + l; 1237 values_array[val_index] = result; 1238 } // end of out component 1239 } // end of in component 1240 }); 1241 1242 return CEED_ERROR_SUCCESS; 1243 } 1244 1245 //------------------------------------------------------------------------------ 1246 // Fallback kernel for larger orders (1D thread block) 1247 //------------------------------------------------------------------------------ 1248 /* 1249 static int CeedOperatorLinearAssembleFallback_Sycl(sycl::queue &sycl_queue, const CeedOperator_Sycl *impl, const CeedScalar *qf_array, 1250 CeedScalar *values_array) { 1251 // This kernel assumes B_in and B_out have the same number of quadrature points and basis points. 
// TODO: expand to more general cases
  CeedOperatorAssemble_Sycl *asmb = impl->asmb;
  const CeedInt nelem = asmb->nelem;
  const CeedInt nnodes = asmb->nnodes;
  const CeedInt ncomp = asmb->ncomp;
  const CeedInt nqpts = asmb->nqpts;
  const CeedInt numemodein = asmb->numemodein;
  const CeedInt numemodeout = asmb->numemodeout;

  // Strides for final output ordering, determined by the reference (interface) implementation of the symbolic assembly, slowest --> fastest: elememt,
  // comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = nnodes * nnodes;
  const CeedInt comp_in_stride = comp_out_stride * ncomp;
  const CeedInt e_stride = comp_in_stride * ncomp;
  // Strides for QF array, slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride = nqpts;
  const CeedInt qcomp_out_stride = nelem * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * ncomp;
  const CeedInt qcomp_in_stride = qemode_out_stride * numemodeout;
  const CeedInt qemode_in_stride = qcomp_in_stride * ncomp;

  CeedScalar *B_in, *B_out;
  B_in = asmb->d_B_in;
  B_out = asmb->d_B_out;
  const CeedInt elemsPerBlock = asmb->elemsPerBlock;
  const CeedInt block_size_x = asmb->block_size_x;
  const CeedInt block_size_y = asmb->block_size_y;  // This will be 1 for the fallback kernel

  const CeedInt grid = nelem / elemsPerBlock + ((nelem / elemsPerBlock * elemsPerBlock < nelem) ? 1 : 0);
  sycl::range<3> local_range(block_size_x, block_size_y, elemsPerBlock);
  sycl::range<3> global_range(grid * block_size_x, block_size_y, elemsPerBlock);
  sycl::nd_range<3> kernel_range(global_range, local_range);

  sycl_queue.parallel_for<CeedOperatorSyclLinearAssembleFallback>(kernel_range, [=](sycl::nd_item<3> work_item) {
    const CeedInt blockIdx = work_item.get_group(0);
    const CeedInt gridDimx = work_item.get_group_range(0);
    const CeedInt threadIdx = work_item.get_local_id(0);
    const CeedInt threadIdz = work_item.get_local_id(2);
    const CeedInt blockDimz = work_item.get_local_range(2);

    const int l = threadIdx;  // The output column index of each B^TDB operation
    // such that we have (Bout^T)_ij D_jk Bin_kl = C_il
    for (CeedInt e = blockIdx * blockDimz + threadIdz; e < nelem; e += gridDimx * blockDimz) {
      for (CeedInt comp_in = 0; comp_in < ncomp; comp_in++) {
        for (CeedInt comp_out = 0; comp_out < ncomp; comp_out++) {
          for (CeedInt i = 0; i < nnodes; i++) {
            CeedScalar result = 0.0;
            CeedInt qf_index_comp = qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
            for (CeedInt emode_in = 0; emode_in < numemodein; emode_in++) {
              CeedInt b_in_index = emode_in * nqpts * nnodes;
              for (CeedInt emode_out = 0; emode_out < numemodeout; emode_out++) {
                CeedInt b_out_index = emode_out * nqpts * nnodes;
                CeedInt qf_index = qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
                // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
                for (CeedInt j = 0; j < nqpts; j++) {
                  result += B_out[b_out_index + j * nnodes + i] * qf_array[qf_index + j] * B_in[b_in_index + j * nnodes + l];
                }
              }  // end of emode_out
            }    // end of emode_in
            CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + nnodes * i + l;
            values_array[val_index] = result;
          }  // end of loop over element node index, i
        }    // end of out component
      }      // end of in component
    }        // end of element loop
  });
  return CEED_ERROR_SUCCESS;
}*/

//------------------------------------------------------------------------------
// Assemble matrix data for COO matrix of assembled operator.
// The sparsity pattern is set by CeedOperatorLinearAssembleSymbolic.
//
// Note that this (and other assembly routines) currently assume only one active
// input restriction/basis per operator (could have multiple basis eval modes).
// TODO: allow multiple active input restrictions/basis objects
//------------------------------------------------------------------------------
static int CeedSingleOperatorAssemble_Sycl(CeedOperator op, CeedInt offset, CeedVector values) {
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedOperator_Sycl *impl;
  CeedCallBackend(CeedOperatorGetData(op, &impl));
  Ceed_Sycl *sycl_data;
  CeedCallBackend(CeedGetData(ceed, &sycl_data));

  // Setup (lazy, cached on impl->asmb)
  if (!impl->asmb) {
    CeedCallBackend(CeedSingleOperatorAssembleSetup_Sycl(op));
    assert(impl->asmb != NULL);
  }

  // Assemble QFunction
  CeedVector assembled_qf = NULL;
  CeedElemRestriction rstr_q = NULL;
  CeedCallBackend(CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembled_qf, &rstr_q, CEED_REQUEST_IMMEDIATE));
  CeedCallBackend(CeedElemRestrictionDestroy(&rstr_q));
  CeedScalar *values_array;
  CeedCallBackend(CeedVectorGetArrayWrite(values, CEED_MEM_DEVICE, &values_array));
  // Entries for this operator begin at `offset` within the COO values array
  values_array += offset;
  const CeedScalar *qf_array;
  CeedCallBackend(CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &qf_array));

  // Compute B^T D B
  CeedCallBackend(CeedOperatorLinearAssemble_Sycl(sycl_data->sycl_queue, impl, qf_array, values_array));

  // Wait for kernels to be completed
  // Kris: Review if this is
// necessary -- enqueing an async barrier may be sufficient
  sycl_data->sycl_queue.wait_and_throw();

  // Restore arrays
  CeedCallBackend(CeedVectorRestoreArray(values, &values_array));
  CeedCallBackend(CeedVectorRestoreArrayRead(assembled_qf, &qf_array));

  // Cleanup
  CeedCallBackend(CeedVectorDestroy(&assembled_qf));

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create operator
//------------------------------------------------------------------------------
int CeedOperatorCreate_Sycl(CeedOperator op) {
  Ceed ceed;
  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedOperator_Sycl *impl;

  CeedCallBackend(CeedCalloc(1, &impl));
  CeedCallBackend(CeedOperatorSetData(op, impl));

  // Register the SYCL implementations of the operator interface with the Ceed object
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunction", CeedOperatorLinearAssembleQFunction_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleQFunctionUpdate", CeedOperatorLinearAssembleQFunctionUpdate_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddDiagonal", CeedOperatorLinearAssembleAddDiagonal_Sycl));
  CeedCallBackend(
      CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleAddPointBlockDiagonal", CeedOperatorLinearAssembleAddPointBlockDiagonal_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "LinearAssembleSingle", CeedSingleOperatorAssemble_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "ApplyAdd", CeedOperatorApplyAdd_Sycl));
  CeedCallBackend(CeedSetBackendFunctionCpp(ceed, "Operator", op, "Destroy", CeedOperatorDestroy_Sycl));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------