1 // Copyright (c) 2017-2022, Lawrence Livermore National Security, LLC and other CEED contributors. 2 // All Rights Reserved. See the top-level LICENSE and NOTICE files for details. 3 // 4 // SPDX-License-Identifier: BSD-2-Clause 5 // 6 // This file is part of CEED: http://github.com/ceed 7 8 #include <ceed/ceed.h> 9 #include <ceed/backend.h> 10 #include <assert.h> 11 #include <cuda.h> 12 #include <cuda_runtime.h> 13 #include <stdbool.h> 14 #include <string.h> 15 #include "ceed-cuda-ref.h" 16 #include "../cuda/ceed-cuda-compile.h" 17 18 //------------------------------------------------------------------------------ 19 // Destroy operator 20 //------------------------------------------------------------------------------ 21 static int CeedOperatorDestroy_Cuda(CeedOperator op) { 22 int ierr; 23 CeedOperator_Cuda *impl; 24 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 25 26 // Apply data 27 for (CeedInt i = 0; i < impl->numein + impl->numeout; i++) { 28 ierr = CeedVectorDestroy(&impl->evecs[i]); CeedChkBackend(ierr); 29 } 30 ierr = CeedFree(&impl->evecs); CeedChkBackend(ierr); 31 32 for (CeedInt i = 0; i < impl->numein; i++) { 33 ierr = CeedVectorDestroy(&impl->qvecsin[i]); CeedChkBackend(ierr); 34 } 35 ierr = CeedFree(&impl->qvecsin); CeedChkBackend(ierr); 36 37 for (CeedInt i = 0; i < impl->numeout; i++) { 38 ierr = CeedVectorDestroy(&impl->qvecsout[i]); CeedChkBackend(ierr); 39 } 40 ierr = CeedFree(&impl->qvecsout); CeedChkBackend(ierr); 41 42 // QFunction assembly data 43 for (CeedInt i=0; i<impl->qfnumactivein; i++) { 44 ierr = CeedVectorDestroy(&impl->qfactivein[i]); CeedChkBackend(ierr); 45 } 46 ierr = CeedFree(&impl->qfactivein); CeedChkBackend(ierr); 47 48 // Diag data 49 if (impl->diag) { 50 Ceed ceed; 51 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 52 CeedChk_Cu(ceed, cuModuleUnload(impl->diag->module)); 53 ierr = CeedFree(&impl->diag->h_emodein); CeedChkBackend(ierr); 54 ierr = CeedFree(&impl->diag->h_emodeout); 
CeedChkBackend(ierr); 55 ierr = cudaFree(impl->diag->d_emodein); CeedChk_Cu(ceed, ierr); 56 ierr = cudaFree(impl->diag->d_emodeout); CeedChk_Cu(ceed, ierr); 57 ierr = cudaFree(impl->diag->d_identity); CeedChk_Cu(ceed, ierr); 58 ierr = cudaFree(impl->diag->d_interpin); CeedChk_Cu(ceed, ierr); 59 ierr = cudaFree(impl->diag->d_interpout); CeedChk_Cu(ceed, ierr); 60 ierr = cudaFree(impl->diag->d_gradin); CeedChk_Cu(ceed, ierr); 61 ierr = cudaFree(impl->diag->d_gradout); CeedChk_Cu(ceed, ierr); 62 ierr = CeedElemRestrictionDestroy(&impl->diag->pbdiagrstr); 63 CeedChkBackend(ierr); 64 ierr = CeedVectorDestroy(&impl->diag->elemdiag); CeedChkBackend(ierr); 65 ierr = CeedVectorDestroy(&impl->diag->pbelemdiag); CeedChkBackend(ierr); 66 } 67 ierr = CeedFree(&impl->diag); CeedChkBackend(ierr); 68 69 if (impl->asmb) { 70 Ceed ceed; 71 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 72 CeedChk_Cu(ceed, cuModuleUnload(impl->asmb->module)); 73 ierr = cudaFree(impl->asmb->d_B_in); CeedChk_Cu(ceed, ierr); 74 ierr = cudaFree(impl->asmb->d_B_out); CeedChk_Cu(ceed, ierr); 75 } 76 ierr = CeedFree(&impl->asmb); CeedChkBackend(ierr); 77 78 ierr = CeedFree(&impl); CeedChkBackend(ierr); 79 return CEED_ERROR_SUCCESS; 80 } 81 82 //------------------------------------------------------------------------------ 83 // Setup infields or outfields 84 //------------------------------------------------------------------------------ 85 static int CeedOperatorSetupFields_Cuda(CeedQFunction qf, CeedOperator op, 86 bool isinput, CeedVector *evecs, 87 CeedVector *qvecs, CeedInt starte, 88 CeedInt numfields, CeedInt Q, 89 CeedInt numelements) { 90 CeedInt dim, ierr, size; 91 Ceed ceed; 92 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 93 CeedBasis basis; 94 CeedElemRestriction Erestrict; 95 CeedOperatorField *opfields; 96 CeedQFunctionField *qffields; 97 CeedVector fieldvec; 98 bool strided; 99 bool skiprestrict; 100 101 if (isinput) { 102 ierr = CeedOperatorGetFields(op, NULL, 
&opfields, NULL, NULL); 103 CeedChkBackend(ierr); 104 ierr = CeedQFunctionGetFields(qf, NULL, &qffields, NULL, NULL); 105 CeedChkBackend(ierr); 106 } else { 107 ierr = CeedOperatorGetFields(op, NULL, NULL, NULL, &opfields); 108 CeedChkBackend(ierr); 109 ierr = CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qffields); 110 CeedChkBackend(ierr); 111 } 112 113 // Loop over fields 114 for (CeedInt i = 0; i < numfields; i++) { 115 CeedEvalMode emode; 116 ierr = CeedQFunctionFieldGetEvalMode(qffields[i], &emode); CeedChkBackend(ierr); 117 118 strided = false; 119 skiprestrict = false; 120 if (emode != CEED_EVAL_WEIGHT) { 121 ierr = CeedOperatorFieldGetElemRestriction(opfields[i], &Erestrict); 122 CeedChkBackend(ierr); 123 124 // Check whether this field can skip the element restriction: 125 // must be passive input, with emode NONE, and have a strided restriction with 126 // CEED_STRIDES_BACKEND. 127 128 // First, check whether the field is input or output: 129 if (isinput) { 130 // Check for passive input: 131 ierr = CeedOperatorFieldGetVector(opfields[i], &fieldvec); CeedChkBackend(ierr); 132 if (fieldvec != CEED_VECTOR_ACTIVE) { 133 // Check emode 134 if (emode == CEED_EVAL_NONE) { 135 // Check for strided restriction 136 ierr = CeedElemRestrictionIsStrided(Erestrict, &strided); 137 CeedChkBackend(ierr); 138 if (strided) { 139 // Check if vector is already in preferred backend ordering 140 ierr = CeedElemRestrictionHasBackendStrides(Erestrict, 141 &skiprestrict); CeedChkBackend(ierr); 142 } 143 } 144 } 145 } 146 if (skiprestrict) { 147 // We do not need an E-Vector, but will use the input field vector's data 148 // directly in the operator application. 
149 evecs[i + starte] = NULL; 150 } else { 151 ierr = CeedElemRestrictionCreateVector(Erestrict, NULL, 152 &evecs[i + starte]); 153 CeedChkBackend(ierr); 154 } 155 } 156 157 switch (emode) { 158 case CEED_EVAL_NONE: 159 ierr = CeedQFunctionFieldGetSize(qffields[i], &size); CeedChkBackend(ierr); 160 ierr = CeedVectorCreate(ceed, numelements * Q * size, &qvecs[i]); 161 CeedChkBackend(ierr); 162 break; 163 case CEED_EVAL_INTERP: 164 ierr = CeedQFunctionFieldGetSize(qffields[i], &size); CeedChkBackend(ierr); 165 ierr = CeedVectorCreate(ceed, numelements * Q * size, &qvecs[i]); 166 CeedChkBackend(ierr); 167 break; 168 case CEED_EVAL_GRAD: 169 ierr = CeedOperatorFieldGetBasis(opfields[i], &basis); CeedChkBackend(ierr); 170 ierr = CeedQFunctionFieldGetSize(qffields[i], &size); CeedChkBackend(ierr); 171 ierr = CeedBasisGetDimension(basis, &dim); CeedChkBackend(ierr); 172 ierr = CeedVectorCreate(ceed, numelements * Q * size, &qvecs[i]); 173 CeedChkBackend(ierr); 174 break; 175 case CEED_EVAL_WEIGHT: // Only on input fields 176 ierr = CeedOperatorFieldGetBasis(opfields[i], &basis); CeedChkBackend(ierr); 177 ierr = CeedVectorCreate(ceed, numelements * Q, &qvecs[i]); CeedChkBackend(ierr); 178 ierr = CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, 179 CEED_EVAL_WEIGHT, NULL, qvecs[i]); CeedChkBackend(ierr); 180 break; 181 case CEED_EVAL_DIV: 182 break; // TODO: Not implemented 183 case CEED_EVAL_CURL: 184 break; // TODO: Not implemented 185 } 186 } 187 return CEED_ERROR_SUCCESS; 188 } 189 190 //------------------------------------------------------------------------------ 191 // CeedOperator needs to connect all the named fields (be they active or passive) 192 // to the named inputs and outputs of its CeedQFunction. 
193 //------------------------------------------------------------------------------ 194 static int CeedOperatorSetup_Cuda(CeedOperator op) { 195 int ierr; 196 bool setupdone; 197 ierr = CeedOperatorIsSetupDone(op, &setupdone); CeedChkBackend(ierr); 198 if (setupdone) 199 return CEED_ERROR_SUCCESS; 200 Ceed ceed; 201 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 202 CeedOperator_Cuda *impl; 203 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 204 CeedQFunction qf; 205 ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr); 206 CeedInt Q, numelements, numinputfields, numoutputfields; 207 ierr = CeedOperatorGetNumQuadraturePoints(op, &Q); CeedChkBackend(ierr); 208 ierr = CeedOperatorGetNumElements(op, &numelements); CeedChkBackend(ierr); 209 CeedChkBackend(ierr); 210 CeedOperatorField *opinputfields, *opoutputfields; 211 ierr = CeedOperatorGetFields(op, &numinputfields, &opinputfields, 212 &numoutputfields, &opoutputfields); 213 CeedChkBackend(ierr); 214 CeedQFunctionField *qfinputfields, *qfoutputfields; 215 ierr = CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields); 216 CeedChkBackend(ierr); 217 218 // Allocate 219 ierr = CeedCalloc(numinputfields + numoutputfields, &impl->evecs); 220 CeedChkBackend(ierr); 221 222 ierr = CeedCalloc(CEED_FIELD_MAX, &impl->qvecsin); CeedChkBackend(ierr); 223 ierr = CeedCalloc(CEED_FIELD_MAX, &impl->qvecsout); CeedChkBackend(ierr); 224 225 impl->numein = numinputfields; impl->numeout = numoutputfields; 226 227 // Set up infield and outfield evecs and qvecs 228 // Infields 229 ierr = CeedOperatorSetupFields_Cuda(qf, op, true, 230 impl->evecs, impl->qvecsin, 0, 231 numinputfields, Q, numelements); 232 CeedChkBackend(ierr); 233 234 // Outfields 235 ierr = CeedOperatorSetupFields_Cuda(qf, op, false, 236 impl->evecs, impl->qvecsout, 237 numinputfields, numoutputfields, Q, 238 numelements); CeedChkBackend(ierr); 239 240 ierr = CeedOperatorSetSetupDone(op); CeedChkBackend(ierr); 241 
return CEED_ERROR_SUCCESS; 242 } 243 244 //------------------------------------------------------------------------------ 245 // Setup Operator Inputs 246 //------------------------------------------------------------------------------ 247 static inline int CeedOperatorSetupInputs_Cuda(CeedInt numinputfields, 248 CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields, 249 CeedVector invec, const bool skipactive, CeedScalar *edata[2*CEED_FIELD_MAX], 250 CeedOperator_Cuda *impl, CeedRequest *request) { 251 CeedInt ierr; 252 CeedEvalMode emode; 253 CeedVector vec; 254 CeedElemRestriction Erestrict; 255 256 for (CeedInt i = 0; i < numinputfields; i++) { 257 // Get input vector 258 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 259 if (vec == CEED_VECTOR_ACTIVE) { 260 if (skipactive) 261 continue; 262 else 263 vec = invec; 264 } 265 266 ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode); 267 CeedChkBackend(ierr); 268 if (emode == CEED_EVAL_WEIGHT) { // Skip 269 } else { 270 // Get input vector 271 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 272 // Get input element restriction 273 ierr = CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict); 274 CeedChkBackend(ierr); 275 if (vec == CEED_VECTOR_ACTIVE) 276 vec = invec; 277 // Restrict, if necessary 278 if (!impl->evecs[i]) { 279 // No restriction for this field; read data directly from vec. 
280 ierr = CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, 281 (const CeedScalar **) &edata[i]); 282 CeedChkBackend(ierr); 283 } else { 284 ierr = CeedElemRestrictionApply(Erestrict, CEED_NOTRANSPOSE, vec, 285 impl->evecs[i], request); CeedChkBackend(ierr); 286 // Get evec 287 ierr = CeedVectorGetArrayRead(impl->evecs[i], CEED_MEM_DEVICE, 288 (const CeedScalar **) &edata[i]); 289 CeedChkBackend(ierr); 290 } 291 } 292 } 293 return CEED_ERROR_SUCCESS; 294 } 295 296 //------------------------------------------------------------------------------ 297 // Input Basis Action 298 //------------------------------------------------------------------------------ 299 static inline int CeedOperatorInputBasis_Cuda(CeedInt numelements, 300 CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields, 301 CeedInt numinputfields, const bool skipactive, 302 CeedScalar *edata[2*CEED_FIELD_MAX],CeedOperator_Cuda *impl) { 303 CeedInt ierr; 304 CeedInt elemsize, size; 305 CeedElemRestriction Erestrict; 306 CeedEvalMode emode; 307 CeedBasis basis; 308 309 for (CeedInt i=0; i<numinputfields; i++) { 310 // Skip active input 311 if (skipactive) { 312 CeedVector vec; 313 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 314 if (vec == CEED_VECTOR_ACTIVE) 315 continue; 316 } 317 // Get elemsize, emode, size 318 ierr = CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict); 319 CeedChkBackend(ierr); 320 ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize); 321 CeedChkBackend(ierr); 322 ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode); 323 CeedChkBackend(ierr); 324 ierr = CeedQFunctionFieldGetSize(qfinputfields[i], &size); CeedChkBackend(ierr); 325 // Basis action 326 switch (emode) { 327 case CEED_EVAL_NONE: 328 ierr = CeedVectorSetArray(impl->qvecsin[i], CEED_MEM_DEVICE, 329 CEED_USE_POINTER, edata[i]); CeedChkBackend(ierr); 330 break; 331 case CEED_EVAL_INTERP: 332 ierr = CeedOperatorFieldGetBasis(opinputfields[i], 
&basis); 333 CeedChkBackend(ierr); 334 ierr = CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, 335 CEED_EVAL_INTERP, impl->evecs[i], 336 impl->qvecsin[i]); CeedChkBackend(ierr); 337 break; 338 case CEED_EVAL_GRAD: 339 ierr = CeedOperatorFieldGetBasis(opinputfields[i], &basis); 340 CeedChkBackend(ierr); 341 ierr = CeedBasisApply(basis, numelements, CEED_NOTRANSPOSE, 342 CEED_EVAL_GRAD, impl->evecs[i], 343 impl->qvecsin[i]); CeedChkBackend(ierr); 344 break; 345 case CEED_EVAL_WEIGHT: 346 break; // No action 347 case CEED_EVAL_DIV: 348 break; // TODO: Not implemented 349 case CEED_EVAL_CURL: 350 break; // TODO: Not implemented 351 } 352 } 353 return CEED_ERROR_SUCCESS; 354 } 355 356 //------------------------------------------------------------------------------ 357 // Restore Input Vectors 358 //------------------------------------------------------------------------------ 359 static inline int CeedOperatorRestoreInputs_Cuda(CeedInt numinputfields, 360 CeedQFunctionField *qfinputfields, CeedOperatorField *opinputfields, 361 const bool skipactive, CeedScalar *edata[2*CEED_FIELD_MAX], 362 CeedOperator_Cuda *impl) { 363 CeedInt ierr; 364 CeedEvalMode emode; 365 CeedVector vec; 366 367 for (CeedInt i = 0; i < numinputfields; i++) { 368 // Skip active input 369 if (skipactive) { 370 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 371 if (vec == CEED_VECTOR_ACTIVE) 372 continue; 373 } 374 ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode); 375 CeedChkBackend(ierr); 376 if (emode == CEED_EVAL_WEIGHT) { // Skip 377 } else { 378 if (!impl->evecs[i]) { // This was a skiprestrict case 379 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 380 ierr = CeedVectorRestoreArrayRead(vec, 381 (const CeedScalar **)&edata[i]); 382 CeedChkBackend(ierr); 383 } else { 384 ierr = CeedVectorRestoreArrayRead(impl->evecs[i], 385 (const CeedScalar **) &edata[i]); 386 CeedChkBackend(ierr); 387 } 388 } 389 } 390 return 
CEED_ERROR_SUCCESS; 391 } 392 393 //------------------------------------------------------------------------------ 394 // Apply and add to output 395 //------------------------------------------------------------------------------ 396 static int CeedOperatorApplyAdd_Cuda(CeedOperator op, CeedVector invec, 397 CeedVector outvec, CeedRequest *request) { 398 int ierr; 399 CeedOperator_Cuda *impl; 400 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 401 CeedQFunction qf; 402 ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr); 403 CeedInt Q, numelements, elemsize, numinputfields, numoutputfields, size; 404 ierr = CeedOperatorGetNumQuadraturePoints(op, &Q); CeedChkBackend(ierr); 405 ierr = CeedOperatorGetNumElements(op, &numelements); CeedChkBackend(ierr); 406 CeedChkBackend(ierr); 407 CeedOperatorField *opinputfields, *opoutputfields; 408 ierr = CeedOperatorGetFields(op, &numinputfields, &opinputfields, 409 &numoutputfields, &opoutputfields); 410 CeedChkBackend(ierr); 411 CeedQFunctionField *qfinputfields, *qfoutputfields; 412 ierr = CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields); 413 CeedChkBackend(ierr); 414 CeedEvalMode emode; 415 CeedVector vec; 416 CeedBasis basis; 417 CeedElemRestriction Erestrict; 418 CeedScalar *edata[2*CEED_FIELD_MAX] = {0}; 419 420 // Setup 421 ierr = CeedOperatorSetup_Cuda(op); CeedChkBackend(ierr); 422 423 // Input Evecs and Restriction 424 ierr = CeedOperatorSetupInputs_Cuda(numinputfields, qfinputfields, 425 opinputfields, invec, false, edata, 426 impl, request); CeedChkBackend(ierr); 427 428 // Input basis apply if needed 429 ierr = CeedOperatorInputBasis_Cuda(numelements, qfinputfields, opinputfields, 430 numinputfields, false, edata, impl); 431 CeedChkBackend(ierr); 432 433 // Output pointers, as necessary 434 for (CeedInt i = 0; i < numoutputfields; i++) { 435 ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode); 436 CeedChkBackend(ierr); 437 if (emode == CEED_EVAL_NONE) { 438 
// Set the output Q-Vector to use the E-Vector data directly. 439 ierr = CeedVectorGetArrayWrite(impl->evecs[i + impl->numein], CEED_MEM_DEVICE, 440 &edata[i + numinputfields]); CeedChkBackend(ierr); 441 ierr = CeedVectorSetArray(impl->qvecsout[i], CEED_MEM_DEVICE, 442 CEED_USE_POINTER, edata[i + numinputfields]); 443 CeedChkBackend(ierr); 444 } 445 } 446 447 // Q function 448 ierr = CeedQFunctionApply(qf, numelements * Q, impl->qvecsin, impl->qvecsout); 449 CeedChkBackend(ierr); 450 451 // Output basis apply if needed 452 for (CeedInt i = 0; i < numoutputfields; i++) { 453 // Get elemsize, emode, size 454 ierr = CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict); 455 CeedChkBackend(ierr); 456 ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize); 457 CeedChkBackend(ierr); 458 ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode); 459 CeedChkBackend(ierr); 460 ierr = CeedQFunctionFieldGetSize(qfoutputfields[i], &size); 461 CeedChkBackend(ierr); 462 // Basis action 463 switch (emode) { 464 case CEED_EVAL_NONE: 465 break; 466 case CEED_EVAL_INTERP: 467 ierr = CeedOperatorFieldGetBasis(opoutputfields[i], &basis); 468 CeedChkBackend(ierr); 469 ierr = CeedBasisApply(basis, numelements, CEED_TRANSPOSE, 470 CEED_EVAL_INTERP, impl->qvecsout[i], 471 impl->evecs[i + impl->numein]); CeedChkBackend(ierr); 472 break; 473 case CEED_EVAL_GRAD: 474 ierr = CeedOperatorFieldGetBasis(opoutputfields[i], &basis); 475 CeedChkBackend(ierr); 476 ierr = CeedBasisApply(basis, numelements, CEED_TRANSPOSE, 477 CEED_EVAL_GRAD, impl->qvecsout[i], 478 impl->evecs[i + impl->numein]); CeedChkBackend(ierr); 479 break; 480 // LCOV_EXCL_START 481 case CEED_EVAL_WEIGHT: { 482 Ceed ceed; 483 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 484 return CeedError(ceed, CEED_ERROR_BACKEND, 485 "CEED_EVAL_WEIGHT cannot be an output evaluation mode"); 486 break; // Should not occur 487 } 488 case CEED_EVAL_DIV: 489 break; // TODO: Not implemented 490 case 
CEED_EVAL_CURL: 491 break; // TODO: Not implemented 492 // LCOV_EXCL_STOP 493 } 494 } 495 496 // Output restriction 497 for (CeedInt i = 0; i < numoutputfields; i++) { 498 // Restore evec 499 ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode); 500 CeedChkBackend(ierr); 501 if (emode == CEED_EVAL_NONE) { 502 ierr = CeedVectorRestoreArray(impl->evecs[i+impl->numein], 503 &edata[i + numinputfields]); 504 CeedChkBackend(ierr); 505 } 506 // Get output vector 507 ierr = CeedOperatorFieldGetVector(opoutputfields[i], &vec); 508 CeedChkBackend(ierr); 509 // Restrict 510 ierr = CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict); 511 CeedChkBackend(ierr); 512 // Active 513 if (vec == CEED_VECTOR_ACTIVE) 514 vec = outvec; 515 516 ierr = CeedElemRestrictionApply(Erestrict, CEED_TRANSPOSE, 517 impl->evecs[i + impl->numein], vec, 518 request); CeedChkBackend(ierr); 519 } 520 521 // Restore input arrays 522 ierr = CeedOperatorRestoreInputs_Cuda(numinputfields, qfinputfields, 523 opinputfields, false, edata, impl); 524 CeedChkBackend(ierr); 525 return CEED_ERROR_SUCCESS; 526 } 527 528 //------------------------------------------------------------------------------ 529 // Core code for assembling linear QFunction 530 //------------------------------------------------------------------------------ 531 static inline int CeedOperatorLinearAssembleQFunctionCore_Cuda(CeedOperator op, 532 bool build_objects, CeedVector *assembled, CeedElemRestriction *rstr, 533 CeedRequest *request) { 534 int ierr; 535 CeedOperator_Cuda *impl; 536 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 537 CeedQFunction qf; 538 ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr); 539 CeedInt Q, numelements, numinputfields, numoutputfields, size; 540 ierr = CeedOperatorGetNumQuadraturePoints(op, &Q); CeedChkBackend(ierr); 541 ierr = CeedOperatorGetNumElements(op, &numelements); CeedChkBackend(ierr); 542 CeedChkBackend(ierr); 543 CeedOperatorField *opinputfields, 
*opoutputfields; 544 ierr = CeedOperatorGetFields(op, &numinputfields, &opinputfields, 545 &numoutputfields, &opoutputfields); 546 CeedChkBackend(ierr); 547 CeedQFunctionField *qfinputfields, *qfoutputfields; 548 ierr = CeedQFunctionGetFields(qf, NULL, &qfinputfields, NULL, &qfoutputfields); 549 CeedChkBackend(ierr); 550 CeedVector vec; 551 CeedInt numactivein = impl->qfnumactivein, numactiveout = impl->qfnumactiveout; 552 CeedVector *activein = impl->qfactivein; 553 CeedScalar *a, *tmp; 554 Ceed ceed, ceedparent; 555 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 556 ierr = CeedGetOperatorFallbackParentCeed(ceed, &ceedparent); 557 CeedChkBackend(ierr); 558 ceedparent = ceedparent ? ceedparent : ceed; 559 CeedScalar *edata[2*CEED_FIELD_MAX]; 560 561 // Setup 562 ierr = CeedOperatorSetup_Cuda(op); CeedChkBackend(ierr); 563 564 // Check for identity 565 bool identityqf; 566 ierr = CeedQFunctionIsIdentity(qf, &identityqf); CeedChkBackend(ierr); 567 if (identityqf) 568 // LCOV_EXCL_START 569 return CeedError(ceed, CEED_ERROR_BACKEND, 570 "Assembling identity QFunctions not supported"); 571 // LCOV_EXCL_STOP 572 573 // Input Evecs and Restriction 574 ierr = CeedOperatorSetupInputs_Cuda(numinputfields, qfinputfields, 575 opinputfields, NULL, true, edata, 576 impl, request); CeedChkBackend(ierr); 577 578 // Count number of active input fields 579 if (!numactivein) { 580 for (CeedInt i=0; i<numinputfields; i++) { 581 // Get input vector 582 ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr); 583 // Check if active input 584 if (vec == CEED_VECTOR_ACTIVE) { 585 ierr = CeedQFunctionFieldGetSize(qfinputfields[i], &size); CeedChkBackend(ierr); 586 ierr = CeedVectorSetValue(impl->qvecsin[i], 0.0); CeedChkBackend(ierr); 587 ierr = CeedVectorGetArray(impl->qvecsin[i], CEED_MEM_DEVICE, &tmp); 588 CeedChkBackend(ierr); 589 ierr = CeedRealloc(numactivein + size, &activein); CeedChkBackend(ierr); 590 for (CeedInt field = 0; field < size; 
field++) { 591 ierr = CeedVectorCreate(ceed, Q*numelements, 592 &activein[numactivein+field]); CeedChkBackend(ierr); 593 ierr = CeedVectorSetArray(activein[numactivein+field], CEED_MEM_DEVICE, 594 CEED_USE_POINTER, &tmp[field*Q*numelements]); 595 CeedChkBackend(ierr); 596 } 597 numactivein += size; 598 ierr = CeedVectorRestoreArray(impl->qvecsin[i], &tmp); CeedChkBackend(ierr); 599 } 600 } 601 impl->qfnumactivein = numactivein; 602 impl->qfactivein = activein; 603 } 604 605 // Count number of active output fields 606 if (!numactiveout) { 607 for (CeedInt i=0; i<numoutputfields; i++) { 608 // Get output vector 609 ierr = CeedOperatorFieldGetVector(opoutputfields[i], &vec); 610 CeedChkBackend(ierr); 611 // Check if active output 612 if (vec == CEED_VECTOR_ACTIVE) { 613 ierr = CeedQFunctionFieldGetSize(qfoutputfields[i], &size); 614 CeedChkBackend(ierr); 615 numactiveout += size; 616 } 617 } 618 impl->qfnumactiveout = numactiveout; 619 } 620 621 // Check sizes 622 if (!numactivein || !numactiveout) 623 // LCOV_EXCL_START 624 return CeedError(ceed, CEED_ERROR_BACKEND, 625 "Cannot assemble QFunction without active inputs " 626 "and outputs"); 627 // LCOV_EXCL_STOP 628 629 // Build objects if needed 630 if (build_objects) { 631 // Create output restriction 632 CeedInt strides[3] = {1, numelements*Q, Q}; /* *NOPAD* */ 633 ierr = CeedElemRestrictionCreateStrided(ceedparent, numelements, Q, 634 numactivein*numactiveout, 635 numactivein*numactiveout*numelements*Q, 636 strides, rstr); CeedChkBackend(ierr); 637 // Create assembled vector 638 ierr = CeedVectorCreate(ceedparent, numelements*Q*numactivein*numactiveout, 639 assembled); CeedChkBackend(ierr); 640 } 641 ierr = CeedVectorSetValue(*assembled, 0.0); CeedChkBackend(ierr); 642 ierr = CeedVectorGetArray(*assembled, CEED_MEM_DEVICE, &a); 643 CeedChkBackend(ierr); 644 645 // Input basis apply 646 ierr = CeedOperatorInputBasis_Cuda(numelements, qfinputfields, opinputfields, 647 numinputfields, true, edata, impl); 648 
CeedChkBackend(ierr); 649 650 // Assemble QFunction 651 for (CeedInt in=0; in<numactivein; in++) { 652 // Set Inputs 653 ierr = CeedVectorSetValue(activein[in], 1.0); CeedChkBackend(ierr); 654 if (numactivein > 1) { 655 ierr = CeedVectorSetValue(activein[(in+numactivein-1)%numactivein], 656 0.0); CeedChkBackend(ierr); 657 } 658 // Set Outputs 659 for (CeedInt out=0; out<numoutputfields; out++) { 660 // Get output vector 661 ierr = CeedOperatorFieldGetVector(opoutputfields[out], &vec); 662 CeedChkBackend(ierr); 663 // Check if active output 664 if (vec == CEED_VECTOR_ACTIVE) { 665 CeedVectorSetArray(impl->qvecsout[out], CEED_MEM_DEVICE, 666 CEED_USE_POINTER, a); CeedChkBackend(ierr); 667 ierr = CeedQFunctionFieldGetSize(qfoutputfields[out], &size); 668 CeedChkBackend(ierr); 669 a += size*Q*numelements; // Advance the pointer by the size of the output 670 } 671 } 672 // Apply QFunction 673 ierr = CeedQFunctionApply(qf, Q*numelements, impl->qvecsin, impl->qvecsout); 674 CeedChkBackend(ierr); 675 } 676 677 // Un-set output Qvecs to prevent accidental overwrite of Assembled 678 for (CeedInt out=0; out<numoutputfields; out++) { 679 // Get output vector 680 ierr = CeedOperatorFieldGetVector(opoutputfields[out], &vec); 681 CeedChkBackend(ierr); 682 // Check if active output 683 if (vec == CEED_VECTOR_ACTIVE) { 684 ierr = CeedVectorTakeArray(impl->qvecsout[out], CEED_MEM_DEVICE, NULL); 685 CeedChkBackend(ierr); 686 } 687 } 688 689 // Restore input arrays 690 ierr = CeedOperatorRestoreInputs_Cuda(numinputfields, qfinputfields, 691 opinputfields, true, edata, impl); 692 CeedChkBackend(ierr); 693 694 // Restore output 695 ierr = CeedVectorRestoreArray(*assembled, &a); CeedChkBackend(ierr); 696 697 return CEED_ERROR_SUCCESS; 698 } 699 700 //------------------------------------------------------------------------------ 701 // Assemble Linear QFunction 702 //------------------------------------------------------------------------------ 703 static int 
CeedOperatorLinearAssembleQFunction_Cuda(CeedOperator op, 704 CeedVector *assembled, CeedElemRestriction *rstr, CeedRequest *request) { 705 return CeedOperatorLinearAssembleQFunctionCore_Cuda(op, true, assembled, rstr, 706 request); 707 } 708 709 //------------------------------------------------------------------------------ 710 // Update Assembled Linear QFunction 711 //------------------------------------------------------------------------------ 712 static int CeedOperatorLinearAssembleQFunctionUpdate_Cuda(CeedOperator op, 713 CeedVector assembled, CeedElemRestriction rstr, CeedRequest *request) { 714 return CeedOperatorLinearAssembleQFunctionCore_Cuda(op, false, &assembled, 715 &rstr, request); 716 } 717 718 //------------------------------------------------------------------------------ 719 // Diagonal assembly kernels 720 //------------------------------------------------------------------------------ 721 // *INDENT-OFF* 722 static const char *diagonalkernels = QUOTE( 723 724 typedef enum { 725 /// Perform no evaluation (either because there is no data or it is already at 726 /// quadrature points) 727 CEED_EVAL_NONE = 0, 728 /// Interpolate from nodes to quadrature points 729 CEED_EVAL_INTERP = 1, 730 /// Evaluate gradients at quadrature points from input in a nodal basis 731 CEED_EVAL_GRAD = 2, 732 /// Evaluate divergence at quadrature points from input in a nodal basis 733 CEED_EVAL_DIV = 4, 734 /// Evaluate curl at quadrature points from input in a nodal basis 735 CEED_EVAL_CURL = 8, 736 /// Using no input, evaluate quadrature weights on the reference element 737 CEED_EVAL_WEIGHT = 16, 738 } CeedEvalMode; 739 740 //------------------------------------------------------------------------------ 741 // Get Basis Emode Pointer 742 //------------------------------------------------------------------------------ 743 extern "C" __device__ void CeedOperatorGetBasisPointer_Cuda(const CeedScalar **basisptr, 744 CeedEvalMode emode, const CeedScalar *identity, const 
CeedScalar *interp, 745 const CeedScalar *grad) { 746 switch (emode) { 747 case CEED_EVAL_NONE: 748 *basisptr = identity; 749 break; 750 case CEED_EVAL_INTERP: 751 *basisptr = interp; 752 break; 753 case CEED_EVAL_GRAD: 754 *basisptr = grad; 755 break; 756 case CEED_EVAL_WEIGHT: 757 case CEED_EVAL_DIV: 758 case CEED_EVAL_CURL: 759 break; // Caught by QF Assembly 760 } 761 } 762 763 //------------------------------------------------------------------------------ 764 // Core code for diagonal assembly 765 //------------------------------------------------------------------------------ 766 __device__ void diagonalCore(const CeedInt nelem, 767 const CeedScalar maxnorm, const bool pointBlock, 768 const CeedScalar *identity, 769 const CeedScalar *interpin, const CeedScalar *gradin, 770 const CeedScalar *interpout, const CeedScalar *gradout, 771 const CeedEvalMode *emodein, const CeedEvalMode *emodeout, 772 const CeedScalar *__restrict__ assembledqfarray, 773 CeedScalar *__restrict__ elemdiagarray) { 774 const int tid = threadIdx.x; // running with P threads, tid is evec node 775 const CeedScalar qfvaluebound = maxnorm*1e-12; 776 777 // Compute the diagonal of B^T D B 778 // Each element 779 for (CeedInt e = blockIdx.x*blockDim.z + threadIdx.z; e < nelem; 780 e += gridDim.x*blockDim.z) { 781 CeedInt dout = -1; 782 // Each basis eval mode pair 783 for (CeedInt eout = 0; eout < NUMEMODEOUT; eout++) { 784 const CeedScalar *bt = NULL; 785 if (emodeout[eout] == CEED_EVAL_GRAD) 786 dout += 1; 787 CeedOperatorGetBasisPointer_Cuda(&bt, emodeout[eout], identity, interpout, 788 &gradout[dout*NQPTS*NNODES]); 789 CeedInt din = -1; 790 for (CeedInt ein = 0; ein < NUMEMODEIN; ein++) { 791 const CeedScalar *b = NULL; 792 if (emodein[ein] == CEED_EVAL_GRAD) 793 din += 1; 794 CeedOperatorGetBasisPointer_Cuda(&b, emodein[ein], identity, interpin, 795 &gradin[din*NQPTS*NNODES]); 796 // Each component 797 for (CeedInt compOut = 0; compOut < NCOMP; compOut++) { 798 // Each qpoint/node pair 
if (pointBlock) {
  // Point Block Diagonal
  for (CeedInt compIn = 0; compIn < NCOMP; compIn++) {
    CeedScalar evalue = 0.;
    for (CeedInt q = 0; q < NQPTS; q++) {
      const CeedScalar qfvalue =
        assembledqfarray[((((ein*NCOMP+compIn)*NUMEMODEOUT+eout)*
                           NCOMP+compOut)*nelem+e)*NQPTS+q];
      if (abs(qfvalue) > qfvaluebound)
        evalue += bt[q*NNODES+tid] * qfvalue * b[q*NNODES+tid];
    }
    elemdiagarray[((compOut*NCOMP+compIn)*nelem+e)*NNODES+tid] += evalue;
  }
} else {
  // Diagonal Only
  CeedScalar evalue = 0.;
  for (CeedInt q = 0; q < NQPTS; q++) {
    const CeedScalar qfvalue =
      assembledqfarray[((((ein*NCOMP+compOut)*NUMEMODEOUT+eout)*
                         NCOMP+compOut)*nelem+e)*NQPTS+q];
    if (abs(qfvalue) > qfvaluebound)
      evalue += bt[q*NNODES+tid] * qfvalue * b[q*NNODES+tid];
  }
  elemdiagarray[(compOut*nelem+e)*NNODES+tid] += evalue;
}
}
}
}
}
}

//------------------------------------------------------------------------------
// Linear diagonal
//------------------------------------------------------------------------------
extern "C" __global__ void linearDiagonal(const CeedInt nelem,
    const CeedScalar maxnorm, const CeedScalar *identity,
    const CeedScalar *interpin, const CeedScalar *gradin,
    const CeedScalar *interpout, const CeedScalar *gradout,
    const CeedEvalMode *emodein, const CeedEvalMode *emodeout,
    const CeedScalar *__restrict__ assembledqfarray,
    CeedScalar *__restrict__ elemdiagarray) {
  diagonalCore(nelem, maxnorm, false, identity, interpin, gradin, interpout,
               gradout, emodein, emodeout, assembledqfarray, elemdiagarray);
}

//------------------------------------------------------------------------------
// Linear point block diagonal
//------------------------------------------------------------------------------
extern "C" __global__ void linearPointBlockDiagonal(const CeedInt nelem,
    const CeedScalar maxnorm, const CeedScalar *identity,
    const CeedScalar *interpin, const CeedScalar *gradin,
    const CeedScalar *interpout, const CeedScalar *gradout,
    const CeedEvalMode *emodein, const CeedEvalMode *emodeout,
    const CeedScalar *__restrict__ assembledqfarray,
    CeedScalar *__restrict__ elemdiagarray) {
  diagonalCore(nelem, maxnorm, true, identity, interpin, gradin, interpout,
               gradout, emodein, emodeout, assembledqfarray, elemdiagarray);
}

);
// *INDENT-ON*

//------------------------------------------------------------------------------
// Create point block restriction
//------------------------------------------------------------------------------
// Builds an element restriction for point-block diagonal storage from an
// existing restriction: each node's scalar offset is scaled so it addresses an
// ncomp x ncomp block instead of a single component. Ownership of the new
// offsets array is transferred to the created restriction (CEED_OWN_POINTER).
static int CreatePBRestriction(CeedElemRestriction rstr,
                               CeedElemRestriction *pbRstr) {
  int ierr;
  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(rstr, &ceed); CeedChkBackend(ierr);
  const CeedInt *offsets;
  ierr = CeedElemRestrictionGetOffsets(rstr, CEED_MEM_HOST, &offsets);
  CeedChkBackend(ierr);

  // Expand offsets
  CeedInt nelem, ncomp, elemsize, compstride, max = 1, *pbOffsets;
  ierr = CeedElemRestrictionGetNumElements(rstr, &nelem); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetNumComponents(rstr, &ncomp); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetElementSize(rstr, &elemsize); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetCompStride(rstr, &compstride);
  CeedChkBackend(ierr);
  // Scale factor from scalar offsets to block offsets: ncomp when components
  // are interleaved (compstride == 1), ncomp^2 otherwise
  CeedInt shift = ncomp;
  if (compstride != 1)
    shift *= ncomp;
  ierr = CeedCalloc(nelem*elemsize, &pbOffsets); CeedChkBackend(ierr);
  for (CeedInt i = 0; i < nelem*elemsize; i++) {
    pbOffsets[i] = offsets[i]*shift;
    // Track the largest offset to size the L-vector below
    if (pbOffsets[i] > max)
      max = pbOffsets[i];
  }

  // Create new restriction
  ierr = CeedElemRestrictionCreate(ceed, nelem, elemsize, ncomp*ncomp, 1,
                                   max + ncomp*ncomp, CEED_MEM_HOST,
                                   CEED_OWN_POINTER, pbOffsets, pbRstr);
  CeedChkBackend(ierr);

  // Cleanup
  ierr = CeedElemRestrictionRestoreOffsets(rstr, &offsets); CeedChkBackend(ierr);

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble diagonal setup
//------------------------------------------------------------------------------
// One-time setup for diagonal assembly: records the active input/output bases
// and eval modes, compiles the diagonal kernels, and copies the basis
// matrices and eval-mode arrays to the device into impl->diag.
static inline int CeedOperatorAssembleDiagonalSetup_Cuda(CeedOperator op,
    const bool pointBlock) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);
  CeedQFunction qf;
  ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr);
  CeedInt numinputfields, numoutputfields;
  ierr = CeedQFunctionGetNumArgs(qf, &numinputfields, &numoutputfields);
  CeedChkBackend(ierr);

  // Determine active input basis
  CeedOperatorField *opfields;
  CeedQFunctionField *qffields;
  ierr = CeedOperatorGetFields(op, NULL, &opfields, NULL, NULL);
  CeedChkBackend(ierr);
  ierr = CeedQFunctionGetFields(qf, NULL, &qffields, NULL, NULL);
  CeedChkBackend(ierr);
  CeedInt numemodein = 0, ncomp = 0, dim = 1;
  CeedEvalMode *emodein = NULL;
  CeedBasis basisin = NULL;
  CeedElemRestriction rstrin = NULL;
  for (CeedInt i = 0; i < numinputfields; i++) {
    CeedVector vec;
    ierr = CeedOperatorFieldGetVector(opfields[i], &vec); CeedChkBackend(ierr);
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedElemRestriction rstr;
      ierr = CeedOperatorFieldGetBasis(opfields[i], &basisin); CeedChkBackend(ierr);
      ierr = CeedBasisGetNumComponents(basisin, &ncomp); CeedChkBackend(ierr);
      ierr = CeedBasisGetDimension(basisin, &dim); CeedChkBackend(ierr);
      ierr = CeedOperatorFieldGetElemRestriction(opfields[i], &rstr);
      CeedChkBackend(ierr);
      // All active inputs must share one restriction for this assembly path
      if (rstrin && rstrin != rstr)
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_BACKEND,
                         "Multi-field non-composite operator diagonal assembly not supported");
      // LCOV_EXCL_STOP
      rstrin = rstr;
      CeedEvalMode emode;
      ierr = CeedQFunctionFieldGetEvalMode(qffields[i], &emode);
      CeedChkBackend(ierr);
      // Grow the eval-mode list; a gradient contributes one entry per dimension
      switch (emode) {
      case CEED_EVAL_NONE:
      case CEED_EVAL_INTERP:
        ierr = CeedRealloc(numemodein + 1, &emodein); CeedChkBackend(ierr);
        emodein[numemodein] = emode;
        numemodein += 1;
        break;
      case CEED_EVAL_GRAD:
        ierr = CeedRealloc(numemodein + dim, &emodein); CeedChkBackend(ierr);
        for (CeedInt d = 0; d < dim; d++)
          emodein[numemodein+d] = emode;
        numemodein += dim;
        break;
      case CEED_EVAL_WEIGHT:
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break; // Caught by QF Assembly
      }
    }
  }

  // Determine active output basis
  ierr = CeedOperatorGetFields(op, NULL, NULL, NULL, &opfields);
  CeedChkBackend(ierr);
  ierr = CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qffields);
  CeedChkBackend(ierr);
  CeedInt numemodeout = 0;
  CeedEvalMode *emodeout = NULL;
  CeedBasis basisout = NULL;
  CeedElemRestriction rstrout = NULL;
  for (CeedInt i = 0; i < numoutputfields; i++) {
    CeedVector vec;
    ierr = CeedOperatorFieldGetVector(opfields[i], &vec); CeedChkBackend(ierr);
    if (vec == CEED_VECTOR_ACTIVE) {
      CeedElemRestriction rstr;
      ierr = CeedOperatorFieldGetBasis(opfields[i], &basisout); CeedChkBackend(ierr);
      ierr = CeedOperatorFieldGetElemRestriction(opfields[i], &rstr);
      CeedChkBackend(ierr);
      // Same single-restriction constraint as the input side
      if (rstrout && rstrout != rstr)
        // LCOV_EXCL_START
        return CeedError(ceed, CEED_ERROR_BACKEND,
                         "Multi-field non-composite operator diagonal assembly not supported");
      // LCOV_EXCL_STOP
      rstrout = rstr;
      CeedEvalMode emode;
      ierr = CeedQFunctionFieldGetEvalMode(qffields[i], &emode); CeedChkBackend(ierr);
      switch (emode) {
      case CEED_EVAL_NONE:
      case CEED_EVAL_INTERP:
        ierr = CeedRealloc(numemodeout + 1, &emodeout); CeedChkBackend(ierr);
        emodeout[numemodeout] = emode;
        numemodeout += 1;
        break;
      case CEED_EVAL_GRAD:
        ierr = CeedRealloc(numemodeout + dim, &emodeout); CeedChkBackend(ierr);
        for (CeedInt d = 0; d < dim; d++)
          emodeout[numemodeout+d] = emode;
        numemodeout += dim;
        break;
      case CEED_EVAL_WEIGHT:
      case CEED_EVAL_DIV:
      case CEED_EVAL_CURL:
        break; // Caught by QF Assembly
      }
    }
  }

  // Operator data struct
  CeedOperator_Cuda *impl;
  ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr);
  ierr = CeedCalloc(1, &impl->diag); CeedChkBackend(ierr);
  CeedOperatorDiag_Cuda *diag = impl->diag;
  // emodein/emodeout ownership transfers to diag; freed in operator Destroy
  diag->basisin = basisin;
  diag->basisout = basisout;
  diag->h_emodein = emodein;
  diag->h_emodeout = emodeout;
  diag->numemodein = numemodein;
  diag->numemodeout = numemodeout;

  // Assemble kernel
  CeedInt nnodes, nqpts;
  ierr = CeedBasisGetNumNodes(basisin, &nnodes); CeedChkBackend(ierr);
  ierr = CeedBasisGetNumQuadraturePoints(basisin, &nqpts); CeedChkBackend(ierr);
  diag->nnodes = nnodes;
  // NOTE(review): CeedCompileCuda/CeedGetKernelCuda return Ceed status codes,
  // but are checked here with CeedChk_Cu (which interprets a CUresult) —
  // nonzero still errors, but confirm the intended check macro.
  ierr = CeedCompileCuda(ceed, diagonalkernels, &diag->module, 5,
                         "NUMEMODEIN", numemodein,
                         "NUMEMODEOUT", numemodeout,
                         "NNODES", nnodes,
                         "NQPTS", nqpts,
                         "NCOMP", ncomp
                        ); CeedChk_Cu(ceed, ierr);
  ierr = CeedGetKernelCuda(ceed, diag->module, "linearDiagonal",
                           &diag->linearDiagonal); CeedChk_Cu(ceed, ierr);
  ierr = CeedGetKernelCuda(ceed, diag->module, "linearPointBlockDiagonal",
                           &diag->linearPointBlock);
  CeedChk_Cu(ceed, ierr);

  // Basis matrices
  const CeedInt qBytes = nqpts * sizeof(CeedScalar);
  const CeedInt iBytes = qBytes * nnodes;          // interp matrix bytes
  const CeedInt gBytes = qBytes * nnodes * dim;    // gradient matrix bytes
  const CeedInt eBytes = sizeof(CeedEvalMode);
  const CeedScalar *interpin, *interpout, *gradin, *gradout;

  // CEED_EVAL_NONE - stage an identity "basis" so NONE modes reuse the same
  // kernel path as INTERP
  CeedScalar *identity = NULL;
  bool evalNone = false;
  for (CeedInt i=0; i<numemodein; i++)
    evalNone = evalNone || (emodein[i] == CEED_EVAL_NONE);
  for (CeedInt i=0; i<numemodeout; i++)
    evalNone = evalNone || (emodeout[i] == CEED_EVAL_NONE);
  if (evalNone) {
    ierr = CeedCalloc(nqpts*nnodes, &identity); CeedChkBackend(ierr);
    for (CeedInt i=0; i<(nnodes<nqpts?nnodes:nqpts); i++)
      identity[i*nnodes+i] = 1.0;
    ierr = cudaMalloc((void **)&diag->d_identity, iBytes); CeedChk_Cu(ceed, ierr);
    ierr = cudaMemcpy(diag->d_identity, identity, iBytes,
                      cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
    // NOTE(review): the host-side `identity` buffer is not freed in this
    // function — looks like a leak; confirm and add CeedFree if so.
  }

  // CEED_EVAL_INTERP
  ierr = CeedBasisGetInterp(basisin, &interpin); CeedChkBackend(ierr);
  ierr = cudaMalloc((void **)&diag->d_interpin, iBytes); CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_interpin, interpin, iBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
  ierr = CeedBasisGetInterp(basisout, &interpout); CeedChkBackend(ierr);
  ierr = cudaMalloc((void **)&diag->d_interpout, iBytes); CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_interpout, interpout, iBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);

  // CEED_EVAL_GRAD
  ierr = CeedBasisGetGrad(basisin, &gradin); CeedChkBackend(ierr);
  ierr = cudaMalloc((void **)&diag->d_gradin, gBytes); CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_gradin, gradin, gBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
  ierr = CeedBasisGetGrad(basisout, &gradout); CeedChkBackend(ierr);
  ierr = cudaMalloc((void **)&diag->d_gradout, gBytes); CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_gradout, gradout, gBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);

  // Arrays of emodes
  ierr = cudaMalloc((void **)&diag->d_emodein, numemodein * eBytes);
  CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_emodein, emodein, numemodein * eBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
  ierr = cudaMalloc((void **)&diag->d_emodeout, numemodeout * eBytes);
  CeedChk_Cu(ceed, ierr);
  ierr = cudaMemcpy(diag->d_emodeout, emodeout, numemodeout * eBytes,
                    cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);

  // Restriction
  diag->diagrstr = rstrout;

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble diagonal common code
//------------------------------------------------------------------------------
// Shared path for diagonal and point-block diagonal assembly: assembles the
// QFunction, lazily runs setup, launches the diagonal kernel on the element
// diagonals, and restricts the result into `assembled`.
static inline int CeedOperatorAssembleDiagonalCore_Cuda(CeedOperator op,
    CeedVector assembled, CeedRequest *request, const bool pointBlock) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);
  CeedOperator_Cuda *impl;
  ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr);

  // Assemble QFunction
  CeedVector assembledqf;
  CeedElemRestriction rstr;
  ierr = CeedOperatorLinearAssembleQFunctionBuildOrUpdate(op, &assembledqf,
         &rstr, request); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionDestroy(&rstr); CeedChkBackend(ierr);
  // Max-norm of the assembled QFunction; the kernel uses it to bound which
  // entries are treated as nonzero
  CeedScalar maxnorm = 0;
  ierr = CeedVectorNorm(assembledqf, CEED_NORM_MAX, &maxnorm);
  CeedChkBackend(ierr);

  // Setup (first call only)
  if (!impl->diag) {
    ierr = CeedOperatorAssembleDiagonalSetup_Cuda(op, pointBlock);
    CeedChkBackend(ierr);
  }
  CeedOperatorDiag_Cuda *diag = impl->diag;
  assert(diag != NULL);

  // Restriction - point-block variant built lazily from the diagonal one
  if (pointBlock && !diag->pbdiagrstr) {
    CeedElemRestriction pbdiagrstr;
    ierr = CreatePBRestriction(diag->diagrstr, &pbdiagrstr); CeedChkBackend(ierr);
    diag->pbdiagrstr = pbdiagrstr;
  }
  CeedElemRestriction diagrstr = pointBlock ? diag->pbdiagrstr : diag->diagrstr;

  // Create diagonal vector (cached on the diag struct)
  CeedVector elemdiag = pointBlock ? diag->pbelemdiag : diag->elemdiag;
  if (!elemdiag) {
    ierr = CeedElemRestrictionCreateVector(diagrstr, NULL, &elemdiag);
    CeedChkBackend(ierr);
    if (pointBlock)
      diag->pbelemdiag = elemdiag;
    else
      diag->elemdiag = elemdiag;
  }
  ierr = CeedVectorSetValue(elemdiag, 0.0); CeedChkBackend(ierr);

  // Assemble element operator diagonals
  CeedScalar *elemdiagarray;
  const CeedScalar *assembledqfarray;
  ierr = CeedVectorGetArray(elemdiag, CEED_MEM_DEVICE, &elemdiagarray);
  CeedChkBackend(ierr);
  ierr = CeedVectorGetArrayRead(assembledqf, CEED_MEM_DEVICE, &assembledqfarray);
  CeedChkBackend(ierr);
  CeedInt nelem;
  ierr = CeedElemRestrictionGetNumElements(diagrstr, &nelem);
  CeedChkBackend(ierr);

  // Compute the diagonal of B^T D B; grid is ceil(nelem / elemsPerBlock)
  int elemsPerBlock = 1;
  int grid = nelem/elemsPerBlock+((nelem/elemsPerBlock*elemsPerBlock<nelem)?1:0);
  void *args[] = {(void *) &nelem, (void *) &maxnorm, &diag->d_identity,
                  &diag->d_interpin, &diag->d_gradin, &diag->d_interpout,
                  &diag->d_gradout, &diag->d_emodein, &diag->d_emodeout,
                  &assembledqfarray, &elemdiagarray
                 };
  if (pointBlock) {
    ierr = CeedRunKernelDimCuda(ceed, diag->linearPointBlock, grid,
                                diag->nnodes, 1, elemsPerBlock, args);
    CeedChkBackend(ierr);
  } else {
    ierr = CeedRunKernelDimCuda(ceed, diag->linearDiagonal, grid,
                                diag->nnodes, 1, elemsPerBlock, args);
    CeedChkBackend(ierr);
  }

  // Restore arrays
  ierr = CeedVectorRestoreArray(elemdiag, &elemdiagarray); CeedChkBackend(ierr);
  ierr = CeedVectorRestoreArrayRead(assembledqf, &assembledqfarray);
  CeedChkBackend(ierr);

  // Assemble local operator diagonal
  ierr = CeedElemRestrictionApply(diagrstr, CEED_TRANSPOSE, elemdiag,
                                  assembled, request); CeedChkBackend(ierr);

  // Cleanup
  ierr = CeedVectorDestroy(&assembledqf); CeedChkBackend(ierr);

  return
CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble composite diagonal common code
//------------------------------------------------------------------------------
// Applies diagonal (or point-block diagonal) assembly to each suboperator of
// a composite operator, accumulating into the shared `assembled` vector.
static inline int CeedOperatorLinearAssembleAddDiagonalCompositeCore_Cuda(
  CeedOperator op, CeedVector assembled, CeedRequest *request,
  const bool pointBlock) {
  int ierr;
  CeedInt numSub;
  CeedOperator *subOperators;
  ierr = CeedOperatorGetNumSub(op, &numSub); CeedChkBackend(ierr);
  ierr = CeedOperatorGetSubList(op, &subOperators); CeedChkBackend(ierr);
  for (CeedInt i = 0; i < numSub; i++) {
    ierr = CeedOperatorAssembleDiagonalCore_Cuda(subOperators[i], assembled,
           request, pointBlock); CeedChkBackend(ierr);
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble Linear Diagonal
//------------------------------------------------------------------------------
// Backend entry point: dispatches to the composite or single-operator
// diagonal assembly path (pointBlock = false).
static int CeedOperatorLinearAssembleAddDiagonal_Cuda(CeedOperator op,
    CeedVector assembled, CeedRequest *request) {
  int ierr;
  bool isComposite;
  ierr = CeedOperatorIsComposite(op, &isComposite); CeedChkBackend(ierr);
  if (isComposite) {
    return CeedOperatorLinearAssembleAddDiagonalCompositeCore_Cuda(op, assembled,
           request, false);
  } else {
    return CeedOperatorAssembleDiagonalCore_Cuda(op, assembled, request, false);
  }
}

//------------------------------------------------------------------------------
// Assemble Linear Point Block Diagonal
//------------------------------------------------------------------------------
// Backend entry point: same dispatch as above with pointBlock = true.
static int CeedOperatorLinearAssembleAddPointBlockDiagonal_Cuda(CeedOperator op,
    CeedVector assembled, CeedRequest *request) {
  int ierr;
  bool isComposite;
  ierr = CeedOperatorIsComposite(op, &isComposite); CeedChkBackend(ierr);
  if (isComposite) {
    return CeedOperatorLinearAssembleAddDiagonalCompositeCore_Cuda(op, assembled,
           request, true);
  } else {
    return CeedOperatorAssembleDiagonalCore_Cuda(op, assembled, request, true);
  }
}

//------------------------------------------------------------------------------
// Matrix assembly kernel for low-order elements (2D thread block)
//------------------------------------------------------------------------------
// *INDENT-OFF*
static const char *assemblykernel = QUOTE(
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out,
                               const CeedScalar *__restrict__ qf_array,
                               CeedScalar *__restrict__ values_array) {

  // This kernel assumes B_in and B_out have the same number of quadrature points and
  // basis points.
  // TODO: expand to more general cases
  const int i = threadIdx.x; // The output row index of each B^TDB operation
  const int l = threadIdx.y; // The output column index of each B^TDB operation
                             // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of
  // the symbolic assembly, slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride = comp_out_stride * NCOMP;
  const CeedInt e_stride = comp_in_stride * NCOMP;
  // Strides for QF array, slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride = NQPTS;
  const CeedInt qcomp_out_stride = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride = qcomp_in_stride * NCOMP;

  // Loop over each element (if necessary)
  for (CeedInt e = blockIdx.x*blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x*blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        CeedScalar result = 0.0;
        CeedInt qf_index_comp = qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
        for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
          CeedInt b_in_index = emode_in * NQPTS * NNODES;
          for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
            CeedInt b_out_index = emode_out * NQPTS * NNODES;
            CeedInt qf_index = qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
            // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
            for (CeedInt j = 0; j < NQPTS; j++) {
              result += B_out[b_out_index + j * NNODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NNODES + l];
            }

          }// end of emode_out
        } // end of emode_in
        CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NNODES * i + l;
        values_array[val_index] = result;
      } // end of out component
    } // end of in component
  } // end of element loop
}
);

//------------------------------------------------------------------------------
// Fallback kernel for larger orders (1D thread block)
//------------------------------------------------------------------------------
static const char *assemblykernelbigelem = QUOTE(
extern "C" __launch_bounds__(BLOCK_SIZE)
__global__ void linearAssemble(const CeedScalar *B_in, const CeedScalar *B_out,
                               const CeedScalar *__restrict__ qf_array,
                               CeedScalar *__restrict__ values_array) {

  // This kernel assumes B_in and B_out have the same number of quadrature points and
  // basis points.
  // TODO: expand to more general cases
  const int l = threadIdx.x; // The output column index of each B^TDB operation
                             // such that we have (Bout^T)_ij D_jk Bin_kl = C_il

  // Strides for final output ordering, determined by the reference (interface) implementation of
  // the symbolic assembly, slowest --> fastest: element, comp_in, comp_out, node_row, node_col
  const CeedInt comp_out_stride = NNODES * NNODES;
  const CeedInt comp_in_stride = comp_out_stride * NCOMP;
  const CeedInt e_stride = comp_in_stride * NCOMP;
  // Strides for QF array, slowest --> fastest: emode_in, comp_in, emode_out, comp_out, elem, qpt
  const CeedInt qe_stride = NQPTS;
  const CeedInt qcomp_out_stride = NELEM * qe_stride;
  const CeedInt qemode_out_stride = qcomp_out_stride * NCOMP;
  const CeedInt qcomp_in_stride = qemode_out_stride * NUMEMODEOUT;
  const CeedInt qemode_in_stride = qcomp_in_stride * NCOMP;

  // Loop over each element (if necessary)
  for (CeedInt e = blockIdx.x*blockDim.z + threadIdx.z; e < NELEM;
       e += gridDim.x*blockDim.z) {
    for (CeedInt comp_in = 0; comp_in < NCOMP; comp_in++) {
      for (CeedInt comp_out = 0; comp_out < NCOMP; comp_out++) {
        for (CeedInt i = 0; i < NNODES; i++) {
          CeedScalar result = 0.0;
          CeedInt qf_index_comp = qcomp_in_stride * comp_in + qcomp_out_stride * comp_out + qe_stride * e;
          for (CeedInt emode_in = 0; emode_in < NUMEMODEIN; emode_in++) {
            CeedInt b_in_index = emode_in * NQPTS * NNODES;
            for (CeedInt emode_out = 0; emode_out < NUMEMODEOUT; emode_out++) {
              CeedInt b_out_index = emode_out * NQPTS * NNODES;
              CeedInt qf_index = qf_index_comp + qemode_out_stride * emode_out + qemode_in_stride * emode_in;
              // Perform the B^T D B operation for this 'chunk' of D (the qf_array)
              for (CeedInt j = 0; j < NQPTS; j++) {
                result += B_out[b_out_index + j * NNODES + i] * qf_array[qf_index + j] * B_in[b_in_index + j * NNODES + l];
              }
1345 1346 }// end of emode_out 1347 } // end of emode_in 1348 CeedInt val_index = comp_in_stride * comp_in + comp_out_stride * comp_out + e_stride * e + NNODES * i + l; 1349 values_array[val_index] = result; 1350 } // end of loop over element node index, i 1351 } // end of out component 1352 } // end of in component 1353 } // end of element loop 1354 } 1355 ); 1356 // *INDENT-ON* 1357 1358 //------------------------------------------------------------------------------ 1359 // Single operator assembly setup 1360 //------------------------------------------------------------------------------ 1361 static int CeedSingleOperatorAssembleSetup_Cuda(CeedOperator op) { 1362 int ierr; 1363 Ceed ceed; 1364 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 1365 CeedOperator_Cuda *impl; 1366 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 1367 1368 // Get intput and output fields 1369 CeedInt num_input_fields, num_output_fields; 1370 CeedOperatorField *input_fields; 1371 CeedOperatorField *output_fields; 1372 ierr = CeedOperatorGetFields(op, &num_input_fields, &input_fields, 1373 &num_output_fields, &output_fields); CeedChk(ierr); 1374 1375 // Determine active input basis eval mode 1376 CeedQFunction qf; 1377 ierr = CeedOperatorGetQFunction(op, &qf); CeedChk(ierr); 1378 CeedQFunctionField *qf_fields; 1379 ierr = CeedQFunctionGetFields(qf, NULL, &qf_fields, NULL, NULL); CeedChk(ierr); 1380 // Note that the kernel will treat each dimension of a gradient action separately; 1381 // i.e., when an active input has a CEED_EVAL_GRAD mode, num_emode_in will increment 1382 // by dim. However, for the purposes of loading the B matrices, it will be treated 1383 // as one mode, and we will load/copy the entire gradient matrix at once, so 1384 // num_B_in_mats_to_load will be incremented by 1. 
1385 CeedInt num_emode_in = 0, dim = 1, num_B_in_mats_to_load = 0, size_B_in = 0; 1386 CeedEvalMode *eval_mode_in = NULL; //will be of size num_B_in_mats_load 1387 CeedBasis basis_in = NULL; 1388 CeedInt nqpts, esize; 1389 CeedElemRestriction rstr_in = NULL; 1390 for (CeedInt i=0; i<num_input_fields; i++) { 1391 CeedVector vec; 1392 ierr = CeedOperatorFieldGetVector(input_fields[i], &vec); CeedChk(ierr); 1393 if (vec == CEED_VECTOR_ACTIVE) { 1394 ierr = CeedOperatorFieldGetBasis(input_fields[i], &basis_in); 1395 CeedChk(ierr); 1396 ierr = CeedBasisGetDimension(basis_in, &dim); CeedChk(ierr); 1397 ierr = CeedBasisGetNumQuadraturePoints(basis_in, &nqpts); CeedChk(ierr); 1398 ierr = CeedOperatorFieldGetElemRestriction(input_fields[i], &rstr_in); 1399 ierr = CeedElemRestrictionGetElementSize(rstr_in, &esize); CeedChk(ierr); 1400 CeedChk(ierr); 1401 CeedEvalMode eval_mode; 1402 ierr = CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode); 1403 CeedChk(ierr); 1404 if (eval_mode != CEED_EVAL_NONE) { 1405 ierr = CeedRealloc(num_B_in_mats_to_load + 1, &eval_mode_in); CeedChk(ierr); 1406 eval_mode_in[num_B_in_mats_to_load] = eval_mode; 1407 num_B_in_mats_to_load += 1; 1408 if (eval_mode == CEED_EVAL_GRAD) { 1409 num_emode_in += dim; 1410 size_B_in += dim * esize * nqpts; 1411 } else { 1412 num_emode_in +=1; 1413 size_B_in += esize * nqpts; 1414 } 1415 } 1416 } 1417 } 1418 1419 // Determine active output basis; basis_out and rstr_out only used if same as input, TODO 1420 ierr = CeedQFunctionGetFields(qf, NULL, NULL, NULL, &qf_fields); CeedChk(ierr); 1421 CeedInt num_emode_out = 0, num_B_out_mats_to_load = 0, size_B_out = 0; 1422 CeedEvalMode *eval_mode_out = NULL; 1423 CeedBasis basis_out = NULL; 1424 CeedElemRestriction rstr_out = NULL; 1425 for (CeedInt i=0; i<num_output_fields; i++) { 1426 CeedVector vec; 1427 ierr = CeedOperatorFieldGetVector(output_fields[i], &vec); CeedChk(ierr); 1428 if (vec == CEED_VECTOR_ACTIVE) { 1429 ierr = 
CeedOperatorFieldGetBasis(output_fields[i], &basis_out); 1430 CeedChk(ierr); 1431 ierr = CeedOperatorFieldGetElemRestriction(output_fields[i], &rstr_out); 1432 CeedChk(ierr); 1433 if (rstr_out && rstr_out != rstr_in) 1434 // LCOV_EXCL_START 1435 return CeedError(ceed, CEED_ERROR_BACKEND, 1436 "Multi-field non-composite operator assembly not supported"); 1437 // LCOV_EXCL_STOP 1438 CeedEvalMode eval_mode; 1439 ierr = CeedQFunctionFieldGetEvalMode(qf_fields[i], &eval_mode); 1440 CeedChk(ierr); 1441 if (eval_mode != CEED_EVAL_NONE) { 1442 ierr = CeedRealloc(num_B_out_mats_to_load + 1, &eval_mode_out); CeedChk(ierr); 1443 eval_mode_out[num_B_out_mats_to_load] = eval_mode; 1444 num_B_out_mats_to_load += 1; 1445 if (eval_mode == CEED_EVAL_GRAD) { 1446 num_emode_out += dim; 1447 size_B_out += dim * esize * nqpts; 1448 } else { 1449 num_emode_out +=1; 1450 size_B_out += esize * nqpts; 1451 } 1452 } 1453 } 1454 } 1455 1456 if (num_emode_in == 0 || num_emode_out == 0) 1457 // LCOV_EXCL_START 1458 return CeedError(ceed, CEED_ERROR_UNSUPPORTED, 1459 "Cannot assemble operator without inputs/outputs"); 1460 // LCOV_EXCL_STOP 1461 1462 CeedInt nelem, ncomp; 1463 ierr = CeedElemRestrictionGetNumElements(rstr_in, &nelem); CeedChk(ierr); 1464 ierr = CeedElemRestrictionGetNumComponents(rstr_in, &ncomp); CeedChk(ierr); 1465 1466 ierr = CeedCalloc(1, &impl->asmb); CeedChkBackend(ierr); 1467 CeedOperatorAssemble_Cuda *asmb = impl->asmb; 1468 asmb->nelem = nelem; 1469 1470 // Compile kernels 1471 int elemsPerBlock = 1; 1472 asmb->elemsPerBlock = elemsPerBlock; 1473 CeedInt block_size = esize * esize * elemsPerBlock; 1474 Ceed_Cuda *cuda_data; 1475 ierr = CeedGetData(ceed, &cuda_data); CeedChkBackend(ierr); 1476 if (block_size > cuda_data->device_prop.maxThreadsPerBlock) { 1477 // Use fallback kernel with 1D threadblock 1478 block_size = esize * elemsPerBlock; 1479 asmb->block_size_x = esize; 1480 asmb->block_size_y = 1; 1481 ierr = CeedCompileCuda(ceed, assemblykernelbigelem, 
&asmb->module, 7, 1482 "NELEM", nelem, 1483 "NUMEMODEIN", num_emode_in, 1484 "NUMEMODEOUT", num_emode_out, 1485 "NQPTS", nqpts, 1486 "NNODES", esize, 1487 "BLOCK_SIZE", block_size, 1488 "NCOMP", ncomp 1489 ); CeedChk_Cu(ceed, ierr); 1490 } else { // Use kernel with 2D threadblock 1491 asmb->block_size_x = esize; 1492 asmb->block_size_y = esize; 1493 ierr = CeedCompileCuda(ceed, assemblykernel, &asmb->module, 7, 1494 "NELEM", nelem, 1495 "NUMEMODEIN", num_emode_in, 1496 "NUMEMODEOUT", num_emode_out, 1497 "NQPTS", nqpts, 1498 "NNODES", esize, 1499 "BLOCK_SIZE", block_size, 1500 "NCOMP", ncomp 1501 ); CeedChk_Cu(ceed, ierr); 1502 } 1503 ierr = CeedGetKernelCuda(ceed, asmb->module, "linearAssemble", 1504 &asmb->linearAssemble); CeedChk_Cu(ceed, ierr); 1505 1506 // Build 'full' B matrices (not 1D arrays used for tensor-product matrices) 1507 const CeedScalar *interp_in, *grad_in; 1508 ierr = CeedBasisGetInterp(basis_in, &interp_in); CeedChkBackend(ierr); 1509 ierr = CeedBasisGetGrad(basis_in, &grad_in); CeedChkBackend(ierr); 1510 1511 // Load into B_in, in order that they will be used in eval_mode 1512 const CeedInt inBytes = size_B_in * sizeof(CeedScalar); 1513 CeedInt mat_start = 0; 1514 ierr = cudaMalloc((void **) &asmb->d_B_in, inBytes); CeedChk_Cu(ceed, ierr); 1515 for (int i = 0; i < num_B_in_mats_to_load; i++) { 1516 CeedEvalMode eval_mode = eval_mode_in[i]; 1517 if (eval_mode == CEED_EVAL_INTERP) { 1518 ierr = cudaMemcpy(&asmb->d_B_in[mat_start], interp_in, 1519 esize * nqpts * sizeof(CeedScalar), 1520 cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr); 1521 mat_start += esize * nqpts; 1522 } else if (eval_mode == CEED_EVAL_GRAD) { 1523 ierr = cudaMemcpy(asmb->d_B_in, grad_in, 1524 dim * esize * nqpts * sizeof(CeedScalar), 1525 cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr); 1526 mat_start += dim * esize * nqpts; 1527 } 1528 } 1529 1530 const CeedScalar *interp_out, *grad_out; 1531 // Note that this function currently assumes 1 basis, so this should always be true 
1532 // for now 1533 if (basis_out == basis_in) { 1534 interp_out = interp_in; 1535 grad_out = grad_in; 1536 } else { 1537 ierr = CeedBasisGetInterp(basis_out, &interp_out); CeedChkBackend(ierr); 1538 ierr = CeedBasisGetGrad(basis_out, &grad_out); CeedChkBackend(ierr); 1539 } 1540 1541 // Load into B_out, in order that they will be used in eval_mode 1542 const CeedInt outBytes = size_B_out * sizeof(CeedScalar); 1543 mat_start = 0; 1544 ierr = cudaMalloc((void **) &asmb->d_B_out, outBytes); CeedChk_Cu(ceed, ierr); 1545 for (int i = 0; i < num_B_out_mats_to_load; i++) { 1546 CeedEvalMode eval_mode = eval_mode_out[i]; 1547 if (eval_mode == CEED_EVAL_INTERP) { 1548 ierr = cudaMemcpy(&asmb->d_B_out[mat_start], interp_out, 1549 esize * nqpts * sizeof(CeedScalar), 1550 cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr); 1551 mat_start += esize * nqpts; 1552 } else if (eval_mode == CEED_EVAL_GRAD) { 1553 ierr = cudaMemcpy(&asmb->d_B_out[mat_start], grad_out, 1554 dim * esize * nqpts * sizeof(CeedScalar), 1555 cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr); 1556 mat_start += dim * esize * nqpts; 1557 } 1558 } 1559 return CEED_ERROR_SUCCESS; 1560 } 1561 1562 //------------------------------------------------------------------------------ 1563 // Single operator assembly 1564 //------------------------------------------------------------------------------ 1565 static int CeedSingleOperatorAssemble_Cuda(CeedOperator op, CeedInt offset, 1566 CeedVector values) { 1567 1568 int ierr; 1569 Ceed ceed; 1570 ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr); 1571 CeedOperator_Cuda *impl; 1572 ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr); 1573 1574 // Setup 1575 if (!impl->asmb) { 1576 ierr = CeedSingleOperatorAssembleSetup_Cuda(op); 1577 CeedChkBackend(ierr); 1578 } 1579 1580 // Assemble QFunction 1581 CeedVector assembled_qf; 1582 CeedElemRestriction rstr_q; 1583 ierr = CeedOperatorLinearAssembleQFunctionBuildOrUpdate( 1584 op, &assembled_qf, &rstr_q, 
CEED_REQUEST_IMMEDIATE); CeedChk(ierr);
  ierr = CeedElemRestrictionDestroy(&rstr_q); CeedChkBackend(ierr);
  CeedScalar *values_array;
  ierr = CeedVectorGetArrayWrite(values, CEED_MEM_DEVICE, &values_array);
  CeedChkBackend(ierr);
  // Write this (sub)operator's entries starting at its offset into `values`
  values_array += offset;
  const CeedScalar *qf_array;
  ierr = CeedVectorGetArrayRead(assembled_qf, CEED_MEM_DEVICE, &qf_array);
  CeedChkBackend(ierr);

  // Compute B^T D B; grid is ceil(nelem / elemsPerBlock)
  const CeedInt nelem = impl->asmb->nelem;
  const CeedInt elemsPerBlock = impl->asmb->elemsPerBlock;
  const CeedInt grid = nelem/elemsPerBlock+((
                         nelem/elemsPerBlock*elemsPerBlock<nelem)?1:0);
  void *args[] = {&impl->asmb->d_B_in, &impl->asmb->d_B_out,
                  &qf_array, &values_array
                 };
  ierr = CeedRunKernelDimCuda(ceed, impl->asmb->linearAssemble, grid,
                              impl->asmb->block_size_x, impl->asmb->block_size_y,
                              elemsPerBlock, args);
  CeedChkBackend(ierr);


  // Restore arrays
  // NOTE(review): values_array was advanced by `offset` above and is restored
  // without undoing the shift — confirm the backend's RestoreArray tolerates
  // a non-base pointer.
  ierr = CeedVectorRestoreArray(values, &values_array); CeedChkBackend(ierr);
  ierr = CeedVectorRestoreArrayRead(assembled_qf, &qf_array);
  CeedChkBackend(ierr);

  // Cleanup
  ierr = CeedVectorDestroy(&assembled_qf); CeedChkBackend(ierr);

  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Assemble matrix data for COO matrix of assembled operator.
// The sparsity pattern is set by CeedOperatorLinearAssembleSymbolic.
//
// Note that this (and other assembly routines) currently assume only one
// active input restriction/basis per operator (could have multiple basis eval
// modes).
// TODO: allow multiple active input restrictions/basis objects
//------------------------------------------------------------------------------
int CeedOperatorLinearAssemble_Cuda(CeedOperator op, CeedVector values) {

  // As done in the default implementation, loop through suboperators
  // for composite operators, or call single operator assembly otherwise
  bool is_composite;
  CeedInt ierr;
  ierr = CeedOperatorIsComposite(op, &is_composite); CeedChk(ierr);

  CeedElemRestriction rstr;
  CeedInt num_elem, elem_size, num_comp;

  CeedInt offset = 0;
  if (is_composite) {
    CeedInt num_suboperators;
    ierr = CeedOperatorGetNumSub(op, &num_suboperators); CeedChk(ierr);
    CeedOperator *sub_operators;
    ierr = CeedOperatorGetSubList(op, &sub_operators); CeedChk(ierr);
    for (int k = 0; k < num_suboperators; ++k) {
      ierr = CeedSingleOperatorAssemble_Cuda(sub_operators[k], offset, values);
      CeedChk(ierr);
      // Advance by this suboperator's dense block count:
      // (elem_size*num_comp)^2 values per element
      ierr = CeedOperatorGetActiveElemRestriction(sub_operators[k], &rstr);
      CeedChk(ierr);
      ierr = CeedElemRestrictionGetNumElements(rstr, &num_elem); CeedChk(ierr);
      ierr = CeedElemRestrictionGetElementSize(rstr, &elem_size); CeedChk(ierr);
      ierr = CeedElemRestrictionGetNumComponents(rstr, &num_comp); CeedChk(ierr);
      offset += elem_size*num_comp * elem_size*num_comp * num_elem;
    }
  } else {
    ierr = CeedSingleOperatorAssemble_Cuda(op, offset, values); CeedChk(ierr);
  }

  return CEED_ERROR_SUCCESS;
}
//------------------------------------------------------------------------------
// Create operator
//------------------------------------------------------------------------------
// Registers the CUDA-ref backend implementations on a new operator and
// installs its (zero-initialized) backend data struct.
int CeedOperatorCreate_Cuda(CeedOperator op) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);
  CeedOperator_Cuda *impl;

  ierr = CeedCalloc(1, &impl); CeedChkBackend(ierr);
  ierr = CeedOperatorSetData(op, impl); CeedChkBackend(ierr);

  ierr = CeedSetBackendFunction(ceed, "Operator", op, "LinearAssembleQFunction",
                                CeedOperatorLinearAssembleQFunction_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op,
                                "LinearAssembleQFunctionUpdate",
                                CeedOperatorLinearAssembleQFunctionUpdate_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op, "LinearAssembleAddDiagonal",
                                CeedOperatorLinearAssembleAddDiagonal_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op,
                                "LinearAssembleAddPointBlockDiagonal",
                                CeedOperatorLinearAssembleAddPointBlockDiagonal_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op,
                                "LinearAssemble", CeedOperatorLinearAssemble_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op, "ApplyAdd",
                                CeedOperatorApplyAdd_Cuda); CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op, "Destroy",
                                CeedOperatorDestroy_Cuda); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Composite Operator Create
//------------------------------------------------------------------------------
// Registers only the assembly entry points on a composite operator; they
// dispatch to the suboperators internally.
int CeedCompositeOperatorCreate_Cuda(CeedOperator op) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);

  ierr = CeedSetBackendFunction(ceed, "Operator", op, "LinearAssembleAddDiagonal",
                                CeedOperatorLinearAssembleAddDiagonal_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op,
                                "LinearAssembleAddPointBlockDiagonal",
                                CeedOperatorLinearAssembleAddPointBlockDiagonal_Cuda);
  CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op,
                                "LinearAssemble",
CeedOperatorLinearAssemble_Cuda); 1714 CeedChkBackend(ierr); 1715 return CEED_ERROR_SUCCESS; 1716 } 1717 //------------------------------------------------------------------------------ 1718