// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.

#include <ceed-impl.h>
#include <string.h>
#include "magma.h"

typedef struct {
  CeedScalar *array;   // host mirror of the data (allocated on demand)
  CeedScalar *darray;  // device array (the vector owns its data iff non-NULL)
} CeedVector_Magma;

typedef struct {
  const CeedInt *indices;
  CeedInt *indices_allocated;
} CeedElemRestriction_Magma;

typedef struct {
  CeedVector etmp;
  CeedVector qdata;
} CeedOperator_Magma;

// *****************************************************************************
// * Initialize vector vec (after freeing existing memory) with values from
// * array, based on cmode:
// * CEED_COPY_VALUES: device memory is allocated in darray and the data is
// *                   copied (the passed pointer is not stored)
// * CEED_OWN_POINTER: the data is copied to the device and ownership of array
// *                   is taken (it is freed here)
// * CEED_USE_POINTER: impl->array = array (caller may modify; no ownership)
// * mtype: CEED_MEM_HOST or CEED_MEM_DEVICE
// *****************************************************************************
static int CeedVectorSetArray_Magma(CeedVector vec, CeedMemType mtype,
                                    CeedCopyMode cmode, CeedScalar *array) {
  CeedVector_Magma *impl = vec->data;
  int ierr;

  if (mtype != CEED_MEM_HOST)
    return CeedError(vec->ceed, 1, "Only MemType = HOST supported");

  switch (cmode) {
  case CEED_COPY_VALUES:
    if (impl->darray == NULL) {
      ierr = magma_malloc((void**)&impl->darray,
                          vec->length * sizeof(CeedScalar));
      CeedChk(ierr);
    }
    if (array)
      magma_setvector(vec->length, sizeof(array[0]),
                      array, 1, impl->darray, 1);
    break;
  case CEED_OWN_POINTER:
    if (impl->darray == NULL) {
      ierr = magma_malloc((void**)&impl->darray,
                          vec->length * sizeof(CeedScalar));
      CeedChk(ierr);
    }
    if (array)
      magma_setvector(vec->length, sizeof(array[0]),
                      array, 1, impl->darray, 1);
    ierr = CeedFree(&array); CeedChk(ierr);
    break;
  case CEED_USE_POINTER:
    if (impl->darray != NULL) {
      ierr = magma_free(impl->darray); CeedChk(ierr);
      impl->darray = NULL;
    }
    impl->array = array;
  }
  return 0;
}
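// *****************************************************************************
// * Usage sketch (illustrative only; assumes a Ceed initialized on this
// * backend, a length n, and a caller-provided host buffer hostvals):
// *
// *   CeedVector x;
// *   CeedVectorCreate(ceed, n, &x);
// *   // copy: backend keeps its own device copy; hostvals stays caller-owned
// *   CeedVectorSetArray(x, CEED_MEM_HOST, CEED_COPY_VALUES, hostvals);
// *   // own: backend copies to device, then frees hostvals itself
// *   CeedVectorSetArray(x, CEED_MEM_HOST, CEED_OWN_POINTER, hostvals);
// *   // use: backend aliases hostvals on the host; caller keeps ownership
// *   CeedVectorSetArray(x, CEED_MEM_HOST, CEED_USE_POINTER, hostvals);
// *****************************************************************************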
// *****************************************************************************
// * Get data from device vector vec into CeedScalar array on the CPU
// *****************************************************************************
static int CeedVectorGetArray_Magma(CeedVector vec, CeedMemType mtype,
                                    CeedScalar **array) {
  CeedVector_Magma *impl = vec->data;
  int ierr;

  if (mtype != CEED_MEM_HOST)
    return CeedError(vec->ceed, 1, "Can only provide to HOST memory");

  if (!impl->array) { // Allocate if array is not yet allocated
    ierr = CeedMalloc(vec->length, &impl->array); CeedChk(ierr);
  }
  if (impl->darray) { // Get data from the device if owned (darray != NULL)
    magma_getvector(vec->length, sizeof(impl->array[0]),
                    impl->darray, 1, impl->array, 1);
  }
  *array = impl->array;
  return 0;
}

// *****************************************************************************
// * Get data from device vector vec into CeedScalar array on the CPU for
// * reading
// *****************************************************************************
static int CeedVectorGetArrayRead_Magma(CeedVector vec, CeedMemType mtype,
                                        const CeedScalar **array) {
  CeedVector_Magma *impl = vec->data;
  int ierr;

  if (mtype != CEED_MEM_HOST)
    return CeedError(vec->ceed, 1, "Can only provide to HOST memory");

  if (!impl->array) { // Allocate if array is not yet allocated
    ierr = CeedMalloc(vec->length, &impl->array); CeedChk(ierr);
  }
  if (impl->darray) { // Get data from the device if owned (darray != NULL)
    magma_getvector(vec->length, sizeof(impl->array[0]),
                    impl->darray, 1, impl->array, 1);
  }
  *array = impl->array;
  return 0;
}

// *****************************************************************************
// * Restore vector vec with values from array, where array received its values
// * from vec and possibly modified them.
// *****************************************************************************
static int CeedVectorRestoreArray_Magma(CeedVector vec, CeedScalar **array) {
  CeedVector_Magma *impl = vec->data;

  if (impl->darray) { // vec owns its data, so push the host values to the GPU
    magma_setvector(vec->length, sizeof((*array)[0]),
                    *array, 1, impl->darray, 1);
  }
  *array = NULL;
  return 0;
}

// *****************************************************************************
// * Restore vector vec with values from array, where array received its values
// * from vec to only read them; in this case vec may have been modified
// * meanwhile and needs to be restored here.
// *****************************************************************************
static int CeedVectorRestoreArrayRead_Magma(CeedVector vec,
                                            const CeedScalar **array) {
  CeedVector_Magma *impl = vec->data;

  if (impl->darray) { // vec owns its data, so push the host values to the GPU
    magma_setvector(vec->length, sizeof((*array)[0]),
                    *array, 1, impl->darray, 1);
  }
  *array = NULL;
  return 0;
}
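// A minimal read-modify-write round trip through the interfaces above
// (sketch; x and n are the hypothetical vector and length from the earlier
// example):
//
//   CeedScalar *a;
//   CeedVectorGetArray(x, CEED_MEM_HOST, &a); // device data mirrored to host
//   for (CeedInt i = 0; i < n; i++) a[i] *= 2.0;
//   CeedVectorRestoreArray(x, &a);            // host changes pushed back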
static int CeedVectorDestroy_Magma(CeedVector vec) {
  CeedVector_Magma *impl = vec->data;
  int ierr;

  if (impl->array) { ierr = CeedFree(&impl->array); CeedChk(ierr); }
  if (impl->darray) { ierr = magma_free(impl->darray); CeedChk(ierr); }
  ierr = CeedFree(&vec->data); CeedChk(ierr);
  return 0;
}

// *****************************************************************************
// * Create vector vec of size n
// *****************************************************************************
static int CeedVectorCreate_Magma(Ceed ceed, CeedInt n, CeedVector vec) {
  CeedVector_Magma *impl;
  int ierr;

  vec->SetArray = CeedVectorSetArray_Magma;
  vec->GetArray = CeedVectorGetArray_Magma;
  vec->GetArrayRead = CeedVectorGetArrayRead_Magma;
  vec->RestoreArray = CeedVectorRestoreArray_Magma;
  vec->RestoreArrayRead = CeedVectorRestoreArrayRead_Magma;
  vec->Destroy = CeedVectorDestroy_Magma;
  ierr = CeedCalloc(1, &impl); CeedChk(ierr);
  impl->darray = NULL;
  vec->data = impl;
  return 0;
}

// *****************************************************************************
// * Apply restriction operator r to u: v = r(rmode) u
// *****************************************************************************
static int CeedElemRestrictionApply_Magma(CeedElemRestriction r,
                                          CeedTransposeMode tmode, CeedInt ncomp,
                                          CeedTransposeMode lmode, CeedVector u,
                                          CeedVector v, CeedRequest *request) {
  CeedElemRestriction_Magma *impl = r->data;
  int ierr;
  const CeedScalar *uu;
  CeedScalar *vv;
  CeedInt esize = r->nelem*r->elemsize;

  ierr = CeedVectorGetArrayRead(u, CEED_MEM_HOST, &uu); CeedChk(ierr);
  ierr = CeedVectorGetArray(v, CEED_MEM_HOST, &vv); CeedChk(ierr);
  if (tmode == CEED_NOTRANSPOSE) {
    // Perform: v = r * u
    if (ncomp == 1) {
      for (CeedInt i=0; i<esize; i++) vv[i] = uu[impl->indices[i]];
    } else {
      // vv is (elemsize x ncomp x nelem), column-major
      if (lmode == CEED_NOTRANSPOSE) { // u is (ndof x ncomp), column-major
        for (CeedInt e = 0; e < r->nelem; e++)
          for (CeedInt d = 0; d < ncomp; d++)
            for (CeedInt i=0; i<r->elemsize; i++) {
              vv[i+r->elemsize*(d+ncomp*e)] =
                uu[impl->indices[i+r->elemsize*e]+r->ndof*d];
            }
      } else { // u is (ncomp x ndof), column-major
        for (CeedInt e = 0; e < r->nelem; e++)
          for (CeedInt d = 0; d < ncomp; d++)
            for (CeedInt i=0; i<r->elemsize; i++) {
              vv[i+r->elemsize*(d+ncomp*e)] =
                uu[d+ncomp*impl->indices[i+r->elemsize*e]];
            }
      }
    }
  } else {
    // Note: in transpose mode, we perform: v += r^T * u
    if (ncomp == 1) {
      for (CeedInt i=0; i<esize; i++) vv[impl->indices[i]] += uu[i];
    } else {
      // u is (elemsize x ncomp x nelem)
      if (lmode == CEED_NOTRANSPOSE) { // vv is (ndof x ncomp), column-major
        for (CeedInt e = 0; e < r->nelem; e++)
          for (CeedInt d = 0; d < ncomp; d++)
            for (CeedInt i=0; i<r->elemsize; i++) {
              vv[impl->indices[i+r->elemsize*e]+r->ndof*d] +=
                uu[i+r->elemsize*(d+e*ncomp)];
            }
      } else { // vv is (ncomp x ndof), column-major
        for (CeedInt e = 0; e < r->nelem; e++)
          for (CeedInt d = 0; d < ncomp; d++)
            for (CeedInt i=0; i<r->elemsize; i++) {
              vv[d+ncomp*impl->indices[i+r->elemsize*e]] +=
                uu[i+r->elemsize*(d+e*ncomp)];
            }
      }
    }
  }
  ierr = CeedVectorRestoreArrayRead(u, &uu); CeedChk(ierr);
  ierr = CeedVectorRestoreArray(v, &vv); CeedChk(ierr);
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED)
    *request = NULL;
  return 0;
}
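// Illustrative example (not from the source): two linear elements on three
// dofs sharing the middle node, indices = {0,1, 1,2}, ncomp = 1:
//   NOTRANSPOSE (gather):     v = {u[0], u[1], u[1], u[2]}
//   TRANSPOSE (scatter-add):  v[0] += u[0]; v[1] += u[1] + u[2]; v[2] += u[3]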
static int CeedElemRestrictionDestroy_Magma(CeedElemRestriction r) {
  CeedElemRestriction_Magma *impl = r->data;
  int ierr;

  ierr = CeedFree(&impl->indices_allocated); CeedChk(ierr);
  ierr = CeedFree(&r->data); CeedChk(ierr);
  return 0;
}

static int CeedElemRestrictionCreate_Magma(CeedElemRestriction r,
                                           CeedMemType mtype,
                                           CeedCopyMode cmode,
                                           const CeedInt *indices) {
  int ierr;
  CeedElemRestriction_Magma *impl;

  if (mtype != CEED_MEM_HOST)
    return CeedError(r->ceed, 1, "Only MemType = HOST supported");
  ierr = CeedCalloc(1, &impl); CeedChk(ierr);
  switch (cmode) {
  case CEED_COPY_VALUES:
    ierr = CeedMalloc(r->nelem*r->elemsize, &impl->indices_allocated);
    CeedChk(ierr);
    memcpy(impl->indices_allocated, indices,
           r->nelem * r->elemsize * sizeof(indices[0]));
    impl->indices = impl->indices_allocated;
    break;
  case CEED_OWN_POINTER:
    impl->indices_allocated = (CeedInt *)indices;
    impl->indices = impl->indices_allocated;
    break;
  case CEED_USE_POINTER:
    impl->indices = indices;
  }
  r->data = impl;
  r->Apply = CeedElemRestrictionApply_Magma;
  r->Destroy = CeedElemRestrictionDestroy_Magma;
  return 0;
}

// Contracts on the middle index
// NOTRANSPOSE: V_ajc = T_jb U_abc
// TRANSPOSE:   V_ajc = T_bj U_abc
// If Add != 0, "=" is replaced by "+="
static int CeedTensorContract_Magma(Ceed ceed,
                                    CeedInt A, CeedInt B, CeedInt C, CeedInt J,
                                    const CeedScalar *t, CeedTransposeMode tmode,
                                    const CeedInt Add,
                                    const CeedScalar *u, CeedScalar *v) {
  CeedInt tstride0 = B, tstride1 = 1;
  if (tmode == CEED_TRANSPOSE) {
    tstride0 = 1; tstride1 = J;
  }

  for (CeedInt a=0; a<A; a++) {
    for (CeedInt j=0; j<J; j++) {
      if (!Add) {
        for (CeedInt c=0; c<C; c++)
          v[(a*J+j)*C+c] = 0;
      }
      for (CeedInt b=0; b<B; b++) {
        for (CeedInt c=0; c<C; c++) {
          v[(a*J+j)*C+c] += t[j*tstride0 + b*tstride1] * u[(a*B+b)*C+c];
        }
      }
    }
  }
  return 0;
}
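// Worked example (illustrative): A = 1, B = 2, C = 1, J = 2, CEED_NOTRANSPOSE,
// Add = 0, with t = {1, 2, 3, 4} (T stored row-major, J x B) and u = {5, 6}:
//   v[j] = sum_b T[j][b] * u[b]  =>  v = {1*5 + 2*6, 3*5 + 4*6} = {17, 39}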
static int CeedBasisApply_Magma(CeedBasis basis, CeedTransposeMode tmode,
                                CeedEvalMode emode,
                                const CeedScalar *u, CeedScalar *v) {
  int ierr;
  const CeedInt dim = basis->dim;
  const CeedInt ndof = basis->ndof;
  const CeedInt nqpt = ndof*CeedPowInt(basis->Q1d, dim);
  const CeedInt add = (tmode == CEED_TRANSPOSE);

  if (tmode == CEED_TRANSPOSE) {
    const CeedInt vsize = ndof*CeedPowInt(basis->P1d, dim);
    for (CeedInt i = 0; i < vsize; i++)
      v[i] = (CeedScalar) 0;
  }
  if (emode & CEED_EVAL_INTERP) {
    CeedInt P = basis->P1d, Q = basis->Q1d;
    if (tmode == CEED_TRANSPOSE) {
      P = basis->Q1d; Q = basis->P1d;
    }
    CeedInt pre = ndof*CeedPowInt(P, dim-1), post = 1;
    CeedScalar tmp[2][ndof*Q*CeedPowInt(P>Q?P:Q, dim-1)];
    for (CeedInt d=0; d<dim; d++) {
      ierr = CeedTensorContract_Magma(basis->ceed, pre, P, post, Q,
                                      basis->interp1d, tmode, add&&(d==dim-1),
                                      d==0?u:tmp[d%2], d==dim-1?v:tmp[(d+1)%2]);
      CeedChk(ierr);
      pre /= P;
      post *= Q;
    }
    if (tmode == CEED_NOTRANSPOSE) {
      v += nqpt;
    } else {
      u += nqpt;
    }
  }
  if (emode & CEED_EVAL_GRAD) {
    CeedInt P = basis->P1d, Q = basis->Q1d;
    // In CEED_NOTRANSPOSE mode:
    //   u is (P^dim x nc), column-major layout (nc = ndof)
    //   v is (Q^dim x nc x dim), column-major layout (nc = ndof)
    // In CEED_TRANSPOSE mode, the sizes of u and v are switched.
    if (tmode == CEED_TRANSPOSE) {
      P = basis->Q1d, Q = basis->P1d;
    }
    CeedScalar tmp[2][ndof*Q*CeedPowInt(P>Q?P:Q, dim-1)];
    for (CeedInt p = 0; p < dim; p++) {
      CeedInt pre = ndof*CeedPowInt(P, dim-1), post = 1;
      for (CeedInt d=0; d<dim; d++) {
        ierr = CeedTensorContract_Magma(basis->ceed, pre, P, post, Q,
                                        (p==d)?basis->grad1d:basis->interp1d,
                                        tmode, add&&(d==dim-1),
                                        d==0?u:tmp[d%2], d==dim-1?v:tmp[(d+1)%2]);
        CeedChk(ierr);
        pre /= P;
        post *= Q;
      }
      if (tmode == CEED_NOTRANSPOSE) {
        v += nqpt;
      } else {
        u += nqpt;
      }
    }
  }
  if (emode & CEED_EVAL_WEIGHT) {
    if (tmode == CEED_TRANSPOSE)
      return CeedError(basis->ceed, 1,
                       "CEED_EVAL_WEIGHT incompatible with CEED_TRANSPOSE");
    CeedInt Q = basis->Q1d;
    for (CeedInt d=0; d<dim; d++) {
      CeedInt pre = CeedPowInt(Q, dim-d-1), post = CeedPowInt(Q, d);
      for (CeedInt i=0; i<pre; i++) {
        for (CeedInt j=0; j<Q; j++) {
          for (CeedInt k=0; k<post; k++) {
            v[(i*Q + j)*post + k] = basis->qweight1d[j]
                                    * (d == 0 ? 1 : v[(i*Q + j)*post + k]);
          }
        }
      }
    }
  }
  return 0;
}

static int CeedBasisDestroy_Magma(CeedBasis basis) {
  return 0;
}

static int CeedBasisCreateTensorH1_Magma(Ceed ceed, CeedInt dim, CeedInt P1d,
                                         CeedInt Q1d, const CeedScalar *interp1d,
                                         const CeedScalar *grad1d,
                                         const CeedScalar *qref1d,
                                         const CeedScalar *qweight1d,
                                         CeedBasis basis) {
  basis->Apply = CeedBasisApply_Magma;
  basis->Destroy = CeedBasisDestroy_Magma;
  return 0;
}

static int CeedQFunctionApply_Magma(CeedQFunction qf, void *qdata, CeedInt Q,
                                    const CeedScalar *const *u,
                                    CeedScalar *const *v) {
  int ierr;
  ierr = qf->function(qf->ctx, qdata, Q, u, v); CeedChk(ierr);
  return 0;
}

static int CeedQFunctionDestroy_Magma(CeedQFunction qf) {
  return 0;
}

static int CeedQFunctionCreate_Magma(CeedQFunction qf) {
  qf->Apply = CeedQFunctionApply_Magma;
  qf->Destroy = CeedQFunctionDestroy_Magma;
  return 0;
}
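// The user callback invoked by CeedQFunctionApply_Magma above has the shape
//   int f(void *ctx, void *qdata, CeedInt Q,
//         const CeedScalar *const *u, CeedScalar *const *v);
// A hypothetical mass-type qfunction as a sketch (the qdata layout here is an
// assumption for illustration, not something this backend prescribes):
//   static int f_mass(void *ctx, void *qdata, CeedInt Q,
//                     const CeedScalar *const *u, CeedScalar *const *v) {
//     const CeedScalar *w = qdata; // e.g. precomputed detJ * quadrature weight
//     for (CeedInt i = 0; i < Q; i++) v[0][i] = w[i] * u[0][i];
//     return 0;
//   }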
static int CeedOperatorDestroy_Magma(CeedOperator op) {
  CeedOperator_Magma *impl = op->data;
  int ierr;

  ierr = CeedVectorDestroy(&impl->etmp); CeedChk(ierr);
  ierr = CeedVectorDestroy(&impl->qdata); CeedChk(ierr);
  ierr = CeedFree(&op->data); CeedChk(ierr);
  return 0;
}

static int CeedOperatorApply_Magma(CeedOperator op, CeedVector qdata,
                                   CeedVector ustate,
                                   CeedVector residual, CeedRequest *request) {
  CeedOperator_Magma *impl = op->data;
  CeedVector etmp;
  CeedInt Q;
  const CeedInt nc = op->basis->ndof, dim = op->basis->dim;
  CeedScalar *Eu;
  char *qd;
  int ierr;
  CeedTransposeMode lmode = CEED_NOTRANSPOSE;

  if (!impl->etmp) {
    ierr = CeedVectorCreate(op->ceed,
                            nc * op->Erestrict->nelem * op->Erestrict->elemsize,
                            &impl->etmp); CeedChk(ierr);
    // etmp is allocated when CeedVectorGetArray is called below
  }
  etmp = impl->etmp;
  if (op->qf->inmode & ~CEED_EVAL_WEIGHT) {
    ierr = CeedElemRestrictionApply(op->Erestrict, CEED_NOTRANSPOSE,
                                    nc, lmode, ustate, etmp,
                                    CEED_REQUEST_IMMEDIATE); CeedChk(ierr);
  }
  ierr = CeedBasisGetNumQuadraturePoints(op->basis, &Q); CeedChk(ierr);
  ierr = CeedVectorGetArray(etmp, CEED_MEM_HOST, &Eu); CeedChk(ierr);
  ierr = CeedVectorGetArray(qdata, CEED_MEM_HOST, (CeedScalar**)&qd);
  CeedChk(ierr);
  for (CeedInt e=0; e<op->Erestrict->nelem; e++) {
    CeedScalar BEu[Q*nc*(dim+2)], BEv[Q*nc*(dim+2)], *out[5] = {0,0,0,0,0};
    const CeedScalar *in[5] = {0,0,0,0,0};
    // TODO: quadrature weights can be computed just once
    ierr = CeedBasisApply(op->basis, CEED_NOTRANSPOSE, op->qf->inmode,
                          &Eu[e*op->Erestrict->elemsize*nc], BEu);
    CeedChk(ierr);
    CeedScalar *u_ptr = BEu, *v_ptr = BEv;
    if (op->qf->inmode & CEED_EVAL_INTERP) { in[0] = u_ptr; u_ptr += Q*nc; }
    if (op->qf->inmode & CEED_EVAL_GRAD) { in[1] = u_ptr; u_ptr += Q*nc*dim; }
    if (op->qf->inmode & CEED_EVAL_WEIGHT) { in[4] = u_ptr; u_ptr += Q; }
    if (op->qf->outmode & CEED_EVAL_INTERP) { out[0] = v_ptr; v_ptr += Q*nc; }
    if (op->qf->outmode & CEED_EVAL_GRAD) { out[1] = v_ptr; v_ptr += Q*nc*dim; }
    ierr = CeedQFunctionApply(op->qf, &qd[e*Q*op->qf->qdatasize], Q, in, out);
    CeedChk(ierr);
    ierr = CeedBasisApply(op->basis, CEED_TRANSPOSE, op->qf->outmode, BEv,
                          &Eu[e*op->Erestrict->elemsize*nc]);
    CeedChk(ierr);
  }
  ierr = CeedVectorRestoreArray(etmp, &Eu); CeedChk(ierr);
  if (residual) {
    CeedScalar *res;
    ierr = CeedVectorGetArray(residual, CEED_MEM_HOST, &res); CeedChk(ierr);
    for (CeedInt i = 0; i < residual->length; i++)
      res[i] = (CeedScalar)0;
    ierr = CeedVectorRestoreArray(residual, &res); CeedChk(ierr);
    ierr = CeedElemRestrictionApply(op->Erestrict, CEED_TRANSPOSE,
                                    nc, lmode, etmp, residual,
                                    CEED_REQUEST_IMMEDIATE); CeedChk(ierr);
  }
  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED)
    *request = NULL;
  return 0;
}

static int CeedOperatorGetQData_Magma(CeedOperator op, CeedVector *qdata) {
  CeedOperator_Magma *impl = op->data;
  int ierr;

  if (!impl->qdata) {
    CeedInt Q;
    ierr = CeedBasisGetNumQuadraturePoints(op->basis, &Q); CeedChk(ierr);
    ierr = CeedVectorCreate(op->ceed,
                            op->Erestrict->nelem * Q
                            * op->qf->qdatasize / sizeof(CeedScalar),
                            &impl->qdata); CeedChk(ierr);
  }
  *qdata = impl->qdata;
  return 0;
}

static int CeedOperatorCreate_Magma(CeedOperator op) {
  CeedOperator_Magma *impl;
  int ierr;

  ierr = CeedCalloc(1, &impl); CeedChk(ierr);
  op->data = impl;
  op->Destroy = CeedOperatorDestroy_Magma;
  op->Apply = CeedOperatorApply_Magma;
  op->GetQData = CeedOperatorGetQData_Magma;
  return 0;
}

// *****************************************************************************
// * INIT
// *****************************************************************************
static int CeedInit_Magma(const char *resource, Ceed ceed) {
  if (strcmp(resource, "/cpu/magma")
      && strcmp(resource, "/gpu/magma"))
    return CeedError(ceed, 1, "MAGMA backend cannot use resource: %s", resource);

  magma_init();
  //magma_print_environment();

  ceed->VecCreate = CeedVectorCreate_Magma;
  ceed->BasisCreateTensorH1 = CeedBasisCreateTensorH1_Magma;
  ceed->ElemRestrictionCreate = CeedElemRestrictionCreate_Magma;
  ceed->QFunctionCreate = CeedQFunctionCreate_Magma;
  ceed->OperatorCreate = CeedOperatorCreate_Magma;
  return 0;
}
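// Selecting this backend from application code (sketch):
//   Ceed ceed;
//   CeedInit("/gpu/magma", &ceed);  // or "/cpu/magma"
//   ... create vectors, restrictions, bases, qfunctions, operators ...
//   CeedDestroy(&ceed);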
// *****************************************************************************
// * REGISTER
// *****************************************************************************
__attribute__((constructor))
static void Register(void) {
  CeedRegister("/cpu/magma", CeedInit_Magma);
  CeedRegister("/gpu/magma", CeedInit_Magma);
}