// Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
// reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
16 17 #include <ceed-impl.h> 18 #include <string.h> 19 20 typedef struct { 21 CeedScalar *array; 22 CeedScalar *array_allocated; 23 } CeedVector_Ref; 24 25 typedef struct { 26 const CeedInt *indices; 27 CeedInt *indices_allocated; 28 } CeedElemRestriction_Ref; 29 30 typedef struct { 31 CeedVector etmp; 32 CeedVector qdata; 33 } CeedOperator_Ref; 34 35 static int CeedVectorSetArray_Ref(CeedVector vec, CeedMemType mtype, 36 CeedCopyMode cmode, CeedScalar *array) { 37 CeedVector_Ref *impl = vec->data; 38 int ierr; 39 40 if (mtype != CEED_MEM_HOST) 41 return CeedError(vec->ceed, 1, "Only MemType = HOST supported"); 42 ierr = CeedFree(&impl->array_allocated); CeedChk(ierr); 43 switch (cmode) { 44 case CEED_COPY_VALUES: 45 ierr = CeedMalloc(vec->length, &impl->array_allocated); CeedChk(ierr); 46 impl->array = impl->array_allocated; 47 if (array) memcpy(impl->array, array, vec->length * sizeof(array[0])); 48 break; 49 case CEED_OWN_POINTER: 50 impl->array_allocated = array; 51 impl->array = array; 52 break; 53 case CEED_USE_POINTER: 54 impl->array = array; 55 } 56 return 0; 57 } 58 59 static int CeedVectorGetArray_Ref(CeedVector vec, CeedMemType mtype, 60 CeedScalar **array) { 61 CeedVector_Ref *impl = vec->data; 62 int ierr; 63 64 if (mtype != CEED_MEM_HOST) 65 return CeedError(vec->ceed, 1, "Can only provide to HOST memory"); 66 if (!impl->array) { // Allocate if array is not yet allocated 67 ierr = CeedVectorSetArray(vec, CEED_MEM_HOST, CEED_COPY_VALUES, NULL); 68 CeedChk(ierr); 69 } 70 *array = impl->array; 71 return 0; 72 } 73 74 static int CeedVectorGetArrayRead_Ref(CeedVector vec, CeedMemType mtype, 75 const CeedScalar **array) { 76 CeedVector_Ref *impl = vec->data; 77 int ierr; 78 79 if (mtype != CEED_MEM_HOST) 80 return CeedError(vec->ceed, 1, "Can only provide to HOST memory"); 81 if (!impl->array) { // Allocate if array is not yet allocated 82 ierr = CeedVectorSetArray(vec, CEED_MEM_HOST, CEED_COPY_VALUES, NULL); 83 CeedChk(ierr); 84 } 85 *array = impl->array; 
86 return 0; 87 } 88 89 static int CeedVectorRestoreArray_Ref(CeedVector vec, CeedScalar **array) { 90 *array = NULL; 91 return 0; 92 } 93 94 static int CeedVectorRestoreArrayRead_Ref(CeedVector vec, 95 const CeedScalar **array) { 96 *array = NULL; 97 return 0; 98 } 99 100 static int CeedVectorDestroy_Ref(CeedVector vec) { 101 CeedVector_Ref *impl = vec->data; 102 int ierr; 103 104 ierr = CeedFree(&impl->array_allocated); CeedChk(ierr); 105 ierr = CeedFree(&vec->data); CeedChk(ierr); 106 return 0; 107 } 108 109 static int CeedVectorCreate_Ref(Ceed ceed, CeedInt n, CeedVector vec) { 110 CeedVector_Ref *impl; 111 int ierr; 112 113 vec->SetArray = CeedVectorSetArray_Ref; 114 vec->GetArray = CeedVectorGetArray_Ref; 115 vec->GetArrayRead = CeedVectorGetArrayRead_Ref; 116 vec->RestoreArray = CeedVectorRestoreArray_Ref; 117 vec->RestoreArrayRead = CeedVectorRestoreArrayRead_Ref; 118 vec->Destroy = CeedVectorDestroy_Ref; 119 ierr = CeedCalloc(1,&impl); CeedChk(ierr); 120 vec->data = impl; 121 return 0; 122 } 123 124 static int CeedElemRestrictionApply_Ref(CeedElemRestriction r, 125 CeedTransposeMode tmode, CeedInt ncomp, 126 CeedTransposeMode lmode, CeedVector u, 127 CeedVector v, CeedRequest *request) { 128 CeedElemRestriction_Ref *impl = r->data; 129 int ierr; 130 const CeedScalar *uu; 131 CeedScalar *vv; 132 CeedInt esize = r->nelem*r->elemsize; 133 134 ierr = CeedVectorGetArrayRead(u, CEED_MEM_HOST, &uu); CeedChk(ierr); 135 ierr = CeedVectorGetArray(v, CEED_MEM_HOST, &vv); CeedChk(ierr); 136 if (tmode == CEED_NOTRANSPOSE) { 137 // Perform: v = r * u 138 if (ncomp == 1) { 139 for (CeedInt i=0; i<esize; i++) vv[i] = uu[impl->indices[i]]; 140 } else { 141 // vv is (elemsize x ncomp x nelem), column-major 142 if (lmode == CEED_NOTRANSPOSE) { // u is (ndof x ncomp), column-major 143 for (CeedInt e = 0; e < r->nelem; e++) 144 for (CeedInt d = 0; d < ncomp; d++) 145 for (CeedInt i=0; i<r->elemsize; i++) { 146 vv[i+r->elemsize*(d+ncomp*e)] = 147 
uu[impl->indices[i+r->elemsize*e]+r->ndof*d]; 148 } 149 } else { // u is (ncomp x ndof), column-major 150 for (CeedInt e = 0; e < r->nelem; e++) 151 for (CeedInt d = 0; d < ncomp; d++) 152 for (CeedInt i=0; i<r->elemsize; i++) { 153 vv[i+r->elemsize*(d+ncomp*e)] = 154 uu[d+ncomp*impl->indices[i+r->elemsize*e]]; 155 } 156 } 157 } 158 } else { 159 // Note: in transpose mode, we perform: v += r^t * u 160 if (ncomp == 1) { 161 for (CeedInt i=0; i<esize; i++){ 162 vv[impl->indices[i]] += uu[i]; 163 printf("\n\tv[%d]=u[%d]",impl->indices[i],i); 164 } 165 } else { 166 // u is (elemsize x ncomp x nelem) 167 if (lmode == CEED_NOTRANSPOSE) { // vv is (ndof x ncomp), column-major 168 for (CeedInt e = 0; e < r->nelem; e++) 169 for (CeedInt d = 0; d < ncomp; d++) 170 for (CeedInt i=0; i<r->elemsize; i++) { 171 vv[impl->indices[i+r->elemsize*e]+r->ndof*d] += 172 uu[i+r->elemsize*(d+e*ncomp)]; 173 } 174 } else { // vv is (ncomp x ndof), column-major 175 for (CeedInt e = 0; e < r->nelem; e++) 176 for (CeedInt d = 0; d < ncomp; d++) 177 for (CeedInt i=0; i<r->elemsize; i++) { 178 vv[d+ncomp*impl->indices[i+r->elemsize*e]] += 179 uu[i+r->elemsize*(d+e*ncomp)]; 180 } 181 } 182 } 183 } 184 ierr = CeedVectorRestoreArrayRead(u, &uu); CeedChk(ierr); 185 ierr = CeedVectorRestoreArray(v, &vv); CeedChk(ierr); 186 if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) 187 *request = NULL; 188 return 0; 189 } 190 191 static int CeedElemRestrictionDestroy_Ref(CeedElemRestriction r) { 192 CeedElemRestriction_Ref *impl = r->data; 193 int ierr; 194 195 ierr = CeedFree(&impl->indices_allocated); CeedChk(ierr); 196 ierr = CeedFree(&r->data); CeedChk(ierr); 197 return 0; 198 } 199 200 static int CeedElemRestrictionCreate_Ref(CeedElemRestriction r, 201 CeedMemType mtype, 202 CeedCopyMode cmode, const CeedInt *indices) { 203 int ierr; 204 CeedElemRestriction_Ref *impl; 205 206 if (mtype != CEED_MEM_HOST) 207 return CeedError(r->ceed, 1, "Only MemType = HOST supported"); 208 ierr = 
CeedCalloc(1,&impl); CeedChk(ierr); 209 switch (cmode) { 210 case CEED_COPY_VALUES: 211 ierr = CeedMalloc(r->nelem*r->elemsize, &impl->indices_allocated); 212 CeedChk(ierr); 213 memcpy(impl->indices_allocated, indices, 214 r->nelem * r->elemsize * sizeof(indices[0])); 215 impl->indices = impl->indices_allocated; 216 break; 217 case CEED_OWN_POINTER: 218 impl->indices_allocated = (CeedInt *)indices; 219 impl->indices = impl->indices_allocated; 220 break; 221 case CEED_USE_POINTER: 222 impl->indices = indices; 223 } 224 r->data = impl; 225 r->Apply = CeedElemRestrictionApply_Ref; 226 r->Destroy = CeedElemRestrictionDestroy_Ref; 227 return 0; 228 } 229 230 // Contracts on the middle index 231 // NOTRANSPOSE: V_ajc = T_jb U_abc 232 // TRANSPOSE: V_ajc = T_bj U_abc 233 // If Add != 0, "=" is replaced by "+=" 234 static int CeedTensorContract_Ref(Ceed ceed, 235 CeedInt A, CeedInt B, CeedInt C, CeedInt J, 236 const CeedScalar *t, CeedTransposeMode tmode, 237 const CeedInt Add, 238 const CeedScalar *u, CeedScalar *v) { 239 CeedInt tstride0 = B, tstride1 = 1; 240 if (tmode == CEED_TRANSPOSE) { 241 tstride0 = 1; tstride1 = J; 242 } 243 244 for (CeedInt a=0; a<A; a++) { 245 for (CeedInt j=0; j<J; j++) { 246 if (!Add) { 247 for (CeedInt c=0; c<C; c++) 248 v[(a*J+j)*C+c] = 0; 249 } 250 for (CeedInt b=0; b<B; b++) { 251 for (CeedInt c=0; c<C; c++) { 252 v[(a*J+j)*C+c] += t[j*tstride0 + b*tstride1] * u[(a*B+b)*C+c]; 253 } 254 } 255 } 256 } 257 return 0; 258 } 259 260 static int CeedBasisApply_Ref(CeedBasis basis, CeedTransposeMode tmode, 261 CeedEvalMode emode, 262 const CeedScalar *u, CeedScalar *v) { 263 int ierr; 264 const CeedInt dim = basis->dim; 265 const CeedInt ndof = basis->ndof; 266 const CeedInt nqpt = ndof*CeedPowInt(basis->Q1d, dim); 267 const CeedInt add = (tmode == CEED_TRANSPOSE); 268 269 if (tmode == CEED_TRANSPOSE) { 270 const CeedInt vsize = ndof*CeedPowInt(basis->P1d, dim); 271 for (CeedInt i = 0; i < vsize; i++) 272 v[i] = (CeedScalar) 0; 273 } 274 if (emode 
& CEED_EVAL_INTERP) { 275 CeedInt P = basis->P1d, Q = basis->Q1d; 276 if (tmode == CEED_TRANSPOSE) { 277 P = basis->Q1d; Q = basis->P1d; 278 } 279 CeedInt pre = ndof*CeedPowInt(P, dim-1), post = 1; 280 CeedScalar tmp[2][ndof*Q*CeedPowInt(P>Q?P:Q, dim-1)]; 281 for (CeedInt d=0; d<dim; d++) { 282 ierr = CeedTensorContract_Ref(basis->ceed, pre, P, post, Q, basis->interp1d, 283 tmode, add&&(d==dim-1), 284 d==0?u:tmp[d%2], d==dim-1?v:tmp[(d+1)%2]); 285 CeedChk(ierr); 286 pre /= P; 287 post *= Q; 288 } 289 if (tmode == CEED_NOTRANSPOSE) { 290 v += nqpt; 291 } else { 292 u += nqpt; 293 } 294 } 295 if (emode & CEED_EVAL_GRAD) { 296 CeedInt P = basis->P1d, Q = basis->Q1d; 297 // In CEED_NOTRANSPOSE mode: 298 // u is (P^dim x nc), column-major layout (nc = ndof) 299 // v is (Q^dim x nc x dim), column-major layout (nc = ndof) 300 // In CEED_TRANSPOSE mode, the sizes of u and v are switched. 301 if (tmode == CEED_TRANSPOSE) { 302 P = basis->Q1d, Q = basis->P1d; 303 } 304 CeedScalar tmp[2][ndof*Q*CeedPowInt(P>Q?P:Q, dim-1)]; 305 for (CeedInt p = 0; p < dim; p++) { 306 CeedInt pre = ndof*CeedPowInt(P, dim-1), post = 1; 307 for (CeedInt d=0; d<dim; d++) { 308 ierr = CeedTensorContract_Ref(basis->ceed, pre, P, post, Q, 309 (p==d)?basis->grad1d:basis->interp1d, 310 tmode, add&&(d==dim-1), 311 d==0?u:tmp[d%2], d==dim-1?v:tmp[(d+1)%2]); 312 CeedChk(ierr); 313 pre /= P; 314 post *= Q; 315 } 316 if (tmode == CEED_NOTRANSPOSE) { 317 v += nqpt; 318 } else { 319 u += nqpt; 320 } 321 } 322 } 323 if (emode & CEED_EVAL_WEIGHT) { 324 if (tmode == CEED_TRANSPOSE) 325 return CeedError(basis->ceed, 1, 326 "CEED_EVAL_WEIGHT incompatible with CEED_TRANSPOSE"); 327 CeedInt Q = basis->Q1d; 328 for (CeedInt d=0; d<dim; d++) { 329 CeedInt pre = CeedPowInt(Q, dim-d-1), post = CeedPowInt(Q, d); 330 for (CeedInt i=0; i<pre; i++) { 331 for (CeedInt j=0; j<Q; j++) { 332 for (CeedInt k=0; k<post; k++) { 333 v[(i*Q + j)*post + k] = basis->qweight1d[j] 334 * (d == 0 ? 
1 : v[(i*Q + j)*post + k]); 335 } 336 } 337 } 338 } 339 } 340 return 0; 341 } 342 343 static int CeedBasisDestroy_Ref(CeedBasis basis) { 344 return 0; 345 } 346 347 static int CeedBasisCreateTensorH1_Ref(Ceed ceed, CeedInt dim, CeedInt P1d, 348 CeedInt Q1d, const CeedScalar *interp1d, 349 const CeedScalar *grad1d, 350 const CeedScalar *qref1d, 351 const CeedScalar *qweight1d, 352 CeedBasis basis) { 353 basis->Apply = CeedBasisApply_Ref; 354 basis->Destroy = CeedBasisDestroy_Ref; 355 return 0; 356 } 357 358 static int CeedQFunctionApply_Ref(CeedQFunction qf, void *qdata, CeedInt Q, 359 const CeedScalar *const *u, 360 CeedScalar *const *v) { 361 int ierr; 362 ierr = qf->function(qf->ctx, qdata, Q, u, v); CeedChk(ierr); 363 return 0; 364 } 365 366 static int CeedQFunctionDestroy_Ref(CeedQFunction qf) { 367 return 0; 368 } 369 370 static int CeedQFunctionCreate_Ref(CeedQFunction qf) { 371 qf->Apply = CeedQFunctionApply_Ref; 372 qf->Destroy = CeedQFunctionDestroy_Ref; 373 return 0; 374 } 375 376 static int CeedOperatorDestroy_Ref(CeedOperator op) { 377 CeedOperator_Ref *impl = op->data; 378 int ierr; 379 380 ierr = CeedVectorDestroy(&impl->etmp); CeedChk(ierr); 381 ierr = CeedVectorDestroy(&impl->qdata); CeedChk(ierr); 382 ierr = CeedFree(&op->data); CeedChk(ierr); 383 return 0; 384 } 385 386 static int CeedOperatorApply_Ref(CeedOperator op, CeedVector qdata, 387 CeedVector ustate, 388 CeedVector residual, CeedRequest *request) { 389 CeedOperator_Ref *impl = op->data; 390 CeedVector etmp; 391 CeedInt Q; 392 const CeedInt nc = op->basis->ndof, dim = op->basis->dim; 393 CeedScalar *Eu; 394 char *qd; 395 int ierr; 396 CeedTransposeMode lmode = CEED_NOTRANSPOSE; 397 398 if (!impl->etmp) { 399 ierr = CeedVectorCreate(op->ceed, 400 nc * op->Erestrict->nelem * op->Erestrict->elemsize, 401 &impl->etmp); CeedChk(ierr); 402 // etmp is allocated when CeedVectorGetArray is called below 403 } 404 etmp = impl->etmp; 405 if (op->qf->inmode & ~CEED_EVAL_WEIGHT) { 406 ierr = 
CeedElemRestrictionApply(op->Erestrict, CEED_NOTRANSPOSE, 407 nc, lmode, ustate, etmp, 408 CEED_REQUEST_IMMEDIATE); CeedChk(ierr); 409 } 410 ierr = CeedBasisGetNumQuadraturePoints(op->basis, &Q); CeedChk(ierr); 411 ierr = CeedVectorGetArray(etmp, CEED_MEM_HOST, &Eu); CeedChk(ierr); 412 ierr = CeedVectorGetArray(qdata, CEED_MEM_HOST, (CeedScalar**)&qd); 413 CeedChk(ierr); 414 for (CeedInt e=0; e<op->Erestrict->nelem; e++) { 415 CeedScalar BEu[Q*nc*(dim+2)], BEv[Q*nc*(dim+2)], *out[5] = {0,0,0,0,0}; 416 const CeedScalar *in[5] = {0,0,0,0,0}; 417 // TODO: quadrature weights can be computed just once 418 ierr = CeedBasisApply(op->basis, CEED_NOTRANSPOSE, op->qf->inmode, 419 &Eu[e*op->Erestrict->elemsize*nc], BEu); 420 CeedChk(ierr); 421 CeedScalar *u_ptr = BEu, *v_ptr = BEv; 422 if (op->qf->inmode & CEED_EVAL_INTERP) { in[0] = u_ptr; u_ptr += Q*nc; } 423 if (op->qf->inmode & CEED_EVAL_GRAD) { in[1] = u_ptr; u_ptr += Q*nc*dim; } 424 if (op->qf->inmode & CEED_EVAL_WEIGHT) { in[4] = u_ptr; u_ptr += Q; } 425 if (op->qf->outmode & CEED_EVAL_INTERP) { out[0] = v_ptr; v_ptr += Q*nc; } 426 if (op->qf->outmode & CEED_EVAL_GRAD) { out[1] = v_ptr; v_ptr += Q*nc*dim; } 427 ierr = CeedQFunctionApply(op->qf, &qd[e*Q*op->qf->qdatasize], Q, in, out); 428 CeedChk(ierr); 429 ierr = CeedBasisApply(op->basis, CEED_TRANSPOSE, op->qf->outmode, BEv, 430 &Eu[e*op->Erestrict->elemsize*nc]); 431 CeedChk(ierr); 432 } 433 ierr = CeedVectorRestoreArray(etmp, &Eu); CeedChk(ierr); 434 if (residual) { 435 CeedScalar *res; 436 CeedVectorGetArray(residual, CEED_MEM_HOST, &res); 437 for (int i = 0; i < residual->length; i++) 438 res[i] = (CeedScalar)0; 439 ierr = CeedElemRestrictionApply(op->Erestrict, CEED_TRANSPOSE, 440 nc, lmode, etmp, residual, 441 CEED_REQUEST_IMMEDIATE); CeedChk(ierr); 442 } 443 if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED) 444 *request = NULL; 445 return 0; 446 } 447 448 static int CeedOperatorGetQData_Ref(CeedOperator op, CeedVector *qdata) { 449 
CeedOperator_Ref *impl = op->data; 450 int ierr; 451 452 if (!impl->qdata) { 453 CeedInt Q; 454 ierr = CeedBasisGetNumQuadraturePoints(op->basis, &Q); CeedChk(ierr); 455 ierr = CeedVectorCreate(op->ceed, 456 op->Erestrict->nelem * Q 457 * op->qf->qdatasize / sizeof(CeedScalar), 458 &impl->qdata); CeedChk(ierr); 459 } 460 *qdata = impl->qdata; 461 return 0; 462 } 463 464 static int CeedOperatorCreate_Ref(CeedOperator op) { 465 CeedOperator_Ref *impl; 466 int ierr; 467 468 ierr = CeedCalloc(1, &impl); CeedChk(ierr); 469 op->data = impl; 470 op->Destroy = CeedOperatorDestroy_Ref; 471 op->Apply = CeedOperatorApply_Ref; 472 op->GetQData = CeedOperatorGetQData_Ref; 473 return 0; 474 } 475 476 static int CeedInit_Ref(const char *resource, Ceed ceed) { 477 if (strcmp(resource, "/cpu/self") 478 && strcmp(resource, "/cpu/self/ref")) 479 return CeedError(ceed, 1, "Ref backend cannot use resource: %s", resource); 480 ceed->VecCreate = CeedVectorCreate_Ref; 481 ceed->BasisCreateTensorH1 = CeedBasisCreateTensorH1_Ref; 482 ceed->ElemRestrictionCreate = CeedElemRestrictionCreate_Ref; 483 ceed->QFunctionCreate = CeedQFunctionCreate_Ref; 484 ceed->OperatorCreate = CeedOperatorCreate_Ref; 485 return 0; 486 } 487 488 __attribute__((constructor)) 489 static void Register(void) { 490 CeedRegister("/cpu/self/ref", CeedInit_Ref); 491 } 492