1 // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC. 2 // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707. 3 // All Rights reserved. See files LICENSE and NOTICE for details. 4 // 5 // This file is part of CEED, a collection of benchmarks, miniapps, software 6 // libraries and APIs for efficient high-order finite element and spectral 7 // element discretizations for exascale applications. For more information and 8 // source code availability see http://github.com/ceed. 9 // 10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, 11 // a collaborative effort of two U.S. Department of Energy organizations (Office 12 // of Science and the National Nuclear Security Administration) responsible for 13 // the planning and preparation of a capable exascale ecosystem, including 14 // software, applications, hardware, advanced system engineering and early 15 // testbed platforms, in support of the nation's exascale computing imperative. 
#include <ceed/ceed.h>
#include <ceed/backend.h>
#include <hip/hip_runtime.h>
#include <stdbool.h>
#include <stddef.h>
#include "ceed-hip-ref.h"
#include "../hip/ceed-hip-compile.h"

// *INDENT-OFF*
// JIT source for the restriction kernels. The RESTRICTION_* and STRIDE_*
// macros are supplied as compile-time definitions by CeedCompileHip in
// CeedElemRestrictionCreate_Hip:
//   RESTRICTION_ELEMSIZE   - nodes per element
//   RESTRICTION_NELEM      - number of elements
//   RESTRICTION_NCOMP      - number of field components
//   RESTRICTION_NNODES     - impl->nnodes at compile time
//   RESTRICTION_COMPSTRIDE - component stride in the L-vector
//   STRIDE_NODES/COMP/ELEM - L-vector strides for the strided case
static const char *restrictionkernels = QUOTE(

//------------------------------------------------------------------------------
// L-vector -> E-vector, strided
//------------------------------------------------------------------------------
extern "C" __global__ void noTrStrided(const CeedInt nelem,
                                       const CeedScalar *__restrict__ u,
                                       CeedScalar *__restrict__ v) {
  // Grid-stride loop over all element nodes; each thread copies every
  // component of its node
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x;
       node < nelem*RESTRICTION_ELEMSIZE;
       node += blockDim.x * gridDim.x) {
    const CeedInt locNode = node % RESTRICTION_ELEMSIZE;
    const CeedInt elem = node / RESTRICTION_ELEMSIZE;

    for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp)
      v[locNode + comp*RESTRICTION_ELEMSIZE*RESTRICTION_NELEM +
        elem*RESTRICTION_ELEMSIZE] =
          u[locNode*STRIDE_NODES + comp*STRIDE_COMP + elem*STRIDE_ELEM];
  }
}

//------------------------------------------------------------------------------
// L-vector -> E-vector, offsets provided
//------------------------------------------------------------------------------
extern "C" __global__ void noTrOffset(const CeedInt nelem,
                                      const CeedInt *__restrict__ indices,
                                      const CeedScalar *__restrict__ u,
                                      CeedScalar *__restrict__ v) {
  // Grid-stride loop; indices[node] gives the L-vector location of each
  // element node
  for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x;
       node < nelem*RESTRICTION_ELEMSIZE;
       node += blockDim.x * gridDim.x) {
    const CeedInt ind = indices[node];
    const CeedInt locNode = node % RESTRICTION_ELEMSIZE;
    const CeedInt elem = node / RESTRICTION_ELEMSIZE;

    for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp)
      v[locNode + comp*RESTRICTION_ELEMSIZE*RESTRICTION_NELEM +
        elem*RESTRICTION_ELEMSIZE] =
          u[ind + comp*RESTRICTION_COMPSTRIDE];
  }
}

//------------------------------------------------------------------------------ 69 // E-vector -> L-vector, strided 70 //------------------------------------------------------------------------------ 71 extern "C" __global__ void trStrided(const CeedInt nelem, 72 const CeedScalar *__restrict__ u, CeedScalar *__restrict__ v) { 73 for (CeedInt node = blockIdx.x * blockDim.x + threadIdx.x; 74 node < nelem*RESTRICTION_ELEMSIZE; 75 node += blockDim.x * gridDim.x) { 76 const CeedInt locNode = node % RESTRICTION_ELEMSIZE; 77 const CeedInt elem = node / RESTRICTION_ELEMSIZE; 78 79 for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp) 80 v[locNode*STRIDE_NODES + comp*STRIDE_COMP + elem*STRIDE_ELEM] += 81 u[locNode + comp*RESTRICTION_ELEMSIZE*RESTRICTION_NELEM + 82 elem*RESTRICTION_ELEMSIZE]; 83 } 84 } 85 86 //------------------------------------------------------------------------------ 87 // E-vector -> L-vector, offsets provided 88 //------------------------------------------------------------------------------ 89 extern "C" __global__ void trOffset(const CeedInt *__restrict__ lvec_indices, 90 const CeedInt *__restrict__ tindices, 91 const CeedInt *__restrict__ toffsets, 92 const CeedScalar *__restrict__ u, 93 CeedScalar *__restrict__ v) { 94 CeedScalar value[RESTRICTION_NCOMP]; 95 96 for (CeedInt i = blockIdx.x * blockDim.x + threadIdx.x; 97 i < RESTRICTION_NNODES; 98 i += blockDim.x * gridDim.x) { 99 const CeedInt ind = lvec_indices[i]; 100 const CeedInt rng1 = toffsets[i]; 101 const CeedInt rngN = toffsets[i+1]; 102 103 for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp) 104 value[comp] = 0.0; 105 106 for (CeedInt j = rng1; j < rngN; ++j) { 107 const CeedInt tind = tindices[j]; 108 CeedInt locNode = tind % RESTRICTION_ELEMSIZE; 109 CeedInt elem = tind / RESTRICTION_ELEMSIZE; 110 111 for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp) 112 value[comp] += u[locNode + comp*RESTRICTION_ELEMSIZE*RESTRICTION_NELEM + 113 elem*RESTRICTION_ELEMSIZE]; 114 } 115 116 
for (CeedInt comp = 0; comp < RESTRICTION_NCOMP; ++comp) 117 v[ind + comp*RESTRICTION_COMPSTRIDE] += value[comp]; 118 } 119 } 120 121 ); 122 // *INDENT-ON* 123 124 //------------------------------------------------------------------------------ 125 // Apply restriction 126 //------------------------------------------------------------------------------ 127 static int CeedElemRestrictionApply_Hip(CeedElemRestriction r, 128 CeedTransposeMode tmode, CeedVector u, CeedVector v, CeedRequest *request) { 129 int ierr; 130 CeedElemRestriction_Hip *impl; 131 ierr = CeedElemRestrictionGetData(r, &impl); CeedChkBackend(ierr); 132 Ceed ceed; 133 ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr); 134 Ceed_Hip *data; 135 ierr = CeedGetData(ceed, &data); CeedChkBackend(ierr); 136 const CeedInt blocksize = 64; 137 const CeedInt nnodes = impl->nnodes; 138 CeedInt nelem, elemsize; 139 CeedElemRestrictionGetNumElements(r, &nelem); 140 ierr = CeedElemRestrictionGetElementSize(r, &elemsize); CeedChkBackend(ierr); 141 hipFunction_t kernel; 142 143 // Get vectors 144 const CeedScalar *d_u; 145 CeedScalar *d_v; 146 ierr = CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u); CeedChkBackend(ierr); 147 if (tmode == CEED_TRANSPOSE) { 148 // Sum into for transpose mode, e-vec to l-vec 149 ierr = CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v); CeedChkBackend(ierr); 150 } else { 151 // Overwrite for notranspose mode, l-vec to e-vec 152 ierr = CeedVectorGetArrayWrite(v, CEED_MEM_DEVICE, &d_v); CeedChkBackend(ierr); 153 } 154 155 // Restrict 156 if (tmode == CEED_NOTRANSPOSE) { 157 // L-vector -> E-vector 158 if (impl->d_ind) { 159 // -- Offsets provided 160 kernel = impl->noTrOffset; 161 void *args[] = {&nelem, &impl->d_ind, &d_u, &d_v}; 162 CeedInt blocksize = elemsize<256?(elemsize>64?elemsize:64):256; 163 ierr = CeedRunKernelHip(ceed, kernel, CeedDivUpInt(nnodes, blocksize), 164 blocksize, args); CeedChkBackend(ierr); 165 } else { 166 // -- Strided restriction 167 kernel = 
        impl->noTrStrided;
      void *args[] = {&nelem, &d_u, &d_v};
      // Block size follows the element size, clamped to [64, 256]
      CeedInt blocksize = elemsize<256?(elemsize>64?elemsize:64):256;
      ierr = CeedRunKernelHip(ceed, kernel, CeedDivUpInt(nnodes, blocksize),
                              blocksize, args); CeedChkBackend(ierr);
    }
  } else {
    // E-vector -> L-vector
    if (impl->d_ind) {
      // -- Offsets provided; trOffset loops over unique L-vector nodes
      kernel = impl->trOffset;
      void *args[] = {&impl->d_lvec_indices, &impl->d_tindices,
                      &impl->d_toffsets, &d_u, &d_v
                     };
      ierr = CeedRunKernelHip(ceed, kernel, CeedDivUpInt(nnodes, blocksize),
                              blocksize, args); CeedChkBackend(ierr);
    } else {
      // -- Strided restriction
      kernel = impl->trStrided;
      void *args[] = {&nelem, &d_u, &d_v};
      ierr = CeedRunKernelHip(ceed, kernel, CeedDivUpInt(nnodes, blocksize),
                              blocksize, args); CeedChkBackend(ierr);
    }
  }

  if (request != CEED_REQUEST_IMMEDIATE && request != CEED_REQUEST_ORDERED)
    *request = NULL;

  // Restore arrays
  ierr = CeedVectorRestoreArrayRead(u, &d_u); CeedChkBackend(ierr);
  ierr = CeedVectorRestoreArray(v, &d_v); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Blocked not supported
//------------------------------------------------------------------------------
// Stub: this backend never creates blocked restrictions, so ApplyBlock
// always errors.
int CeedElemRestrictionApplyBlock_Hip(CeedElemRestriction r, CeedInt block,
                                      CeedTransposeMode tmode, CeedVector u,
                                      CeedVector v, CeedRequest *request) {
  // LCOV_EXCL_START
  int ierr;
  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr);
  return CeedError(ceed, CEED_ERROR_BACKEND,
                   "Backend does not implement blocked restrictions");
  // LCOV_EXCL_STOP
}

//------------------------------------------------------------------------------
// Get offsets
//------------------------------------------------------------------------------
static int
CeedElemRestrictionGetOffsets_Hip(CeedElemRestriction rstr,
                                  CeedMemType mtype, const CeedInt **offsets) {
  int ierr;
  CeedElemRestriction_Hip *impl;
  ierr = CeedElemRestrictionGetData(rstr, &impl); CeedChkBackend(ierr);

  // Return the offsets array in the requested memory space; h_ind/d_ind may
  // be NULL for strided restrictions, which store no offsets
  switch (mtype) {
  case CEED_MEM_HOST:
    *offsets = impl->h_ind;
    break;
  case CEED_MEM_DEVICE:
    *offsets = impl->d_ind;
    break;
  }
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Destroy restriction
//------------------------------------------------------------------------------
// Frees the JIT module and all owned host/device arrays; pointers held with
// CEED_USE_POINTER are not in the *_allocated fields and are left alone.
static int CeedElemRestrictionDestroy_Hip(CeedElemRestriction r) {
  int ierr;
  CeedElemRestriction_Hip *impl;
  ierr = CeedElemRestrictionGetData(r, &impl); CeedChkBackend(ierr);

  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr);
  ierr = hipModuleUnload(impl->module); CeedChk_Hip(ceed, ierr);
  ierr = CeedFree(&impl->h_ind_allocated); CeedChkBackend(ierr);
  ierr = hipFree(impl->d_ind_allocated); CeedChk_Hip(ceed, ierr);
  ierr = hipFree(impl->d_toffsets); CeedChk_Hip(ceed, ierr);
  ierr = hipFree(impl->d_tindices); CeedChk_Hip(ceed, ierr);
  ierr = hipFree(impl->d_lvec_indices); CeedChk_Hip(ceed, ierr);

  ierr = CeedFree(&impl); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create transpose offsets and indices
//------------------------------------------------------------------------------
// Builds on the host, then uploads, the maps used by the trOffset kernel:
//   d_lvec_indices - active-node ordinal -> L-vector index
//   d_toffsets     - CSR-style row pointers into d_tindices per active node
//   d_tindices     - E-vector entry indices grouped by L-vector node
// NOTE: 'indices' is dereferenced on the host, so it must be a host pointer.
static int CeedElemRestrictionOffset_Hip(const CeedElemRestriction r,
    const CeedInt *indices) {
  int ierr;
  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr);
  CeedElemRestriction_Hip *impl;
  ierr = CeedElemRestrictionGetData(r, &impl); CeedChkBackend(ierr);
  CeedInt nelem, elemsize, lsize, ncomp;
  ierr =
    CeedElemRestrictionGetNumElements(r, &nelem); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetElementSize(r, &elemsize); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetLVectorSize(r, &lsize); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetNumComponents(r, &ncomp); CeedChkBackend(ierr);

  // Count nnodes: the number of distinct L-vector nodes the offsets touch
  bool *isNode;
  ierr = CeedCalloc(lsize, &isNode); CeedChkBackend(ierr);
  const CeedInt sizeIndices = nelem * elemsize;
  for (CeedInt i = 0; i < sizeIndices; i++)
    isNode[indices[i]] = 1;
  CeedInt nnodes = 0;
  for (CeedInt i = 0; i < lsize; i++)
    nnodes += isNode[i];
  impl->nnodes = nnodes;

  // L-vector offsets array:
  //   lvec_indices[k]  - L-vector index of the k-th active node
  //   ind_to_offset[i] - inverse map, L-vector index -> active-node ordinal
  CeedInt *ind_to_offset, *lvec_indices;
  ierr = CeedCalloc(lsize, &ind_to_offset); CeedChkBackend(ierr);
  ierr = CeedCalloc(nnodes, &lvec_indices); CeedChkBackend(ierr);
  CeedInt j = 0;
  for (CeedInt i = 0; i < lsize; i++)
    if (isNode[i]) {
      lvec_indices[j] = i;
      ind_to_offset[i] = j++;
    }
  ierr = CeedFree(&isNode); CeedChkBackend(ierr);

  // Compute transpose offsets and indices; toffsets[k]..toffsets[k+1] will
  // bound node k's E-vector entries in tindices (CSR-style)
  const CeedInt sizeOffsets = nnodes + 1;
  CeedInt *toffsets;
  ierr = CeedCalloc(sizeOffsets, &toffsets); CeedChkBackend(ierr);
  CeedInt *tindices;
  ierr = CeedMalloc(sizeIndices, &tindices); CeedChkBackend(ierr);
  // Count node multiplicity (shifted by one for the prefix-sum below)
  for (CeedInt e = 0; e < nelem; ++e)
    for (CeedInt i = 0; i < elemsize; ++i)
      ++toffsets[ind_to_offset[indices[elemsize*e + i]] + 1];
  // Convert to running sum
  for (CeedInt i = 1; i < sizeOffsets; ++i)
    toffsets[i] += toffsets[i-1];
  // List all E-vec indices associated with L-vec node; each insertion
  // advances that node's offset cursor
  for (CeedInt e = 0; e < nelem; ++e) {
    for (CeedInt i = 0; i < elemsize; ++i) {
      const CeedInt lid = elemsize*e + i;
      const CeedInt gid = indices[lid];
      tindices[toffsets[ind_to_offset[gid]]++] = lid;
    }
  }
  // Reset running sum (the fill pass advanced each offset by its count, so
  // shift everything down one slot)
  for (int i = sizeOffsets - 1; i > 0; --i)
    toffsets[i] =
      toffsets[i - 1];
  toffsets[0] = 0;

  // Copy data to device
  // -- L-vector indices
  ierr = hipMalloc((void **)&impl->d_lvec_indices, nnodes*sizeof(CeedInt));
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(impl->d_lvec_indices, lvec_indices,
                   nnodes*sizeof(CeedInt), hipMemcpyHostToDevice);
  CeedChk_Hip(ceed, ierr);
  // -- Transpose offsets
  ierr = hipMalloc((void **)&impl->d_toffsets, sizeOffsets*sizeof(CeedInt));
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(impl->d_toffsets, toffsets, sizeOffsets*sizeof(CeedInt),
                   hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);
  // -- Transpose indices
  ierr = hipMalloc((void **)&impl->d_tindices, sizeIndices*sizeof(CeedInt));
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(impl->d_tindices, tindices, sizeIndices*sizeof(CeedInt),
                   hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);

  // Cleanup host staging arrays
  ierr = CeedFree(&ind_to_offset); CeedChkBackend(ierr);
  ierr = CeedFree(&lvec_indices); CeedChkBackend(ierr);
  ierr = CeedFree(&toffsets); CeedChkBackend(ierr);
  ierr = CeedFree(&tindices); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create restriction
//------------------------------------------------------------------------------
// Stores the index arrays, builds the transpose maps (offset restrictions
// only), JIT-compiles the four kernels, and registers backend callbacks.
int CeedElemRestrictionCreate_Hip(CeedMemType mtype, CeedCopyMode cmode,
                                  const CeedInt *indices,
                                  CeedElemRestriction r) {
  int ierr;
  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr);
  CeedElemRestriction_Hip *impl;
  ierr = CeedCalloc(1, &impl); CeedChkBackend(ierr);
  CeedInt nelem, ncomp, elemsize;
  ierr = CeedElemRestrictionGetNumElements(r, &nelem); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetNumComponents(r, &ncomp); CeedChkBackend(ierr);
  ierr = CeedElemRestrictionGetElementSize(r, &elemsize); CeedChkBackend(ierr);
  // Total number of E-vector nodes
  CeedInt size = nelem * elemsize;
CeedInt strides[3] = {1, size, elemsize}; 365 CeedInt compstride = 1; 366 367 // Stride data 368 bool isStrided; 369 ierr = CeedElemRestrictionIsStrided(r, &isStrided); CeedChkBackend(ierr); 370 if (isStrided) { 371 bool backendstrides; 372 ierr = CeedElemRestrictionHasBackendStrides(r, &backendstrides); 373 CeedChkBackend(ierr); 374 if (!backendstrides) { 375 ierr = CeedElemRestrictionGetStrides(r, &strides); CeedChkBackend(ierr); 376 } 377 } else { 378 ierr = CeedElemRestrictionGetCompStride(r, &compstride); CeedChkBackend(ierr); 379 } 380 381 impl->h_ind = NULL; 382 impl->h_ind_allocated = NULL; 383 impl->d_ind = NULL; 384 impl->d_ind_allocated = NULL; 385 impl->d_tindices = NULL; 386 impl->d_toffsets = NULL; 387 impl->nnodes = size; 388 ierr = CeedElemRestrictionSetData(r, impl); CeedChkBackend(ierr); 389 CeedInt layout[3] = {1, elemsize*nelem, elemsize}; 390 ierr = CeedElemRestrictionSetELayout(r, layout); CeedChkBackend(ierr); 391 392 // Set up device indices/offset arrays 393 if (mtype == CEED_MEM_HOST) { 394 switch (cmode) { 395 case CEED_OWN_POINTER: 396 impl->h_ind_allocated = (CeedInt *)indices; 397 impl->h_ind = (CeedInt *)indices; 398 break; 399 case CEED_USE_POINTER: 400 impl->h_ind = (CeedInt *)indices; 401 break; 402 case CEED_COPY_VALUES: 403 break; 404 } 405 if (indices != NULL) { 406 ierr = hipMalloc( (void **)&impl->d_ind, size * sizeof(CeedInt)); 407 CeedChk_Hip(ceed, ierr); 408 impl->d_ind_allocated = impl->d_ind; // We own the device memory 409 ierr = hipMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), 410 hipMemcpyHostToDevice); 411 CeedChk_Hip(ceed, ierr); 412 ierr = CeedElemRestrictionOffset_Hip(r, indices); CeedChkBackend(ierr); 413 } 414 } else if (mtype == CEED_MEM_DEVICE) { 415 switch (cmode) { 416 case CEED_COPY_VALUES: 417 if (indices != NULL) { 418 ierr = hipMalloc( (void **)&impl->d_ind, size * sizeof(CeedInt)); 419 CeedChk_Hip(ceed, ierr); 420 impl->d_ind_allocated = impl->d_ind; // We own the device memory 421 ierr = 
hipMemcpy(impl->d_ind, indices, size * sizeof(CeedInt), 422 hipMemcpyDeviceToDevice); 423 CeedChk_Hip(ceed, ierr); 424 } 425 break; 426 case CEED_OWN_POINTER: 427 impl->d_ind = (CeedInt *)indices; 428 impl->d_ind_allocated = impl->d_ind; 429 break; 430 case CEED_USE_POINTER: 431 impl->d_ind = (CeedInt *)indices; 432 } 433 if (indices != NULL) { 434 ierr = CeedElemRestrictionOffset_Hip(r, indices); CeedChkBackend(ierr); 435 } 436 } else { 437 // LCOV_EXCL_START 438 return CeedError(ceed, CEED_ERROR_BACKEND, 439 "Only MemType = HOST or DEVICE supported"); 440 // LCOV_EXCL_STOP 441 } 442 443 // Compile HIP kernels 444 CeedInt nnodes = impl->nnodes; 445 ierr = CeedCompileHip(ceed, restrictionkernels, &impl->module, 8, 446 "RESTRICTION_ELEMSIZE", elemsize, 447 "RESTRICTION_NELEM", nelem, 448 "RESTRICTION_NCOMP", ncomp, 449 "RESTRICTION_NNODES", nnodes, 450 "RESTRICTION_COMPSTRIDE", compstride, 451 "STRIDE_NODES", strides[0], 452 "STRIDE_COMP", strides[1], 453 "STRIDE_ELEM", strides[2]); CeedChkBackend(ierr); 454 ierr = CeedGetKernelHip(ceed, impl->module, "noTrStrided", 455 &impl->noTrStrided); CeedChkBackend(ierr); 456 ierr = CeedGetKernelHip(ceed, impl->module, "noTrOffset", &impl->noTrOffset); 457 CeedChkBackend(ierr); 458 ierr = CeedGetKernelHip(ceed, impl->module, "trStrided", &impl->trStrided); 459 CeedChkBackend(ierr); 460 ierr = CeedGetKernelHip(ceed, impl->module, "trOffset", &impl->trOffset); 461 CeedChkBackend(ierr); 462 463 // Register backend functions 464 ierr = CeedSetBackendFunction(ceed, "ElemRestriction", r, "Apply", 465 CeedElemRestrictionApply_Hip); 466 CeedChkBackend(ierr); 467 ierr = CeedSetBackendFunction(ceed, "ElemRestriction", r, "ApplyBlock", 468 CeedElemRestrictionApplyBlock_Hip); 469 CeedChkBackend(ierr); 470 ierr = CeedSetBackendFunction(ceed, "ElemRestriction", r, "GetOffsets", 471 CeedElemRestrictionGetOffsets_Hip); 472 CeedChkBackend(ierr); 473 ierr = CeedSetBackendFunction(ceed, "ElemRestriction", r, "Destroy", 474 
                                CeedElemRestrictionDestroy_Hip);
  CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Blocked not supported
//------------------------------------------------------------------------------
// Stub: this backend never creates blocked restrictions; always errors.
int CeedElemRestrictionCreateBlocked_Hip(const CeedMemType mtype,
    const CeedCopyMode cmode, const CeedInt *indices, CeedElemRestriction r) {
  // LCOV_EXCL_START
  int ierr;
  Ceed ceed;
  ierr = CeedElemRestrictionGetCeed(r, &ceed); CeedChkBackend(ierr);
  return CeedError(ceed, CEED_ERROR_BACKEND,
                   "Backend does not implement blocked restrictions");
  // LCOV_EXCL_STOP
}
//------------------------------------------------------------------------------