// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.

#include <ceed/ceed.h>
#include <ceed/backend.h>
#include <stddef.h>
#include "ceed-cuda-gen.h"
#include "ceed-cuda-gen-operator-build.h"
#include "../cuda/ceed-cuda.h"

//------------------------------------------------------------------------------
// Destroy operator
//------------------------------------------------------------------------------
static int CeedOperatorDestroy_Cuda_gen(CeedOperator op) {
  int ierr;
  CeedOperator_Cuda_gen *impl;
  ierr = CeedOperatorGetData(op, &impl); CeedChkBackend(ierr);
  ierr = CeedFree(&impl); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}

// Compute the number of idle hardware thread slots per SM when packing
// elems_per_block elements of threads_per_elem threads each into
// warp-size-aligned thread blocks
static int Waste(int threads_per_sm, int warp_size, int threads_per_elem,
                 int elems_per_block) {
  int useful_threads_per_block = threads_per_elem * elems_per_block;
  // round up to nearest multiple of warp_size
  int block_size = ((useful_threads_per_block + warp_size - 1) / warp_size) *
                   warp_size;
  int blocks_per_sm = threads_per_sm / block_size;
  return threads_per_sm - useful_threads_per_block * blocks_per_sm;
}

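// Worked example (illustrative numbers only, not from any particular GPU):
// with threads_per_sm = 1536, warp_size = 32, and 7*7 = 49 threads per
// element, packing two elements per block gives Waste(1536, 32, 49, 2):
// 98 useful threads round up to a 128-thread block, 1536/128 = 12 blocks fit
// per SM, so the waste is 1536 - 98*12 = 360 idle hardware thread slots.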

// Choose the least wasteful block size, constrained to blocks_per_sm blocks of
// at most max_threads_per_block threads each.
//
// The x and y parts of block[] contain per-element sizes (specified on input)
// while the z part is the number of elements packed per block (set here).
//
// Problem setting: we'd like to make occupancy high with relatively few
// inactive threads. CUDA (cuOccupancyMaxPotentialBlockSize) can tell us how
// many threads can run.
//
// Note that full occupancy sometimes can't be achieved by one thread block.
// For example, an SM might support 1536 threads in total, but only 1024 within
// a single thread block. So cuOccupancyMaxPotentialBlockSize may suggest a
// block size of 768 so that two blocks can run, whereas one block of 1024
// would prevent a second block from running. The cuda-gen kernels are pretty
// heavy, with lots of instruction-level parallelism (ILP), so we'll generally
// be okay with relatively low occupancy and smaller thread blocks, but we
// solve a reasonably general problem here. Empirically, we find that blocks
// bigger than about 256 threads have higher latency and worse load balancing
// when the number of elements is modest.
//
// cuda-gen can't choose block sizes arbitrarily; they need to be a multiple of
// the number of quadrature points (or the number of basis functions). The
// kernels also contain a lot of __syncthreads(), which is another point
// against excessively large thread blocks. Suppose we have elements with
// 7x7x7 quadrature points. The kernel loops over the last dimension, so we
// have 7*7=49 threads per element. Packing two elements gives 2*49=98 useful
// threads. CUDA schedules in units of full warps (32 threads), so 128 CUDA
// hardware threads are effectively committed to that block. Now suppose
// cuOccupancyMaxPotentialBlockSize returned 352. We can schedule 2 blocks of
// size 98 (196 useful threads using 256 hardware threads), but not a third
// block (which would need a total of 384 hardware threads).
//
// If instead we had packed 3 elements, we'd have 3*49=147 useful threads
// occupying 160 slots, and could still schedule two blocks. Alternatively, we
// could pack a single block of 7 elements (7*49=343 useful threads) into the
// 352 slots. The latter has the least "waste", but __syncthreads()
// over-synchronizes and it might not pay off relative to smaller blocks.
static int BlockGridCalculate(CeedInt nelem, int blocks_per_sm,
                              int max_threads_per_block, int max_threads_z,
                              int warp_size, int block[3], int *grid) {
  const int threads_per_sm = blocks_per_sm * max_threads_per_block;
  const int threads_per_elem = block[0] * block[1];
  int elems_per_block = 1;
  int waste = Waste(threads_per_sm, warp_size, threads_per_elem, 1);
  for (int i = 2;
       i <= CeedIntMin(max_threads_per_block / threads_per_elem, nelem);
       i++) {
    int i_waste = Waste(threads_per_sm, warp_size, threads_per_elem, i);
    // We want to minimize waste, but smaller blocks have lower latency and
    // less __syncthreads() overhead, so when a larger block size has the same
    // waste as a smaller one, go ahead and prefer the smaller block.
    if (i_waste < waste || (i_waste == waste && threads_per_elem * i <= 128)) {
      elems_per_block = i;
      waste = i_waste;
    }
  }
  // For low-order elements, threads_per_elem may be small enough that
  // elems_per_block exceeds the device limit on the block z-dimension, so
  // clamp it before sizing the block; the grid must be computed from the
  // clamped count so that every element is covered.
  elems_per_block = CeedIntMin(elems_per_block, max_threads_z);
  block[2] = elems_per_block;
  *grid = (nelem + elems_per_block - 1) / elems_per_block;
  return CEED_ERROR_SUCCESS;
}

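// Worked example (illustrative numbers only): with warp_size = 32,
// max_threads_per_block = 768, and blocks_per_sm = 2 (so threads_per_sm =
// 1536), a 7x7x7 element gives threads_per_elem = 49. Scanning
// elems_per_block from 1 to 768/49 = 15, packing 5 elements wins: 245 useful
// threads round up to a 256-thread block, 6 such blocks fit per SM, and only
// 1536 - 245*6 = 66 hardware thread slots are wasted. The result is
// block = {7, 7, 5} and grid = ceil(nelem / 5).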

// callback for cuOccupancyMaxPotentialBlockSize, providing the amount of
// dynamic shared memory required for a thread block of size threads
static size_t dynamicSMemSize(int threads) { return threads * sizeof(CeedScalar); }

//------------------------------------------------------------------------------
// Apply and add to output
//------------------------------------------------------------------------------
static int CeedOperatorApplyAdd_Cuda_gen(CeedOperator op, CeedVector invec,
                                         CeedVector outvec, CeedRequest *request) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);
  Ceed_Cuda *cuda_data;
  ierr = CeedGetData(ceed, &cuda_data); CeedChkBackend(ierr);
  CeedOperator_Cuda_gen *data;
  ierr = CeedOperatorGetData(op, &data); CeedChkBackend(ierr);
  CeedQFunction qf;
  CeedQFunction_Cuda_gen *qf_data;
  ierr = CeedOperatorGetQFunction(op, &qf); CeedChkBackend(ierr);
  ierr = CeedQFunctionGetData(qf, &qf_data); CeedChkBackend(ierr);
  CeedInt nelem, numinputfields, numoutputfields;
  ierr = CeedOperatorGetNumElements(op, &nelem); CeedChkBackend(ierr);
  ierr = CeedQFunctionGetNumArgs(qf, &numinputfields, &numoutputfields);
  CeedChkBackend(ierr);
  CeedOperatorField *opinputfields, *opoutputfields;
  ierr = CeedOperatorGetFields(op, &opinputfields, &opoutputfields);
  CeedChkBackend(ierr);
  CeedQFunctionField *qfinputfields, *qfoutputfields;
  ierr = CeedQFunctionGetFields(qf, &qfinputfields, &qfoutputfields);
  CeedChkBackend(ierr);
  CeedEvalMode emode;
  CeedVector vec, outvecs[16] = {};

  // Creation of the operator
  ierr = CeedCudaGenOperatorBuild(op); CeedChkBackend(ierr);

  // Input vectors
  for (CeedInt i = 0; i < numinputfields; i++) {
    ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
    CeedChkBackend(ierr);
    if (emode == CEED_EVAL_WEIGHT) { // Skip
      data->fields.in[i] = NULL;
    } else {
      // Get input vector
      ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr);
      if (vec == CEED_VECTOR_ACTIVE) vec = invec;
      ierr = CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, &data->fields.in[i]);
      CeedChkBackend(ierr);
    }
  }

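  // A single CeedVector may back more than one output field (for example, one
  // active vector written with both CEED_EVAL_INTERP and CEED_EVAL_GRAD
  // modes), so the loop below takes each vector's device array only once and
  // reuses that pointer for any duplicate fields.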
  // Output vectors
  for (CeedInt i = 0; i < numoutputfields; i++) {
    ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
    CeedChkBackend(ierr);
    if (emode == CEED_EVAL_WEIGHT) { // Skip
      data->fields.out[i] = NULL;
    } else {
      // Get output vector
      ierr = CeedOperatorFieldGetVector(opoutputfields[i], &vec);
      CeedChkBackend(ierr);
      if (vec == CEED_VECTOR_ACTIVE) vec = outvec;
      outvecs[i] = vec;
      // Check for multiple output modes
      CeedInt index = -1;
      for (CeedInt j = 0; j < i; j++) {
        if (vec == outvecs[j]) {
          index = j;
          break;
        }
      }
      if (index == -1) {
        ierr = CeedVectorGetArray(vec, CEED_MEM_DEVICE, &data->fields.out[i]);
        CeedChkBackend(ierr);
      } else {
        data->fields.out[i] = data->fields.out[index];
      }
    }
  }

  // Get context data
  CeedQFunctionContext ctx;
  ierr = CeedQFunctionGetInnerContext(qf, &ctx); CeedChkBackend(ierr);
  if (ctx) {
    ierr = CeedQFunctionContextGetData(ctx, CEED_MEM_DEVICE, &qf_data->d_c);
    CeedChkBackend(ierr);
  }

  // Apply operator
  void *opargs[] = {(void *) &nelem, &qf_data->d_c, &data->indices,
                    &data->fields, &data->B, &data->G, &data->W
                   };
  const CeedInt dim = data->dim;
  const CeedInt Q1d = data->Q1d;
  const CeedInt P1d = data->maxP1d;
  const CeedInt thread1d = CeedIntMax(Q1d, P1d);
  int max_threads_per_block, min_grid_size;
  CeedChk_Cu(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size,
             &max_threads_per_block, data->op, dynamicSMemSize, 0, 0x10000));
  int block[3] = {thread1d, dim < 2 ? 1 : thread1d, -1}, grid;
  CeedChkBackend(BlockGridCalculate(nelem,
                                    min_grid_size / cuda_data->deviceProp.multiProcessorCount,
                                    max_threads_per_block,
                                    cuda_data->deviceProp.maxThreadsDim[2],
                                    cuda_data->deviceProp.warpSize, block, &grid));
  CeedInt shared_mem = block[0] * block[1] * block[2] * sizeof(CeedScalar);
  ierr = CeedRunKernelDimSharedCuda(ceed, data->op, grid, block[0], block[1],
                                    block[2], shared_mem, opargs);
  CeedChkBackend(ierr);

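  // Note: the dynamic shared memory request above,
  // block[0]*block[1]*block[2]*sizeof(CeedScalar), is one scalar per thread,
  // matching the per-block estimate that dynamicSMemSize() supplied to
  // cuOccupancyMaxPotentialBlockSize, so the occupancy suggestion and the
  // actual launch agree.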

  // Restore input arrays
  for (CeedInt i = 0; i < numinputfields; i++) {
    ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
    CeedChkBackend(ierr);
    if (emode == CEED_EVAL_WEIGHT) { // Skip
    } else {
      ierr = CeedOperatorFieldGetVector(opinputfields[i], &vec); CeedChkBackend(ierr);
      if (vec == CEED_VECTOR_ACTIVE) vec = invec;
      ierr = CeedVectorRestoreArrayRead(vec, &data->fields.in[i]);
      CeedChkBackend(ierr);
    }
  }

  // Restore output arrays
  for (CeedInt i = 0; i < numoutputfields; i++) {
    ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
    CeedChkBackend(ierr);
    if (emode == CEED_EVAL_WEIGHT) { // Skip
    } else {
      ierr = CeedOperatorFieldGetVector(opoutputfields[i], &vec);
      CeedChkBackend(ierr);
      if (vec == CEED_VECTOR_ACTIVE) vec = outvec;
      // Check for multiple output modes
      CeedInt index = -1;
      for (CeedInt j = 0; j < i; j++) {
        if (vec == outvecs[j]) {
          index = j;
          break;
        }
      }
      if (index == -1) {
        ierr = CeedVectorRestoreArray(vec, &data->fields.out[i]);
        CeedChkBackend(ierr);
      }
    }
  }

  // Restore context data
  if (ctx) {
    ierr = CeedQFunctionContextRestoreData(ctx, &qf_data->d_c);
    CeedChkBackend(ierr);
  }
  return CEED_ERROR_SUCCESS;
}

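// Usage sketch (illustrative; the call below is the public libCEED entry
// point, not part of this file): after registration in
// CeedOperatorCreate_Cuda_gen() below, a user-level call such as
//   CeedOperatorApplyAdd(op, invec, outvec, CEED_REQUEST_IMMEDIATE);
// dispatches to CeedOperatorApplyAdd_Cuda_gen() for the cuda-gen backend.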
//------------------------------------------------------------------------------
// Create operator
//------------------------------------------------------------------------------
int CeedOperatorCreate_Cuda_gen(CeedOperator op) {
  int ierr;
  Ceed ceed;
  ierr = CeedOperatorGetCeed(op, &ceed); CeedChkBackend(ierr);
  CeedOperator_Cuda_gen *impl;

  ierr = CeedCalloc(1, &impl); CeedChkBackend(ierr);
  ierr = CeedOperatorSetData(op, impl); CeedChkBackend(ierr);

  ierr = CeedSetBackendFunction(ceed, "Operator", op, "ApplyAdd",
                                CeedOperatorApplyAdd_Cuda_gen); CeedChkBackend(ierr);
  ierr = CeedSetBackendFunction(ceed, "Operator", op, "Destroy",
                                CeedOperatorDestroy_Cuda_gen); CeedChkBackend(ierr);
  return CEED_ERROR_SUCCESS;
}
//------------------------------------------------------------------------------