// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.

// NOTE(review): THIS FILE IS CORRUPTED. The text was flattened onto a few very
// long lines and, at several points, spans of source have been destructively
// removed. The damage pattern is consistent with HTML-tag stripping: wherever
// the original had a '<' immediately followed by a non-space character
// (e.g. "tidx<P1D", "elemsPerBlock<nelem"), everything up to the next '>' was
// deleted. Each deletion site below is marked with a NOTE(review) comment and
// a best-effort description of what the lost text must have been, inferred
// from the intact parallel branches in this same file. The file cannot compile
// as-is; restore the lost spans from the upstream libCEED hip-shared backend
// before use. Only comments have been added here — all surviving tokens are
// unchanged.

#include "ceed-hip-shared.h"
#include "../hip/ceed-hip-compile.h"

//------------------------------------------------------------------------------
// Shared mem kernels
//------------------------------------------------------------------------------
// *INDENT-OFF*
// Device-side kernel source, compiled at runtime (passed to CeedCompileHip
// below with P1D/Q1D/T1D/... defined as compile-time macros).
static const char *kernelsShared = QUOTE(

//------------------------------------------------------------------------------
// Sum input into output
//------------------------------------------------------------------------------
// Accumulate P1D entries of r_U into r_V (registers).
inline __device__ void add(CeedScalar *r_V, const CeedScalar *r_U) {
  for (int i = 0; i < P1D; i++)
    r_V[i] += r_U[i];
}

//------------------------------------------------------------------------------
// 1D
//------------------------------------------------------------------------------

//------------------------------------------------------------------------------
// Read DoFs
//------------------------------------------------------------------------------
// Load one element/component's P1D nodal values into the shared-memory slice
// for this tidz layer, zero-padding entries P1D..Q1D-1.
inline __device__ void readDofs1d(const int elem, const int tidx,
                                  const int tidy, const int tidz,
                                  const int comp, const int nelem,
                                  const CeedScalar *d_U, CeedScalar *slice) {
  for (int i = 0; i < P1D; i++)
    slice[i + tidz*T1D] = d_U[i + elem*P1D + comp*P1D*nelem];
  // Pad the remainder of the slice so Q1D-wide reads are well-defined.
  for (int i = P1D; i < Q1D; i++)
    slice[i + tidz*T1D] = 0.0;
}

//------------------------------------------------------------------------------
// Write DoFs
//------------------------------------------------------------------------------
inline __device__ void writeDofs1d(const int elem, const int tidx,
                                   const int tidy, const int comp,
                                   const int nelem, const CeedScalar &r_V,
                                   CeedScalar *d_V) {
  // NOTE(review): CORRUPTION SITE. The original text from this guard
  // (presumably "if (tidx<P1D) ...") through the middle of
  // ComputeBlockSizeFromRequirement was deleted. The lost span must have
  // contained: the body of writeDofs1d, the remaining device kernels of the
  // QUOTE(...) string (2D/3D read/write helpers, tensor-contraction helpers,
  // and the "interp", "grad", and "weight" __global__ kernels that
  // CeedGetKernelHip fetches by name below), the closing ");" of QUOTE, the
  // "*INDENT-ON*" marker, and the comment + signature + loop header of
  // ComputeBlockSizeFromRequirement (likely "while (currentSize < maxSize) {
  // if (currentSize >= required)"). TODO: restore from upstream; the tokens
  // below are the surviving residue of BOTH functions.
  if (tidx required) break;
  else currentSize = currentSize * 2;
}
return currentSize;
}

//------------------------------------------------------------------------------
// Compute required thread block sizes for basis kernels given P, Q, dim, and
// ncomp
//------------------------------------------------------------------------------
// Fills blksizes[0..2] with thread-block sizes for the interp, grad, and
// weight kernels respectively. Always returns 0.
static int ComputeBasisThreadBlockSizes(const CeedInt dim, const CeedInt P1d,
                                        const CeedInt Q1d, const CeedInt ncomp,
                                        CeedInt *blksizes) {
  // Note that this will use the same block sizes for all dimensions when
  // compiling, but as each basis object is defined for a particular dimension,
  // we will never call any kernels except the ones for the dimension for which
  // we have computed the block sizes.
  const CeedInt thread1d = CeedIntMax(P1d, Q1d);
  switch (dim) {
  case 1: {
    // Interp kernels:
    blksizes[0] = 256;
    // Grad kernels:
    blksizes[1] = 256;
    // Weight kernels:
    blksizes[2] = 256;
  } break;
  case 2: {
    // Interp kernels:
    CeedInt required = thread1d * thread1d * ncomp;
    blksizes[0] = ComputeBlockSizeFromRequirement(required);
    // Grad kernels: currently use same required minimum threads
    blksizes[1] = ComputeBlockSizeFromRequirement(required);
    // Weight kernels:
    required = CeedIntMax(64, Q1d * Q1d);
    blksizes[2] = ComputeBlockSizeFromRequirement(required);
  } break;
  case 3: {
    // Interp kernels:
    CeedInt required = thread1d * thread1d * ncomp;
    blksizes[0] = ComputeBlockSizeFromRequirement(required);
    // Grad kernels: currently use same required minimum threads
    blksizes[1] = ComputeBlockSizeFromRequirement(required);
    // Weight kernels:
    required = Q1d * Q1d * Q1d;
    blksizes[2] = ComputeBlockSizeFromRequirement(required);
  }
  // No default case: dim outside 1..3 leaves blksizes untouched.
  }
  return 0;
}

//------------------------------------------------------------------------------
// Apply basis
//------------------------------------------------------------------------------
// Apply the tensor basis operation selected by emode (interp/grad/weight) to
// vector u, writing v; tmode selects transpose application. Returns 0 on
// success, or a CeedError for unsupported emodes (DIV/CURL/NONE).
int CeedBasisApplyTensor_Hip_shared(CeedBasis basis, const CeedInt nelem,
                                    CeedTransposeMode tmode, CeedEvalMode emode,
                                    CeedVector u, CeedVector v) {
  int ierr;
  Ceed ceed;
  ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
  Ceed_Hip_shared *ceed_Hip;
  // NOTE(review): the return values of CeedGetData and CeedBasisGetData are
  // not assigned to ierr here, so the following CeedChk(ierr) calls re-check
  // the PREVIOUS ierr value — likely an oversight; confirm against upstream.
  // ceed_Hip also appears unused after this point in the visible code.
  CeedGetData(ceed, &ceed_Hip); CeedChk(ierr);
  CeedBasis_Hip_shared *data;
  CeedBasisGetData(basis, &data); CeedChk(ierr);
  const CeedInt transpose = tmode == CEED_TRANSPOSE;
  CeedInt dim, ncomp;
  ierr = CeedBasisGetDimension(basis, &dim); CeedChk(ierr);
  ierr = CeedBasisGetNumComponents(basis, &ncomp); CeedChk(ierr);

  // Read vectors
  const CeedScalar *d_u;
  CeedScalar *d_v;
  if (emode != CEED_EVAL_WEIGHT) {
    ierr = CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u); CeedChk(ierr);
  }
  ierr = CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v); CeedChk(ierr);

  // Clear v for transpose mode
  if (tmode == CEED_TRANSPOSE) {
    CeedInt length;
    ierr = CeedVectorGetLength(v, &length); CeedChk(ierr);
    ierr = hipMemset(d_v, 0, length * sizeof(CeedScalar)); CeedChk(ierr);
  }

  // Apply basis operation
  switch (emode) {
  case CEED_EVAL_INTERP: {
    CeedInt P1d, Q1d;
    CeedInt blksize = data->blksizes[0];
    ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
    ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
    CeedInt thread1d = CeedIntMax(Q1d, P1d);
    ierr = CeedHipInitInterp(data->d_interp1d, P1d, Q1d, &data->c_B);
    CeedChk(ierr);
    void *interpargs[] = {(void *) &nelem, (void *) &transpose,
                          &data->c_B, &d_u, &d_v };
    if (dim == 1) {
      // Choose elemsPerBlock so elemsPerBlock*thread1d stays within 256
      // threads; clamp to at least 1.
      CeedInt elemsPerBlock = 64*thread1d > 256? 256/thread1d : 64;
      elemsPerBlock = elemsPerBlock>0?elemsPerBlock:1;
      // NOTE(review): CORRUPTION SITE. Lost text is presumably
      // "*elemsPerBlock<nelem) ? 1 : 0 ); CeedInt sharedMem = ...;
      //  ierr = CeedRunKernelDimSharedHip(ceed, data->interp" — i.e. the
      // ceiling-division tail of the grid computation, the shared-memory size,
      // and the launch call (cf. the intact dim==3 weight branch below).
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockinterp, grid, thread1d, 1, elemsPerBlock, sharedMem, interpargs); CeedChk(ierr);
    } else if (dim == 2) {
      // Check if required threads is small enough to do multiple elems
      const CeedInt elemsPerBlock = CeedIntMax(blksize/(thread1d*thread1d*ncomp), 1);
      // NOTE(review): CORRUPTION SITE — same lost pattern as the dim==1
      // branch above, launching data->interp with a 2D thread block.
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockinterp, grid, thread1d, thread1d, ncomp*elemsPerBlock, sharedMem, interpargs); CeedChk(ierr);
    } else if (dim == 3) {
      CeedInt elemsPerBlock = 1;
      // NOTE(review): CORRUPTION SITE — same lost pattern, 3D launch.
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockinterp, grid, thread1d, thread1d, ncomp*elemsPerBlock, sharedMem, interpargs); CeedChk(ierr);
    }
  } break;
  case CEED_EVAL_GRAD: {
    CeedInt P1d, Q1d;
    CeedInt blksize = data->blksizes[1];
    ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
    ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
    CeedInt thread1d = CeedIntMax(Q1d, P1d);
    ierr = CeedHipInitInterpGrad(data->d_interp1d, data->d_grad1d, P1d, Q1d,
                                 &data->c_B, &data->c_G);
    CeedChk(ierr);
    void *gradargs[] = {(void *) &nelem, (void *) &transpose,
                        &data->c_B, &data->c_G, &d_u, &d_v };
    if (dim == 1) {
      // Same 256-thread cap as the interp 1D branch.
      CeedInt elemsPerBlock = 64*thread1d > 256? 256/thread1d : 64;
      elemsPerBlock = elemsPerBlock>0?elemsPerBlock:1;
      // NOTE(review): CORRUPTION SITE — lost grid tail / sharedMem /
      // CeedRunKernelDimSharedHip(ceed, data->grad, ...) call.
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockgrad, grid, thread1d, 1, elemsPerBlock, sharedMem, gradargs); CeedChk(ierr);
    } else if (dim == 2) {
      // Check if required threads is small enough to do multiple elems
      const CeedInt elemsPerBlock = CeedIntMax(blksize/(thread1d*thread1d*ncomp), 1);
      // NOTE(review): CORRUPTION SITE — same lost pattern, 2D grad launch.
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockgrad, grid, thread1d, thread1d, ncomp*elemsPerBlock, sharedMem, gradargs); CeedChk(ierr);
    } else if (dim == 3) {
      CeedInt elemsPerBlock = 1;
      // NOTE(review): CORRUPTION SITE — same lost pattern, 3D grad launch.
      CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlockgrad, grid, thread1d, thread1d, ncomp*elemsPerBlock, sharedMem, gradargs); CeedChk(ierr);
    }
  } break;
  case CEED_EVAL_WEIGHT: {
    CeedInt Q1d;
    CeedInt blksize = data->blksizes[2];
    ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
    void *weightargs[] = {(void *) &nelem, (void *) &data->d_qweight1d, &d_v};
    if (dim == 1) {
      const CeedInt optElems = blksize/Q1d;
      const CeedInt elemsPerBlock = optElems>0?optElems:1;
      // NOTE(review): CORRUPTION SITE — lost text is presumably
      // "<nelem ) ? 1 : 0 ); ierr = CeedRunKernelDimHip(ceed, data->weight"
      // (the weight kernels use CeedRunKernelDimHip with no shared memory,
      // as the intact dim==3 branch below shows).
      const CeedInt gridsize = nelem/elemsPerBlock + ( ( nelem/elemsPerBlock*elemsPerBlockweight, gridsize, Q1d, elemsPerBlock, 1, weightargs); CeedChk(ierr);
    } else if (dim == 2) {
      const CeedInt optElems = blksize/(Q1d*Q1d);
      const CeedInt elemsPerBlock = optElems>0?optElems:1;
      // NOTE(review): CORRUPTION SITE — same lost pattern, 2D weight launch.
      const CeedInt gridsize = nelem/elemsPerBlock + ( ( nelem/elemsPerBlock*elemsPerBlockweight, gridsize, Q1d, Q1d, elemsPerBlock, weightargs); CeedChk(ierr);
    } else if (dim == 3) {
      // Intact branch: one element per block, Q1d^3 threads.
      const CeedInt gridsize = nelem;
      ierr = CeedRunKernelDimHip(ceed, data->weight, gridsize, Q1d, Q1d, Q1d,
                                 weightargs); CeedChk(ierr);
    }
  } break;
  // LCOV_EXCL_START
  // Evaluate the divergence to/from the quadrature points
  case CEED_EVAL_DIV:
    return CeedError(ceed, 1, "CEED_EVAL_DIV not supported");
  // Evaluate the curl to/from the quadrature points
  case CEED_EVAL_CURL:
    return CeedError(ceed, 1, "CEED_EVAL_CURL not supported");
  // Take no action, BasisApply should not have been called
  case CEED_EVAL_NONE:
    return CeedError(ceed, 1,
                     "CEED_EVAL_NONE does not make sense in this context");
  // LCOV_EXCL_STOP
  }

  // Restore vectors
  if (emode != CEED_EVAL_WEIGHT) {
    ierr = CeedVectorRestoreArrayRead(u, &d_u); CeedChk(ierr);
  }
  ierr = CeedVectorRestoreArray(v, &d_v); CeedChk(ierr);
  return 0;
}

//------------------------------------------------------------------------------
// Destroy basis
//------------------------------------------------------------------------------
// Release the runtime-compiled module and all device buffers owned by the
// basis backend data, then free the data struct itself. Returns 0 on success.
static int CeedBasisDestroy_Hip_shared(CeedBasis basis) {
  int ierr;
  Ceed ceed;
  ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
  CeedBasis_Hip_shared *data;
  ierr = CeedBasisGetData(basis, &data); CeedChk(ierr);
  CeedChk_Hip(ceed, hipModuleUnload(data->module));
  ierr = hipFree(data->d_qweight1d); CeedChk_Hip(ceed, ierr);
  ierr = hipFree(data->d_interp1d); CeedChk_Hip(ceed, ierr);
  ierr = hipFree(data->d_grad1d); CeedChk_Hip(ceed, ierr);
  // d_collograd1d may be NULL (only set for dim==3 with Q1d >= P1d in
  // CeedBasisCreateTensorH1_Hip_shared); hipFree(NULL) is then relied on to
  // be a no-op.
  ierr = hipFree(data->d_collograd1d); CeedChk_Hip(ceed, ierr);
  ierr = CeedFree(&data); CeedChk(ierr);
  return 0;
}

//------------------------------------------------------------------------------
// Create tensor basis
//------------------------------------------------------------------------------
// Allocate backend data for a tensor-product H1 basis: copy the 1D quadrature
// weights, interpolation, and gradient matrices to the device, optionally
// build the collocated gradient (dim==3, Q1d >= P1d), compute kernel block
// sizes, runtime-compile the shared-memory kernels, and register the Apply
// and Destroy backend functions. qref1d is accepted but not used by the
// visible code. Returns 0 on success.
int CeedBasisCreateTensorH1_Hip_shared(CeedInt dim, CeedInt P1d, CeedInt Q1d,
                                       const CeedScalar *interp1d,
                                       const CeedScalar *grad1d,
                                       const CeedScalar *qref1d,
                                       const CeedScalar *qweight1d,
                                       CeedBasis basis) {
  int ierr;
  Ceed ceed;
  ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
  CeedBasis_Hip_shared *data;
  ierr = CeedCalloc(1, &data); CeedChk(ierr);

  // Copy basis data to GPU
  const CeedInt qBytes = Q1d * sizeof(CeedScalar);
  ierr = hipMalloc((void **)&data->d_qweight1d, qBytes);
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(data->d_qweight1d, qweight1d, qBytes,
                   hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);
  // interp1d and grad1d are both Q1d x P1d matrices.
  const CeedInt iBytes = qBytes * P1d;
  ierr = hipMalloc((void **)&data->d_interp1d, iBytes);
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(data->d_interp1d, interp1d, iBytes,
                   hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);
  ierr = hipMalloc((void **)&data->d_grad1d, iBytes);
  CeedChk_Hip(ceed, ierr);
  ierr = hipMemcpy(data->d_grad1d, grad1d, iBytes,
                   hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);

  // Compute collocated gradient and copy to GPU
  data->d_collograd1d = NULL;
  if (dim == 3 && Q1d >= P1d) {
    // Collocated gradient is a Q1d x Q1d matrix.
    CeedScalar *collograd1d;
    ierr = CeedMalloc(Q1d*Q1d, &collograd1d); CeedChk(ierr);
    ierr = CeedBasisGetCollocatedGrad(basis, collograd1d); CeedChk(ierr);
    ierr = hipMalloc((void **)&data->d_collograd1d, qBytes * Q1d);
    CeedChk_Hip(ceed, ierr);
    ierr = hipMemcpy(data->d_collograd1d, collograd1d, qBytes * Q1d,
                     hipMemcpyHostToDevice); CeedChk_Hip(ceed, ierr);
    ierr = CeedFree(&collograd1d); CeedChk(ierr);
  }

  // Set number of threads per block for basis kernels
  CeedInt ncomp;
  ierr = CeedBasisGetNumComponents(basis, &ncomp); CeedChk(ierr);
  ierr = ComputeBasisThreadBlockSizes(dim, P1d, Q1d, ncomp, data->blksizes);
  CeedChk(ierr);

  // Compile basis kernels (11 macro definitions passed to the runtime
  // compiler; T1D is the larger of P1d/Q1d and sizes the shared-memory slice).
  ierr = CeedCompileHip(ceed, kernelsShared, &data->module, 11,
                        "Q1D", Q1d,
                        "P1D", P1d,
                        "T1D", CeedIntMax(Q1d, P1d),
                        "BASIS_BUF_LEN", ncomp * CeedIntPow(Q1d > P1d ?
                            Q1d : P1d, dim),
                        "BASIS_DIM", dim,
                        "BASIS_NCOMP", ncomp,
                        "BASIS_ELEMSIZE", CeedIntPow(P1d, dim),
                        "BASIS_NQPT", CeedIntPow(Q1d, dim),
                        "INTERP_BLKSIZE", data->blksizes[0],
                        "GRAD_BLKSIZE", data->blksizes[1],
                        "WEIGHT_BLKSIZE", data->blksizes[2]
                       ); CeedChk(ierr);
  ierr = CeedGetKernelHip(ceed, data->module, "interp", &data->interp);
  CeedChk(ierr);
  ierr = CeedGetKernelHip(ceed, data->module, "grad", &data->grad);
  CeedChk(ierr);
  ierr = CeedGetKernelHip(ceed, data->module, "weight", &data->weight);
  CeedChk(ierr);
  ierr = CeedBasisSetData(basis, data); CeedChk(ierr);

  // Register backend functions
  ierr = CeedSetBackendFunction(ceed, "Basis", basis, "Apply",
                                CeedBasisApplyTensor_Hip_shared);
  CeedChk(ierr);
  ierr = CeedSetBackendFunction(ceed, "Basis", basis, "Destroy",
                                CeedBasisDestroy_Hip_shared);
  CeedChk(ierr);
  return 0;
}
//------------------------------------------------------------------------------