xref: /libCEED/backends/cuda-shared/ceed-cuda-shared-basis.c (revision d94769d2810d6a17f041360d7dcdb7c615f31392)
1 // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
2 // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
3 // All Rights reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
15 // testbed platforms, in support of the nation's exascale computing imperative.
16 
17 #include <ceed-backend.h>
18 #include <ceed.h>
19 #include "ceed-cuda-shared.h"
20 #include "../cuda/ceed-cuda.h"
21 
22 //*********************
23 // shared mem kernels
24 static const char *kernelsShared = QUOTE(
25 
// Componentwise accumulate the Q1D register values of r_U into r_V.
inline __device__ void add(CeedScalar *r_V, const CeedScalar *r_U) {
  int i = 0;
  while (i < Q1D) {
    r_V[i] += r_U[i];
    ++i;
  }
}
30 
31 //////////
32 //  1D  //
33 //////////
34 
// Load the P1D nodal values of (elem, comp) into this thread-z row of the
// shared slice, zero-padding entries [P1D, Q1D).
// NOTE(review): every x-thread redundantly writes the same row and there is
// no __syncthreads() before the following contraction — presumably safe when
// a row fits in one warp (Q1D <= 32); confirm for larger Q1D.
35 inline __device__ void readDofs1d(const int elem, const int tidx,
36                                   const int tidy, const int tidz,const int comp,
37                                   const int nelem, const CeedScalar *d_U, CeedScalar *slice) {
38   for (int i = 0; i < P1D; i++)
39     slice[i+tidz*Q1D] = d_U[i + comp*P1D + elem*BASIS_NCOMP*P1D];
40   for (int i = P1D; i < Q1D; i++)
41     slice[i+tidz*Q1D] = 0.0;
42 }
43 
// Store the contracted nodal value r_V for (elem, comp); only the first
// P1D threads in x hold valid node data, the rest are masked out.
44 inline __device__ void writeDofs1d(const int elem, const int tidx,
45                                    const int tidy, const int comp,
46                                    const int nelem, const CeedScalar &r_V, CeedScalar *d_V) {
47   if (tidx<P1D) {
48     d_V[tidx + comp*P1D + elem*BASIS_NCOMP*P1D] = r_V;
49   }
50 }
51 
// Load the Q1D quadrature values of (elem, comp, dim) into this thread-z
// row of the shared slice.  Note the quad layout is elem-fastest
// (i + elem*Q1D + ...), unlike the dof layout in readDofs1d.
52 inline __device__ void readQuads1d(const int elem, const int tidx,
53                                    const int tidy, const int tidz, const int comp,
54                                    const int dim, const int nelem, const CeedScalar *d_U, CeedScalar *slice) {
55   for (int i = 0; i < Q1D; i++)
56     slice[i+tidz*Q1D] = d_U[i + elem*Q1D + comp*Q1D*nelem + dim*BASIS_NCOMP*nelem*Q1D];
57 }
58 
// Store one quadrature-point value r_V for (elem, comp, dim); thread tidx
// owns quadrature point tidx.
59 inline __device__ void writeQuads1d(const int elem, const int tidx,
60                                     const int tidy, const int comp,
61                                     const int dim, const int nelem, const CeedScalar &r_V, CeedScalar *d_V) {
62   d_V[tidx + elem*Q1D + comp*Q1D*nelem + dim*BASIS_NCOMP*nelem*Q1D] = r_V;
63 }
64 
// 1D forward contraction: V = sum_i B[i, tidx] * slice[i] over the P1D
// nodes in this thread-z row.  The U parameter is unused — the input comes
// from the shared slice filled by readDofs1d.
65 inline __device__ void ContractX1d(CeedScalar *slice, const int tidx,
66                                    const int tidy, const int tidz,
67                                    const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
68   V = 0.0;
69   for (int i = 0; i < P1D; ++i) {
70     V += B[i + tidx*P1D] * slice[i+tidz*Q1D];//contract x direction
71   }
72 }
73 
// Transpose of ContractX1d: V = sum_i B[tidx, i] * slice[i] over Q1D points.
// The U parameter is unused.
// NOTE(review): for tidx >= P1D (threads later masked by writeDofs1d) the
// index tidx + i*P1D can run past B's P1D*Q1D entries when Q1D > P1D — the
// result is discarded, but the read itself looks out of bounds; confirm.
74 inline __device__ void ContractTransposeX1d(CeedScalar *slice, const int tidx,
75     const int tidy, const int tidz,
76     const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
77   V = 0.0;
78   for (int i = 0; i < Q1D; ++i) {
79     V += B[tidx + i*P1D] * slice[i+tidz*Q1D];//contract x direction
80   }
81 }
82 
// 1D tensor interpolation over a grid-stride loop of elements.
// Launch: blockDim = (Q1D, 1, elemsPerBlock); each z-slot handles one
// element, looping over components in sequence.
// transpose == 0: nodes -> quadrature points; otherwise the transpose map.
// slice: dynamic shared memory, Q1D scalars per z-slot.
83 inline __device__ void interp1d(const CeedInt nelem, const int transpose,
84                                 const CeedScalar *c_B, const CeedScalar *__restrict__ d_U,
85                                 CeedScalar *__restrict__ d_V,
86                                 CeedScalar *slice) {
87   CeedScalar r_V;
88   CeedScalar r_t;
89 
90   const int tidx = threadIdx.x;
91   const int tidy = threadIdx.y;
92   const int tidz = threadIdx.z;
93 
94 
95   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < nelem;
96        elem += gridDim.x*blockDim.z) {
97     for(int comp=0; comp<BASIS_NCOMP; comp++) {
98       if(!transpose) {
// r_t is passed uninitialized: the 1D contractions ignore their U argument.
99         readDofs1d(elem, tidx, tidy, tidz, comp, nelem, d_U, slice);
100         ContractX1d(slice, tidx, tidy, tidz, r_t, c_B, r_V);
101         writeQuads1d(elem, tidx, tidy, comp, 0, nelem, r_V, d_V);
102       } else {
103         readQuads1d(elem, tidx, tidy, tidz, comp, 0, nelem, d_U, slice);
104         ContractTransposeX1d(slice, tidx, tidy, tidz, r_t, c_B, r_V);
105         writeDofs1d(elem, tidx, tidy, comp, nelem, r_V, d_V);
106       }
107     }
108   }
109 }
110 
// 1D gradient: same structure as interp1d but contracts with the 1D
// derivative matrix c_G instead of the interpolation matrix c_B.
// c_B is unused here in 1D (there is only one direction).
111 inline __device__ void grad1d(const CeedInt nelem, const int transpose,
112                               const CeedScalar *c_B, const CeedScalar *c_G,
113                               const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V,
114                               CeedScalar *slice) {
115   CeedScalar r_U;
116   CeedScalar r_V;
117 
118   const int tidx = threadIdx.x;
119   const int tidy = threadIdx.y;
120   const int tidz = threadIdx.z;
121   int dim;
122 
123   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < nelem;
124        elem += gridDim.x*blockDim.z) {
125     for(int comp=0; comp<BASIS_NCOMP; comp++) {
126       if(!transpose) {
// r_U is passed uninitialized: the 1D contractions ignore their U argument.
127         readDofs1d(elem, tidx, tidy, tidz, comp, nelem, d_U, slice);
128         ContractX1d(slice, tidx, tidy, tidz, r_U, c_G, r_V);
129         dim = 0;
130         writeQuads1d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
131       } else {
132         dim = 0;
133         readQuads1d(elem, tidx, tidy, tidz, comp, dim, nelem, d_U, slice);
134         ContractTransposeX1d(slice, tidx, tidy, tidz, r_U, c_G, r_V);
135         writeDofs1d(elem, tidx, tidy, comp, nelem, r_V, d_V);
136       }
137     }
138   }
139 }
140 //////////
141 //  2D  //
142 //////////
143 
// Load one nodal value of (elem, comp) into the register U; threads outside
// the P1D x P1D node grid get 0 so they contribute nothing to contractions.
144 inline __device__ void readDofs2d(const int elem, const int tidx,
145                                   const int tidy, const int comp,
146                                   const int nelem, const CeedScalar *d_U, CeedScalar &U) {
147   U = (tidx<P1D
148        && tidy<P1D) ? d_U[tidx + tidy*P1D + comp*P1D*P1D + elem*BASIS_NCOMP*P1D*P1D ] :
149       0.0;
150 }
151 
// Store one nodal value r_V for (elem, comp); only threads inside the
// P1D x P1D node grid write.
152 inline __device__ void writeDofs2d(const int elem, const int tidx,
153                                    const int tidy, const int comp,
154                                    const int nelem, const CeedScalar &r_V, CeedScalar *d_V) {
155   if (tidx<P1D && tidy<P1D) {
156     d_V[tidx + tidy*P1D + comp*P1D*P1D + elem*BASIS_NCOMP*P1D*P1D ] = r_V;
157   }
158 }
159 
// Load one quadrature value of (elem, comp, dim) into the register U;
// quad data is laid out elem-fastest after the in-element (x, y) index.
160 inline __device__ void readQuads2d(const int elem, const int tidx,
161                                    const int tidy, const int comp,
162                                    const int dim, const int nelem, const CeedScalar *d_U, CeedScalar &U ) {
163   U = d_U[tidx + tidy*Q1D + elem*Q1D*Q1D + comp*Q1D*Q1D*nelem +
164                dim*BASIS_NCOMP*nelem*Q1D*Q1D];
165 }
166 
// Store one quadrature value r_V for (elem, comp, dim); thread (tidx, tidy)
// owns quadrature point (tidx, tidy).
167 inline __device__ void writeQuads2d(const int elem, const int tidx,
168                                     const int tidy, const int comp,
169                                     const int dim, const int nelem, const CeedScalar &r_V, CeedScalar *d_V) {
170   d_V[tidx + tidy*Q1D + elem*Q1D*Q1D + comp*Q1D*Q1D*nelem +
171            dim*BASIS_NCOMP*nelem*Q1D*Q1D ] = r_V;
172 }
173 
// 2D forward x-contraction: stage U in the shared slice (one Q1D x Q1D
// layer per z-slot), then V = sum_i B[i, tidx] * slice[i, tidy].
// Barriers separate the staging write from the reads and protect the next
// reuse of the slice.
174 inline __device__ void ContractX2d(CeedScalar *slice, const int tidx,
175                                    const int tidy, const int tidz,
176                                    const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
177   slice[tidx+tidy*Q1D+tidz*Q1D*Q1D] = U;
178   __syncthreads();
179   V = 0.0;
180   for (int i = 0; i < P1D; ++i) {
181     V += B[i + tidx*P1D] * slice[i + tidy*Q1D + tidz*Q1D*Q1D];//contract x direction
182   }
183   __syncthreads();
184 }
185 
// 2D forward y-contraction: stage U in the shared slice, then
// V = sum_i B[i, tidy] * slice[tidx, i].
186 inline __device__ void ContractY2d(CeedScalar *slice, const int tidx,
187                                    const int tidy, const int tidz,
188                                    const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
189   slice[tidx+tidy*Q1D+tidz*Q1D*Q1D] = U;
190   __syncthreads();
191   V = 0.0;
192   for (int i = 0; i < P1D; ++i) {
193     V += B[i + tidy*P1D] * slice[tidx + i*Q1D + tidz*Q1D*Q1D];//contract y direction
194   }
195   __syncthreads();
196 }
197 
// 2D transpose y-contraction: stage U, then for node rows (tidy < P1D)
// V = sum_i B[tidy, i] * slice[tidx, i]; other threads keep V = 0.
// The guard is only around the reads, so all threads reach both barriers.
198 inline __device__ void ContractTransposeY2d(CeedScalar *slice, const int tidx,
199     const int tidy, const int tidz,
200     const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
201   slice[tidx+tidy*Q1D+tidz*Q1D*Q1D] = U;
202   __syncthreads();
203   V = 0.0;
204   if (tidy<P1D) {
205     for (int i = 0; i < Q1D; ++i) {
206       V += B[tidy + i*P1D] * slice[tidx + i*Q1D + tidz*Q1D*Q1D];//contract y direction
207     }
208   }
209   __syncthreads();
210 }
211 
// 2D transpose x-contraction: stage U, then for node columns (tidx < P1D)
// V = sum_i B[tidx, i] * slice[i, tidy]; other threads keep V = 0.
212 inline __device__ void ContractTransposeX2d(CeedScalar *slice, const int tidx,
213     const int tidy, const int tidz,
214     const CeedScalar &U, const CeedScalar *B, CeedScalar &V) {
215   slice[tidx+tidy*Q1D+tidz*Q1D*Q1D] = U;
216   __syncthreads();
217   V = 0.0;
218   if (tidx<P1D) {
219     for (int i = 0; i < Q1D; ++i) {
220       V += B[tidx + i*P1D] * slice[i + tidy*Q1D + tidz*Q1D*Q1D];//contract x direction
221     }
222   }
223   __syncthreads();
224 }
225 
// 2D tensor-product interpolation.
// Launch: blockDim = (Q1D, Q1D, BASIS_NCOMP*elemsPerBlock); threadIdx.z
// encodes both the component (tidz % BASIS_NCOMP) and the element slot
// (tidz / BASIS_NCOMP) handled by this thread.
// transpose == 0: nodes -> quadrature points; otherwise the transpose map.
// slice: dynamic shared memory, Q1D*Q1D scalars per z-slot.
// NOTE(review): the contractions call __syncthreads() inside this
// grid-stride loop; if z-slots of one block disagree on `elem < nelem` the
// barrier becomes divergent — the launch configuration presumably prevents
// that, but it should be confirmed.
inline __device__ void interp2d(const CeedInt nelem, const int transpose,
                                const CeedScalar *c_B, const CeedScalar *__restrict__ d_U,
                                CeedScalar *__restrict__ d_V,
                                CeedScalar *slice) {
  CeedScalar r_V;
  CeedScalar r_t;

  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int tidz = threadIdx.z;
  const int blockElem = tidz/BASIS_NCOMP;
  const int elemsPerBlock = blockDim.z/BASIS_NCOMP;
  // Fix: `comp` was re-declared inside the loop body, shadowing this
  // identical declaration (leftover of a removed per-component loop).
  const int comp = tidz%BASIS_NCOMP;

  for (CeedInt elem = blockIdx.x*elemsPerBlock + blockElem; elem < nelem;
       elem += gridDim.x*elemsPerBlock) {
    r_V = 0.0;
    r_t = 0.0;
    if (!transpose) {
      readDofs2d(elem, tidx, tidy, comp, nelem, d_U, r_V);
      ContractX2d(slice, tidx, tidy, tidz, r_V, c_B, r_t);
      ContractY2d(slice, tidx, tidy, tidz, r_t, c_B, r_V);
      writeQuads2d(elem, tidx, tidy, comp, 0, nelem, r_V, d_V);
    } else {
      readQuads2d(elem, tidx, tidy, comp, 0, nelem, d_U, r_V);
      ContractTransposeY2d(slice, tidx, tidy, tidz, r_V, c_B, r_t);
      ContractTransposeX2d(slice, tidx, tidy, tidz, r_t, c_B, r_V);
      writeDofs2d(elem, tidx, tidy, comp, nelem, r_V, d_V);
    }
  }
}
260 
// 2D gradient.  Forward: for each output direction, contract with c_G in
// that direction and c_B in the other.  Transpose: apply the transposed
// pair per direction and sum the two partial results into the nodal output.
// Thread layout matches interp2d (component and element packed into tidz).
261 inline __device__ void grad2d(const CeedInt nelem, const int transpose,
262                               const CeedScalar *c_B, const CeedScalar *c_G,
263                               const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V,
264                               CeedScalar *slice) {
265   CeedScalar r_U;
266   CeedScalar r_V;
267   CeedScalar r_t;
268 
269   const int tidx = threadIdx.x;
270   const int tidy = threadIdx.y;
271   const int tidz = threadIdx.z;
272   const int blockElem = tidz/BASIS_NCOMP;
273   const int elemsPerBlock = blockDim.z/BASIS_NCOMP;
274   const int comp = tidz%BASIS_NCOMP;
275   int dim;
276 
277   for (CeedInt elem = blockIdx.x*elemsPerBlock + blockElem; elem < nelem;
278        elem += gridDim.x*elemsPerBlock) {
279     // for(int comp=0; comp<BASIS_NCOMP; comp++) {
280       if(!transpose) {
// d/dx output: G in x, B in y
281         readDofs2d(elem, tidx, tidy, comp, nelem, d_U, r_U);
282         ContractX2d(slice, tidx, tidy, tidz, r_U, c_G, r_t);
283         ContractY2d(slice, tidx, tidy, tidz, r_t, c_B, r_V);
284         dim = 0;
285         writeQuads2d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
// d/dy output: B in x, G in y
286         ContractX2d(slice, tidx, tidy, tidz, r_U, c_B, r_t);
287         ContractY2d(slice, tidx, tidy, tidz, r_t, c_G, r_V);
288         dim = 1;
289         writeQuads2d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
290       } else {
// transpose: accumulate contributions from both input directions
291         dim = 0;
292         readQuads2d(elem, tidx, tidy, comp, dim, nelem, d_U, r_U);
293         ContractTransposeY2d(slice, tidx, tidy, tidz, r_U, c_B, r_t);
294         ContractTransposeX2d(slice, tidx, tidy, tidz, r_t, c_G, r_V);
295         dim = 1;
296         readQuads2d(elem, tidx, tidy, comp, dim, nelem, d_U, r_U);
297         ContractTransposeY2d(slice, tidx, tidy, tidz, r_U, c_G, r_t);
298         ContractTransposeX2d(slice, tidx, tidy, tidz, r_t, c_B, r_U);
299         r_V+=r_U;
300         writeDofs2d(elem, tidx, tidy, comp, nelem, r_V, d_V);
301       }
302     // }
303   }
304 }
305 //////////
306 //  3D  //
307 //////////
308 
// Load the column of P1D nodal values at (tidx, tidy) for (elem, comp) into
// registers r_U[0..P1D), zero-padding [P1D, Q1D) and threads outside the
// P1D x P1D node grid.
309 inline __device__ void readDofs3d(const int elem, const int tidx,
310                                   const int tidy, const int comp,
311                                   const int nelem, const CeedScalar *d_U, CeedScalar *r_U) {
312   for (int i = 0; i < P1D; i++)
313     r_U[i] = (tidx<P1D
314               && tidy<P1D) ? d_U[tidx + tidy*P1D + i*P1D*P1D + comp*P1D*P1D*P1D +
315                                       elem*BASIS_NCOMP*P1D*P1D*P1D ] : 0.0;
316   for (int i = P1D; i < Q1D; i++)
317     r_U[i] = 0.0;
318 }
319 
// Load the column of Q1D quadrature values at (tidx, tidy) for
// (elem, comp, dim) into registers r_U[0..Q1D).
320 inline __device__ void readQuads3d(const int elem, const int tidx,
321                                    const int tidy, const int comp,
322                                    const int dim, const int nelem, const CeedScalar *d_U, CeedScalar *r_U) {
323   for (int i = 0; i < Q1D; i++)
324     r_U[i] = d_U[tidx + tidy*Q1D + i*Q1D*Q1D + elem*Q1D*Q1D*Q1D +
325                  comp*Q1D*Q1D*Q1D*nelem + dim*BASIS_NCOMP*nelem*Q1D*Q1D*Q1D];
326 }
327 
// Store the register column r_V[0..P1D) back to the nodal layout for
// (elem, comp); only threads inside the P1D x P1D node grid write.
328 inline __device__ void writeDofs3d(const int elem, const int tidx,
329                                    const int tidy, const int comp,
330                                    const int nelem, const CeedScalar *r_V, CeedScalar *d_V) {
331   if (tidx<P1D && tidy<P1D) {
332     for (int i = 0; i < P1D; i++)
333       d_V[tidx + tidy*P1D + i*P1D*P1D + comp*P1D*P1D*P1D +
334           elem*BASIS_NCOMP*P1D*P1D*P1D ] = r_V[i];
335   }
336 }
337 
// Store the register column r_V[0..Q1D) to the quadrature layout for
// (elem, comp, dim).
338 inline __device__ void writeQuads3d(const int elem, const int tidx,
339                                     const int tidy, const int comp,
340                                     const int dim, const int nelem, const CeedScalar *r_V, CeedScalar *d_V) {
341   for (int i = 0; i < Q1D; i++)
342     d_V[tidx + tidy*Q1D + i*Q1D*Q1D + elem*Q1D*Q1D*Q1D + comp*Q1D*Q1D*Q1D*nelem +
343         dim*BASIS_NCOMP*nelem*Q1D*Q1D*Q1D ] = r_V[i];
344 }
345 
// 3D forward x-contraction, one z-layer at a time: stage layer k of U in
// the Q1D x Q1D shared slice, then V[k] = sum_i B[i, tidx] * slice[i, tidy].
// The slice holds a single layer (blockDim.z == 1 for 3D launches here).
346 inline __device__ void ContractX3d(CeedScalar *slice, const int tidx,
347                                    const int tidy,
348                                    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
349   for (int k = 0; k < P1D; ++k) {
350     slice[tidx+tidy*Q1D] = U[k];
351     __syncthreads();
352     V[k] = 0.0;
353     for (int i = 0; i < P1D; ++i) {
354       V[k] += B[i + tidx*P1D] * slice[i + tidy*Q1D];//contract x direction
355     }
356     __syncthreads();
357   }
358 }
359 
// 3D forward y-contraction, one z-layer at a time:
// V[k] = sum_i B[i, tidy] * slice[tidx, i] with layer k of U staged in shared.
360 inline __device__ void ContractY3d(CeedScalar *slice, const int tidx,
361                                    const int tidy,
362                                    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
363   for (int k = 0; k < P1D; ++k) {
364     slice[tidx+tidy*Q1D] = U[k];
365     __syncthreads();
366     V[k] = 0.0;
367     for (int i = 0; i < P1D; ++i) {
368       V[k] += B[i + tidy*P1D] * slice[tidx + i*Q1D];//contract y direction
369     }
370     __syncthreads();
371   }
372 }
373 
// 3D forward z-contraction: purely register-level, V[k] = sum_i B[i, k]*U[i]
// along the z-column each thread holds.  slice, tidx, tidy are unused; the
// parameters exist for signature symmetry with the other contractions.
374 inline __device__ void ContractZ3d(CeedScalar *slice, const int tidx,
375                                    const int tidy,
376                                    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
377   for (int k = 0; k < Q1D; ++k) {
378     V[k] = 0.0;
379     for (int i = 0; i < P1D; ++i) {
380       V[k] += B[i + k*P1D] * U[i];//contract z direction
381     }
382   }
383 }
384 
// 3D transpose z-contraction in registers: V[k] = sum_i B[k, i]*U[i] for the
// P1D node layers; layers [P1D, Q1D) are zeroed.  slice/tidx/tidy unused.
385 inline __device__ void ContractTransposeZ3d(CeedScalar *slice, const int tidx,
386     const int tidy,
387     const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
388   for (int k = 0; k < Q1D; ++k) {
389     V[k] = 0.0;
390     if (k<P1D) {
391       for (int i = 0; i < Q1D; ++i) {
392         V[k] += B[k + i*P1D] * U[i];//contract z direction
393       }
394     }
395   }
396 }
397 
// 3D transpose y-contraction, one layer at a time: stage layer k in shared,
// then for node rows (tidy < P1D) V[k] = sum_i B[tidy, i] * slice[tidx, i].
// The guard only wraps the reads, so every thread reaches both barriers.
398 inline __device__ void ContractTransposeY3d(CeedScalar *slice, const int tidx,
399     const int tidy,
400     const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
401   for (int k = 0; k < P1D; ++k) {
402     slice[tidx+tidy*Q1D] = U[k];
403     __syncthreads();
404     V[k] = 0.0;
405     if (tidy<P1D) {
406       for (int i = 0; i < Q1D; ++i) {
407         V[k] += B[tidy + i*P1D] * slice[tidx + i*Q1D];//contract y direction
408       }
409     }
410     __syncthreads();
411   }
412 }
413 
// 3D transpose x-contraction, one layer at a time: stage layer k in shared,
// then for node columns (tidx < P1D) V[k] = sum_i B[tidx, i]*slice[i, tidy].
414 inline __device__ void ContractTransposeX3d(CeedScalar *slice, const int tidx,
415     const int tidy,
416     const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
417   for (int k = 0; k < P1D; ++k) {
418     slice[tidx+tidy*Q1D] = U[k];
419     __syncthreads();
420     V[k] = 0.0;
421     if (tidx<P1D) {
422       for (int i = 0; i < Q1D; ++i) {
423         V[k] += B[tidx + i*P1D] * slice[i + tidy*Q1D];//contract x direction
424       }
425     }
426     __syncthreads();
427   }
428 }
429 
// 3D tensor interpolation: each thread owns a z-column of registers at
// quadrature point (tidx, tidy); contractions sweep x, y (via shared slice)
// then z (in registers).  Launch: blockDim = (Q1D, Q1D, elemsPerBlock) with
// slice sized Q1D*Q1D scalars; components are looped sequentially.
430 inline __device__ void interp3d(const CeedInt nelem, const int transpose,
431                                 const CeedScalar *c_B, const CeedScalar *__restrict__ d_U,
432                                 CeedScalar *__restrict__ d_V,
433                                 CeedScalar *slice) {
434   CeedScalar r_V[Q1D];
435   CeedScalar r_t[Q1D];
436 
437   const int tidx = threadIdx.x;
438   const int tidy = threadIdx.y;
439 
440   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < nelem;
441        elem += gridDim.x*blockDim.z) {
442     for(int comp=0; comp<BASIS_NCOMP; comp++) {
443       for (int i = 0; i < Q1D; ++i) {
444         r_V[i] = 0.0;
445         r_t[i] = 0.0;
446       }
447       if(!transpose) {
448         readDofs3d(elem, tidx, tidy, comp, nelem, d_U, r_V);
449         ContractX3d(slice, tidx, tidy, r_V, c_B, r_t);
450         ContractY3d(slice, tidx, tidy, r_t, c_B, r_V);
451         ContractZ3d(slice, tidx, tidy, r_V, c_B, r_t);
452         writeQuads3d(elem, tidx, tidy, comp, 0, nelem, r_t, d_V);
453       } else {
454         readQuads3d(elem, tidx, tidy, comp, 0, nelem, d_U, r_V);
455         ContractTransposeZ3d(slice, tidx, tidy, r_V, c_B, r_t);
456         ContractTransposeY3d(slice, tidx, tidy, r_t, c_B, r_V);
457         ContractTransposeX3d(slice, tidx, tidy, r_V, c_B, r_t);
458         writeDofs3d(elem, tidx, tidy, comp, nelem, r_t, d_V);
459       }
460     }
461   }
462 }
463 
// 3D gradient.  Forward: for each output direction d, contract with c_G in
// direction d and c_B in the other two.  Transpose: apply the transposed
// triple per input direction and accumulate the three partials into r_V.
// Same per-thread register-column layout as interp3d.
464 inline __device__ void grad3d(const CeedInt nelem, const int transpose,
465                               const CeedScalar *c_B, const CeedScalar *c_G,
466                               const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V,
467                               CeedScalar *slice) {
468   //use P1D for one of these
469   CeedScalar r_U[Q1D];
470   CeedScalar r_V[Q1D];
471   CeedScalar r_t[Q1D];
472 
473   const int tidx = threadIdx.x;
474   const int tidy = threadIdx.y;
475   int dim;
476 
477   for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < nelem;
478        elem += gridDim.x*blockDim.z) {
479     for(int comp=0; comp<BASIS_NCOMP; comp++) {
480       if(!transpose) {
// d/dx: G, B, B
481         readDofs3d(elem, tidx, tidy, comp, nelem, d_U, r_U);
482         ContractX3d(slice, tidx, tidy, r_U, c_G, r_V);
483         ContractY3d(slice, tidx, tidy, r_V, c_B, r_t);
484         ContractZ3d(slice, tidx, tidy, r_t, c_B, r_V);
485         dim = 0;
486         writeQuads3d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
// d/dy: B, G, B
487         ContractX3d(slice, tidx, tidy, r_U, c_B, r_V);
488         ContractY3d(slice, tidx, tidy, r_V, c_G, r_t);
489         ContractZ3d(slice, tidx, tidy, r_t, c_B, r_V);
490         dim = 1;
491         writeQuads3d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
// d/dz: B, B, G
492         ContractX3d(slice, tidx, tidy, r_U, c_B, r_V);
493         ContractY3d(slice, tidx, tidy, r_V, c_B, r_t);
494         ContractZ3d(slice, tidx, tidy, r_t, c_G, r_V);
495         dim = 2;
496         writeQuads3d(elem, tidx, tidy, comp, dim, nelem, r_V, d_V);
497       } else {
// transpose: r_V accumulates the three directional contributions
498         dim = 0;
499         readQuads3d(elem, tidx, tidy, comp, dim, nelem, d_U, r_U);
500         ContractTransposeZ3d(slice, tidx, tidy, r_U, c_B, r_t);
501         ContractTransposeY3d(slice, tidx, tidy, r_t, c_B, r_U);
502         ContractTransposeX3d(slice, tidx, tidy, r_U, c_G, r_V);
503         dim = 1;
504         readQuads3d(elem, tidx, tidy, comp, dim, nelem, d_U, r_U);
505         ContractTransposeZ3d(slice, tidx, tidy, r_U, c_B, r_t);
506         ContractTransposeY3d(slice, tidx, tidy, r_t, c_G, r_U);
507         ContractTransposeX3d(slice, tidx, tidy, r_U, c_B, r_t);
508         add(r_V, r_t);
509         dim = 2;
510         readQuads3d(elem, tidx, tidy, comp, dim, nelem, d_U, r_U);
511         ContractTransposeZ3d(slice, tidx, tidy, r_U, c_G, r_t);
512         ContractTransposeY3d(slice, tidx, tidy, r_t, c_B, r_U);
513         ContractTransposeX3d(slice, tidx, tidy, r_U, c_B, r_t);
514         add(r_V, r_t);
515         writeDofs3d(elem, tidx, tidy, comp, nelem, r_V, d_V);
516       }
517     }
518   }
519 }
520 
521 /////////////
522 // Kernels //
523 /////////////
// Entry kernel: dispatch the interpolation to the dimension-specific
// implementation.  BASIS_DIM is a compile-time macro, so only one branch
// survives compilation.  slice is dynamic shared memory sized at launch.
extern "C" __global__ void interp(const CeedInt nelem, const int transpose,
                                  const CeedScalar *c_B, const CeedScalar *__restrict__ d_U,
                                  CeedScalar *__restrict__ d_V) {
  extern __shared__ double slice[];
  switch (BASIS_DIM) {
  case 1:
    interp1d(nelem, transpose, c_B, d_U, d_V, slice);
    break;
  case 2:
    interp2d(nelem, transpose, c_B, d_U, d_V, slice);
    break;
  case 3:
    interp3d(nelem, transpose, c_B, d_U, d_V, slice);
    break;
  }
}
537 
// Entry kernel: dispatch the gradient to the dimension-specific
// implementation.  BASIS_DIM is a compile-time macro, so only one branch
// survives compilation.  slice is dynamic shared memory sized at launch.
extern "C" __global__ void grad(const CeedInt nelem, const int transpose,
                                const CeedScalar *c_B, const CeedScalar *c_G,
                                const CeedScalar *__restrict__ d_U, CeedScalar *__restrict__ d_V) {
  extern __shared__ double slice[];
  switch (BASIS_DIM) {
  case 1:
    grad1d(nelem, transpose, c_B, c_G, d_U, d_V, slice);
    break;
  case 2:
    grad2d(nelem, transpose, c_B, c_G, d_U, d_V, slice);
    break;
  case 3:
    grad3d(nelem, transpose, c_B, c_G, d_U, d_V, slice);
    break;
  }
}
551 
552 /////////////
553 // Weights //
554 /////////////
// Write the 1D quadrature weight at each point of every element.
// Launch: blockDim.x == Q1D (one thread per point), blockDim.y elements per
// block; grid-stride over elements in the y dimension.
__device__ void weight1d(const CeedInt nelem, const CeedScalar *qweight1d,
                         CeedScalar *w) {
  const int q = threadIdx.x;
  const CeedScalar wq = qweight1d[q];
  const CeedInt stride = gridDim.x*blockDim.y;
  for (CeedInt e = blockIdx.x*blockDim.y + threadIdx.y; e < nelem; e += stride)
    w[e*Q1D + q] = wq;
}
565 
// Write the 2D tensor-product quadrature weight at each point of every
// element.  Launch: blockDim = (Q1D, Q1D, elemsPerBlock); grid-stride over
// elements in the z dimension.
__device__ void weight2d(const CeedInt nelem, const CeedScalar *qweight1d,
                         CeedScalar *w) {
  const int qx = threadIdx.x;
  const int qy = threadIdx.y;
  const CeedScalar wq = qweight1d[qx]*qweight1d[qy];
  const CeedInt stride = gridDim.x*blockDim.z;
  for (CeedInt e = blockIdx.x*blockDim.z + threadIdx.z; e < nelem; e += stride)
    w[e*Q1D*Q1D + qx + qy*Q1D] = wq;
}
577 
// Write the 3D tensor-product quadrature weight at each point of every
// element.  Launch: blockDim = (Q1D, Q1D, Q1D), one block per element in a
// grid-stride loop.
__device__ void weight3d(const CeedInt nelem, const CeedScalar *qweight1d,
                         CeedScalar *w) {
  const int qx = threadIdx.x;
  const int qy = threadIdx.y;
  const int qz = threadIdx.z;
  const CeedScalar wq = qweight1d[qx]*qweight1d[qy]*qweight1d[qz];
  for (int e = blockIdx.x; e < nelem; e += gridDim.x)
    w[e*Q1D*Q1D*Q1D + qx + qy*Q1D + qz*Q1D*Q1D] = wq;
}
589 
// Entry kernel: dispatch quadrature-weight generation by dimension.
// BASIS_DIM is a compile-time macro, so only one branch survives.
extern "C" __global__ void weight(const CeedInt nelem,
                                  const CeedScalar *__restrict__ qweight1d, CeedScalar *__restrict__ v) {
  switch (BASIS_DIM) {
  case 1:
    weight1d(nelem, qweight1d, v);
    break;
  case 2:
    weight2d(nelem, qweight1d, v);
    break;
  case 3:
    weight3d(nelem, qweight1d, v);
    break;
  }
}
600 
601                                    );
602 
603 int CeedCudaInitInterp(CeedScalar *d_B, CeedInt P1d, CeedInt Q1d,
604                        CeedScalar **c_B);
605 int CeedCudaInitInterpGrad(CeedScalar *d_B, CeedScalar *d_G, CeedInt P1d,
606                            CeedInt Q1d, CeedScalar **c_B_ptr, CeedScalar **c_G_ptr);
607 
608 int CeedBasisApplyTensor_Cuda_shared(CeedBasis basis, const CeedInt nelem,
609                                      CeedTransposeMode tmode,
610                                      CeedEvalMode emode, CeedVector u, CeedVector v) {
611   int ierr;
612   Ceed ceed;
613   ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
614   Ceed_Cuda_shared *ceed_Cuda;
615   CeedGetData(ceed, (void *) &ceed_Cuda); CeedChk(ierr);
616   CeedBasis_Cuda_shared *data;
617   CeedBasisGetData(basis, (void *)&data); CeedChk(ierr);
618   const CeedInt transpose = tmode == CEED_TRANSPOSE;
619   CeedInt dim, ncomp;
620   ierr = CeedBasisGetDimension(basis, &dim); CeedChk(ierr);
621   ierr = CeedBasisGetNumComponents(basis, &ncomp); CeedChk(ierr);
622 
623   const CeedScalar *d_u;
624   CeedScalar *d_v;
625   if(emode!=CEED_EVAL_WEIGHT) {
626     ierr = CeedVectorGetArrayRead(u, CEED_MEM_DEVICE, &d_u); CeedChk(ierr);
627   }
628   ierr = CeedVectorGetArray(v, CEED_MEM_DEVICE, &d_v); CeedChk(ierr);
629 
630   if (tmode == CEED_TRANSPOSE) {
631     CeedInt length;
632     ierr = CeedVectorGetLength(v, &length); CeedChk(ierr);
633     ierr = cudaMemset(d_v, 0, length * sizeof(CeedScalar)); CeedChk(ierr);
634   }
635   if (emode == CEED_EVAL_INTERP) {
636     //TODO: check performance difference between c_B and d_B
637     CeedInt P1d, Q1d;
638     ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
639     ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
640     // if (ceed_Cuda->Q1d != Q1d || ceed_Cuda->P1d != P1d)
641     // {
642     //   ceed_Cuda->Q1d = Q1d;
643     //   ceed_Cuda->P1d = P1d;
644     //   ceed_Cuda->grad = false;
645       ierr = CeedCudaInitInterp(data->d_interp1d, P1d, Q1d, &data->c_B);
646       CeedChk(ierr);
647     // }
648     void *interpargs[] = {(void *) &nelem, (void *) &transpose, &data->c_B, &d_u, &d_v};
649     // void *interpargs[] = {(void *) &nelem, (void *) &transpose, &data->d_interp1d, &d_u, &d_v};
650     if (dim==1)
651     {
652       CeedInt elemsPerBlock = 32;
653       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
654       CeedInt sharedMem = elemsPerBlock*Q1d*sizeof(CeedScalar);
655       ierr = run_kernel_dim_shared(ceed, data->interp, grid, Q1d, 1, elemsPerBlock, sharedMem,
656                             interpargs);
657       CeedChk(ierr);
658     } else if (dim==2) {
659       const CeedInt optElems[7] = {0,32,8,6,4,2,8};
660       CeedInt elemsPerBlock = Q1d < 7 ? optElems[Q1d]/ncomp : 1;
661       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
662       CeedInt sharedMem = ncomp*elemsPerBlock*Q1d*Q1d*sizeof(CeedScalar);
663       ierr = run_kernel_dim_shared(ceed, data->interp, grid, Q1d, Q1d, ncomp*elemsPerBlock, sharedMem,
664                             interpargs);
665       CeedChk(ierr);
666     } else if (dim==3) {
667       CeedInt elemsPerBlock = 1;
668       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
669       CeedInt sharedMem = Q1d*Q1d*Q1d*sizeof(CeedScalar);
670       ierr = run_kernel_dim_shared(ceed, data->interp, grid, Q1d, Q1d, elemsPerBlock, sharedMem,
671                             interpargs);
672       CeedChk(ierr);
673     }
674   } else if (emode == CEED_EVAL_GRAD) {
675     CeedInt P1d, Q1d;
676     ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
677     ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
678     // if (ceed_Cuda->Q1d != Q1d || ceed_Cuda->P1d != P1d || !data->grad)
679     // {
680     //   ceed_Cuda->Q1d = Q1d;
681     //   ceed_Cuda->P1d = P1d;
682     //   ceed_Cuda->grad = true;
683       ierr = CeedCudaInitInterpGrad(data->d_interp1d, data->d_grad1d, P1d,
684                                     Q1d, &data->c_B, &data->c_G);
685       CeedChk(ierr);
686     // }
687     void *gradargs[] = {(void *) &nelem, (void *) &transpose, &data->c_B, &data->c_G, &d_u, &d_v};
688     // void *gradargs[] = {(void *) &nelem, (void *) &transpose, &data->d_interp1d, &data->d_grad1d, &d_u, &d_v};
689     if (dim==1)
690     {
691       CeedInt elemsPerBlock = 32;
692       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
693       CeedInt sharedMem = elemsPerBlock*Q1d*sizeof(CeedScalar);
694       ierr = run_kernel_dim_shared(ceed, data->grad, grid, Q1d, 1, elemsPerBlock, sharedMem,
695                           gradargs);
696       CeedChk(ierr);
697     } else if (dim==2) {
698       const CeedInt optElems[7] = {0,32,8,6,4,2,8};
699       CeedInt elemsPerBlock = Q1d < 7 ? optElems[Q1d]/ncomp : 1;
700       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
701       CeedInt sharedMem = ncomp*elemsPerBlock*Q1d*Q1d*sizeof(CeedScalar);
702       ierr = run_kernel_dim_shared(ceed, data->grad, grid, Q1d, Q1d, ncomp*elemsPerBlock, sharedMem,
703                           gradargs);
704       CeedChk(ierr);
705     } else if (dim==3) {
706       CeedInt elemsPerBlock = 1;
707       CeedInt grid = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
708       CeedInt sharedMem = Q1d*Q1d*Q1d*sizeof(CeedScalar);
709       ierr = run_kernel_dim_shared(ceed, data->grad, grid, Q1d, Q1d, elemsPerBlock, sharedMem,
710                           gradargs);
711       CeedChk(ierr);
712     }
713   } else if (emode == CEED_EVAL_WEIGHT) {
714     CeedInt Q1d;
715     ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
716     void *weightargs[] = {(void *) &nelem, (void *) &data->d_qweight1d, &d_v};
717     if(dim==1){
718       const CeedInt elemsPerBlock = 32/Q1d;
719       const CeedInt gridsize = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
720       ierr = run_kernel_dim(ceed, data->weight, gridsize, Q1d, elemsPerBlock, 1, weightargs);
721     } else if(dim==2) {
722       const CeedInt optElems = 32/(Q1d*Q1d);
723       const CeedInt elemsPerBlock = optElems>0?optElems:1;
724       const CeedInt gridsize = nelem/elemsPerBlock + ( (nelem/elemsPerBlock*elemsPerBlock<nelem)? 1 : 0 );
725       ierr = run_kernel_dim(ceed, data->weight, gridsize, Q1d, Q1d, elemsPerBlock, weightargs);
726     } else if(dim==3) {
727       const CeedInt gridsize = nelem;
728       ierr = run_kernel_dim(ceed, data->weight, gridsize, Q1d, Q1d, Q1d, weightargs);
729     }
730   }
731 
732   if(emode!=CEED_EVAL_WEIGHT) {
733     ierr = CeedVectorRestoreArrayRead(u, &d_u); CeedChk(ierr);
734   }
735   ierr = CeedVectorRestoreArray(v, &d_v); CeedChk(ierr);
736 
737   return 0;
738 }
739 
740 static int CeedBasisDestroy_Cuda_shared(CeedBasis basis) {
741   int ierr;
742   Ceed ceed;
743   ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
744 
745   CeedBasis_Cuda_shared *data;
746   ierr = CeedBasisGetData(basis, (void *) &data); CeedChk(ierr);
747 
748   CeedChk_Cu(ceed, cuModuleUnload(data->module));
749 
750   ierr = cudaFree(data->d_qweight1d); CeedChk_Cu(ceed, ierr);
751   ierr = cudaFree(data->d_interp1d); CeedChk_Cu(ceed, ierr);
752   ierr = cudaFree(data->d_grad1d); CeedChk_Cu(ceed, ierr);
753 
754   ierr = CeedFree(&data); CeedChk(ierr);
755 
756   return 0;
757 }
758 
759 int CeedBasisCreateTensorH1_Cuda_shared(CeedInt dim, CeedInt P1d, CeedInt Q1d,
760                                         const CeedScalar *interp1d,
761                                         const CeedScalar *grad1d,
762                                         const CeedScalar *qref1d,
763                                         const CeedScalar *qweight1d,
764                                         CeedBasis basis) {
765   int ierr;
766   Ceed ceed;
767   ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
768   CeedBasis_Cuda_shared *data;
769   ierr = CeedCalloc(1, &data); CeedChk(ierr);
770 
771   const CeedInt qBytes = Q1d * sizeof(CeedScalar);
772   ierr = cudaMalloc((void **)&data->d_qweight1d, qBytes); CeedChk_Cu(ceed, ierr);
773   ierr = cudaMemcpy(data->d_qweight1d, qweight1d, qBytes,
774                     cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
775 
776   const CeedInt iBytes = qBytes * P1d;
777   ierr = cudaMalloc((void **)&data->d_interp1d, iBytes); CeedChk_Cu(ceed, ierr);
778   ierr = cudaMemcpy(data->d_interp1d, interp1d, iBytes,
779                     cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
780 
781   ierr = cudaMalloc((void **)&data->d_grad1d, iBytes); CeedChk_Cu(ceed, ierr);
782   ierr = cudaMemcpy(data->d_grad1d, grad1d, iBytes,
783                     cudaMemcpyHostToDevice); CeedChk_Cu(ceed, ierr);
784 
785   CeedInt ncomp;
786   ierr = CeedBasisGetNumComponents(basis, &ncomp); CeedChk(ierr);
787   ierr = compile(ceed, kernelsShared, &data->module, 7,
788                  "Q1D", Q1d,
789                  "P1D", P1d,
790                  "BASIS_BUF_LEN", ncomp * CeedIntPow(Q1d > P1d ?
791                      Q1d : P1d, dim),
792                  "BASIS_DIM", dim,
793                  "BASIS_NCOMP", ncomp,
794                  "BASIS_ELEMSIZE", CeedIntPow(P1d, dim),
795                  "BASIS_NQPT", CeedIntPow(Q1d, dim)
796                 ); CeedChk(ierr);
797   ierr = get_kernel(ceed, data->module, "interp", &data->interp);
798   CeedChk(ierr);
799   ierr = get_kernel(ceed, data->module, "grad", &data->grad);
800   CeedChk(ierr);
801   ierr = get_kernel(ceed, data->module, "weight", &data->weight);
802   CeedChk(ierr);
803 
804   ierr = CeedBasisSetData(basis, (void *)&data);
805   CeedChk(ierr);
806   ierr = CeedSetBackendFunction(ceed, "Basis", basis, "Apply",
807                                 CeedBasisApplyTensor_Cuda_shared);
808   CeedChk(ierr);
809   ierr = CeedSetBackendFunction(ceed, "Basis", basis, "Destroy",
810                                 CeedBasisDestroy_Cuda_shared);
811   CeedChk(ierr);
812   return 0;
813 }
814 
815 int CeedBasisCreateH1_Cuda_shared(CeedElemTopology topo, CeedInt dim,
816                                   CeedInt ndof, CeedInt nqpts,
817                                   const CeedScalar *interp,
818                                   const CeedScalar *grad,
819                                   const CeedScalar *qref,
820                                   const CeedScalar *qweight,
821                                   CeedBasis basis) {
822   int ierr;
823   Ceed ceed;
824   ierr = CeedBasisGetCeed(basis, &ceed); CeedChk(ierr);
825   return CeedError(ceed, 1, "Backend does not implement generic H1 basis");
826 }
827