xref: /libCEED/backends/cuda-gen/ceed-cuda-gen-operator-build.cpp (revision 3d05795b24e9e6af60384c9274f8e85b9ec224cd)
1 // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
2 // Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
3 // All Rights reserved. See files LICENSE and NOTICE for details.
4 //
5 // This file is part of CEED, a collection of benchmarks, miniapps, software
6 // libraries and APIs for efficient high-order finite element and spectral
7 // element discretizations for exascale applications. For more information and
8 // source code availability see http://github.com/ceed.
9 //
10 // The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
11 // a collaborative effort of two U.S. Department of Energy organizations (Office
12 // of Science and the National Nuclear Security Administration) responsible for
13 // the planning and preparation of a capable exascale ecosystem, including
14 // software, applications, hardware, advanced system engineering and early
15 // testbed platforms, in support of the nation's exascale computing imperative.
16 #include <ceed-backend.h>
17 #include "ceed-cuda-gen.h"
18 #include <iostream>
19 #include <sstream>
20 #include "../cuda/ceed-cuda.h"
21 #include "../cuda-reg/ceed-cuda-reg.h"
22 #include "../cuda-shared/ceed-cuda-shared.h"
23 
// Software emulation of atomicAdd(double*, double) for devices of compute
// capability < 6.0, which lack the native double-precision instruction.
// This string is prepended to the generated kernel source only for such
// devices (see the prop.major < 6 check in CeedCudaGenOperatorBuild).
// NOTE: comments inside QUOTE() are removed by the preprocessor before
// stringification, so they never appear in the generated source.
static const char *atomicAdd = QUOTE(
// Atomically adds val to *address by compare-and-swap on the 64-bit
// integer image of the double; returns the value previously stored.
__device__ double atomicAdd(double *address, double val) {
  unsigned long long int *address_as_ull = (unsigned long long int *)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old =
      atomicCAS(address_as_ull, assumed,
                __double_as_longlong(val +
                                     __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN
    // (since NaN != NaN)
  } while (assumed != old);
  return __longlong_as_double(old);
}
);
40 
41 static const char *deviceFunctions = QUOTE(
42 
// Argument bundles passed to the generated kernel: device pointers for up
// to 16 input and 16 output fields (field values, and the corresponding
// element-restriction index arrays in the CeedInt variant).
typedef struct { const CeedScalar* in[16]; CeedScalar* out[16]; } CudaFields;
typedef struct { CeedInt* in[16]; CeedInt* out[16]; } CudaFieldsInt;

// Per-thread state threaded through every device helper below: the
// thread's block-local coordinates, its flattened thread id, and the
// pointer to the slice buffer used to exchange values between threads
// during tensor contractions.
typedef struct {
  CeedInt tidx;
  CeedInt tidy;
  CeedInt tidz;
  CeedInt tid;
  CeedScalar* slice;
} BackendData;
53 
// Cooperatively copy a P x Q basis matrix from d_B into B: every thread
// in the block copies the entries at its flattened id, striding by the
// total number of threads in the block.
template <int P, int Q>
inline __device__ void loadMatrix(BackendData& data, const CeedScalar* d_B, CeedScalar* B) {
  const CeedInt blockSize = blockDim.x*blockDim.y*blockDim.z;
  CeedInt i = data.tid;
  while (i < P*Q) {
    B[i] = d_B[i];
    i += blockSize;
  }
}
60 
//****
// 1D
// Gather the NCOMP components of this thread's node (tidx) of element
// 'elem' into registers r_u.  d_u is laid out field-major: all nodes of
// component 0, then component 1, ... (component stride = nnodes).
// Only threads with tidx < P1d participate.
template <int NCOMP, int P1d>
inline __device__ void readDofs1d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = indices[node + elem * P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind + nnodes * comp];
    }
  }
}

// Same gather as readDofs1d, but for node-interlaced storage: all
// components of node 0, then node 1, ... (node stride = NCOMP).
template <int NCOMP, int P1d>
inline __device__ void readDofsTranspose1d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = indices[node + elem * P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind * NCOMP + comp];
    }
  }
}

// Gather for restrictions with compile-time strides instead of an index
// array: address = node*STRIDES_NODE + comp*STRIDES_COMP + elem*STRIDES_ELEM.
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided1d(BackendData& data, const CeedInt elem, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind + comp * STRIDES_COMP];
    }
  }
}

// Scatter-add registers r_v back to the global field (field-major
// layout).  atomicAdd is required: a node shared by several elements is
// updated by several threads.
template <int NCOMP, int P1d>
inline __device__ void writeDofs1d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = indices[node + elem * P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      atomicAdd(&d_v[ind + nnodes * comp], r_v[comp]);
    }
  }
}

// Scatter-add for node-interlaced storage (see readDofsTranspose1d).
template <int NCOMP, int P1d>
inline __device__ void writeDofsTranspose1d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = indices[node + elem * P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      atomicAdd(&d_v[ind * NCOMP + comp], r_v[comp]);
    }
  }
}

// Scatter-add for strided restrictions using plain +=.
// NOTE(review): this assumes the stride pattern gives each destination a
// unique writer thread (no aliasing across elements) -- confirm callers.
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided1d(BackendData& data, const CeedInt elem, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d)
  {
    const CeedInt node = data.tidx;
    const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      d_v[ind + comp * STRIDES_COMP] += r_v[comp];
    }
  }
}
134 
// 1D tensor contraction, one value per thread: V[tidx] = sum_i B(tidx,i)
// * U[i].  U is exchanged between threads through data.slice; the first
// barrier makes all writes visible before the reads, the second protects
// the slice buffer before it is reused by a subsequent contraction.
// B is indexed with the node (P) index fastest: B[i + tidx*P1d].
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractX1d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx] = *U;
  __syncthreads();
  *V = 0.0;
  for (CeedInt i = 0; i < P1d; ++i) {
    *V += B[i + data.tidx*P1d] * data.slice[i];//contract x direction
  }
  __syncthreads();
}

// Transposed 1D contraction: V[tidx] = sum_i B(i,tidx) * U[i].  Only
// results for tidx < P1d are meaningful; all threads still reach both
// barriers.
// NOTE(review): for tidx >= P1d the index data.tidx + i*P1d can exceed
// P1d*Q1d-1 when Q1d > P1d -- result is discarded, but the load may be
// out of bounds; confirm B's extent or the launch shape.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeX1d(BackendData& data,
                                            const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx] = *U;
  __syncthreads();
  *V = 0.0;
  for (CeedInt i = 0; i < Q1d; ++i) {
    *V += B[data.tidx + i*P1d] * data.slice[i];//contract x direction
  }
  __syncthreads();
}
158 
// 1D interpolation nodes -> quadrature points: one x contraction with the
// interpolation matrix c_B per component.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interp1d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX1d<NCOMP,P1d,Q1d>(data, r_U+comp, c_B, r_V+comp);
  }
}

// Transpose of interp1d: quadrature points back to nodes.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interpTranspose1d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractTransposeX1d<NCOMP,P1d,Q1d>(data, r_U+comp, c_B, r_V+comp);
  }
}

// 1D gradient: a single x contraction with the derivative matrix c_G.
// c_B is unused in 1D; it is kept so the signature matches the 2D/3D
// grad helpers.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void grad1d(BackendData& data, const CeedScalar *__restrict__ r_U,
                              const CeedScalar *c_B, const CeedScalar *c_G,
                              CeedScalar *__restrict__ r_V) {
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX1d<NCOMP,P1d,Q1d>(data, r_U+comp, c_G, r_V+comp);
  }
}

// Transpose of grad1d (c_B likewise unused in 1D).
template <int NCOMP, int P1d, int Q1d>
inline __device__ void gradTranspose1d(BackendData& data, const CeedScalar *__restrict__ r_U,
                              const CeedScalar *c_B, const CeedScalar *c_G,
                              CeedScalar *__restrict__ r_V) {
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractTransposeX1d<NCOMP,P1d,Q1d>(data, r_U+comp, c_G, r_V+comp);
  }
}
192 
//****
// 2D
// 2D analogue of readDofs1d: the element node is (tidx, tidy), flattened
// as tidx + tidy*P1d; d_u is field-major (component stride = nnodes).
template <int NCOMP, int P1d>
inline __device__ void readDofs2d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = indices[node + elem * P1d*P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind + nnodes * comp];
    }
  }
}

// 2D gather for node-interlaced storage (node stride = NCOMP).
template <int NCOMP, int P1d>
inline __device__ void readDofsTranspose2d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = indices[node + elem * P1d*P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind * NCOMP + comp];
    }
  }
}

// 2D gather with compile-time strides instead of an index array.
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided2d(BackendData& data, const CeedInt elem, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      r_u[comp] = d_u[ind + comp * STRIDES_COMP];
    }
  }
}

// 2D scatter-add (field-major layout); atomic because elements share nodes.
template <int NCOMP, int P1d>
inline __device__ void writeDofs2d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = indices[node + elem * P1d*P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      atomicAdd(&d_v[ind + nnodes * comp], r_v[comp]);
    }
  }
}

// 2D scatter-add for node-interlaced storage.
template <int NCOMP, int P1d>
inline __device__ void writeDofsTranspose2d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = indices[node + elem * P1d*P1d];
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      atomicAdd(&d_v[ind * NCOMP + comp], r_v[comp]);
    }
  }
}

// 2D scatter-add for strided restrictions (plain +=, see the 1D variant's
// aliasing note).
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided2d(BackendData& data, const CeedInt elem, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d)
  {
    const CeedInt node = data.tidx + data.tidy*P1d;
    const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
    for (CeedInt comp = 0; comp < NCOMP; ++comp) {
      d_v[ind + comp * STRIDES_COMP] += r_v[comp];
    }
  }
}
266 
// 2D contraction along x.  Each thread holds one value U(tidx,tidy); the
// tile is exchanged through data.slice (indexed tidx + tidy*Q1d), then
// V(tidx,tidy) = sum_i B(tidx,i) * U(i,tidy).  Both barriers protect the
// shared slice, which is reused by the next contraction.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractX2d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx+data.tidy*Q1d] = *U;
  __syncthreads();
  *V = 0.0;
  for (CeedInt i = 0; i < P1d; ++i) {
    *V += B[i + data.tidx*P1d] * data.slice[i + data.tidy*Q1d];//contract x direction
  }
  __syncthreads();
}

// 2D contraction along y: V(tidx,tidy) = sum_i B(tidy,i) * U(tidx,i).
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractY2d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx+data.tidy*Q1d] = *U;
  __syncthreads();
  *V = 0.0;
  for (CeedInt i = 0; i < P1d; ++i) {
    *V += B[i + data.tidy*P1d] * data.slice[data.tidx + i*Q1d];//contract y direction
  }
  __syncthreads();
}

// Transposed 2D contraction along y; only threads with tidy < P1d
// accumulate (others keep V = 0), but all threads reach the barriers.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractYTranspose2d(BackendData& data,
                                            const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx+data.tidy*Q1d] = *U;
  __syncthreads();
  *V = 0.0;
  if (data.tidy<P1d) {
    for (CeedInt i = 0; i < Q1d; ++i) {
      *V += B[data.tidy + i*P1d] * data.slice[data.tidx + i*Q1d];//contract y direction
    }
  }
  __syncthreads();
}

// Transposed 2D contraction along x; only threads with tidx < P1d
// accumulate.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractXTranspose2d(BackendData& data,
                                            const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx+data.tidy*Q1d] = *U;
  __syncthreads();
  *V = 0.0;
  if (data.tidx<P1d) {
    for (CeedInt i = 0; i < Q1d; ++i) {
      *V += B[data.tidx + i*P1d] * data.slice[i + data.tidy*Q1d];//contract x direction
    }
  }
  __syncthreads();
}

// Like ContractXTranspose2d but accumulates into V instead of zeroing it
// first -- used to sum the second derivative contribution in
// gradTranspose2d.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractXTransposeAdd2d(BackendData& data,
                                            const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  data.slice[data.tidx+data.tidy*Q1d] = *U;
  __syncthreads();
  if (data.tidx<P1d) {
    for (CeedInt i = 0; i < Q1d; ++i) {
      *V += B[data.tidx + i*P1d] * data.slice[i + data.tidy*Q1d];//contract x direction
    }
  }
  __syncthreads();
}
331 
// 2D interpolation nodes -> quadrature points: per component, an x then a
// y contraction with c_B, through the one-register temporary r_t.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interp2d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  CeedScalar r_t[1];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX2d<NCOMP,P1d,Q1d>(data, r_U+comp, c_B, r_t);
    ContractY2d<NCOMP,P1d,Q1d>(data, r_t, c_B, r_V+comp);
  }
}

// Transpose of interp2d: y then x transposed contractions.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interpTranspose2d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  CeedScalar r_t[1];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractYTranspose2d<NCOMP,P1d,Q1d>(data, r_U+comp, c_B, r_t);
    ContractXTranspose2d<NCOMP,P1d,Q1d>(data, r_t, c_B, r_V+comp);
  }
}

// 2D gradient at quadrature points.  Output layout: d/dx of component
// comp in r_V[comp + 0*NCOMP], d/dy in r_V[comp + 1*NCOMP].
template <int NCOMP, int P1d, int Q1d>
inline __device__ void grad2d(BackendData& data, const CeedScalar *__restrict__ r_U,
                              const CeedScalar *c_B, const CeedScalar *c_G,
                              CeedScalar *__restrict__ r_V) {
  CeedScalar r_t[1];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX2d<NCOMP,P1d,Q1d>(data, r_U+comp, c_G, r_t);
    ContractY2d<NCOMP,P1d,Q1d>(data, r_t, c_B, r_V+comp+0*NCOMP);
    ContractX2d<NCOMP,P1d,Q1d>(data, r_U+comp, c_B, r_t);
    ContractY2d<NCOMP,P1d,Q1d>(data, r_t, c_G, r_V+comp+1*NCOMP);
  }
}

// Transpose of grad2d: sums the x- and y-derivative contributions into
// r_V[comp] (the second one via the accumulating Add variant).
template <int NCOMP, int P1d, int Q1d>
inline __device__ void gradTranspose2d(BackendData& data, const CeedScalar *__restrict__ r_U,
                              const CeedScalar *c_B, const CeedScalar *c_G,
                              CeedScalar *__restrict__ r_V) {
  CeedScalar r_t[1];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractYTranspose2d<NCOMP,P1d,Q1d>(data, r_U+comp+0*NCOMP, c_B, r_t);
    ContractXTranspose2d<NCOMP,P1d,Q1d>(data, r_t, c_G, r_V+comp);
    ContractYTranspose2d<NCOMP,P1d,Q1d>(data, r_U+comp+1*NCOMP, c_G, r_t);
    ContractXTransposeAdd2d<NCOMP,P1d,Q1d>(data, r_t, c_B, r_V+comp);
  }
}
377 
//****
// 3D
// Gather the NCOMP components of this thread's z-column of element nodes
// into registers: r_u[z + comp*P1d] holds node (tidx, tidy, z).  d_u is
// field-major (component stride = nnodes).
template <int NCOMP, int P1d>
inline __device__ void readDofs3d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = indices[node + elem * P1d*P1d*P1d];
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        r_u[z+comp*P1d] = d_u[ind + nnodes * comp];
      }
    }
  }
}

// Per-column gather for node-interlaced storage (node stride = NCOMP).
template <int NCOMP, int P1d>
inline __device__ void readDofsTranspose3d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = indices[node + elem * P1d*P1d*P1d];
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        r_u[z+comp*P1d] = d_u[ind * NCOMP + comp];
      }
    }
  }
}

// Per-column gather with compile-time strides instead of an index array.
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readDofsStrided3d(BackendData& data, const CeedInt elem, const CeedScalar* d_u, CeedScalar* r_u) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        r_u[z+comp*P1d] = d_u[ind + comp * STRIDES_COMP];
      }
    }
  }
}
418 
// Gather the NCOMP components at one quadrature point of the z-slab 'q'
// of element 'elem' into registers, through the restriction index array.
// d_u is field-major (component stride = nquads).  Fixes a stray double
// semicolon left after the indices[] lookup.
template <int NCOMP, int Q1d>
inline __device__ void readSliceQuads3d(BackendData& data, const CeedInt nquads, const CeedInt elem, const CeedInt q, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  const CeedInt node = data.tidx + data.tidy*Q1d + q*Q1d*Q1d;
  const CeedInt ind = indices[node + elem * Q1d*Q1d*Q1d];
  for (CeedInt comp = 0; comp < NCOMP; ++comp) {
    r_u[comp] = d_u[ind + nquads * comp];
  }
}
427 
// Slice gather (fixed z-slab q) for node-interlaced storage (node stride
// = NCOMP).
template <int NCOMP, int Q1d>
inline __device__ void readSliceQuadsTranspose3d(BackendData& data, const CeedInt nquads, const CeedInt elem, const CeedInt q, const CeedInt* indices, const CeedScalar* d_u, CeedScalar* r_u) {
  const CeedInt node = data.tidx + data.tidy*Q1d + q*Q1d*Q1d;
  const CeedInt ind = indices[node + elem * Q1d*Q1d*Q1d];
  for (CeedInt comp = 0; comp < NCOMP; ++comp) {
    r_u[comp] = d_u[ind * NCOMP + comp];
  }
}

// Slice gather (fixed z-slab q) with compile-time strides.
template <int NCOMP, int Q1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void readSliceQuadsStrided3d(BackendData& data, const CeedInt elem, const CeedInt q, const CeedScalar* d_u, CeedScalar* r_u) {
  const CeedInt node = data.tidx + data.tidy*Q1d + q*Q1d*Q1d;
  const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
  for (CeedInt comp = 0; comp < NCOMP; ++comp) {
    r_u[comp] = d_u[ind + comp * STRIDES_COMP];
  }
}
445 
// Scatter-add the thread's z-column of results back to the global field
// (field-major layout); atomic because elements share nodes.
template <int NCOMP, int P1d>
inline __device__ void writeDofs3d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = indices[node + elem * P1d*P1d*P1d];
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        atomicAdd(&d_v[ind + nnodes * comp], r_v[z+comp*P1d]);
      }
    }
  }
}

// Column scatter-add for node-interlaced storage.
template <int NCOMP, int P1d>
inline __device__ void writeDofsTranspose3d(BackendData& data, const CeedInt nnodes, const CeedInt elem, const CeedInt* indices, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = indices[node + elem * P1d*P1d*P1d];
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        atomicAdd(&d_v[ind * NCOMP + comp], r_v[z+comp*P1d]);
      }
    }
  }
}

// Column scatter-add for strided restrictions (plain +=, see the 1D
// variant's aliasing note).
template <int NCOMP, int P1d, int STRIDES_NODE, int STRIDES_COMP, int STRIDES_ELEM>
inline __device__ void writeDofsStrided3d(BackendData& data, const CeedInt elem, const CeedScalar* r_v, CeedScalar* d_v) {
  if (data.tidx<P1d && data.tidy<P1d) {
    for (CeedInt z = 0; z < P1d; ++z) {
      const CeedInt node = data.tidx + data.tidy*P1d + z*P1d*P1d;
      const CeedInt ind = node * STRIDES_NODE + elem * STRIDES_ELEM;
      for (CeedInt comp = 0; comp < NCOMP; ++comp) {
        d_v[ind + comp * STRIDES_COMP] += r_v[z+comp*P1d];
      }
    }
  }
}
484 
// 3D contraction along x.  The thread first caches its row B(tidx,:) in
// registers, then for each of the P1d z-slabs exchanges the x-y slice
// through shared memory and computes V[k](tidx,tidy) = sum_i B(tidx,i) *
// U[k](i,tidy).  Barriers bracket each slice use because the buffer is
// reused per slab.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractX3d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[P1d];
  for (CeedInt i = 0; i < P1d; ++i) {
    r_B[i] = B[i + data.tidx*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    V[k] = 0.0;
    for (CeedInt i = 0; i < P1d; ++i) {
      V[k] += r_B[i] * data.slice[i + data.tidy*Q1d];//contract x direction
    }
    __syncthreads();
  }
}

// 3D contraction along y (same slab-by-slab scheme, row B(tidy,:)).
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractY3d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[P1d];
  for (CeedInt i = 0; i < P1d; ++i) {
    r_B[i] = B[i + data.tidy*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    V[k] = 0.0;
    for (CeedInt i = 0; i < P1d; ++i) {
      V[k] += r_B[i] * data.slice[data.tidx + i*Q1d];//contract y direction
    }
    __syncthreads();
  }
}

// 3D contraction along z.  z is the register direction (each thread owns
// a full z-column), so no shared memory or synchronization is needed.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractZ3d(BackendData& data,
                                   const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  for (CeedInt k = 0; k < Q1d; ++k) {
    V[k] = 0.0;
    for (CeedInt i = 0; i < P1d; ++i) {
      V[k] += B[i + k*P1d] * U[i];//contract z direction
    }
  }
}
531 
// Transposed z contraction (register direction, no sync).  V[k] is zeroed
// for all k in [0,Q1d) but only k < P1d accumulate, so the unused tail of
// the output column is left at zero.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeZ3d(BackendData& data,
    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  for (CeedInt k = 0; k < Q1d; ++k) {
    V[k] = 0.0;
    if (k<P1d) {
      for (CeedInt i = 0; i < Q1d; ++i) {
        V[k] += B[k + i*P1d] * U[i];//contract z direction
      }
    }
  }
}

// Transposed y contraction: caches column B(tidy,:) in registers, then
// per z-slab exchanges the slice and accumulates for tidy < P1d only.
// NOTE(review): the r_B preload indexes B[tidy + i*P1d] for all threads,
// including tidy >= P1d; when Q1d > P1d that index can pass the end of a
// P1d*Q1d matrix (values unused) -- confirm B's extent.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeY3d(BackendData& data,
    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[Q1d];
  for (CeedInt i = 0; i < Q1d; ++i)
  {
    r_B[i] = B[data.tidy + i*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    V[k] = 0.0;
    if (data.tidy<P1d) {
      for (CeedInt i = 0; i < Q1d; ++i) {
        V[k] += r_B[i] * data.slice[data.tidx + i*Q1d];//contract y direction
      }
    }
    __syncthreads();
  }
}

// Accumulating variant of ContractTransposeY3d: adds into V without
// zeroing it first.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeAddY3d(BackendData& data,
    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[Q1d];
  for (CeedInt i = 0; i < Q1d; ++i) {
    r_B[i] = B[data.tidy + i*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    if (data.tidy<P1d) {
      for (CeedInt i = 0; i < Q1d; ++i) {
        V[k] += r_B[i] * data.slice[data.tidx + i*Q1d];//contract y direction
      }
    }
    __syncthreads();
  }
}

// Transposed x contraction (column B(tidx,:)); accumulates for
// tidx < P1d only.  Same preload caveat as ContractTransposeY3d.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeX3d(BackendData& data,
    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[Q1d];
  for (CeedInt i = 0; i < Q1d; ++i) {
    r_B[i] = B[data.tidx + i*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    V[k] = 0.0;
    if (data.tidx<P1d) {
      for (CeedInt i = 0; i < Q1d; ++i) {
        V[k] += r_B[i] * data.slice[i + data.tidy*Q1d];//contract x direction
      }
    }
    __syncthreads();
  }
}

// Accumulating variant of ContractTransposeX3d (no zeroing of V).
template <int NCOMP, int P1d, int Q1d>
inline __device__ void ContractTransposeAddX3d(BackendData& data,
    const CeedScalar *U, const CeedScalar *B, CeedScalar *V) {
  CeedScalar r_B[Q1d];
  for (CeedInt i = 0; i < Q1d; ++i) {
    r_B[i] = B[data.tidx + i*P1d];
  }
  for (CeedInt k = 0; k < P1d; ++k) {
    data.slice[data.tidx+data.tidy*Q1d] = U[k];
    __syncthreads();
    if (data.tidx<P1d) {
      for (CeedInt i = 0; i < Q1d; ++i) {
        V[k] += r_B[i] * data.slice[i + data.tidy*Q1d];//contract x direction
      }
    }
    __syncthreads();
  }
}
623 
// 3D interpolation nodes -> quadrature points: per component, x, y, then
// z contractions with c_B through two register columns.  Input column at
// r_U[comp*P1d], output column at r_V[comp*Q1d].
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interp3d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  CeedScalar r_t1[Q1d];
  CeedScalar r_t2[Q1d];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX3d<NCOMP,P1d,Q1d>(data, r_U+comp*P1d, c_B, r_t1);
    ContractY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractZ3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*Q1d);
  }
}

// Transpose of interp3d: z, y, then x transposed contractions.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void interpTranspose3d(BackendData& data, const CeedScalar *__restrict__ r_U, const CeedScalar *c_B,
                                CeedScalar *__restrict__ r_V) {
  CeedScalar r_t1[Q1d];
  CeedScalar r_t2[Q1d];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractTransposeZ3d<NCOMP,P1d,Q1d>(data, r_U+comp*Q1d, c_B, r_t1);
    ContractTransposeY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractTransposeX3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*P1d);
  }
}

// 3D gradient at quadrature points.  For each component the derivative
// matrix c_G is applied in exactly one direction per pass.  Output
// layout: derivative 'dim' of component comp in the column at
// r_V[comp*Q1d + dim*NCOMP*Q1d].
template <int NCOMP, int P1d, int Q1d>
inline __device__ void grad3d(BackendData& data, const CeedScalar *__restrict__ r_U,
                              const CeedScalar *c_B, const CeedScalar *c_G,
                              CeedScalar *__restrict__ r_V) {
  CeedScalar r_t1[Q1d];
  CeedScalar r_t2[Q1d];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractX3d<NCOMP,P1d,Q1d>(data, r_U+comp*P1d, c_G, r_t1);
    ContractY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractZ3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*Q1d+0*NCOMP*Q1d);
    ContractX3d<NCOMP,P1d,Q1d>(data, r_U+comp*P1d, c_B, r_t1);
    ContractY3d<NCOMP,P1d,Q1d>(data, r_t1, c_G, r_t2);
    ContractZ3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*Q1d+1*NCOMP*Q1d);
    ContractX3d<NCOMP,P1d,Q1d>(data, r_U+comp*P1d, c_B, r_t1);
    ContractY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractZ3d<NCOMP,P1d,Q1d>(data, r_t2, c_G, r_V+comp*Q1d+2*NCOMP*Q1d);
  }
}

// Transpose of grad3d: the first derivative writes r_V's column, the
// next two accumulate via the Add variants of the x contraction.
template <int NCOMP, int P1d, int Q1d>
inline __device__ void gradTranspose3d(BackendData& data, const CeedScalar *__restrict__ r_U,
                                       const CeedScalar *c_B, const CeedScalar *c_G,
                                       CeedScalar *__restrict__ r_V) {
  CeedScalar r_t1[Q1d];
  CeedScalar r_t2[Q1d];
  for (CeedInt comp=0; comp<NCOMP; comp++) {
    ContractTransposeZ3d<NCOMP,P1d,Q1d>(data, r_U+comp*Q1d+0*NCOMP*Q1d, c_B, r_t1);
    ContractTransposeY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractTransposeX3d<NCOMP,P1d,Q1d>(data, r_t2, c_G, r_V+comp*P1d);
    ContractTransposeZ3d<NCOMP,P1d,Q1d>(data, r_U+comp*Q1d+1*NCOMP*Q1d, c_B, r_t1);
    ContractTransposeY3d<NCOMP,P1d,Q1d>(data, r_t1, c_G, r_t2);
    ContractTransposeAddX3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*P1d);
    ContractTransposeZ3d<NCOMP,P1d,Q1d>(data, r_U+comp*Q1d+2*NCOMP*Q1d, c_G, r_t1);
    ContractTransposeY3d<NCOMP,P1d,Q1d>(data, r_t1, c_B, r_t2);
    ContractTransposeAddX3d<NCOMP,P1d,Q1d>(data, r_t2, c_B, r_V+comp*P1d);
  }
}
685 
// Collocated gradient at quadrature points for the z-slab 'q': x and y
// derivatives contract the slab through the shared slice with c_G; the z
// derivative is computed entirely from the thread's register column r_U.
// Output: derivative 'dim' of comp in r_V[comp + dim*NCOMP].  The
// trailing barrier protects the slice before the next component reuses it.
template <int NCOMP, int Q1d>
inline __device__ void gradCollo3d(BackendData& data, const CeedInt q,
                                   const CeedScalar *__restrict__ r_U,
                                   const CeedScalar *c_G, CeedScalar *__restrict__ r_V) {
  for (CeedInt comp = 0; comp < NCOMP; ++comp) {
    data.slice[data.tidx+data.tidy*Q1d] = r_U[q + comp*Q1d];
    __syncthreads();
    // X derivative
    r_V[comp+0*NCOMP] = 0.0;
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[comp+0*NCOMP] += c_G[i + data.tidx*Q1d] * data.slice[i + data.tidy*Q1d];//contract x direction (X derivative)
    }
    // Y derivative
    r_V[comp+1*NCOMP] = 0.0;
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[comp+1*NCOMP] += c_G[i + data.tidy*Q1d] * data.slice[data.tidx + i*Q1d];//contract y direction (Y derivative)
    }
    // Z derivative
    r_V[comp+2*NCOMP] = 0.0;
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[comp+2*NCOMP] += c_G[i + q*Q1d] * r_U[i + comp*Q1d];//contract z direction (Z derivative)
    }
    __syncthreads();
  }
}

// Transpose of gradCollo3d: accumulates the three derivative
// contributions of slab 'q' into the output column r_V[.+comp*Q1d].  The
// z part is only this slab's share -- the full z sum builds up as the
// caller invokes this for each q.
template <int NCOMP, int Q1d>
inline __device__ void gradColloTranspose3d(BackendData& data, const CeedInt q,
                                            const CeedScalar *__restrict__ r_U,
                                            const CeedScalar *c_G, CeedScalar *__restrict__ r_V) {
  for (CeedInt comp = 0; comp < NCOMP; ++comp) {
    // X derivative
    data.slice[data.tidx+data.tidy*Q1d] = r_U[comp + 0*NCOMP];
    __syncthreads();
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[q+comp*Q1d] += c_G[data.tidx + i*Q1d] * data.slice[i + data.tidy*Q1d];//contract x direction (X derivative)
    }
    __syncthreads();
    // Y derivative
    data.slice[data.tidx+data.tidy*Q1d] = r_U[comp + 1*NCOMP];
    __syncthreads();
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[q+comp*Q1d] += c_G[data.tidy + i*Q1d] * data.slice[data.tidx + i*Q1d];//contract y direction (Y derivative)
    }
    __syncthreads();
    // Z derivative
    for (CeedInt i = 0; i < Q1d; ++i) {
      r_V[i+comp*Q1d] += c_G[i + q*Q1d] * r_U[comp + 2*NCOMP];// PARTIAL contract z direction (Z derivative)
    }
  }
}
737 
// Tensor-product quadrature weights: each thread evaluates the weight at
// its own quadrature point(s) from the 1D weight table qweight1d.
template <int Q1d>
inline __device__ void weight1d(BackendData& data, const CeedScalar *qweight1d, CeedScalar *w) {
  w[0] = qweight1d[data.tidx];
}

// 2D weight: product of the two 1D weights at (tidx, tidy).
template <int Q1d>
inline __device__ void weight2d(BackendData& data, const CeedScalar *qweight1d, CeedScalar *w) {
  const CeedScalar wx = qweight1d[data.tidx];
  const CeedScalar wy = qweight1d[data.tidy];
  w[0] = wx*wy;
}

// 3D weight: the thread fills its whole z-column, scaling the (tidx,
// tidy) plane weight by each 1D z weight.
template <int Q1d>
inline __device__ void weight3d(BackendData& data, const CeedScalar *qweight1d, CeedScalar *w) {
  const CeedScalar wxy = qweight1d[data.tidx]*qweight1d[data.tidy];
  for (CeedInt z = 0; z < Q1d; ++z) {
    w[z] = wxy*qweight1d[z];
  }
}
756 
757 );
758 
759 extern "C" int CeedCudaGenOperatorBuild(CeedOperator op) {
760 
761   using std::ostringstream;
762   using std::string;
763   int ierr;
764   bool setupdone;
765   ierr = CeedOperatorGetSetupStatus(op, &setupdone); CeedChk(ierr);
766   if (setupdone) return 0;
767   Ceed ceed;
768   ierr = CeedOperatorGetCeed(op, &ceed); CeedChk(ierr);
769   CeedOperator_Cuda_gen *data;
770   ierr = CeedOperatorGetData(op, (void**)&data); CeedChk(ierr);
771   CeedQFunction qf;
772   CeedQFunction_Cuda_gen *qf_data;
773   ierr = CeedOperatorGetQFunction(op, &qf); CeedChk(ierr);
774   ierr = CeedQFunctionGetData(qf, (void **)&qf_data); CeedChk(ierr);
775   CeedInt Q, P1d, Q1d = -1, numelements, elemsize, numinputfields, numoutputfields, ncomp, dim, nnodes;
776   ierr = CeedOperatorGetNumQuadraturePoints(op, &Q); CeedChk(ierr);
777   ierr = CeedOperatorGetNumElements(op, &numelements); CeedChk(ierr);
778   ierr = CeedQFunctionGetNumArgs(qf, &numinputfields, &numoutputfields);
779   CeedChk(ierr);
780   CeedOperatorField *opinputfields, *opoutputfields;
781   ierr = CeedOperatorGetFields(op, &opinputfields, &opoutputfields);
782   CeedChk(ierr);
783   CeedQFunctionField *qfinputfields, *qfoutputfields;
784   ierr = CeedQFunctionGetFields(qf, &qfinputfields, &qfoutputfields);
785   CeedChk(ierr);
786   CeedEvalMode emode;
787   CeedInterlaceMode imode;
788   CeedBasis basis;
789   CeedBasis_Cuda_shared *basis_data;
790   CeedElemRestriction Erestrict;
791   CeedElemRestriction_Cuda_reg *restr_data;
792 
793   ostringstream code;
794   string devFunctions(deviceFunctions);
795 
796   // Add atomicAdd function for old NVidia architectures
797   struct cudaDeviceProp prop;
798   Ceed delegate;
799   CeedGetDelegate(ceed, &delegate);
800   Ceed_Cuda *ceed_data;
801   ierr = CeedGetData(delegate, (void **)&ceed_data); CeedChk(ierr);
802   ierr = cudaGetDeviceProperties(&prop, ceed_data->deviceId);
803   if (prop.major<6){
804     code << atomicAdd;
805   }
806 
807   code << devFunctions;
808 
809   string qFunction(qf_data->qFunctionSource);
810 
811   code << "\n#define CEED_QFUNCTION(name) inline __device__ int name\n";
812   code << "\n#define CeedPragmaSIMD\n";
813   code << qFunction;
814 
815   // Setup
816   code << "\nextern \"C\" __global__ void oper(CeedInt nelem, void* ctx, CudaFieldsInt indices, CudaFields fields, CudaFields B, CudaFields G, CeedScalar* W) {\n";
817   // Input Evecs and Restriction
818   for (CeedInt i = 0; i < numinputfields; i++) {
819     ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
820     CeedChk(ierr);
821     if (emode == CEED_EVAL_WEIGHT) { // Skip
822     } else {
823       code << "const CeedScalar* d_u" <<i<<" = fields.in["<<i<<"];\n";
824       if (emode != CEED_EVAL_NONE)
825       {
826         ierr = CeedOperatorFieldGetBasis(opinputfields[i], &basis); CeedChk(ierr);
827         bool isTensor;
828         ierr = CeedBasisGetTensorStatus(basis, &isTensor); CeedChk(ierr);
829         //TODO check that all are the same
830         ierr = CeedBasisGetDimension(basis, &dim); CeedChk(ierr);
831         if (isTensor)
832         {
833           //TODO check that all are the same
834           ierr = CeedBasisGetNumQuadraturePoints1D(basis, &Q1d); CeedChk(ierr);
835         } else {
836           return CeedError(ceed, 1, "Backend does not implement operators with non-tensor basis");
837         }
838       }
839     }
840   }
841   data->dim = dim;
842   data->Q1d = Q1d;
843 
844   for (CeedInt i = 0; i < numoutputfields; i++) {
845     code << "CeedScalar* d_v"<<i<<" = fields.out["<<i<<"];\n";
846   }
847   code << "const CeedInt Dim = "<<dim<<";\n";
848   code << "const CeedInt Q1d = "<<Q1d<<";\n";
849   // code << "const CeedInt Q   = "<<Q<<";\n";
850   code << "extern __shared__ CeedScalar slice[];\n";
851   code << "BackendData data;\n";
852   code << "data.tidx = threadIdx.x;\n";
853   code << "data.tidy = threadIdx.y;\n";
854   code << "data.tidz = threadIdx.z;\n";
855   code << "data.tid  = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.y*blockDim.x;\n";
856   code << "data.slice = slice+data.tidz*Q1d"<<(dim>1?"*Q1d":"")<<";\n";
857 
858   code << "\n// Input field constants and basis data\n";
859   //Initialize constants, and matrices B and G
860   for (CeedInt i = 0; i < numinputfields; i++) {
861     code << "// -- Input field "<<i<<" --\n";
862     // Get elemsize, emode, ncomp
863     ierr = CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict);
864     CeedChk(ierr);
865     ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize);
866     CeedChk(ierr);
867     ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
868     CeedChk(ierr);
869     ierr = CeedElemRestrictionGetNumComponents(Erestrict, &ncomp);
870     CeedChk(ierr);
871 
872     // Set field constants
873     if (emode != CEED_EVAL_WEIGHT) {
874       ierr = CeedOperatorFieldGetBasis(opinputfields[i], &basis); CeedChk(ierr);
875       if (basis != CEED_BASIS_COLLOCATED) {
876         ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
877         code << "  const CeedInt P_in_"<<i<<" = "<<P1d<<";\n";
878       } else {
879         code << "  const CeedInt P_in_"<<i<<" = "<<Q1d<<";\n";
880       }
881       code << "  const CeedInt ncomp_in_"<<i<<" = "<<ncomp<<";\n";
882     }
883 
884     // Load basis data
885     code << "// EvalMode: "<<CeedEvalModes[emode]<<"\n";
886     switch (emode) {
887     case CEED_EVAL_NONE:
888       break;
889     case CEED_EVAL_INTERP:
890       ierr = CeedBasisGetData(basis, (void **)&basis_data); CeedChk(ierr);
891       data->B.in[i] = basis_data->d_interp1d;
892       code << "__shared__ double s_B_in_"<<i<<"["<<P1d*Q1d<<"];\n";
893       code << "loadMatrix<P_in_"<<i<<",Q1d>(data, B.in["<<i<<"], s_B_in_"<<i<<");\n";
894       break;
895     case CEED_EVAL_GRAD:
896       ierr = CeedBasisGetData(basis, (void **)&basis_data); CeedChk(ierr);
897       data->B.in[i] = basis_data->d_interp1d;
898       code << "__shared__ double s_B_in_"<<i<<"["<<P1d*Q1d<<"];\n";
899       code << "loadMatrix<P_in_"<<i<<",Q1d>(data, B.in["<<i<<"], s_B_in_"<<i<<");\n";
900       if (basis_data->d_collograd1d) {
901         data->G.in[i] = basis_data->d_collograd1d;
902         code << "__shared__ double s_G_in_"<<i<<"["<<Q1d*Q1d<<"];\n";
903         code << "loadMatrix<Q1d,Q1d>(data, G.in["<<i<<"], s_G_in_"<<i<<");\n";
904       } else {
905         data->G.in[i] = basis_data->d_grad1d;
906         code << "__shared__ double s_G_in_"<<i<<"["<<P1d*Q1d<<"];\n";
907         code << "loadMatrix<P_in_"<<i<<",Q1d>(data, G.in["<<i<<"], s_G_in_"<<i<<");\n";
908       }
909       break;
910     case CEED_EVAL_WEIGHT:
911       break; // No action
912     case CEED_EVAL_DIV:
913       break; // TODO: Not implemented
914     case CEED_EVAL_CURL:
915       break; // TODO: Not implemented
916     }
917   }
918 
919   code << "\n// Output field constants and basis data\n";
920   for (CeedInt i = 0; i < numoutputfields; i++) {
921     code << "// -- Output field "<<i<<" --\n";
922     // Get elemsize, emode, ncomp
923     ierr = CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict);
924     CeedChk(ierr);
925     ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize);
926     CeedChk(ierr);
927     ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
928     CeedChk(ierr);
929     ierr = CeedElemRestrictionGetNumComponents(Erestrict, &ncomp);
930     CeedChk(ierr);
931 
932     // Set field constants
933     ierr = CeedOperatorFieldGetBasis(opoutputfields[i], &basis); CeedChk(ierr);
934     if (basis != CEED_BASIS_COLLOCATED) {
935       ierr = CeedBasisGetNumNodes1D(basis, &P1d); CeedChk(ierr);
936       code << "  const CeedInt P_out_"<<i<<" = "<<P1d<<";\n";
937     } else {
938       code << "  const CeedInt P_out_"<<i<<" = "<<Q1d<<";\n";
939     }
940     code << "  const CeedInt ncomp_out_"<<i<<" = "<<ncomp<<";\n";
941 
942     // Load basis data
943     code << "// EvalMode: "<<CeedEvalModes[emode]<<"\n";
944     switch (emode) {
945     case CEED_EVAL_NONE:
946       break; // No action
947     case CEED_EVAL_INTERP:
948       ierr = CeedBasisGetData(basis, (void **)&basis_data); CeedChk(ierr);
949       data->B.out[i] = basis_data->d_interp1d;
950       code << "  __shared__ double s_B_out_"<<i<<"["<<P1d*Q1d<<"];\n";
951       code << "  loadMatrix<P_out_"<<i<<",Q1d>(data, B.out["<<i<<"], s_B_out_"<<i<<");\n";
952       break;
953     case CEED_EVAL_GRAD:
954       ierr = CeedBasisGetData(basis, (void **)&basis_data); CeedChk(ierr);
955       data->B.out[i] = basis_data->d_interp1d;
956       code << "__shared__ double s_B_out_"<<i<<"["<<P1d*Q1d<<"];\n";
957       code << "loadMatrix<P_out_"<<i<<",Q1d>(data, B.out["<<i<<"], s_B_out_"<<i<<");\n";
958       if (basis_data->d_collograd1d) {
959         data->G.out[i] = basis_data->d_collograd1d;
960         code << "__shared__ double s_G_out_"<<i<<"["<<Q1d*Q1d<<"];\n";
961         code << "loadMatrix<Q1d,Q1d>(data, G.out["<<i<<"], s_G_out_"<<i<<");\n";
962       } else {
963         data->G.out[i] = basis_data->d_grad1d;
964         code << "__shared__ double s_G_out_"<<i<<"["<<P1d*Q1d<<"];\n";
965         code << "loadMatrix<P_out_"<<i<<",Q1d>(data, G.out["<<i<<"], s_G_out_"<<i<<");\n";
966       }
967       break;
968     case CEED_EVAL_WEIGHT: {
969       Ceed ceed;
970       ierr = CeedOperatorGetCeed(op, &ceed); CeedChk(ierr);
971       return CeedError(ceed, 1,
972                        "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
973       break; // Should not occur
974     }
975     case CEED_EVAL_DIV:
976       break; // TODO: Not implemented
977     case CEED_EVAL_CURL:
978       break; // TODO: Not implemented
979     }
980   }
981   code << "\n";
982   code << "__syncthreads();\n";
983   code << "for (CeedInt elem = blockIdx.x*blockDim.z + threadIdx.z; elem < nelem; elem += gridDim.x*blockDim.z) {\n";
984   // Input basis apply if needed
985   // Generate the correct eval mode code for each input
986   code << "\n// Input field restrictions and basis actions\n";
987   for (CeedInt i = 0; i < numinputfields; i++) {
988     code << "  // -- Input field "<<i<<" --\n";
989     // Get elemsize, emode, ncomp
990     ierr = CeedOperatorFieldGetElemRestriction(opinputfields[i], &Erestrict);
991     CeedChk(ierr);
992     ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize);
993     CeedChk(ierr);
994     ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
995     CeedChk(ierr);
996     ierr = CeedElemRestrictionGetNumComponents(Erestrict, &ncomp);
997     CeedChk(ierr);
998 
999     // Restriction
1000     if (emode != CEED_EVAL_WEIGHT &&
1001         !((emode == CEED_EVAL_NONE) && basis_data->d_collograd1d)) {
1002       code << "  CeedScalar r_u"<<i<<"[ncomp_in_"<<i<<"*P_in_"<<i<<"];\n";
1003       ierr = CeedElemRestrictionGetData(Erestrict, (void **)&restr_data); CeedChk(ierr);
1004       data->indices.in[i] = restr_data->d_ind;
1005       if (data->indices.in[i]) {
1006         ierr = CeedElemRestrictionGetNumNodes(Erestrict, &nnodes); CeedChk(ierr);
1007         code << "  const CeedInt nnodes_in_"<<i<<" = "<<nnodes<<";\n";
1008         ierr = CeedElemRestrictionGetIMode(Erestrict, &imode); CeedChk(ierr);
1009         code << "  // InterlaceMode: "<<CeedInterlaceModes[imode]<<"\n";
1010         code << "  readDofs"<<(imode==CEED_NONINTERLACED?"":"Transpose")<<dim<<"d<ncomp_in_"<<i<<",P_in_"<<i<<">(data, nnodes_in_"<<i<<", elem, indices.in["<<i<<"], d_u"<<i<<", r_u"<<i<<");\n";
1011       } else {
1012         bool backendstrides;
1013         ierr = CeedElemRestrictionGetBackendStridesStatus(Erestrict,
1014                                                           &backendstrides);
1015         CeedChk(ierr);
1016         CeedInt strides[3] = {1, elemsize, elemsize*ncomp};
1017         if (!backendstrides) {
1018           ierr = CeedElemRestrictionGetStrides(Erestrict, &strides);
1019           CeedChk(ierr);
1020         }
1021         code << "  // Strides: {"<<strides[0]<<", "<<strides[1]<<", "<<strides[2]<<"}\n";
1022         code << "  readDofsStrided"<<dim<<"d<ncomp_in_"<<i<<",P_in_"<<i<<","<<strides[0]<<","<<strides[1]<<","<<strides[2]<<">(data, elem, d_u"<<i<<", r_u"<<i<<");\n";
1023       }
1024     }
1025 
1026     // Basis action
1027     code << "// EvalMode: "<<CeedEvalModes[emode]<<"\n";
1028     switch (emode) {
1029     case CEED_EVAL_NONE:
1030       if (!basis_data->d_collograd1d) {
1031         code << "  CeedScalar* r_t"<<i<<" = r_u"<<i<<";\n";
1032       }
1033       break;
1034     case CEED_EVAL_INTERP:
1035       code << "  CeedScalar r_t"<<i<<"[ncomp_in_"<<i<<"*Q1d];\n";
1036       code << "  interp"<<dim<<"d<ncomp_in_"<<i<<",P_in_"<<i<<",Q1d>(data, r_u"<<i<<", s_B_in_"<<i<<", r_t"<<i<<");\n";
1037       break;
1038     case CEED_EVAL_GRAD:
1039       if (basis_data->d_collograd1d) {
1040         code << "  CeedScalar r_t"<<i<<"[ncomp_in_"<<i<<"*Q1d];\n";
1041         code << "  interp"<<dim<<"d<ncomp_in_"<<i<<",P_in_"<<i<<",Q1d>(data, r_u"<<i<<", s_B_in_"<<i<<", r_t"<<i<<");\n";
1042       } else {
1043         code << "  CeedScalar r_t"<<i<<"[ncomp_in_"<<i<<"*Dim*Q1d];\n";
1044         code << "  grad"<<dim<<"d<ncomp_in_"<<i<<",P_in_"<<i<<",Q1d>(data, r_u"<<i<<", s_B_in_"<<i<<", s_G_in_"<<i<<", r_t"<<i<<");\n";
1045       }
1046       break;
1047     case CEED_EVAL_WEIGHT:
1048       code << "  CeedScalar r_t"<<i<<"[Q1d];\n";
1049       ierr = CeedOperatorFieldGetBasis(opinputfields[i], &basis); CeedChk(ierr);
1050       ierr = CeedBasisGetData(basis, (void **)&basis_data); CeedChk(ierr);
1051       data->W = basis_data->d_qweight1d;
1052       code << "  weight"<<dim<<"d<Q1d>(data, W, r_t"<<i<<");\n";
1053       break; // No action
1054     case CEED_EVAL_DIV:
1055       break; // TODO: Not implemented
1056     case CEED_EVAL_CURL:
1057       break; // TODO: Not implemented
1058     }
1059   }
1060 
1061   // Q function
1062   code << "\n// Output field setup\n";
1063   for (CeedInt i = 0; i < numoutputfields; i++) {
1064       code << "  // -- Output field "<<i<<" --\n";
1065     ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
1066     CeedChk(ierr);
1067     if (emode==CEED_EVAL_GRAD)
1068     {
1069       if (basis_data->d_collograd1d) {
1070         //Accumulator for gradient slices
1071         code << "  CeedScalar r_tt"<<i<<"[ncomp_out_"<<i<<"*Q1d];\n";
1072         code << "  for (CeedInt i = 0; i < ncomp_out_"<<i<<"; ++i) {\n";
1073         code << "    for (CeedInt j = 0; j < Q1d; ++j) {\n";
1074         code << "      r_tt"<<i<<"[j + i*Q1d] = 0.0;\n";
1075         code << "    }\n";
1076         code << "  }\n";
1077       } else {
1078         code << "  CeedScalar r_tt"<<i<<"[ncomp_out_"<<i<<"*Dim*Q1d];\n";
1079       }
1080     }
1081     if (emode==CEED_EVAL_NONE || emode==CEED_EVAL_INTERP)
1082     {
1083       code << "  CeedScalar r_tt"<<i<<"[ncomp_out_"<<i<<"*Q1d];\n";
1084     }
1085   }
1086   //We treat quadrature points per slice in 3d to save registers
1087   if (basis_data->d_collograd1d) {
1088     code << "\n  // Note: Collocated Gradient\n";
1089     code << "#pragma unroll\n";
1090     code << "for (CeedInt q=0; q<Q1d; q++) {\n";
1091     for (CeedInt i = 0; i < numinputfields; i++) {
1092       code << "  // -- Input field "<<i<<" --\n";
1093       // Get elemsize, emode, ncomp
1094       ierr = CeedQFunctionFieldGetEvalMode(qfinputfields[i], &emode);
1095       CeedChk(ierr);
1096       // Basis action
1097       code << "// EvalMode: "<<CeedEvalModes[emode]<<"\n";
1098       switch (emode) {
1099       case CEED_EVAL_NONE:
1100         code << "  CeedScalar r_q"<<i<<"[ncomp_in_"<<i<<"];\n";
1101         ierr = CeedElemRestrictionGetData(Erestrict, (void **)&restr_data); CeedChk(ierr);
1102         data->indices.in[i] = restr_data->d_ind;
1103         if (data->indices.in[i]) {
1104           ierr = CeedElemRestrictionGetIMode(Erestrict, &imode); CeedChk(ierr);
1105           code << "  // InterlaceMode: "<<CeedInterlaceModes[imode]<<"\n";
1106           code << "  readSliceQuads"<<(imode==CEED_NONINTERLACED?"":"Transpose")<<"3d<ncomp_in_"<<i<<",Q1d>(data, nquads_in_"<<i<<", elem, q, indices.in["<<i<<"], d_u"<<i<<", r_q"<<i<<");\n";
1107         } else {
1108           bool backendstrides;
1109           ierr = CeedElemRestrictionGetBackendStridesStatus(Erestrict,
1110                                                             &backendstrides);
1111           CeedChk(ierr);
1112           CeedInt strides[3] = {1, elemsize, elemsize*ncomp};
1113           if (!backendstrides) {
1114             ierr = CeedElemRestrictionGetStrides(Erestrict, &strides);
1115             CeedChk(ierr);
1116           }
1117           code << "  // Strides: {"<<strides[0]<<", "<<strides[1]<<", "<<strides[2]<<"}\n";
1118           code << "  readSliceQuadsStrided"<<"3d<ncomp_in_"<<i<<",Q1d"","<<strides[0]<<","<<strides[1]<<","<<strides[2]<<">(data, elem, q, d_u"<<i<<", r_q"<<i<<");\n";
1119         }
1120         break;
1121       case CEED_EVAL_INTERP:
1122         code << "  CeedScalar r_q"<<i<<"[ncomp_in_"<<i<<"];\n";
1123         code << "  for (CeedInt j = 0; j < ncomp_in_"<<i<<" ; ++j) {\n";
1124         code << "    r_q"<<i<<"[j] = r_t"<<i<<"[q + j*Q1d];\n";
1125         code << "  }\n";
1126         break;
1127       case CEED_EVAL_GRAD:
1128         code << "  CeedScalar r_q"<<i<<"[ncomp_in_"<<i<<"*Dim];\n";
1129         code << "  gradCollo3d<ncomp_in_"<<i<<",Q1d>(data, q, r_t"<<i<<", s_G_in_"<<i<<", r_q"<<i<<");\n";
1130         break;
1131       case CEED_EVAL_WEIGHT:
1132         code << "  CeedScalar r_q"<<i<<"[1];\n";
1133         code << "  r_q"<<i<<"[0] = r_t"<<i<<"[q];\n";
1134         break; // No action
1135       case CEED_EVAL_DIV:
1136         break; // TODO: Not implemented
1137       case CEED_EVAL_CURL:
1138         break; // TODO: Not implemented
1139       }
1140     }
1141     for (CeedInt i = 0; i < numoutputfields; i++) {
1142       code << "  // -- Output field "<<i<<" --\n";
1143       ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
1144       CeedChk(ierr);
1145       // Basis action
1146       switch (emode) {
1147       case CEED_EVAL_NONE:
1148         code << "  CeedScalar r_qq"<<i<<"[ncomp_out_"<<i<<"];\n";
1149         break; // No action
1150       case CEED_EVAL_INTERP:
1151         code << "  CeedScalar r_qq"<<i<<"[ncomp_out_"<<i<<"];\n";
1152         break;
1153       case CEED_EVAL_GRAD:
1154         code << "  CeedScalar r_qq"<<i<<"[ncomp_out_"<<i<<"*Dim];\n";
1155         break;
1156       case CEED_EVAL_WEIGHT:
1157         break; // Should not occur
1158       case CEED_EVAL_DIV:
1159         break; // TODO: Not implemented
1160       case CEED_EVAL_CURL:
1161         break; // TODO: Not implemented
1162       }
1163     }
1164   } else {
1165       code << "\n  // Note: No Collocated Gradient\n";
1166     for (CeedInt i = 0; i < numinputfields; i++) {
1167       code << "  // -- Input field "<<i<<" --\n";
1168       code << "  CeedScalar* r_q"<<i<<" = r_t"<<i<<";\n";
1169     }
1170     for (CeedInt i = 0; i < numoutputfields; i++) {
1171       code << "  // -- Output field "<<i<<" --\n";
1172       code << "  CeedScalar* r_qq"<<i<<" = r_tt"<<i<<";\n";
1173     }
1174   }
1175   code << "  // QFunction Inputs and outputs\n";
1176   code << "  CeedScalar* in["<<numinputfields<<"];\n";
1177   for (CeedInt i = 0; i < numinputfields; i++) {
1178     code << "  // -- Input field "<<i<<" --\n";
1179     code << "  in["<<i<<"] = r_q"<<i<<";\n";
1180   }
1181   code << "  CeedScalar* out["<<numoutputfields<<"];\n";
1182   for (CeedInt i = 0; i < numoutputfields; i++) {
1183     code << "  // -- Output field "<<i<<" --\n";
1184     code << "  out["<<i<<"] = r_qq"<<i<<";\n";
1185   }
1186   code << "\n  // Apply QFunction\n";
1187   string qFunctionName(qf_data->qFunctionName);
1188   code << "  "<<qFunctionName<<"(ctx, ";
1189   if (dim != 3 || basis_data->d_collograd1d) {
1190     code << "1 ";
1191   } else {
1192     code << "Q1d ";
1193   }
1194   code << ", in, out);\n";
1195   if (basis_data->d_collograd1d) {
1196     code << "\n  // Note: Collocated Gradient\n";
1197     for (CeedInt i = 0; i < numoutputfields; i++) {
1198       code << "  // -- Output field "<<i<<" --\n";
1199       ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
1200       CeedChk(ierr);
1201       // Basis action
1202       code << "  // EvalMode: "<<CeedEvalModes[emode]<<"\n";
1203       switch (emode) {
1204       case CEED_EVAL_NONE:
1205         code << "  for (CeedInt j = 0; j < ncomp_out_"<<i<<" ; ++j) {\n";
1206         code << "    r_tt"<<i<<"[q + j*Q1d] = r_qq"<<i<<"[j];\n";
1207         code << "  }\n";
1208         break; // No action
1209       case CEED_EVAL_INTERP:
1210         code << "  for (CeedInt j = 0; j < ncomp_out_"<<i<<" ; ++j) {\n";
1211         code << "    r_tt"<<i<<"[q + j*Q1d] = r_qq"<<i<<"[j];\n";
1212         code << "  }\n";
1213         break;
1214       case CEED_EVAL_GRAD:
1215         code << "  gradColloTranspose3d<ncomp_out_"<<i<<",Q1d>(data, q, r_qq"<<i<<", s_G_out_"<<i<<", r_tt"<<i<<");\n";
1216         break;
1217       case CEED_EVAL_WEIGHT:
1218         break; // Should not occur
1219       case CEED_EVAL_DIV:
1220         break; // TODO: Not implemented
1221       case CEED_EVAL_CURL:
1222         break; // TODO: Not implemented
1223       }
1224     }
1225     code << "}\n";
1226   }
1227 
1228   // Output basis apply if needed
1229   // Generate the correct eval mode code for each output
1230   code << "\n// Output field basis action and restrictions\n";
1231   for (CeedInt i = 0; i < numoutputfields; i++) {
1232     code << "  // -- Output field "<<i<<" --\n";
1233     // Get elemsize, emode, ncomp
1234     ierr = CeedOperatorFieldGetElemRestriction(opoutputfields[i], &Erestrict);
1235     CeedChk(ierr);
1236     ierr = CeedElemRestrictionGetElementSize(Erestrict, &elemsize);
1237     CeedChk(ierr);
1238     ierr = CeedQFunctionFieldGetEvalMode(qfoutputfields[i], &emode);
1239     CeedChk(ierr);
1240     ierr = CeedElemRestrictionGetNumComponents(Erestrict, &ncomp);
1241     CeedChk(ierr);
1242     // Basis action
1243     code << "  // EvalMode: "<<CeedEvalModes[emode]<<"\n";
1244     switch (emode) {
1245     case CEED_EVAL_NONE:
1246       code << "  CeedScalar* r_v"<<i<<" = r_tt"<<i<<";\n";
1247       break; // No action
1248     case CEED_EVAL_INTERP:
1249       code << "  CeedScalar r_v"<<i<<"[ncomp_out_"<<i<<"*P_out_"<<i<<"];\n";
1250       code << "  interpTranspose"<<dim<<"d<ncomp_out_"<<i<<",P_out_"<<i<<",Q1d>(data, r_tt"<<i<<", s_B_out_"<<i<<", r_v"<<i<<");\n";
1251       break;
1252     case CEED_EVAL_GRAD:
1253       code << "  CeedScalar r_v"<<i<<"[ncomp_out_"<<i<<"*P_out_"<<i<<"];\n";
1254       if (basis_data->d_collograd1d) {
1255         code << "  interpTranspose"<<dim<<"d<ncomp_out_"<<i<<",P_out_"<<i<<",Q1d>(data, r_tt"<<i<<", s_B_out_"<<i<<", r_v"<<i<<");\n";
1256       } else {
1257         code << "  gradTranspose"<<dim<<"d<ncomp_out_"<<i<<",P_out_"<<i<<",Q1d>(data, r_tt"<<i<<", s_B_out_"<<i<<", s_G_out_"<<i<<", r_v"<<i<<");\n";
1258       }
1259       break;
1260     case CEED_EVAL_WEIGHT: {
1261       Ceed ceed;
1262       ierr = CeedOperatorGetCeed(op, &ceed); CeedChk(ierr);
1263       return CeedError(ceed, 1,
1264                        "CEED_EVAL_WEIGHT cannot be an output evaluation mode");
1265       break; // Should not occur
1266     }
1267     case CEED_EVAL_DIV:
1268       break; // TODO: Not implemented
1269     case CEED_EVAL_CURL:
1270       break; // TODO: Not implemented
1271     }
1272     // Restriction
1273     ierr = CeedElemRestrictionGetData(Erestrict, (void **)&restr_data); CeedChk(ierr);
1274     data->indices.out[i] = restr_data->d_ind;
1275     if (data->indices.out[i]) {
1276       ierr = CeedElemRestrictionGetNumNodes(Erestrict, &nnodes); CeedChk(ierr);
1277       code << "  const CeedInt nnodes_out_"<<i<<" = "<<nnodes<<";\n";
1278       ierr = CeedElemRestrictionGetIMode(Erestrict, &imode); CeedChk(ierr);
1279       code << "  // InterlaceMode: "<<CeedInterlaceModes[imode]<<"\n";
1280       code << "  writeDofs"<<(imode==CEED_NONINTERLACED?"":"Transpose")<<dim<<"d<ncomp_out_"<<i<<",P_out_"<<i<<">(data, nnodes_out_"<<i<<", elem, indices.out["<<i<<"], r_v"<<i<<", d_v"<<i<<");\n";
1281     } else {
1282       bool backendstrides;
1283       ierr = CeedElemRestrictionGetBackendStridesStatus(Erestrict,
1284                                                         &backendstrides);
1285       CeedChk(ierr);
1286       CeedInt strides[3] = {1, elemsize, elemsize*ncomp};
1287       if (!backendstrides) {
1288         ierr = CeedElemRestrictionGetStrides(Erestrict, &strides);
1289         CeedChk(ierr);
1290       }
1291       code << "  // Strides: {"<<strides[0]<<", "<<strides[1]<<", "<<strides[2]<<"}\n";
1292       code << "  writeDofsStrided"<<dim<<"d<ncomp_out_"<<i<<",P_out_"<<i<<","<<strides[0]<<","<<strides[1]<<","<<strides[2]<<">(data, elem, r_v"<<i<<", d_v"<<i<<");\n";
1293     }
1294   }
1295 
1296   code << "  }\n";
1297   code << "}\n\n";
1298 
1299 //  std::cout << code.str();
1300 
1301   ierr = CeedCompileCuda(ceed, code.str().c_str(), &data->module, 0);
1302   CeedChk(ierr);
1303   ierr = CeedGetKernelCuda(ceed, data->module, "oper", &data->op);
1304   CeedChk(ierr);
1305 
1306   ierr = CeedOperatorSetSetupDone(op); CeedChk(ierr);
1307 
1308   return 0;
1309 }
1310