// Copyright (c) 2017-2024, Lawrence Livermore National Security, LLC and other CEED contributors.
// All Rights Reserved. See the top-level LICENSE and NOTICE files for details.
//
// SPDX-License-Identifier: BSD-2-Clause
//
// This file is part of CEED:  http://github.com/ceed

#include <ceed.h>
#include <ceed/backend.h>
#include <ceed/jit-source/cuda/cuda-types.h>
#include <stddef.h>

#include "../cuda/ceed-cuda-common.h"
#include "../cuda/ceed-cuda-compile.h"
#include "ceed-cuda-gen-operator-build.h"
#include "ceed-cuda-gen.h"

//------------------------------------------------------------------------------
// Destroy operator
//------------------------------------------------------------------------------
static int CeedOperatorDestroy_Cuda_gen(CeedOperator op) {
  CeedOperator_Cuda_gen *impl;

  CeedCallBackend(CeedOperatorGetData(op, &impl));
  CeedCallBackend(CeedFree(&impl));
  return CEED_ERROR_SUCCESS;
}

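// Count the hardware thread slots left idle on an SM when each thread block packs elems_per_block elements of threads_per_elem threads each and the
// block size is rounded up to a whole number of warps.
// For example (illustrative numbers): with 1536 threads per SM, a warp size of 32, and 49 threads per element, packing 2 elements per block gives a
// 128-thread block, 12 such blocks fit, and 1536 - 2*49*12 = 360 slots sit idle.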
static int Waste(int threads_per_sm, int warp_size, int threads_per_elem, int elems_per_block) {
  int useful_threads_per_block = threads_per_elem * elems_per_block;
  // round up to nearest multiple of warp_size
  int block_size    = CeedDivUpInt(useful_threads_per_block, warp_size) * warp_size;
  int blocks_per_sm = threads_per_sm / block_size;
  return threads_per_sm - useful_threads_per_block * blocks_per_sm;
}

// Choose the least wasteful block size constrained by blocks_per_sm blocks of at most max_threads_per_block threads.
//
// The x and y part of block[] contains per-element sizes (specified on input) while the z part is the number of elements.
//
// Problem setting: we'd like to make occupancy high with relatively few inactive threads. CUDA (cuOccupancyMaxPotentialBlockSize) can tell us how
// many threads can run.
//
// Note that full occupancy sometimes can't be achieved by one thread block.
// For example, an SM might support 1536 threads in total, but only 1024 within a single thread block.
// So cuOccupancyMaxPotentialBlockSize may suggest a block size of 768 so that two blocks can run, whereas one block of 1024 would prevent a second
// block from running. The cuda-gen kernels are pretty heavy with lots of instruction-level parallelism (ILP), so we'll generally be okay with
// relatively low occupancy and smaller thread blocks, but we solve a reasonably general problem here. Empirically, we find that blocks bigger than
// about 256 threads have higher latency and worse load balancing when the number of elements is modest.
//
// cuda-gen can't choose block sizes arbitrarily; they need to be a multiple of the number of quadrature points (or number of basis functions).
// They also have a lot of __syncthreads(), which is another point against excessively large thread blocks.
// Suppose we have elements with 7x7x7 quadrature points.
// The kernel loops over the last dimension, so we have 7*7=49 threads per element.
// Packing two elements per block gives 2*49=98 useful threads.
// CUDA schedules in units of full warps (32 threads), so 128 CUDA hardware threads are effectively committed to that block.
// Now suppose cuOccupancyMaxPotentialBlockSize returned 352.
// We can schedule 2 blocks of size 98 (196 useful threads using 256 hardware threads), but not a third block (which would need a total of 384
// hardware threads).
//
// If instead we had packed 3 elements, we'd have 3*49=147 useful threads occupying 160 slots, and could still schedule two blocks.
// Alternatively, we could pack a single block of 7 elements (7*49=343 useful threads) into the 352 slots.
// The latter has the least "waste", but __syncthreads() over-synchronizes and it might not pay off relative to smaller blocks.
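//
// As an illustrative trace of the selection below (hypothetical numbers continuing the example above): with blocks_per_sm = 1 and
// max_threads_per_block = 352 (so threads_per_sm = 352), warp_size = 32, threads_per_elem = 49, and at least 7 elements, Waste() returns
// 107, 156, 58, 156, 107, 58, and 9 for 1..7 elements per block; the tie at 6 elements does not displace 3 (6*49 = 294 > 128 threads), and
// 7 elements wins outright with only 9 idle slots, so elems_per_block = 7.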
static int BlockGridCalculate(CeedInt num_elem, int blocks_per_sm, int max_threads_per_block, int max_threads_z, int warp_size, int block[3],
                              int *grid) {
  const int threads_per_sm   = blocks_per_sm * max_threads_per_block;
  const int threads_per_elem = block[0] * block[1];
  int       elems_per_block  = 1;
  int       waste            = Waste(threads_per_sm, warp_size, threads_per_elem, 1);

  for (int i = 2; i <= CeedIntMin(max_threads_per_block / threads_per_elem, num_elem); i++) {
    int i_waste = Waste(threads_per_sm, warp_size, threads_per_elem, i);

    // We want to minimize waste, but smaller blocks have lower latency and less __syncthreads() overhead, so on a tie in waste we only accept the
    // larger block while it stays at or below 128 threads; past that, we keep the smaller block.
    if (i_waste < waste || (i_waste == waste && threads_per_elem * i <= 128)) {
      elems_per_block = i;
      waste           = i_waste;
    }
  }
  // For low-order elements, threads_per_elem may be small enough that elems_per_block exceeds the device limit on the z-dimension of a block, so we
  // must clamp it before setting the z-dimension size of the block.
  block[2] = CeedIntMin(elems_per_block, max_threads_z);
  *grid    = CeedDivUpInt(num_elem, elems_per_block);
  return CEED_ERROR_SUCCESS;
}

// callback for cuOccupancyMaxPotentialBlockSize, providing the amount of dynamic shared memory required for a thread block of size threads.
static size_t dynamicSMemSize(int threads) { return threads * sizeof(CeedScalar); }

//------------------------------------------------------------------------------
// Apply and add to output
//------------------------------------------------------------------------------
static int CeedOperatorApplyAdd_Cuda_gen(CeedOperator op, CeedVector input_vec, CeedVector output_vec, CeedRequest *request) {
  Ceed                    ceed;
  Ceed_Cuda              *cuda_data;
  CeedInt                 num_elem, num_input_fields, num_output_fields;
  CeedEvalMode            eval_mode;
  CeedVector              output_vecs[CEED_FIELD_MAX] = {NULL};
  CeedQFunctionField     *qf_input_fields, *qf_output_fields;
  CeedQFunction_Cuda_gen *qf_data;
  CeedQFunction           qf;
  CeedOperatorField      *op_input_fields, *op_output_fields;
  CeedOperator_Cuda_gen  *data;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedGetData(ceed, &cuda_data));
  CeedCallBackend(CeedOperatorGetData(op, &data));
  CeedCallBackend(CeedOperatorGetQFunction(op, &qf));
  CeedCallBackend(CeedQFunctionGetData(qf, &qf_data));
  CeedCallBackend(CeedOperatorGetNumElements(op, &num_elem));
  CeedCallBackend(CeedOperatorGetFields(op, &num_input_fields, &op_input_fields, &num_output_fields, &op_output_fields));
  CeedCallBackend(CeedQFunctionGetFields(qf, NULL, &qf_input_fields, NULL, &qf_output_fields));

  // Check for tensor-product bases
  {
    bool has_tensor_bases;

    CeedCallBackend(CeedOperatorHasTensorBases(op, &has_tensor_bases));
    // -- Fallback to ref if not all bases are tensor-product
    if (!has_tensor_bases) {
      CeedOperator op_fallback;

      CeedDebug256(ceed, CEED_DEBUG_COLOR_SUCCESS, "Falling back to /gpu/cuda/ref CeedOperator due to non-tensor bases");
      CeedCallBackend(CeedOperatorGetFallback(op, &op_fallback));
      CeedCallBackend(CeedOperatorApplyAdd(op_fallback, input_vec, output_vec, request));
      return CEED_ERROR_SUCCESS;
    }
  }

  // Build the operator kernel
  CeedCallBackend(CeedOperatorBuildKernel_Cuda_gen(op));

  // Input vectors
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode == CEED_EVAL_WEIGHT) {  // Skip
      data->fields.inputs[i] = NULL;
    } else {
      bool       is_active;
      CeedVector vec;

      // Get input vector
      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      is_active = vec == CEED_VECTOR_ACTIVE;
      if (is_active) vec = input_vec;
      CeedCallBackend(CeedVectorGetArrayRead(vec, CEED_MEM_DEVICE, &data->fields.inputs[i]));
      if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec));
    }
  }

  // Output vectors
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    if (eval_mode == CEED_EVAL_WEIGHT) {  // Skip
      data->fields.outputs[i] = NULL;
    } else {
      bool       is_active;
      CeedVector vec;

      // Get output vector
      CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
      is_active = vec == CEED_VECTOR_ACTIVE;
      if (is_active) vec = output_vec;
      output_vecs[i] = vec;
      // Check whether this output vector is shared with a previous output field (multiple output modes)
      CeedInt index = -1;

      for (CeedInt j = 0; j < i; j++) {
        if (vec == output_vecs[j]) {
          index = j;
          break;
        }
      }
      if (index == -1) {
        CeedCallBackend(CeedVectorGetArray(vec, CEED_MEM_DEVICE, &data->fields.outputs[i]));
      } else {
        data->fields.outputs[i] = data->fields.outputs[index];
      }
      if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec));
    }
  }

  // Get context data
  CeedCallBackend(CeedQFunctionGetInnerContextData(qf, CEED_MEM_DEVICE, &qf_data->d_c));

  // Apply operator
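  // Note: opargs is passed positionally at launch, so its order must match the parameter list of the kernel generated by
  // CeedOperatorBuildKernel_Cuda_gen()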
  void         *opargs[]  = {(void *)&num_elem, &qf_data->d_c, &data->indices, &data->fields, &data->B, &data->G, &data->W};
  const CeedInt dim       = data->dim;
  const CeedInt Q_1d      = data->Q_1d;
  const CeedInt P_1d      = data->max_P_1d;
  const CeedInt thread_1d = CeedIntMax(Q_1d, P_1d);
  int           max_threads_per_block, min_grid_size;

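  // Ask the CUDA occupancy calculator for a suggested block size (and minimum grid size for full occupancy), given the per-block dynamic shared
  // memory reported by dynamicSMemSize(); the block size limit of 0x10000 is effectively "no limit" since it exceeds the hardware maximum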
  CeedCallCuda(ceed, cuOccupancyMaxPotentialBlockSize(&min_grid_size, &max_threads_per_block, data->op, dynamicSMemSize, 0, 0x10000));
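  // block[0] and block[1] are the per-element thread extents; the z entry is a placeholder that BlockGridCalculate() replaces with the number of
  // elements per block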
  int block[3] =
      {
          thread_1d,
          dim < 2 ? 1 : thread_1d,
          -1,
      },
      grid;

  CeedCallBackend(BlockGridCalculate(num_elem, min_grid_size / cuda_data->device_prop.multiProcessorCount, max_threads_per_block,
                                     cuda_data->device_prop.maxThreadsDim[2], cuda_data->device_prop.warpSize, block, &grid));
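  // Dynamic shared memory: one CeedScalar per thread in the block, consistent with dynamicSMemSize() used for the occupancy query above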
  CeedInt shared_mem = block[0] * block[1] * block[2] * sizeof(CeedScalar);

  CeedCallBackend(CeedRunKernelDimShared_Cuda(ceed, data->op, grid, block[0], block[1], block[2], shared_mem, opargs));

  // Restore input arrays
  for (CeedInt i = 0; i < num_input_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_input_fields[i], &eval_mode));
    if (eval_mode == CEED_EVAL_WEIGHT) {  // Skip
    } else {
      bool       is_active;
      CeedVector vec;

      CeedCallBackend(CeedOperatorFieldGetVector(op_input_fields[i], &vec));
      is_active = vec == CEED_VECTOR_ACTIVE;
      if (is_active) vec = input_vec;
      CeedCallBackend(CeedVectorRestoreArrayRead(vec, &data->fields.inputs[i]));
      if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec));
    }
  }

  // Restore output arrays
  for (CeedInt i = 0; i < num_output_fields; i++) {
    CeedCallBackend(CeedQFunctionFieldGetEvalMode(qf_output_fields[i], &eval_mode));
    if (eval_mode == CEED_EVAL_WEIGHT) {  // Skip
    } else {
      bool       is_active;
      CeedVector vec;

      CeedCallBackend(CeedOperatorFieldGetVector(op_output_fields[i], &vec));
      is_active = vec == CEED_VECTOR_ACTIVE;
      if (is_active) vec = output_vec;
      // Check whether this output vector is shared with a previous output field (multiple output modes)
      CeedInt index = -1;
      for (CeedInt j = 0; j < i; j++) {
        if (vec == output_vecs[j]) {
          index = j;
          break;
        }
      }
      if (index == -1) {
        CeedCallBackend(CeedVectorRestoreArray(vec, &data->fields.outputs[i]));
      }
      if (!is_active) CeedCallBackend(CeedVectorDestroy(&vec));
    }
  }

  // Restore context data
  CeedCallBackend(CeedQFunctionRestoreInnerContextData(qf, &qf_data->d_c));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
// Create operator
//------------------------------------------------------------------------------
int CeedOperatorCreate_Cuda_gen(CeedOperator op) {
  Ceed                   ceed;
  CeedOperator_Cuda_gen *impl;

  CeedCallBackend(CeedOperatorGetCeed(op, &ceed));
  CeedCallBackend(CeedCalloc(1, &impl));
  CeedCallBackend(CeedOperatorSetData(op, impl));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Operator", op, "ApplyAdd", CeedOperatorApplyAdd_Cuda_gen));
  CeedCallBackend(CeedSetBackendFunction(ceed, "Operator", op, "Destroy", CeedOperatorDestroy_Cuda_gen));
  return CEED_ERROR_SUCCESS;
}

//------------------------------------------------------------------------------
273