Composable Kernel: include/ck_tile/ops/gemm/kernel/grouped_gemm_kernel.hpp Source File
1 // Copyright (c) Advanced Micro Devices, Inc., or its affiliates.
2 // SPDX-License-Identifier: MIT
3 
4 #pragma once
5 
13 #include "ck_tile/host.hpp"
14 
15 #include <hip/hip_runtime.h>
16 
17 namespace ck_tile {
18 
26 
27 template <index_t NumDTensor = 0>
28 struct GroupedGemmHostArgs
29 {
30  CK_TILE_HOST GroupedGemmHostArgs(const void* a_ptr_,
31  const void* b_ptr_,
32  const std::array<const void*, NumDTensor>& ds_ptr_,
33  void* e_ptr_,
34  index_t k_batch_,
35  index_t M_,
36  index_t N_,
37  index_t K_,
38  index_t stride_A_,
39  index_t stride_B_,
40  const std::array<index_t, NumDTensor>& stride_Ds_,
41  index_t stride_E_)
42  : a_ptr(a_ptr_),
43  b_ptr(b_ptr_),
44  ds_ptr(ds_ptr_),
45  e_ptr(e_ptr_),
46  M(M_),
47  N(N_),
48  K(K_),
49  stride_A(stride_A_),
50  stride_B(stride_B_),
51  stride_Ds(stride_Ds_),
52  stride_E(stride_E_),
53  k_batch(k_batch_)
54  {
55  }
56 
57  const void* a_ptr;
58  const void* b_ptr;
59  const std::array<const void*, NumDTensor> ds_ptr;
60  union
61  {
62  void* e_ptr;
63  void* c_ptr;
64  };
65 
66  index_t M;
67  index_t N;
68  index_t K;
69  index_t stride_A;
70  index_t stride_B;
71  const std::array<index_t, NumDTensor> stride_Ds;
72  union
73  {
74  index_t stride_E;
75  index_t stride_C;
76  };
77 
78  index_t k_batch;
79 };
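For illustration only (not part of this header): a minimal host-side sketch of filling one GroupedGemmHostArgs descriptor per GEMM problem, assuming NumDTensor = 0, row-major tensors, and device buffers allocated elsewhere (for example with hipMalloc). Every pointer and stride value below is a hypothetical placeholder.

// Sketch assumes this header and its dependencies are already included.
#include <array>
#include <vector>

void build_descriptors(void* a0, void* b0, void* e0, void* a1, void* b1, void* e1)
{
    std::vector<ck_tile::GroupedGemmHostArgs<0>> gemm_descs;

    // Group 0: a 128x256x64 problem with split-K disabled (k_batch = 1).
    gemm_descs.emplace_back(a0, b0, std::array<const void*, 0>{}, e0,
                            /*k_batch*/ 1, /*M*/ 128, /*N*/ 256, /*K*/ 64,
                            /*stride_A*/ 64, /*stride_B*/ 256,
                            std::array<ck_tile::index_t, 0>{}, /*stride_E*/ 256);

    // Group 1: a 512x512x128 problem under the same layout assumptions.
    gemm_descs.emplace_back(a1, b1, std::array<const void*, 0>{}, e1,
                            1, 512, 512, 128, 128, 512,
                            std::array<ck_tile::index_t, 0>{}, 512);
}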
80 
81 template <index_t NumDTensor = 0>
82 struct GemmTransKernelArg
83 {
84  UniversalGemmKernelArgs<1, 1, NumDTensor> group_karg;
85  ck_tile::index_t block_start;
86  ck_tile::index_t block_end;
87 
88  GemmTransKernelArg() = delete;
89  GemmTransKernelArg(UniversalGemmKernelArgs<1, 1, NumDTensor>&& karg,
90  index_t bl_start,
91  index_t bl_end)
92  : group_karg{std::move(karg)}, block_start{bl_start}, block_end{bl_end}
93  {
94  }
95 
96  GemmTransKernelArg(UniversalGemmKernelArgs<1, 1, NumDTensor>&& karg)
97  : group_karg{std::move(karg)}, block_start{0}, block_end{0}
98  {
99  }
100 };
101 
102 template <typename TilePartitioner_, typename GemmPipeline_, typename EpiloguePipeline_>
103 struct GroupedGemmKernel : public UniversalGemmKernel<TilePartitioner_, GemmPipeline_, EpiloguePipeline_>
104 {
108 
109  using TilePartitioner = remove_cvref_t<TilePartitioner_>;
110  using GemmPipeline = remove_cvref_t<GemmPipeline_>;
111  using EpiloguePipeline = remove_cvref_t<EpiloguePipeline_>;
112 
114  using ALayout = remove_cvref_t<typename GemmPipeline::ALayout>;
115  using BLayout = remove_cvref_t<typename GemmPipeline::BLayout>;
116  using CLayout = remove_cvref_t<typename GemmPipeline::CLayout>;
117 
119  using ADataType = remove_cvref_t<typename GemmPipeline::ADataType>;
120  using BDataType = remove_cvref_t<typename GemmPipeline::BDataType>;
121  using CDataType = remove_cvref_t<typename EpiloguePipeline::ODataType>;
122  using DsDataType = remove_cvref_t<typename EpiloguePipeline::DsDataType>;
123 
124  static constexpr index_t NumDTensor_ = DsDataType::size();
125 
127  static_assert(
128  !is_detected<is_tuple, ALayout>::value && !is_detected<is_tuple, ADataType>::value,
129  "ALayout and ADataType must be scalars. Multiple parameters are not currently supported.");
130 
132  static_assert(
133  !is_detected<is_tuple, BLayout>::value && !is_detected<is_tuple, BDataType>::value,
134  "BLayout and BDataType must be scalars. Multiple parameters are not currently supported.");
135 
137  static_assert(!is_detected<is_tuple, CLayout>::value &&
138  !is_detected<is_tuple, CDataType>::value,
139  "C/CLayout and C/EDataType must be scalars.");
140 
143 
144  static constexpr index_t kBlockSize = GemmPipeline::BlockSize;
145  static constexpr bool UsePersistentKernel = GemmPipeline::UsePersistentKernel;
146 
147  [[nodiscard]] CK_TILE_HOST static const std::string GetName()
148  {
149  // clang-format off
150  using P_ = GemmPipeline;
151 
152  return concat('_', "gemm_grouped", gemm_prec_str<ADataType, BDataType>(),
153  concat('x', P_::MPerBlock, P_::NPerBlock, P_::KPerBlock),
154  concat('x', P_::GetVectorSizeA(), P_::GetVectorSizeB(), P_::GetVectorSizeC()),
155  concat('x', P_::kPadM, P_::kPadN, P_::kPadK),
156  (UsePersistentKernel ? "Persistent" : "NonPersistent"),
157  (NumDTensor_ == 2 ? "MultiD" : "NoMultiD"),
158  (GemmPipeline::DoubleSmemBuffer ? "DoubleSmemBuffer" : "SingleSmemBuffer"));
159  // clang-format on
160  }
161 
162  CK_TILE_HOST static auto
163  GetWorkSpaceSize(const std::vector<GroupedGemmHostArgs<>>& gemm_descs) -> std::size_t
164  {
165  return gemm_descs.size() * sizeof(GemmTransKernelArg<NumDTensor_>);
166  }
167 
168  CK_TILE_HOST static auto GetWorkSpaceSize(index_t group_count) -> std::size_t
169  {
170  return group_count * sizeof(GemmTransKernelArg<NumDTensor_>);
171  }
172 
173  CK_TILE_HOST static auto BlockSize() -> dim3
174  {
175  if(is_wave32())
176  {
177  return dim3(kBlockSize / 2);
178  }
179  else
180  {
181  return dim3(kBlockSize);
182  }
183  }
184 
191  CK_TILE_HOST static auto MaxOccupancyGridSize(const stream_config& s) -> dim3
192  {
193  using ConstantPointer = const void CK_TILE_CONSTANT_ADDRESS_SPACE*;
194  const auto kernel = kentry<1, Kernel, ConstantPointer, index_t>;
195  int occupancy;
196  HIP_CHECK_ERROR(
197  hipOccupancyMaxActiveBlocksPerMultiprocessor(&occupancy, kernel, kBlockSize, 0));
198  const int grid_size = get_available_compute_units(s) * occupancy;
199  return dim3(grid_size, 1, 1);
200  }
201 
202  CK_TILE_HOST static auto
203  GridSize(const std::vector<GroupedGemmHostArgs<NumDTensor_>>& gemm_descs)
204  {
205  index_t grid_size = 0;
206  for(const auto& it_desc : gemm_descs)
207  {
208  const auto local_grid_size = TilePartitioner::GridSize(it_desc.M, it_desc.N);
209  grid_size += local_grid_size * it_desc.k_batch;
210  }
211  return dim3(grid_size, 1, 1);
212  }
213 
214  CK_TILE_HOST static auto
215  MakeKargs(const std::vector<GroupedGemmHostArgs<NumDTensor_>>& gemm_descs)
216  -> std::vector<GemmTransKernelArg<NumDTensor_>>
217  {
218  std::vector<GemmTransKernelArg<NumDTensor_>> gemm_kernel_args_;
219  index_t group_count = ck_tile::type_convert<ck_tile::index_t>(gemm_descs.size());
220  index_t grid_size = 0;
221  gemm_kernel_args_.reserve(group_count);
222 
223  for(std::size_t i = 0; i < gemm_descs.size(); ++i)
224  {
225  const index_t M = gemm_descs[i].M;
226  const index_t N = gemm_descs[i].N;
227  const index_t K = gemm_descs[i].K;
228 
229  if(M == 0 || N == 0 || K == 0)
230  {
231  continue;
232  }
233 
234  const index_t stride_a = gemm_descs[i].stride_A;
235  const index_t stride_b = gemm_descs[i].stride_B;
236  const index_t stride_e = gemm_descs[i].stride_E;
237  auto stride_ds = gemm_descs[i].stride_Ds;
238 
239  const index_t grid_size_grp = TilePartitioner::GridSize(M, N) * gemm_descs[i].k_batch;
240 
241  const index_t block_start = grid_size;
242  const index_t block_end = grid_size + grid_size_grp;
243 
244  grid_size += grid_size_grp;
245 
246  auto karg = UniversalGemmKernelArgs<1, 1, NumDTensor_>{
247  {type_convert<const ADataType*>(gemm_descs[i].a_ptr)},
248  {type_convert<const BDataType*>(gemm_descs[i].b_ptr)},
249  {gemm_descs[i].ds_ptr},
250  type_convert<CDataType*>(gemm_descs[i].e_ptr),
251  M,
252  N,
253  K,
254  {stride_a},
255  {stride_b},
256  stride_ds,
257  stride_e,
258  gemm_descs[i].k_batch};
259 
260  gemm_kernel_args_.emplace_back(std::move(karg), block_start, block_end);
261  }
262 
263  return gemm_kernel_args_;
264  }
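For illustration only: a sketch of the host staging that typically surrounds MakeKargs, assuming the per-group kernel arguments are copied into a device workspace of GetWorkSpaceSize(...) bytes and that the kernel's operator() then receives that workspace pointer plus the group count. Kernel stands for some concrete GroupedGemmKernel instantiation; error handling and the actual launch call are left to the surrounding application.

// Hypothetical host staging; `gemm_descs` is a std::vector<GroupedGemmHostArgs<0>>
// as in the earlier sketch, and <hip/hip_runtime.h> is already included by this header.
template <typename Kernel>
void stage_group_args(const std::vector<ck_tile::GroupedGemmHostArgs<0>>& gemm_descs)
{
    auto kargs = Kernel::MakeKargs(gemm_descs); // skips groups where M, N or K is 0

    void* dev_workspace = nullptr;
    hipMalloc(&dev_workspace, Kernel::GetWorkSpaceSize(gemm_descs));
    hipMemcpy(dev_workspace, kargs.data(),
              kargs.size() * sizeof(ck_tile::GemmTransKernelArg<0>),
              hipMemcpyHostToDevice);

    const dim3 blocks = Kernel::BlockSize();
    const dim3 grids  = Kernel::GridSize(gemm_descs); // a persistent launch would use MaxOccupancyGridSize
    // ... hand (dev_workspace, group_count, grids, blocks) to the application's launch helper ...
    (void)blocks;
    (void)grids;
}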
265 
266  CK_TILE_HOST static bool
267  IsSupportedArgument(const std::vector<GemmTransKernelArg<NumDTensor_>>& kargs)
268  {
269  for(const auto& karg : kargs)
270  {
271  if(!Base::IsSupportedArgument(karg.group_karg))
272  {
273  return false;
274  }
275  }
276  return true;
277  }
278 
279  CK_TILE_HOST_DEVICE static constexpr auto GetSmemSize() -> index_t
280  {
281  return max(GemmPipeline::GetSmemSize(), EpiloguePipeline::GetSmemSize());
282  }
283 
284  CK_TILE_DEVICE void Run(const UniversalGemmKernelArgs<1, 1, NumDTensor_>& kargs,
285  const tuple<index_t, index_t>& block_idx_2d,
286  const index_t block_idx_z) const
287  {
288 
289  static_assert(GemmPipeline::DoubleSmemBuffer || !GemmPipeline::Preshuffle,
290  "SingleSmemBuffer and Preshuffle cannot both be enabled simultaneously!");
291 
292  const auto [iM, iN] = block_idx_2d;
293 
294  const index_t i_m = amd_wave_read_first_lane(iM * TilePartitioner::MPerBlock);
295  const index_t i_n = amd_wave_read_first_lane(iN * TilePartitioner::NPerBlock);
296 
297  const typename Base::SplitKBatchOffset splitk_batch_offset(kargs, block_idx_z);
298 
299  const ADataType* a_ptr = static_cast<const ADataType*>(kargs.as_ptr[0]) +
300  splitk_batch_offset.as_k_split_offset[0];
301  const BDataType* b_ptr = static_cast<const BDataType*>(kargs.bs_ptr[0]) +
302  splitk_batch_offset.bs_k_split_offset[0];
303  CDataType* c_ptr = static_cast<CDataType*>(kargs.e_ptr);
304 
305  // allocate LDS
306  __shared__ char smem_ptr[GetSmemSize()];
307 
308  // TO DO:
309  // Can we simplify this branching logic?
310  if constexpr(GemmPipeline::DoubleSmemBuffer == true)
311  {
312 
313  RunGemmWithPipelineSelection2LDS(
314  a_ptr, b_ptr, c_ptr, kargs.ds_ptr, smem_ptr, kargs, splitk_batch_offset, i_m, i_n);
315  }
316  else // SingleSmemBuffer
317  {
318 
319  if constexpr(UsePersistentKernel)
320  {
321  RunGemmWithPipelineSelection(a_ptr,
322  b_ptr,
323  kargs.ds_ptr,
324  c_ptr,
325  smem_ptr,
326  kargs,
327  splitk_batch_offset,
328  i_m,
329  i_n);
330  }
331  else // Non-persistent kernel
332  {
333  Base::RunGemm({a_ptr},
334  {b_ptr},
335  kargs.ds_ptr,
336  c_ptr,
337  smem_ptr,
338  kargs,
339  splitk_batch_offset,
340  i_m,
341  i_n);
342  }
343  }
344  }
345 
365  CK_TILE_DEVICE static void
366  RunGemmWithPipelineSelection(const ADataType* a_ptr,
367  const BDataType* b_ptr,
368  const std::array<const void*, NumDTensor_>& ds_ptr,
369  CDataType* c_ptr,
370  void* smem_ptr_0,
371  const UniversalGemmKernelArgs<1, 1, NumDTensor_>& kargs,
372  const typename Base::SplitKBatchOffset& splitk_batch_offset,
373  const index_t block_idx_m,
374  const index_t block_idx_n)
375  {
376  // Create block windows using specialized methods
377  const auto& a_block_window =
378  Base::MakeABlockWindows({a_ptr}, kargs, splitk_batch_offset.splitted_k, block_idx_m)
379  .at(Base::I0);
380  const auto& b_block_window =
381  Base::MakeBBlockWindows({b_ptr}, kargs, splitk_batch_offset.splitted_k, block_idx_n)
382  .at(Base::I0);
383  const auto& d_block_window =
384  Base::MakeDBlockWindows(ds_ptr, kargs, block_idx_m, block_idx_n);
385 
386  const index_t num_loop =
387  amd_wave_read_first_lane(TilePartitioner::GetLoopNum(splitk_batch_offset.splitted_k));
388 
389  // Run GEMM cooperatively by whole workgroup.
390  const auto& c_block_tile = GemmPipeline{}.template operator()(
391  a_block_window, b_block_window, num_loop, smem_ptr_0);
392 
393  // Run Epilogue Pipeline
394  if(kargs.k_batch == 1)
395  {
396  auto c_block_window = Base::template MakeCBlockWindows<memory_operation_enum::set>(
397  c_ptr, kargs, block_idx_m, block_idx_n);
398 
399  EpiloguePipeline{}(c_block_window, c_block_tile, d_block_window, smem_ptr_0);
400  }
401  else
402  {
403  auto c_block_window =
404  Base::template MakeCBlockWindows<memory_operation_enum::atomic_add>(
405  c_ptr, kargs, block_idx_m, block_idx_n);
406 
407  EpiloguePipeline{}(c_block_window, c_block_tile, d_block_window, smem_ptr_0);
408  }
409  }
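For illustration only: the reason the epilogue switches from memory_operation_enum::set to atomic_add when k_batch > 1 is that each split-K slice produces only a partial dot product over its chunk of K, so the slices must accumulate into E rather than overwrite it. A scalar sketch with hypothetical sizes:

// Hypothetical scalar view of split-K for one output element e(m, n).
float split_k_accumulate(const float* a, const float* b, int K, int k_batch)
{
    float e = 0.f;                       // output must start zeroed when slices accumulate
    const int k_per_split = K / k_batch; // assume K divides evenly for this sketch
    for(int split = 0; split < k_batch; ++split) // on the GPU each split runs in a different workgroup
    {
        float partial = 0.f;
        for(int k = split * k_per_split; k < (split + 1) * k_per_split; ++k)
            partial += a[k] * b[k];
        e += partial;                    // corresponds to the atomic_add epilogue path
    }
    return e;
}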
410 
426  CK_TILE_DEVICE static void
427  RunGemmWithPipelineSelection2LDS(const ADataType* a_ptr,
428  const BDataType* b_ptr,
429  CDataType* c_ptr,
430  const std::array<const void*, NumDTensor_>& ds_ptr,
431  void* __restrict__ smem_ptr,
432  const UniversalGemmKernelArgs<1, 1, NumDTensor_>& kargs,
433  const typename Base::SplitKBatchOffset& splitk_batch_offset,
434  const index_t block_idx_m,
435  const index_t block_idx_n)
436  {
437  // Create block windows using specialized methods
438  const auto& a_block_window =
439  Base::MakeABlockWindows({a_ptr}, kargs, splitk_batch_offset.splitted_k, block_idx_m)
440  .at(Base::I0);
441  const auto& b_block_window =
442  Base::MakeBBlockWindows({b_ptr}, kargs, splitk_batch_offset.splitted_k, block_idx_n)
443  .at(Base::I0);
444  const auto& d_block_window =
445  Base::MakeDBlockWindows(ds_ptr, kargs, block_idx_m, block_idx_n);
446 
447  const index_t num_loop =
448  amd_wave_read_first_lane(TilePartitioner::GetLoopNum(splitk_batch_offset.splitted_k));
449 
450  // Run GEMM cooperatively by whole workgroup.
451  const auto& c_block_tile =
452  GemmPipeline{}.template operator()(a_block_window, b_block_window, num_loop, smem_ptr);
453 
454  // Run Epilogue Pipeline
455  if(kargs.k_batch == 1)
456  {
457  auto c_block_window = Base::template MakeCBlockWindows<memory_operation_enum::set>(
458  c_ptr, kargs, block_idx_m, block_idx_n);
459 
460  EpiloguePipeline{}(c_block_window, c_block_tile, d_block_window, smem_ptr);
461  }
462  else
463  {
464  auto c_block_window =
465  Base::template MakeCBlockWindows<memory_operation_enum::atomic_add>(
466  c_ptr, kargs, block_idx_m, block_idx_n);
467 
468  EpiloguePipeline{}(c_block_window, c_block_tile, d_block_window, smem_ptr);
469  }
470  }
471 
472  CK_TILE_DEVICE index_t FindGroupId(const GemmTransKernelArg<NumDTensor_>* gemm_desc_ptr,
473  index_t block_id,
474  index_t group_count) const
475  {
476  index_t left = 0;
477  index_t right = group_count;
478  index_t group_id = index_t((left + right) >> 1);
479 
480  while((!(block_id >= gemm_desc_ptr[group_id].block_start &&
481  block_id < gemm_desc_ptr[group_id].block_end)) &&
482  left <= right)
483  {
484  if(block_id < gemm_desc_ptr[group_id].block_start)
485  {
486  right = group_id;
487  }
488  else
489  {
490  left = group_id;
491  }
492  group_id = index_t((left + right) >> 1);
493  }
494 
495  return group_id;
496  }
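For illustration only: the same bisection over per-group [block_start, block_end) ranges, written as a small standalone program. The Range struct and the three ranges below are hypothetical and only demonstrate how a flat workgroup id is mapped back to its group.

#include <cassert>

struct Range { int block_start; int block_end; };

// Simplified stand-in for FindGroupId's search over the per-group block ranges.
int find_group(const Range* ranges, int block_id, int group_count)
{
    int left = 0, right = group_count;
    int group_id = (left + right) / 2;
    while(!(block_id >= ranges[group_id].block_start && block_id < ranges[group_id].block_end))
    {
        if(block_id < ranges[group_id].block_start) { right = group_id; }
        else                                        { left = group_id;  }
        group_id = (left + right) / 2;
    }
    return group_id;
}

int main()
{
    // Three groups occupying workgroups [0,4), [4,10) and [10,12).
    const Range ranges[] = {{0, 4}, {4, 10}, {10, 12}};
    assert(find_group(ranges, 0, 3) == 0);
    assert(find_group(ranges, 7, 3) == 1);
    assert(find_group(ranges, 11, 3) == 2);
    return 0;
}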
497 
498  // For non-persistent kernels
499  template <bool U = UsePersistentKernel, typename = std::enable_if_t<!U>>
500  CK_TILE_DEVICE void operator()(const void CK_TILE_CONSTANT_ADDRESS_SPACE* gemm_descs_const,
501  index_t group_count) const
502  {
503  const index_t block_id = ck_tile::get_block_1d_id();
504  const auto gemm_desc_ptr = reinterpret_cast<const GemmTransKernelArg<NumDTensor_>*>(
505  cast_pointer_to_generic_address_space(gemm_descs_const));
506 
507  const index_t group_id = FindGroupId(gemm_desc_ptr, block_id, group_count);
508  const auto& kargs = gemm_desc_ptr[group_id];
509 
510  const auto grid_size_2d = TilePartitioner::GridSize(kargs.group_karg.M, kargs.group_karg.N);
511  const auto block_idx_2d = OffsetTile1DPartitioner::GetOffsetedTileIndex(
512  0,
513  kargs.group_karg.M,
514  kargs.group_karg.N,
515  (block_id - kargs.block_start) % grid_size_2d);
516  Run(kargs.group_karg, block_idx_2d, (block_id - kargs.block_start) / grid_size_2d);
517  }
518 
519  // For persistent kernels
520  template <bool U = UsePersistentKernel,
521  typename = std::enable_if_t<U>,
522  typename = void> // extra template parameter to avoid redefinition
523  CK_TILE_DEVICE void operator()(const void CK_TILE_CONSTANT_ADDRESS_SPACE* gemm_descs_const,
524  const index_t group_count) const
525  {
526  const index_t grid_size = ck_tile::get_grid_size();
527  const auto gemm_desc_ptr = reinterpret_cast<const GemmTransKernelArg<NumDTensor_>*>(
528  cast_pointer_to_generic_address_space(gemm_descs_const));
529  index_t block_id = ck_tile::get_block_1d_id(); // initial block_id
530  index_t cum_grid_size = 0;
531  for(index_t group_id = 0; group_id < group_count; ++group_id)
532  {
533  const auto& kargs = gemm_desc_ptr[group_id].group_karg;
534  const auto& k_batch = kargs.k_batch;
535  const auto block_start = cum_grid_size;
536  cum_grid_size += TilePartitioner::GridSize(kargs.M, kargs.N) * k_batch;
537  while(block_id < cum_grid_size)
538  {
539  const auto grid_size_2d = TilePartitioner::GridSize(kargs.M, kargs.N);
540  const auto block_idx_2d = OffsetTile1DPartitioner::GetOffsetedTileIndex(
541  0, kargs.M, kargs.N, (block_id - block_start) % grid_size_2d);
542  Run(kargs, block_idx_2d, (block_id - block_start) / grid_size_2d);
543  block_sync_lds();
544  block_id = block_id + grid_size; // advance to next block
545  // NOTE: this check is redundant but helps the compiler avoid spilling some VGPR
546  if(block_id >= cum_grid_size)
547  {
548  break; // exit the loop if all blocks are processed
549  }
550  }
551  }
552  }
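For illustration only: the persistent path launches a fixed number of workgroups (sized by MaxOccupancyGridSize) and lets each one walk the concatenated tile space of all groups with a stride equal to the grid size. A host-side sketch with hypothetical tile counts, printing which flat tiles each workgroup would pick up:

#include <cstdio>

int main()
{
    const int grid_size   = 4;  // assumed persistent grid size (number of launched workgroups)
    const int total_tiles = 14; // assumed sum over groups of GridSize(M, N) * k_batch

    for(int bid = 0; bid < grid_size; ++bid)              // each persistent workgroup
        for(int tile = bid; tile < total_tiles; tile += grid_size)
            std::printf("workgroup %d -> flat tile %d\n", bid, tile);
    return 0;
}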
553 };
554 
555 } // namespace ck_tile
#define CK_TILE_DEVICE
Definition: config.hpp:45
#define CK_TILE_HOST
Definition: config.hpp:44
#define CK_TILE_HOST_DEVICE
Definition: config.hpp:46
#define HIP_CHECK_ERROR(retval_or_funcall)
Definition: hip_check_error.hpp:23
Definition: cluster_descriptor.hpp:13
__device__ uint32_t amd_wave_read_first_lane(uint16_t v)
Definition: amd_buffer_addressing.hpp:36
int32_t index_t
Definition: integer.hpp:9
auto concat(const Ts &... xs) -> std::enable_if_t<!AllConvertibleToStringView< Ts... >, std::string >
Definition: concat.hpp:43
remove_cv_t< std::remove_reference_t< T > > remove_cvref_t
Definition: type_traits.hpp:21
typename detail::detector< nonesuch, void, Op, Args... >::value_t is_detected
Definition: type_traits.hpp:67
constexpr CK_TILE_HOST_DEVICE T max(T x)
Definition: math.hpp:157
__device__ index_t get_grid_size()
Definition: get_id.hpp:49
__device__ index_t get_block_1d_id()
Definition: get_id.hpp:47
__device__ T * cast_pointer_to_generic_address_space(T CK_CONSTANT_ADDRESS_SPACE *p)
Definition: amd_address_space.hpp:24
__device__ void block_sync_lds()
Definition: synchronization.hpp:16
Definition: grouped_gemm_kernel.hpp:83
GemmTransKernelArg(UniversalGemmKernelArgs< 1, 1, NumDTensor > &&karg, index_t bl_start, index_t bl_end)
Definition: grouped_gemm_kernel.hpp:89
UniversalGemmKernelArgs< 1, 1, NumDTensor > group_karg
Definition: grouped_gemm_kernel.hpp:84
GemmTransKernelArg(UniversalGemmKernelArgs< 1, 1, NumDTensor > &&karg)
Definition: grouped_gemm_kernel.hpp:96
ck_tile::index_t block_start
Definition: grouped_gemm_kernel.hpp:85
ck_tile::index_t block_end
Definition: grouped_gemm_kernel.hpp:86
The Grouped GEMM kernel host arguments.
Definition: grouped_gemm_kernel.hpp:29
void * e_ptr
Definition: grouped_gemm_kernel.hpp:62
index_t stride_E
Definition: grouped_gemm_kernel.hpp:74
CK_TILE_HOST GroupedGemmHostArgs(const void *a_ptr_, const void *b_ptr_, const std::array< const void *, NumDTensor > &ds_ptr_, void *e_ptr_, index_t k_batch_, index_t M_, index_t N_, index_t K_, index_t stride_A_, index_t stride_B_, const std::array< index_t, NumDTensor > &stride_Ds_, index_t stride_E_)
Definition: grouped_gemm_kernel.hpp:30
index_t stride_C
Definition: grouped_gemm_kernel.hpp:75
index_t k_batch
Definition: grouped_gemm_kernel.hpp:78
index_t stride_A
Definition: grouped_gemm_kernel.hpp:69
index_t M
Definition: grouped_gemm_kernel.hpp:66
void * c_ptr
Definition: grouped_gemm_kernel.hpp:63
index_t stride_B
Definition: grouped_gemm_kernel.hpp:70
const void * b_ptr
Definition: grouped_gemm_kernel.hpp:58
const void * a_ptr
Definition: grouped_gemm_kernel.hpp:57
index_t N
Definition: grouped_gemm_kernel.hpp:67
index_t K
Definition: grouped_gemm_kernel.hpp:68
const std::array< const void *, NumDTensor > ds_ptr
Definition: grouped_gemm_kernel.hpp:59
const std::array< index_t, NumDTensor > stride_Ds
Definition: grouped_gemm_kernel.hpp:71
Definition: grouped_gemm_kernel.hpp:104
remove_cvref_t< TilePartitioner_ > TilePartitioner
Definition: grouped_gemm_kernel.hpp:109
static constexpr index_t NumDTensor_
Definition: grouped_gemm_kernel.hpp:124
static CK_TILE_HOST auto GetWorkSpaceSize(index_t group_count) -> std::size_t
Definition: grouped_gemm_kernel.hpp:168
static CK_TILE_HOST bool IsSupportedArgument(const std::vector< GemmTransKernelArg< NumDTensor_ >> &kargs)
Definition: grouped_gemm_kernel.hpp:267
static CK_TILE_DEVICE void RunGemmWithPipelineSelection(const ADataType *a_ptr, const BDataType *b_ptr, const std::array< const void *, NumDTensor_ > &ds_ptr, CDataType *c_ptr, void *smem_ptr_0, const UniversalGemmKernelArgs< 1, 1, NumDTensor_ > &kargs, const typename Base::SplitKBatchOffset &splitk_batch_offset, const index_t block_idx_m, const index_t block_idx_n)
Runs single GEMM problem cooperatively by whole workgroup.
Definition: grouped_gemm_kernel.hpp:366
remove_cvref_t< typename GemmPipeline::ALayout > ALayout
Definition: grouped_gemm_kernel.hpp:114
CK_TILE_DEVICE void Run(const UniversalGemmKernelArgs< 1, 1, NumDTensor_ > &kargs, const tuple< index_t, index_t > &block_idx_2d, const index_t block_idx_z) const
Definition: grouped_gemm_kernel.hpp:284
remove_cvref_t< typename GemmPipeline::BLayout > BLayout
Definition: grouped_gemm_kernel.hpp:115
CK_TILE_DEVICE void operator()(const void CK_TILE_CONSTANT_ADDRESS_SPACE *gemm_descs_const, index_t group_count) const
Definition: grouped_gemm_kernel.hpp:500
remove_cvref_t< typename GemmPipeline::BDataType > BDataType
Definition: grouped_gemm_kernel.hpp:120
static constexpr index_t kBlockSize
Definition: grouped_gemm_kernel.hpp:144
static constexpr CK_TILE_HOST_DEVICE auto GetSmemSize() -> index_t
Definition: grouped_gemm_kernel.hpp:279
static CK_TILE_HOST auto GridSize(const std::vector< GroupedGemmHostArgs< NumDTensor_ >> &gemm_descs)
Definition: grouped_gemm_kernel.hpp:203
remove_cvref_t< typename GemmPipeline::CLayout > CLayout
Definition: grouped_gemm_kernel.hpp:116
remove_cvref_t< EpiloguePipeline_ > EpiloguePipeline
Definition: grouped_gemm_kernel.hpp:111
CK_TILE_DEVICE void operator()(const void CK_TILE_CONSTANT_ADDRESS_SPACE *gemm_descs_const, const index_t group_count) const
Definition: grouped_gemm_kernel.hpp:523
CK_TILE_DEVICE index_t FindGroupId(const GemmTransKernelArg< NumDTensor_ > *gemm_desc_ptr, index_t block_id, index_t group_count) const
Definition: grouped_gemm_kernel.hpp:472
static CK_TILE_HOST auto BlockSize() -> dim3
Definition: grouped_gemm_kernel.hpp:173
remove_cvref_t< typename EpiloguePipeline::ODataType > CDataType
Definition: grouped_gemm_kernel.hpp:121
static CK_TILE_HOST auto MaxOccupancyGridSize(const stream_config &s) -> dim3
Get the maximum occupancy grid size for the persistent kernel on the current device.
Definition: grouped_gemm_kernel.hpp:191
static CK_TILE_HOST const std::string GetName()
Definition: grouped_gemm_kernel.hpp:147
remove_cvref_t< typename EpiloguePipeline::DsDataType > DsDataType
Definition: grouped_gemm_kernel.hpp:122
remove_cvref_t< GemmPipeline_ > GemmPipeline
Definition: grouped_gemm_kernel.hpp:110
static CK_TILE_HOST auto MakeKargs(const std::vector< GroupedGemmHostArgs< NumDTensor_ >> &gemm_descs) -> std::vector< GemmTransKernelArg< NumDTensor_ >>
Definition: grouped_gemm_kernel.hpp:215
remove_cvref_t< typename GemmPipeline::ADataType > ADataType
Specify the data type configurations for A, B, C/E.
Definition: grouped_gemm_kernel.hpp:119
static CK_TILE_HOST auto GetWorkSpaceSize(const std::vector< GroupedGemmHostArgs<>> &gemm_descs) -> std::size_t
Definition: grouped_gemm_kernel.hpp:163
static CK_TILE_DEVICE void RunGemmWithPipelineSelection2LDS(const ADataType *a_ptr, const BDataType *b_ptr, CDataType *c_ptr, const std::array< const void *, NumDTensor_ > &ds_ptr, void *__restrict__ smem_ptr, const UniversalGemmKernelArgs< 1, 1, NumDTensor_ > &kargs, const typename Base::SplitKBatchOffset &splitk_batch_offset, const index_t block_idx_m, const index_t block_idx_n)
Runs single GEMM problem cooperatively by whole workgroup.
Definition: grouped_gemm_kernel.hpp:427
static constexpr bool UsePersistentKernel
Definition: grouped_gemm_kernel.hpp:145
Struct used to calculate offseted tile indexes.
Definition: gemm_tile_partitioner.hpp:184
static CK_TILE_DEVICE auto GetOffsetedTileIndex(index_t block_start, index_t M, index_t N) noexcept -> const tuple< index_t, index_t >
The function subtracts the block's start (offset) from 1D raw-indexes.
Definition: gemm_tile_partitioner.hpp:192
Definition: universal_gemm_kernel.hpp:327
std::array< index_t, NumATensor > as_k_split_offset
Definition: universal_gemm_kernel.hpp:395
index_t splitted_k
Definition: universal_gemm_kernel.hpp:397
std::array< index_t, NumBTensor > bs_k_split_offset
Definition: universal_gemm_kernel.hpp:396
The GEMM kernel device arguments.
Definition: universal_gemm_kernel.hpp:86
void * e_ptr
The E output tensor's pointer to device memory.
Definition: universal_gemm_kernel.hpp:94
const std::array< const void *, NumDTensor > ds_ptr
The Ds input tensor's pointer to device memory.
Definition: universal_gemm_kernel.hpp:92
const std::array< const void *, NumATensor > as_ptr
The As input tensor's pointer to device memory.
Definition: universal_gemm_kernel.hpp:88
index_t k_batch
Definition: universal_gemm_kernel.hpp:113
const std::array< const void *, NumBTensor > bs_ptr
The Bs input tensor's pointer to device memory.
Definition: universal_gemm_kernel.hpp:90
The Universal GEMM kernel template.
Definition: universal_gemm_kernel.hpp:154
static CK_TILE_DEVICE auto MakeBBlockWindows(const std::array< const BDataType *, NumBTensor > &bs_ptr, const KernelArgs &kargs, const index_t k_size, const index_t i_n)
Definition: universal_gemm_kernel.hpp:742
static CK_TILE_DEVICE auto MakeDBlockWindows(const std::array< const void *, NumDTensor > &ds_ptr, const KernelArgs &kargs, const index_t i_m, const index_t i_n)
Definition: universal_gemm_kernel.hpp:893
static constexpr auto I0
Definition: universal_gemm_kernel.hpp:236
static CK_TILE_HOST bool IsSupportedArgument(const KernelArgs &kargs)
Definition: universal_gemm_kernel.hpp:400
static CK_TILE_DEVICE auto MakeABlockWindows(const std::array< const ADataType *, NumATensor > &as_ptr, const KernelArgs &kargs, const index_t k_size, const index_t i_m)
Definition: universal_gemm_kernel.hpp:665
static CK_TILE_DEVICE void RunGemm(const std::array< const ADataType *, NumATensor > &as_ptr, const std::array< const BDataType *, NumBTensor > &bs_ptr, const std::array< const void *, NumDTensor > &ds_ptr, EDataType *e_ptr, void *smem_ptr, const KernelArgs &kargs, const SplitKBatchOffset &splitk_batch_offset, const index_t block_idx_m, const index_t block_idx_n)
Runs single GEMM problem cooperatively by whole workgroup.
Definition: universal_gemm_kernel.hpp:1039
Definition: stream_config.hpp:30
Definition: tuple.hpp:192