include/ck/tensor_operation/gpu/block/blockwise_gemm_dpp.hpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/utility/common_header.hpp"
#include "ck/tensor_description/tensor_adaptor.hpp"
#include "ck/tensor_operation/gpu/thread/threadwise_tensor_slice_transfer.hpp"
#include "ck/tensor_operation/gpu/warp/dpp_gemm.hpp"

namespace ck {

/**
 * Blockwise GEMM that uses DPP (Data Parallel Primitives) instruction modifiers to share
 * operand data between lanes of a wavefront, so each thread loads less data from LDS.
 *
 * A block tile of A is expected in [K0, M, K1] layout and B in [K0, N, K1]; the C
 * accumulators stay in VGPRs, tiled as MRepeat x NRepeat DPP sub-tiles per wave.
 */
template <index_t BlockSize,
          typename ABDataType,
          typename AccDataType,
          typename AK0MK1BlockDesc,
          typename BK0NK1BlockDesc,
          index_t MPerDpp,
          index_t NPerDpp,
          index_t MRepeat,
          index_t NRepeat,
          index_t KPack>
struct BlockwiseGemmDpp_ak0mak1_bk0nbk1_m0n0m1n1m2n2
{
    static constexpr auto I0 = Number<0>{};
    static constexpr auto I1 = Number<1>{};
    static constexpr auto I2 = Number<2>{};
    static constexpr auto I3 = Number<3>{};

    using ThisThreadBlock = ThisThreadBlock<BlockSize>;

    static constexpr index_t WaveSize = get_warp_size();

    static constexpr index_t MPerBlock = AK0MK1BlockDesc{}.GetLength(I1);
    static constexpr index_t NPerBlock = BK0NK1BlockDesc{}.GetLength(I1);
    static constexpr index_t KPerBlock =
        BK0NK1BlockDesc{}.GetLength(I0) * BK0NK1BlockDesc{}.GetLength(I2);

    static constexpr index_t A_K0 = AK0MK1BlockDesc{}.GetLength(I0);
    static constexpr index_t B_K0 = BK0NK1BlockDesc{}.GetLength(I0);
    static constexpr index_t A_K1 = AK0MK1BlockDesc{}.GetLength(I2);
    static constexpr index_t B_K1 = BK0NK1BlockDesc{}.GetLength(I2);

    static constexpr auto dpp_gemm = DppGemm<ABDataType, MPerDpp, NPerDpp, KPack>{};

    static constexpr index_t KPerThread = KPerBlock / dpp_gemm.K0PerDpp;

    static constexpr index_t MWaves = MPerBlock / (MRepeat * MPerDpp);
    static constexpr index_t NWaves = NPerBlock / (NRepeat * NPerDpp);
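
    // Worked example (illustrative numbers, not part of the original file): with
    // BlockSize = 256, WaveSize = 64, MPerBlock = NPerBlock = 32, MPerDpp = NPerDpp = 8
    // and MRepeat = NRepeat = 2, each wave computes a 16x16 sub-tile, so
    // MWaves = 32 / (2 * 8) = 2 and NWaves = 32 / (2 * 8) = 2, which is consistent with
    // BlockSize = MWaves * NWaves * WaveSize = 2 * 2 * 64.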

    StaticBufferTupleOfVector<AddressSpaceEnum::Vgpr,
                              AccDataType,
                              MRepeat * NRepeat,
                              dpp_gemm.GetRegSizePerDpp(),
                              true>
        c_thread_buf_;

    __host__ __device__ constexpr auto& GetCThreadBuffer() { return c_thread_buf_; }

    __device__ static auto GetWaveIdx()
    {
        const index_t thread_id = ThisThreadBlock::GetThreadId();

        constexpr auto threadid_to_wave_idx_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_merge_transform(make_tuple(MWaves, NWaves, WaveSize))),
            make_tuple(Sequence<0, 1, 2>{}),
            make_tuple(Sequence<0>{}));

        return threadid_to_wave_idx_adaptor.CalculateBottomIndex(make_multi_index(thread_id));
    }
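
    // Illustrative decode (assumed parameters): with MWaves = NWaves = 2 and WaveSize = 64,
    // thread_id = (wave_m * NWaves + wave_n) * WaveSize + lane, so thread 193 yields
    // (wave_m, wave_n, lane) = (1, 1, 1), since 193 = (1 * 2 + 1) * 64 + 1.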

    __device__ static auto CalculateAThreadOriginDataIndex_M0_M1_M2_K()
    {
        const auto wave_idx   = GetWaveIdx();
        const auto waveId_m   = wave_idx[I0];
        const auto dpp_a_idx  = dpp_gemm.CalculateAThreadOriginDataIndex_K_M();
        const auto dpp_a_idx_k = dpp_a_idx[I0];
        const auto dpp_a_idx_m = dpp_a_idx[I1];
        return make_tuple(0, waveId_m, dpp_a_idx_m, KPerThread * dpp_a_idx_k);
    }

    __device__ static auto CalculateBThreadOriginDataIndex_N0_N1_N2_K()
    {
        const auto wave_idx   = GetWaveIdx();
        const auto waveId_n   = wave_idx[I1];
        const auto dpp_b_idx  = dpp_gemm.CalculateBThreadOriginDataIndex_K_N();
        const auto dpp_b_idx_k = dpp_b_idx[I0];
        const auto dpp_b_idx_n = dpp_b_idx[I1];
        return make_tuple(0, waveId_n, dpp_b_idx_n, KPerThread * dpp_b_idx_k);
    }

    template <index_t m0, index_t n0>
    __device__ static auto CalculateCThreadOriginDataIndex(Number<m0>, Number<n0>)
    {
        const auto wave_idx = GetWaveIdx();
        const auto waveId_m = wave_idx[I0];
        const auto waveId_n = wave_idx[I1];

        const auto blk_idx      = dpp_gemm.GetBeginOfThreadBlk();
        const auto blk_m_offset = blk_idx[I0];
        const auto blk_n_offset = blk_idx[I1];

        constexpr auto mrepeat_mwave_MPerDpp_to_m_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(MRepeat, MWaves, MPerDpp))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        constexpr auto nrepeat_nwave_NPerDpp_to_n_adaptor = make_single_stage_tensor_adaptor(
            make_tuple(make_unmerge_transform(make_tuple(NRepeat, NWaves, NPerDpp))),
            make_tuple(Sequence<0>{}),
            make_tuple(Sequence<0, 1, 2>{}));

        const index_t c_thread_m = mrepeat_mwave_MPerDpp_to_m_adaptor.CalculateBottomIndex(
            make_tuple(m0, waveId_m, blk_m_offset))[I0];
        const index_t c_thread_n = nrepeat_nwave_NPerDpp_to_n_adaptor.CalculateBottomIndex(
            make_tuple(n0, waveId_n, blk_n_offset))[I0];

        return make_tuple(c_thread_m, c_thread_n);
    }
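
    // Illustrative mapping (assumed parameters): with MRepeat = 2, MWaves = 2, MPerDpp = 8,
    // a thread in wave row waveId_m = 1 with blk_m_offset = 3, computing repeat m0 = 1, lands
    // on block-tile row c_thread_m = (1 * 2 + 1) * 8 + 3 = 27; the N side is analogous.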

    __host__ __device__ BlockwiseGemmDpp_ak0mak1_bk0nbk1_m0n0m1n1m2n2()
    {
        static_assert(AK0MK1BlockDesc::IsKnownAtCompileTime() &&
                          BK0NK1BlockDesc::IsKnownAtCompileTime(),
                      "Wrong! Block descriptors should be known at the time of compilation.");

#if defined(__HIP_DEVICE_COMPILE__)
        // The host wave size can differ from the device one, so this assert could fail on the
        // host; it only matters on the device.
        static_assert(ThisThreadBlock::GetNumOfThread() == MWaves * NWaves * WaveSize,
                      "ThisThreadBlock::GetNumOfThread() != MWaves * NWaves * WaveSize\n");
#endif

        static_assert(MPerBlock % (MPerDpp * MRepeat) == 0,
                      "Invalid parameters. MPerBlock must be divisible by MPerDpp * MRepeat.");
        static_assert(NPerBlock % (NPerDpp * NRepeat) == 0,
                      "Invalid parameters. NPerBlock must be divisible by NPerDpp * NRepeat.");
    }
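
    // Worked check (illustrative numbers): MPerBlock = 32, MPerDpp = 8, MRepeat = 2 passes
    // the divisibility assert above (32 % 16 == 0), while MRepeat = 3 would fail it
    // (32 % 24 != 0).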

    __host__ __device__ static constexpr auto GetCThreadDescriptor_M0_N0_M1_N1_M2_N2()
    {
        constexpr auto c_m_n_tblk_lens = dpp_gemm.GetCMNThreadBlkLengths();
        constexpr auto M = c_m_n_tblk_lens[I0];
        constexpr auto N = c_m_n_tblk_lens[I1];

        return make_naive_tensor_descriptor_packed(
            make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M, N));
    }

    __host__ __device__ static constexpr auto GetCThreadDescriptor_G_M0_N0_M1_N1_M2_N2()
    {
        constexpr auto c_m_n_tblk_lens = dpp_gemm.GetCMNThreadBlkLengths();
        constexpr auto M = c_m_n_tblk_lens[I0];
        constexpr auto N = c_m_n_tblk_lens[I1];

        return make_naive_tensor_descriptor_packed(
            make_tuple(I1, Number<MRepeat>{}, Number<NRepeat>{}, I1, I1, M, N));
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_M0_N0_M1_N1_M2_N2()
    {
        constexpr auto c_block_desc_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerDpp>{},
                                                           Number<NPerDpp>{}));

        return c_block_desc_m0_n0_m1_n1_m2_n2;
    }

    __host__ __device__ static constexpr auto GetCBlockDescriptor_G_M0_N0_M1_N1_M2_N2()
    {
        constexpr auto c_block_desc_g_m0_n0_m1_n1_m2_n2 =
            make_naive_tensor_descriptor_packed(make_tuple(I1,
                                                           Number<MRepeat>{},
                                                           Number<NRepeat>{},
                                                           Number<MWaves>{},
                                                           Number<NWaves>{},
                                                           Number<MPerDpp>{},
                                                           Number<NPerDpp>{}));
        return c_block_desc_g_m0_n0_m1_n1_m2_n2;
    }

    template <typename CGridDesc_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_M0_N0_M1_N1_M2_N2(const CGridDesc_M_N& c_grid_desc_m_n)
    {
        const auto M = c_grid_desc_m_n.GetLength(I0);
        const auto N = c_grid_desc_m_n.GetLength(I1);

        const auto c_grid_desc_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_m_n,
            make_tuple(make_unmerge_transform(make_tuple(M / (MWaves * MPerDpp), MWaves, MPerDpp)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerDpp), NWaves, NPerDpp))),
            make_tuple(Sequence<0>{}, Sequence<1>{}),
            make_tuple(Sequence<0, 2, 4>{}, Sequence<1, 3, 5>{}));

        return c_grid_desc_m0_n0_m1_n1_m2_n2;
    }
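
    // Illustrative shape (assumed numbers): M = 256 with MWaves = 2 and MPerDpp = 8 unmerges
    // the M axis into (M0, M1, M2) = (16, 2, 8); N is split the same way, giving the
    // [M0, N0, M1, N1, M2, N2] view used when C is written back to the grid.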

    template <typename CGridDesc_G_M_N>
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_G_M0_N0_M1_N1_M2_N2(const CGridDesc_G_M_N& c_grid_desc_g_m_n)
    {
        const auto G = c_grid_desc_g_m_n.GetLength(I0);
        const auto M = c_grid_desc_g_m_n.GetLength(I1);
        const auto N = c_grid_desc_g_m_n.GetLength(I2);

        const auto c_grid_desc_g_m0_n0_m1_n1_m2_n2 = transform_tensor_descriptor(
            c_grid_desc_g_m_n,
            make_tuple(make_pass_through_transform(G),
                       make_unmerge_transform(make_tuple(M / (MWaves * MPerDpp), MWaves, MPerDpp)),
                       make_unmerge_transform(make_tuple(N / (NWaves * NPerDpp), NWaves, NPerDpp))),
            make_tuple(Sequence<0>{}, Sequence<1>{}, Sequence<2>{}),
            make_tuple(Sequence<0>{}, Sequence<1, 3, 5>{}, Sequence<2, 4, 6>{}));

        return c_grid_desc_g_m0_n0_m1_n1_m2_n2;
    }

    __host__ __device__ static constexpr auto MakeABlockDescriptor_M0_M1_M2_K()
    {
        return transform_tensor_descriptor(
            AK0MK1BlockDesc{},
            make_tuple(
                make_merge_transform_v3_division_mod(make_tuple(Number<A_K0>{}, Number<A_K1>{})),
                make_unmerge_transform(
                    make_tuple(Number<MRepeat>{}, Number<MWaves>{}, Number<MPerDpp>{}))),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
            make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
    }

    __host__ __device__ static constexpr auto MakeBBlockDescriptor_N0_N1_N2_K()
    {
        return transform_tensor_descriptor(
            BK0NK1BlockDesc{},
            make_tuple(
                make_merge_transform_v3_division_mod(make_tuple(Number<B_K0>{}, Number<B_K1>{})),
                make_unmerge_transform(
                    make_tuple(Number<NRepeat>{}, Number<NWaves>{}, Number<NPerDpp>{}))),
            make_tuple(Sequence<0, 2>{}, Sequence<1>{}),
            make_tuple(Sequence<3>{}, Sequence<0, 1, 2>{}));
    }

    static constexpr auto a_block_desc_m0_m1_m2_k = MakeABlockDescriptor_M0_M1_M2_K();
    static constexpr auto b_block_desc_n0_n1_n2_k = MakeBBlockDescriptor_N0_N1_N2_K();

    template <typename ABlockBuffer, typename BBlockBuffer, typename CThreadBuffer>
    __device__ void Run(const ABlockBuffer& a_block_buf,
                        const BBlockBuffer& b_block_buf,
                        CThreadBuffer& c_thread_buf) const
    {
        auto a_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ABDataType>(
            a_thread_desc_.GetElementSpaceSize());
        auto b_thread_buf = make_static_buffer<AddressSpaceEnum::Vgpr, ABDataType>(
            b_thread_desc_.GetElementSpaceSize());

        static_for<0, MRepeat, 1>{}([&](auto m0) {
            // read A
            a_thread_copy_.Run(a_block_desc_m0_m1_m2_k,
                               make_tuple(m0, I0, I0, I0),
                               a_block_buf,
                               a_thread_desc_,
                               make_tuple(I0, I0, I0, I0),
                               a_thread_buf);

            static_for<0, NRepeat, 1>{}([&](auto n0) {
                // read B
                b_thread_copy_.Run(b_block_desc_n0_n1_n2_k,
                                   make_tuple(n0, I0, I0, I0),
                                   b_block_buf,
                                   b_thread_desc_,
                                   make_tuple(I0, I0, I0, I0),
                                   b_thread_buf);

                static_for<0, KPerThread, KPack>{}([&](auto k) {
                    vector_type<ABDataType, KPack> a_thread_vec;
                    vector_type<ABDataType, KPack> b_thread_vec;

                    static_for<0, KPack, 1>{}([&](auto i) {
                        a_thread_vec.template AsType<ABDataType>()(i) = a_thread_buf
                            [Number<a_thread_desc_.CalculateOffset(make_tuple(0, 0, 0, k + i))>{}];
                        b_thread_vec.template AsType<ABDataType>()(i) = b_thread_buf
                            [Number<b_thread_desc_.CalculateOffset(make_tuple(0, 0, 0, k + i))>{}];
                    });

                    using dpp_input_type =
                        typename vector_type<ABDataType, dpp_gemm.K1PerDpp>::type;

                    constexpr index_t c_offset =
                        c_thread_desc_.CalculateOffset(make_tuple(m0, n0, 0));

                    dpp_gemm.Run(a_thread_vec.template AsType<dpp_input_type>(),
                                 b_thread_vec.template AsType<dpp_input_type>(),
                                 c_thread_buf.GetVectorTypeReference(Number<c_offset>{}));
                });
            });
        });
    }
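
    // Typical use from a kernel, as a minimal sketch (the LDS buffer names and the
    // surrounding K-loop are assumptions, not part of this file):
    //
    //   auto blockwise_gemm = BlockwiseGemmDpp_ak0mak1_bk0nbk1_m0n0m1n1m2n2<...>{};
    //   auto& c_thread_buf  = blockwise_gemm.GetCThreadBuffer();
    //   for(index_t k = 0; k < K; k += KPerBlock)
    //   {
    //       // ... copy the next A/B block tiles into LDS ...
    //       block_sync_lds();
    //       blockwise_gemm.Run(a_block_buf, b_block_buf, c_thread_buf); // C += A * B
    //   }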

    protected:
    // A[M0, M1, M2, KPerThread]
    static constexpr auto a_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(I1, I1, I1, Number<KPerThread>{}));

    // B[N0, N1, N2, KPerThread]
    static constexpr auto b_thread_desc_ =
        make_naive_tensor_descriptor_packed(make_tuple(I1, I1, I1, Number<KPerThread>{}));

    // C[M, N, NumRegDpp]
    static constexpr auto c_thread_desc_ = make_naive_tensor_descriptor_packed(
        make_tuple(Number<MRepeat>{}, Number<NRepeat>{}, dpp_gemm.GetRegSizePerDpp()));

    using AThreadCopy = ThreadwiseTensorSliceTransfer_v4<ABDataType,
                                                         ABDataType,
                                                         decltype(a_block_desc_m0_m1_m2_k),
                                                         decltype(a_thread_desc_),
                                                         Sequence<1, 1, 1, KPerThread>,
                                                         Sequence<0, 1, 2, 3>,
                                                         3,
                                                         A_K1,
                                                         A_K1>;

    using BThreadCopy = ThreadwiseTensorSliceTransfer_v4<ABDataType,
                                                         ABDataType,
                                                         decltype(b_block_desc_n0_n1_n2_k),
                                                         decltype(b_thread_desc_),
                                                         Sequence<1, 1, 1, KPerThread>,
                                                         Sequence<0, 1, 2, 3>,
                                                         3,
                                                         B_K1,
                                                         B_K1>;

    AThreadCopy a_thread_copy_{CalculateAThreadOriginDataIndex_M0_M1_M2_K()};
    BThreadCopy b_thread_copy_{CalculateBThreadOriginDataIndex_N0_N1_N2_K()};
};

} // namespace ck