template <typename GridwiseGemm,
          typename FloatAB,
          typename FloatC,
          typename AGridDesc_AK0_M_AK1,
          typename BGridDesc_BK0_N_BK1,
          typename CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation,
          typename Block2CTileMap,
          bool HasMainK0BlockLoop>
__global__ void
#if CK_USE_LAUNCH_BOUNDS
    __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
#endif
        kernel_gemm_xdlops_v3r1(
            const FloatAB* __restrict__ p_a_grid,
            const FloatAB* __restrict__ p_b_grid,
            FloatC* __restrict__ p_c_grid,
            const AGridDesc_AK0_M_AK1 a_grid_desc_ak0_m_ak1,
            const BGridDesc_BK0_N_BK1 b_grid_desc_bk0_n_bk1,
            const CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl
                c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
            const AElementwiseOperation a_element_op,
            const BElementwiseOperation b_element_op,
            const CElementwiseOperation c_element_op,
            const Block2CTileMap block_2_ctile_map)
{
#if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx11__) || \
    /* ... */
    if constexpr(GridwiseGemm::template IsValidCompilationParameter<>())
    {
        __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];

        GridwiseGemm::template Run<HasMainK0BlockLoop>(
            p_a_grid, p_b_grid, p_c_grid, p_shared,
            a_grid_desc_ak0_m_ak1,
            b_grid_desc_bk0_n_bk1,
            c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
            a_element_op, b_element_op, c_element_op, block_2_ctile_map);
    }
#else
    // unused on other targets
    ignore = p_a_grid; ignore = p_b_grid; ignore = p_c_grid;
    ignore = a_grid_desc_ak0_m_ak1;
    ignore = b_grid_desc_bk0_n_bk1;
    ignore = c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl;
    ignore = a_element_op; ignore = b_element_op; ignore = c_element_op;
    ignore = block_2_ctile_map;
#endif
}
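// Illustrative usage (not part of this header): a minimal host-side launch sketch, assuming the
// grid descriptors, element-wise ops and block-to-C-tile map have already been built by a
// device-op wrapper, and that one workgroup is launched per C tile. The exact launch helper and
// template-argument list used by the library may differ.
//
//   const index_t grid_size = (M / MPerBlock) * (N / NPerBlock); // assumed: one block per C tile
//
//   const auto kernel = kernel_gemm_xdlops_v3r1<GridwiseGemm,
//                                               FloatAB,
//                                               FloatC,
//                                               decltype(a_grid_desc_ak0_m_ak1),
//                                               decltype(b_grid_desc_bk0_n_bk1),
//                                               decltype(c_grid_desc),
//                                               AElementwiseOperation,
//                                               BElementwiseOperation,
//                                               CElementwiseOperation,
//                                               typename GridwiseGemm::DefaultBlock2CTileMap,
//                                               true /* HasMainK0BlockLoop */>;
//
//   hipLaunchKernelGGL(kernel, dim3(grid_size), dim3(BlockSize), 0, nullptr,
//                      p_a, p_b, p_c,
//                      a_grid_desc_ak0_m_ak1, b_grid_desc_bk0_n_bk1, c_grid_desc,
//                      a_element_op, b_element_op, c_element_op, block_2_ctile_map);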
template </* ... */
          typename FloatCShuffle,
          /* ... */
          typename AGridDesc_AK0_M_AK1,
          typename BGridDesc_BK0_N_BK1,
          typename CGridDesc_M_N,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation,
          /* ... */
          typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
          typename ABlockTransferThreadClusterArrangeOrder,
          typename ABlockTransferSrcAccessOrder,
          index_t ABlockTransferSrcVectorDim,
          index_t ABlockTransferSrcScalarPerVector,
          index_t ABlockTransferDstScalarPerVector_K1,
          bool AThreadTransferSrcResetCoordinateAfterRun,
          bool ABlockLdsExtraM,
          typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
          typename BBlockTransferThreadClusterArrangeOrder,
          typename BBlockTransferSrcAccessOrder,
          index_t BBlockTransferSrcVectorDim,
          index_t BBlockTransferSrcScalarPerVector,
          index_t BBlockTransferDstScalarPerVector_K1,
          bool BThreadTransferSrcResetCoordinateAfterRun,
          bool BBlockLdsExtraN,
          index_t CShuffleMXdlPerWavePerShuffle,
          index_t CShuffleNXdlPerWavePerShuffle,
          typename CBlockTransferClusterLengths_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl,
          index_t CBlockTransferScalarPerVector_NWaveNPerXdl,
          index_t NumGemmKPrefetchStage = 1,
          /* ... */>
    static constexpr auto AK0 = Number<KPerBlock / AK1Value>{};
    static constexpr auto BK0 = Number<KPerBlock / BK1Value>{};
    static constexpr auto AK1 = Number<AK1Value>{};
    static constexpr auto BK1 = Number<BK1Value>{};

    using ThisThreadBlock = ThisThreadBlock<BlockSize>;

    using GridwiseGemmPipe = remove_cvref_t<
        decltype(GridwiseGemmPipeline_Selector<PipelineVer, NumGemmKPrefetchStage>())>;
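    // Example (assumed tile: KPerBlock = 32, AK1Value = BK1Value = 8): AK0 = BK0 = 32 / 8 = 4,
    // i.e. the A tile staged in LDS is viewed as [AK0 = 4, MPerBlock, AK1 = 8] and the B tile as
    // [BK0 = 4, NPerBlock, BK1 = 8]; the K1 values are typically chosen to match the vector width
    // of the global loads.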
    __host__ __device__ static constexpr auto GetABlockDescriptor_AK0PerBlock_MPerBlock_AK1()
    {
        constexpr auto max_lds_align = AK1;

        // A tile in LDS: add padding in M when ABlockLdsExtraM is set
        constexpr auto a_block_desc_ak0_m_ak1 = [&]() {
            if constexpr(ABlockLdsExtraM) { /* ... */ }
            else { /* ... */ }
        }();

        return a_block_desc_ak0_m_ak1;
    }
    __host__ __device__ static constexpr auto GetBBlockDescriptor_BK0PerBlock_NPerBlock_BK1()
    {
        constexpr auto max_lds_align = BK1;

        // B tile in LDS: add padding in N when BBlockLdsExtraN is set
        constexpr auto b_block_desc_bk0_n_bk1 = [&]() {
            if constexpr(BBlockLdsExtraN) { /* ... */ }
            else { /* ... */ }
        }();

        return b_block_desc_bk0_n_bk1;
    }
    __host__ __device__ static constexpr auto
    GetCBlockDescriptor_MBlock_NXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl()
    {
        constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
        constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

        constexpr auto c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl =
            make_naive_tensor_descriptor_packed(/* ... */);

        return c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl;
    }
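    // Example (assumed tile: MPerBlock = 256, MXdlPerWave = 4, MPerXdl = 32):
    //   MWave = 256 / (4 * 32) = 2, i.e. two waves tile the M dimension of the block, each wave
    //   owning 4 XDL outputs of height 32. With NPerBlock = 128, NXdlPerWave = 2, NPerXdl = 32
    //   the same formula gives NWave = 2.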
    __host__ __device__ static constexpr index_t GetSharedMemoryNumberOfByte()
    {
        // LDS space for the A tile, the B tile and the C-shuffle tile
        constexpr auto a_block_space_size_aligned = /* ... */;
        constexpr auto b_block_space_size_aligned = /* ... */;

        constexpr auto c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl =
            /* ... */;

        constexpr auto c_block_size =
            c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl
                .GetElementSpaceSize();

        // the A/B tiles and the C-shuffle tile reuse the same LDS, so reserve the larger of the two
        return math::max((a_block_space_size_aligned + b_block_space_size_aligned) *
                             sizeof(FloatAB),
                         c_block_size * sizeof(FloatCShuffle));
    }
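    // Worked example for the size above (assumed: MPerBlock = 256, NPerBlock = 128, KPerBlock = 32,
    // AK1 = BK1 = 8, FloatAB = fp16, no extra LDS padding):
    //   A tile elements = AK0 * MPerBlock * AK1 = 4 * 256 * 8 = 8192
    //   B tile elements = BK0 * NPerBlock * BK1 = 4 * 128 * 8 = 4096
    //   A+B bytes       = (8192 + 4096) * 2     = 24576 (24 KiB)
    // The reserved LDS is max(24 KiB, C-shuffle tile bytes), which also bounds occupancy per CU.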
    __device__ static constexpr bool IsValidCompilationParameter()
    {
        return ck::tensor_operation::device::IsValidGemmCompilationParameter</* ... */
                                                                             CGlobalMemoryDataOperation>();
    }
    template <typename Block2CTileMap>
    __host__ __device__ static constexpr bool
    CheckValidity(const AGridDesc_AK0_M_AK1& a_grid_desc_ak0_m_ak1,
                  const BGridDesc_BK0_N_BK1& b_grid_desc_bk0_n_bk1,
                  const CGridDesc_M_N& c_grid_desc_m_n,
                  const Block2CTileMap& block_2_ctile_map)
    {
        static_assert((MPerBlock % (MPerXdl * MXdlPerWave) == 0) &&
                          (NPerBlock % (NXdlPerWave * NPerXdl)) == 0,
                      "Invalid tuning param!");

        const auto M = a_grid_desc_ak0_m_ak1.GetLength(I1);
        const auto N = b_grid_desc_bk0_n_bk1.GetLength(I1);
        const auto K = a_grid_desc_ak0_m_ak1.GetLength(I0) * a_grid_desc_ak0_m_ak1.GetLength(I2);

        if(!(M == c_grid_desc_m_n.GetLength(I0) && N == c_grid_desc_m_n.GetLength(I1)))
        {
            return false;
        }

        if(!(M % MPerBlock == 0 && N % NPerBlock == 0 && K % KPerBlock == 0))
        {
            return false;
        }

        const auto num_k_loop = K / KPerBlock;

        if(!GridwiseGemmPipe::IsSupported(num_k_loop))
        {
            return false;
        }

        if(!block_2_ctile_map.CheckValidity(c_grid_desc_m_n))
        {
            return false;
        }

        // ...
        return true;
    }

    __host__ __device__ static constexpr bool CalculateHasMainKBlockLoop(index_t K)
    {
        const index_t num_loop = K / KPerBlock;

        return GridwiseGemmPipe::CalculateHasMainLoop(num_loop);
    }
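    // Illustrative host-side usage (not part of this header): a device-op wrapper would typically
    // validate the descriptors before launching and then pick the HasMainK0BlockLoop template
    // argument from the K extent. Names follow the surrounding code; the real wrapper may differ.
    //
    //   if(!GridwiseGemm::CheckValidity(a_grid_desc_ak0_m_ak1,
    //                                   b_grid_desc_bk0_n_bk1,
    //                                   c_grid_desc_m_n,
    //                                   block_2_ctile_map))
    //   {
    //       throw std::runtime_error("wrong! GridwiseGemm has invalid setting");
    //   }
    //
    //   const bool has_main_k_block_loop = GridwiseGemm::CalculateHasMainKBlockLoop(K);
    //   // has_main_k_block_loop selects which instantiation of Run<HasMainK0BlockLoop> is launched.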
    __host__ __device__ static constexpr auto
    MakeCGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl(
        const CGridDesc_M_N& c_grid_desc_m_n)
    {
        const auto M = c_grid_desc_m_n.GetLength(I0);
        const auto N = c_grid_desc_m_n.GetLength(I1);

        const auto MBlock = M / MPerBlock;
        const auto NBlock = N / NPerBlock;

        constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
        constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

        const auto c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl =
            transform_tensor_descriptor(c_grid_desc_m_n, /* ... */);

        return c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl;
    }
    template <bool HasMainK0BlockLoop, typename Block2CTileMap>
    __device__ static void
    Run(const FloatAB* __restrict__ p_a_grid,
        const FloatAB* __restrict__ p_b_grid,
        FloatC* __restrict__ p_c_grid,
        void* __restrict__ p_shared,
        const AGridDesc_AK0_M_AK1& a_grid_desc_ak0_m_ak1,
        const BGridDesc_BK0_N_BK1& b_grid_desc_bk0_n_bk1,
        const CGridDescriptor_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl&
            c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
        const AElementwiseOperation& a_element_op,
        const BElementwiseOperation& b_element_op,
        const CElementwiseOperation& c_element_op,
        const Block2CTileMap& block_2_ctile_map)
    {
        const auto a_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_a_grid, a_grid_desc_ak0_m_ak1.GetElementSpaceSize());
        const auto b_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_b_grid, b_grid_desc_bk0_n_bk1.GetElementSpaceSize());
        auto c_grid_buf = make_dynamic_buffer<AddressSpaceEnum::Global>(
            p_c_grid,
            c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl
                .GetElementSpaceSize());

        // map this workgroup to its C tile
        const auto block_work_idx =
            block_2_ctile_map.CalculateBottomIndex(make_multi_index(get_block_1d_id()));

        // early exit for workgroups that fall outside the valid C tile range
        if(!block_2_ctile_map.ValidCTileIndex(
               block_work_idx,
               make_tuple(c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl
                              .GetLength(I0),
                          c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl
                              .GetLength(I3))))
        {
            return;
        }

        // readfirstlane keeps the tile origin in SGPRs
        const index_t m_block_data_idx_on_grid =
            __builtin_amdgcn_readfirstlane(block_work_idx[I0] * MPerBlock);

        const index_t n_block_data_idx_on_grid =
            __builtin_amdgcn_readfirstlane(block_work_idx[I1] * NPerBlock);
        // blockwise copy: A from global to LDS
        auto a_blockwise_copy =
            ThreadGroupTensorSliceTransfer_v4r1<ThisThreadBlock,
                                                AElementwiseOperation,
                                                /* ... */
                                                ABlockTransferThreadClusterLengths_AK0_M_AK1,
                                                ABlockTransferThreadClusterArrangeOrder,
                                                /* ... */
                                                decltype(a_grid_desc_ak0_m_ak1),
                                                decltype(a_block_desc_ak0_m_ak1),
                                                ABlockTransferSrcAccessOrder,
                                                /* ... */
                                                ABlockTransferSrcVectorDim,
                                                /* ... */
                                                ABlockTransferSrcScalarPerVector,
                                                ABlockTransferDstScalarPerVector_K1,
                                                /* ... */
                                                AThreadTransferSrcResetCoordinateAfterRun,
                                                /* ... */
                                                NumGemmKPrefetchStage>(
                a_grid_desc_ak0_m_ak1,
                /* ... */
                a_block_desc_ak0_m_ak1,
                /* ... */);

        // blockwise copy: B from global to LDS
        auto b_blockwise_copy =
            ThreadGroupTensorSliceTransfer_v4r1<ThisThreadBlock,
                                                BElementwiseOperation,
                                                /* ... */
                                                BBlockTransferThreadClusterLengths_BK0_N_BK1,
                                                BBlockTransferThreadClusterArrangeOrder,
                                                /* ... */
                                                decltype(b_grid_desc_bk0_n_bk1),
                                                decltype(b_block_desc_bk0_n_bk1),
                                                BBlockTransferSrcAccessOrder,
                                                /* ... */
                                                BBlockTransferSrcVectorDim,
                                                /* ... */
                                                BBlockTransferSrcScalarPerVector,
                                                BBlockTransferDstScalarPerVector_K1,
                                                /* ... */
                                                BThreadTransferSrcResetCoordinateAfterRun,
                                                /* ... */
                                                NumGemmKPrefetchStage>(
                b_grid_desc_bk0_n_bk1,
                /* ... */
                b_block_desc_bk0_n_bk1,
                /* ... */);
        // select the MFMA instruction for the data type and tile
        constexpr bool is_single_rate_mfma = /* ... */;
        constexpr auto is_scale_mfma = false;
        /* ... */ selected_mfma.k_per_blk);

        auto blockwise_gemm =
            /* ... */<
                /* ... */
                decltype(a_block_desc_ak0_m_ak1),
                decltype(b_block_desc_bk0_n_bk1),
                /* ... */>{};

        auto c_thread_buf = blockwise_gemm.GetCThreadBuffer();

        // LDS allocation for the A and B tiles
        constexpr auto a_block_space_size_aligned = math::integer_least_multiple(
            a_block_desc_ak0_m_ak1.GetElementSpaceSize(), max_lds_align);

        auto a_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
            static_cast<FloatAB*>(p_shared), a_block_desc_ak0_m_ak1.GetElementSpaceSize());

        auto b_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
            static_cast<FloatAB*>(p_shared) + a_block_space_size_aligned,
            b_block_desc_bk0_n_bk1.GetElementSpaceSize());

        // ...

        const index_t num_k_block_main_loop = __builtin_amdgcn_readfirstlane(
            (a_grid_desc_ak0_m_ak1.GetLength(I0) * a_grid_desc_ak0_m_ak1.GetLength(I2)) /
            KPerBlock);

        // main K loop: pipeline global->LDS copies with the blockwise GEMM
        GridwiseGemmPipe::template Run<HasMainK0BlockLoop>(a_grid_desc_ak0_m_ak1,
                                                           a_block_desc_ak0_m_ak1,
                                                           /* ... */
                                                           a_block_slice_copy_step,
                                                           b_grid_desc_bk0_n_bk1,
                                                           b_block_desc_bk0_n_bk1,
                                                           /* ... */
                                                           b_block_slice_copy_step,
                                                           /* ... */
                                                           num_k_block_main_loop);
        // shuffle C in LDS and write it out
        static_assert(MXdlPerWave % CShuffleMXdlPerWavePerShuffle == 0 &&
                          NXdlPerWave % CShuffleNXdlPerWavePerShuffle == 0,
                      /* ... */);

        constexpr index_t MWave = MPerBlock / (MXdlPerWave * MPerXdl);
        constexpr index_t NWave = NPerBlock / (NXdlPerWave * NPerXdl);

        constexpr auto c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2 =
            blockwise_gemm.GetCThreadDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

        constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp =
            blockwise_gemm.GetCBlockDescriptor_M0_N0_M1_N1_M2_M3_M4_N2();

        constexpr auto M0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I0);
        constexpr auto N0 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I1);
        constexpr auto M1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I2);
        constexpr auto N1 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I3);
        constexpr auto M2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I4);
        constexpr auto M3 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I5);
        constexpr auto M4 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I6);
        constexpr auto N2 = c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2_tmp.GetLength(I7);

        constexpr auto c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl =
            /* ... */;

        // the C-shuffle tile reuses the LDS that held the A/B tiles
        auto c_shuffle_block_buf = make_dynamic_buffer<AddressSpaceEnum::Lds>(
            static_cast<FloatCShuffle*>(p_shared),
            c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl
                .GetElementSpaceSize());

        constexpr auto c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2 = transform_tensor_descriptor(
            c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
            /* ... */);
        // calculate the origin of this thread's C data within the block
        const auto c_thread_mtx_on_block =
            blockwise_gemm.CalculateCThreadOriginDataIndex(I0, I0, I0, I0);

        const index_t m_thread_data_on_block = c_thread_mtx_on_block[I0];
        const index_t n_thread_data_on_block = c_thread_mtx_on_block[I1];

        const auto m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor =
            make_single_stage_tensor_adaptor(/* ... */);

        const auto m_thread_data_on_block_idx =
            m_thread_data_on_block_to_m0_m1_m2_m3_m4_adaptor.CalculateBottomIndex(
                make_multi_index(m_thread_data_on_block));

        const auto n_thread_data_on_block_to_n0_n1_n2_adaptor =
            make_single_stage_tensor_adaptor(/* ... */);

        const auto n_thread_data_on_block_idx =
            n_thread_data_on_block_to_n0_n1_n2_adaptor.CalculateBottomIndex(
                make_multi_index(n_thread_data_on_block));

        // threadwise copy: C from VGPR accumulators to the LDS shuffle tile
        auto c_thread_copy_vgpr_to_lds =
            /* ... */<
                /* ... */
                decltype(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                decltype(c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2),
                /* ... */
                Sequence<CShuffleMXdlPerWavePerShuffle,
                         CShuffleNXdlPerWavePerShuffle,
                         /* ... */>,
                /* ... */>{
                c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                make_multi_index(/* ... */
                                 m_thread_data_on_block_idx[I1],
                                 n_thread_data_on_block_idx[I1],
                                 m_thread_data_on_block_idx[I2],
                                 m_thread_data_on_block_idx[I3],
                                 m_thread_data_on_block_idx[I4],
                                 n_thread_data_on_block_idx[I2]),
                /* ... */};
        // blockwise copy: C from the LDS shuffle tile to global memory
        auto c_block_copy_lds_to_global = ThreadGroupTensorSliceTransfer_v6r1<
            /* ... */
            CElementwiseOperation,
            CGlobalMemoryDataOperation,
            /* ... */
            CShuffleMXdlPerWavePerShuffle,
            /* ... */
            CShuffleNXdlPerWavePerShuffle,
            /* ... */
            CBlockTransferClusterLengths_MBlock_MXdlPerWave_MWaveMPerXdl_NBlock_NXdlPerWave_NWaveNPerXdl,
            /* ... */
            decltype(c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl),
            decltype(c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl),
            /* ... */
            CBlockTransferScalarPerVector_NWaveNPerXdl,
            /* ... */>
            {c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl, /* ... */
             c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl, /* ... */};
        // sweep the C tile: nxdlperwave runs forward on even m-steps and backward on odd m-steps,
        // so the destination window only ever moves by one step between writes
        constexpr auto mxdlperwave_forward_step  = /* ... */;
        constexpr auto nxdlperwave_forward_step  = /* ... */;
        constexpr auto nxdlperwave_backward_step = /* ... */;

        static_for<0, MXdlPerWave, CShuffleMXdlPerWavePerShuffle>{}([&](auto mxdlperwave_iter) {
            constexpr auto mxdlperwave = mxdlperwave_iter;

            static_for<0, NXdlPerWave, CShuffleNXdlPerWavePerShuffle>{}([&](auto nxdlperwave_iter) {
                constexpr bool nxdlperwave_forward_sweep =
                    (mxdlperwave % (2 * CShuffleMXdlPerWavePerShuffle) == 0);

                constexpr index_t nxdlperwave_value =
                    nxdlperwave_forward_sweep
                        ? nxdlperwave_iter
                        : (NXdlPerWave - nxdlperwave_iter - CShuffleNXdlPerWavePerShuffle);

                // ...

                // VGPR -> LDS
                c_thread_copy_vgpr_to_lds.Run(c_thread_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                              /* ... */
                                              c_block_desc_m0_n0_m1_n1_m2_m3_m4_n2,
                                              c_shuffle_block_buf);

                // ...

                // LDS -> global
                c_block_copy_lds_to_global.Run(
                    c_block_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
                    c_shuffle_block_buf,
                    c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
                    c_grid_buf);

                // move the destination window along nxdlperwave
                if constexpr(nxdlperwave_forward_sweep &&
                             (nxdlperwave < NXdlPerWave - CShuffleNXdlPerWavePerShuffle))
                {
                    c_block_copy_lds_to_global.MoveDstSliceWindow(
                        c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
                        nxdlperwave_forward_step);
                }
                else if constexpr((!nxdlperwave_forward_sweep) && (nxdlperwave > 0))
                {
                    c_block_copy_lds_to_global.MoveDstSliceWindow(
                        c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
                        nxdlperwave_backward_step);
                }
            });

            // move the destination window along mxdlperwave
            if constexpr(mxdlperwave < MXdlPerWave - CShuffleMXdlPerWavePerShuffle)
            {
                c_block_copy_lds_to_global.MoveDstSliceWindow(
                    c_grid_desc_mblock_mxdlperwave_mwavemperxdl_nblock_nxdlperwave_nwavenperxdl,
                    mxdlperwave_forward_step);
            }
        });
    }
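    // Example sweep order for the write-out loop above (assumed: MXdlPerWave = NXdlPerWave = 4,
    // CShuffleMXdlPerWavePerShuffle = CShuffleNXdlPerWavePerShuffle = 2): the (mxdlperwave,
    // nxdlperwave) sub-tiles are visited in the serpentine order (0,0) -> (0,2) -> (2,2) -> (2,0),
    // so each MoveDstSliceWindow advances the destination by exactly one forward or backward step.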