namespace tensor_operation {

template <typename ALayout,
          // ... (additional layout and data-type parameters elided)
          typename GemmAccDataType,
          typename CShuffleDataType,
          typename AElementwiseOperation,
          typename BElementwiseOperation,
          typename CElementwiseOperation,
          // ... (block/tile size parameters elided)
          typename ABlockTransferThreadClusterLengths_AK0_M_AK1,
          typename ABlockTransferThreadClusterArrangeOrder,
          typename ABlockTransferSrcAccessOrder,
          index_t ABlockTransferSrcVectorDim,
          index_t ABlockTransferSrcScalarPerVector,
          index_t ABlockTransferDstScalarPerVector_AK1,
          // ...
          typename BBlockTransferThreadClusterLengths_BK0_N_BK1,
          typename BBlockTransferThreadClusterArrangeOrder,
          typename BBlockTransferSrcAccessOrder,
          index_t BBlockTransferSrcVectorDim,
          index_t BBlockTransferSrcScalarPerVector,
          index_t BBlockTransferDstScalarPerVector_BK1,
          // ...
          index_t CShuffleMXdlPerWavePerShuffle,
          index_t CShuffleNXdlPerWavePerShuffle,
          typename CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
          typename CDEShuffleBlockTransferScalarPerVectors,
          // ...
          bool NSwizzle         = false,
          bool IsInputGemm      = true,
          bool MulRoutedWeight  = true,
          bool PerTokenQuant    = true,
          // ...
          typename ComputeTypeA = CDataType,
          typename ComputeTypeB = ComputeTypeA,
          typename LDSTypeA     = ComputeTypeA,
          typename LDSTypeB     = ComputeTypeB>
// ... (struct DeviceMoeGemm declaration and leading base-class template arguments elided)
                                                                 AElementwiseOperation,
                                                                 BElementwiseOperation,
                                                                 CElementwiseOperation>
 
    template <index_t NXdlPerWave_>
    // ... (underlying gridwise GEMM type and its leading template arguments elided)
                        AElementwiseOperation,
                        BElementwiseOperation,
                        CElementwiseOperation,
                        // ...
                        ABlockTransferThreadClusterLengths_AK0_M_AK1,
                        ABlockTransferThreadClusterArrangeOrder,
                        ABlockTransferSrcAccessOrder,
                        ABlockTransferSrcVectorDim,
                        ABlockTransferSrcScalarPerVector,
                        ABlockTransferDstScalarPerVector_AK1,
                        // ...
                        BBlockTransferThreadClusterLengths_BK0_N_BK1,
                        BBlockTransferThreadClusterArrangeOrder,
                        BBlockTransferSrcAccessOrder,
                        BBlockTransferSrcVectorDim,
                        BBlockTransferSrcScalarPerVector,
                        BBlockTransferDstScalarPerVector_BK1,
                        // ...
                        CShuffleMXdlPerWavePerShuffle,
                        math::min(CShuffleNXdlPerWavePerShuffle, NXdlPerWave_),
                        CShuffleBlockTransferClusterLengths_MBlock_MPerBlock_NBlock_NPerBlock,
                        CDEShuffleBlockTransferScalarPerVectors,
                        // ... (remaining template arguments elided)
 
        template <typename GridwiseGemm>
        float RunImp(const typename GridwiseGemm::Argument& arg,
                     const StreamConfig& stream_config = StreamConfig{})
        {
            if(stream_config.log_level_ > 0)
            {
                // ... (argument logging elided)
            }

            if(!GridwiseGemm::CheckValidity(arg))
            {
                throw std::runtime_error("wrong! GridwiseGemm has invalid setting");
            }

            // ...
            std::tie(gdx, gdy, gdz) = GridwiseGemm::CalculateGridSize(arg.M, arg.N);

            // ...
            index_t k_grain = arg.KBatch * KPerBlock;
            index_t K_split = (arg.K + k_grain - 1) / k_grain * KPerBlock;

            const bool has_main_k_block_loop = GridwiseGemm::CalculateHasMainKBlockLoop(K_split);
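            // Worked example (illustrative values, not taken from the source): with
            // KPerBlock = 64, arg.KBatch = 4 and arg.K = 1000, k_grain = 4 * 64 = 256 and
            // K_split = ((1000 + 255) / 256) * 64 = 4 * 64 = 256; i.e. K_split is the
            // per-split K rounded up to a whole number of KPerBlock tiles, and the
            // main-K-loop decision is made on that rounded value rather than on the raw K.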
 
            const auto RunKernel = [&](const auto& kernel) {
                if(stream_config.flush_cache)
                {
                    // ...
                    std::array<std::size_t, NumDTensor> DsSize;

                    // ...
                    const auto a_grid_desc_ak0_m_ak1 = GridwiseGemm::MakeAGridDescriptor_AK0_M_AK1(
                        arg_.M, arg_.MPadded, arg_.K, arg_.KPadded, arg_.StrideA, arg_.AK0);
                    const auto b_grid_desc_bk0_n_bk1 = GridwiseGemm::MakeBGridDescriptor_BK0_N_BK1(
                        arg_.K, arg_.KPadded, arg_.N, arg_.NPadded, arg_.StrideB, arg_.BK0);

                    auto size_a_buffer = a_grid_desc_ak0_m_ak1.GetElementSpaceSize() * /* ... */;
                    auto size_b_buffer = b_grid_desc_bk0_n_bk1.GetElementSpaceSize() * /* ... */;

                    const auto ds_grid_desc_m_n = GridwiseGemm::MakeDsGridDescriptor_M_N(
                        arg_.M, arg_.MPadded, arg_.N, arg_.NPadded, arg_.StrideDs);
                    // ...
                        DsSize[i] = ds_grid_desc_m_n[i].GetElementSpaceSize() * sizeof(DDataType);
                    // ... (rotating-buffer setup elided)
                                     stream_config.rotating_count,
                    // ...
                    rotating_mem.Print();

                    auto run_flush_cache = [&]() {
                        // ...
                            hipGetErrorString(hipMemsetAsync(arg_.p_c_grid,
                                                             /* ... */
                                                             arg_.M * arg_.N * sizeof(CDataType),
                                                             stream_config.stream_id_));
                        // ...
                    };

                    ave_time = ck::utility::launch_and_time_kernel_with_preprocess<false>(
                        // ...
                }
                else
                {
                    // ...
                        hipGetErrorString(hipMemsetAsync(arg.p_c_grid,
                                                         /* ... */
                                                         arg.M * arg.N * sizeof(CDataType),
                                                         stream_config.stream_id_));
                    // ...
                        stream_config, kernel, dim3(gdx, gdy, gdz), dim3(BlockSize), 0, arg);
                }
            };

            constexpr auto estimated_reg_a = MPerBlock * KPerBlock * sizeof(ADataType) / BlockSize /
                                             4 * (1 + GridwiseGemm::NWave);
            constexpr auto estimated_reg_b = NPerBlock * KPerBlock * sizeof(BDataType) / BlockSize /
                                             4 * (2) * (IsInputGemm ? 2 : 1);
            constexpr auto estimated_reg_c = MPerBlock * NPerBlock * sizeof(GemmAccDataType) /
                                             BlockSize / 4 * (IsInputGemm ? 2 : 1);
            constexpr auto estimated_reg_total =
                estimated_reg_a + estimated_reg_b + estimated_reg_c;

            constexpr index_t minimum_occupancy = (estimated_reg_total >= 256) ? 1 : 2;
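            // Worked example (illustrative values, not taken from the source): with
            // MPerBlock = 128, NPerBlock = 128, KPerBlock = 64, BlockSize = 256, 2-byte A/B
            // element types, a 4-byte accumulator, GridwiseGemm::NWave = 4 and IsInputGemm = true:
            //   estimated_reg_a = 128 * 64 * 2 / 256 / 4 * (1 + 4) = 80
            //   estimated_reg_b = 128 * 64 * 2 / 256 / 4 * 2 * 2   = 64
            //   estimated_reg_c = 128 * 128 * 4 / 256 / 4 * 2      = 128
            // so estimated_reg_total = 272 >= 256 and minimum_occupancy resolves to 1.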
 
            constexpr auto MemoryDataOp =
                // ...

            if(has_main_k_block_loop)
            {
                // ... (dispatch over pipeline version and tail number; only v1 and v2 are handled)
                        if(GridwiseGemm::CalculateKBlockLoopTailNum(K_split) == TailNumber::Odd)
                // ...
                    if(GridwiseGemm::CalculateKBlockLoopTailNum(K_split) == TailNumber::Odd)
                // ...
                    throw std::runtime_error("todo: only v1 & v2 support now");
            }
            else
            {
                // ...
                    if(GridwiseGemm::CalculateKBlockLoopTailNum(K_split) == TailNumber::Odd)
                // ...
                    if(GridwiseGemm::CalculateKBlockLoopTailNum(K_split) == TailNumber::Odd)
                // ...
                    throw std::runtime_error("todo: only v1 & v2 support now");
            }
            // ... (end of RunImp)

        // ... (BaseInvoker::Run override forwarding to the typed Argument)
            return Run(*dynamic_cast<const Argument*>(p_arg), stream_config);
 
    // ...
    static bool IsSupportedArgument(const Argument& arg)
    {
        if(!ck::is_xdl_wmma_supported<ComputeTypeA, ComputeTypeB, MPerXDL, NPerXDL>())
            // ...

        // ...
        if(arg.N % NPerBlock != 0 || arg.K % KPerBlock != 0)
            // ...
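        // Illustrative example (values not taken from the source): with NPerBlock = 128 and
        // KPerBlock = 64, a problem with N = 4096 and K = 6144 passes the divisibility check
        // above, while N = 4100 or K = 6100 would make IsSupportedArgument reject the argument.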
 
    static auto MakeArgument(const void* p_sorted_token_ids,
                             const void* p_sorted_expert_ids,
                             const void* p_max_token_id,
                             const void* p_a,
                             const void* p_b,
                             std::array<const void*, NumDTensor> p_ds,
                             void* p_c,
                             index_t NumTokens,
                             index_t TopK,
                             index_t M,
                             index_t N,
                             index_t K,
                             index_t StrideA,
                             index_t StrideB,
                             std::array<index_t, NumDTensor> StrideDs,
                             index_t StrideC,
                             index_t KBatch,
                             AElementwiseOperation a_element_op,
                             BElementwiseOperation b_element_op,
                             CElementwiseOperation c_element_op)
    {
        // ...
                        static_cast<const index_t*>(p_sorted_expert_ids),
                        static_cast<const index_t*>(p_max_token_id),
                        static_cast<const ADataType*>(p_a),
                        static_cast<const BDataType*>(p_b),
                        // ...
                        static_cast<CDataType*>(p_c),
                        // ... (remaining Argument fields elided)
    }

    // ...

    std::unique_ptr<BaseArgument> MakeArgumentPointer(const void* p_a,
                                                      const void* p_b,
                                                      std::array<const void*, NumDTensor> p_ds,
                                                      void* p_c,
                                                      index_t M,
                                                      index_t N,
                                                      index_t K,
                                                      index_t StrideA,
                                                      index_t StrideB,
                                                      std::array<ck::index_t, NumDTensor> StrideDs,
                                                      index_t StrideC,
                                                      index_t KBatch,
                                                      AElementwiseOperation a_element_op,
                                                      BElementwiseOperation b_element_op,
                                                      CElementwiseOperation c_element_op) override
    {
        return std::make_unique<Argument>(nullptr,
                                          // ...
                                          static_cast<const ADataType*>(p_a),
                                          static_cast<const BDataType*>(p_b),
                                          // ...
                                          static_cast<CDataType*>(p_c),
                                          // ... (remaining Argument fields elided)
    }

    // ...

    std::unique_ptr<BaseInvoker> MakeInvokerPointer() override
    {
        return std::make_unique<Invoker>(Invoker{});
    }
 
    std::string GetTypeString() const override
    {
        auto str = std::stringstream();

        std::map<BlockGemmPipelineScheduler, std::string> BlkGemmPipelineSchedulerToString{
            // ... (enumerator-to-name entries elided)
        std::map<BlockGemmPipelineVersion, std::string> BlkGemmPipelineVersionToString{
            // ... (enumerator-to-name entries elided)

        str << "DeviceMoeGEmm"
            // ...
            << std::string(ALayout::name)[0]
            << std::string(BLayout::name)[0]
            << std::string(CLayout::name)[0]
            // ...
            << MPerBlock << "x" << NPerBlock << "x" << KPerBlock << ", "
            << MPerXDL << "x" << NPerXDL << ", "
            << MXdlPerWave << "x" << NXdlPerWave << ", "
            << ABlockTransferSrcScalarPerVector << "x" << BBlockTransferSrcScalarPerVector << ", "
            << "BlkGemmPipelineScheduler: "
            << BlkGemmPipelineSchedulerToString[BlkGemmPipeSched] << ", "
            << "BlkGemmPipelineVersion: "
            << BlkGemmPipelineVersionToString[BlkGemmPipelineVer] << ", "
            << "BlkGemmPipelinePrefetchStages: "
            << GridwiseGemm64::BlockwiseGemmPipe::PrefetchStages;

        // ...
        return str.str();
    }
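A minimal usage sketch of the device-op interface above (hypothetical driver code: the
`DeviceOp` alias, the device pointers `p_a_dev`, `p_b_dev`, `p_c_dev`, the problem sizes
`M`, `N`, `K`, `StrideA`, `StrideB`, `StrideC`, `KBatch`, and the default-constructed
element-wise operations are illustrative assumptions, not values from this header):

    // Hypothetical instantiation of the operation described above.
    using DeviceOp = /* concrete DeviceMoeGemm<...> instantiation */;

    DeviceOp device_op;
    auto argument = device_op.MakeArgumentPointer(p_a_dev,
                                                  p_b_dev,
                                                  {},   // D-tensor pointers (none in this sketch)
                                                  p_c_dev,
                                                  M, N, K,
                                                  StrideA, StrideB,
                                                  {},   // StrideDs
                                                  StrideC,
                                                  KBatch,
                                                  AElementwiseOperation{},
                                                  BElementwiseOperation{},
                                                  CElementwiseOperation{});

    if(!device_op.IsSupportedArgument(argument.get()))
        throw std::runtime_error(device_op.GetTypeString() + " does not support this problem");

    auto invoker   = device_op.MakeInvokerPointer();
    // StreamConfig{stream, time_kernel}: run on the default stream and report the kernel time.
    float ave_time = invoker->Run(argument.get(), StreamConfig{nullptr, true});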
 