ck.hpp
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2025, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/config.h"
#include <stdint.h>

#if !defined(__HIPCC_RTC__) || !defined(CK_CODE_GEN_RTC)
#ifndef CK_DONT_USE_HIP_RUNTIME_HEADERS
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#endif
#endif
// TODO: add various levels of logging with CK_LOG_LEVEL

#ifndef CK_TIME_KERNEL
#define CK_TIME_KERNEL 1
#endif

// constant address space for kernel parameters
// https://llvm.org/docs/AMDGPUUsage.html#address-spaces
#define CK_CONSTANT_ADDRESS_SPACE __attribute__((address_space(4)))
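// Illustrative sketch (not part of the original header): the attribute above places a
// pointee type in the AMDGPU constant address space (4), which is how kernel arguments
// are addressed. A hypothetical kernel taking such a pointer could be declared as
//
//     __global__ void kernel_example(const float CK_CONSTANT_ADDRESS_SPACE* p_arg);
//
// kernel_example and p_arg are placeholder names used only for illustration.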

// launch bounds
#define CK_USE_LAUNCH_BOUNDS 1

#ifdef CK_USE_LAUNCH_BOUNDS
// for most kernels
#define CK_MAX_THREAD_PER_BLOCK 256
#define CK_MIN_BLOCK_PER_CU 2

// for wavelet GEMM kernel
#define CK_WAVELET_MAX_THREAD_PER_BLOCK 512
#define CK_WAVELET_MIN_BLOCK_PER_CU 2
#endif
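// Illustrative sketch (not part of the original header): these bounds are meant to be
// plugged into the HIP __launch_bounds__ kernel attribute, which caps threads per block
// and requests a minimum number of resident blocks per compute unit, e.g.
//
//     __global__ void __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
//     kernel_example(/* ... */) { /* ... */ }
//
// kernel_example is a placeholder name used only for illustration.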

// kernel attribute: amdgpu_waves_per_eu()
#ifdef CK_USE_WAVES_PER_EU
// for 1-wave kernels, control the arguments of the amdgpu_waves_per_eu() attribute
#ifndef CK_MIN_WAVES_PER_EU
#define CK_MIN_WAVES_PER_EU 0
#endif

#ifndef CK_MAX_WAVES_PER_EU
#define CK_MAX_WAVES_PER_EU 0
#endif

#else
#define CK_USE_WAVES_PER_EU 0
#endif
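// Illustrative sketch (not part of the original header): when CK_USE_WAVES_PER_EU is
// enabled, the two macros above would supply the arguments of clang's
// amdgpu_waves_per_eu occupancy hint, e.g.
//
//     __global__ void
//     __attribute__((amdgpu_waves_per_eu(CK_MIN_WAVES_PER_EU, CK_MAX_WAVES_PER_EU)))
//     kernel_example(/* ... */) { /* ... */ }
//
// kernel_example is a placeholder name used only for illustration.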

// define general macros for various architectures
#if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__) || defined(__gfx950__) || \
    defined(__gfx9_4_generic__)
#define __gfx9__
#endif
#if defined(__gfx942__) || defined(__gfx950__) || defined(__gfx9_4_generic__)
#define __gfx94__
#endif
#if defined(__gfx1010__) || defined(__gfx1011__) || defined(__gfx1012__)
#define __gfx101__
#endif
#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || \
    defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || \
    defined(__gfx10_3_generic__)
#define __gfx103__
#endif
#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || \
    defined(__gfx1103__) || defined(__gfx1150__) || defined(__gfx1151__) || \
    defined(__gfx1152__) || defined(__gfx11_generic__)
#define __gfx11__
#endif
#if defined(__gfx1200__) || defined(__gfx1201__) || defined(__gfx12_generic__)
#define __gfx12__
#endif
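// Illustrative sketch (not part of the original header): device code can branch on these
// family macros instead of enumerating every individual target, e.g.
//
//     #if defined(__gfx9__)
//         // path for CDNA-class targets (MFMA available)
//     #elif defined(__gfx11__) || defined(__gfx12__)
//         // path for RDNA3/RDNA4-class targets
//     #endif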

// buffer resource
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_BUFFER_RESOURCE_3RD_DWORD -1
#elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || defined(__gfx9__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000
#elif defined(__gfx103__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000
#elif defined(__gfx11__) || defined(__gfx12__)
#define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000
#endif
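// Illustrative sketch (not part of the original header): this value is the
// architecture-dependent configuration word (the fourth 32-bit dword) of the 128-bit
// buffer resource descriptor consumed by AMD buffer_load/buffer_store instructions.
// A hypothetical helper might assemble the descriptor roughly as
//
//     int32x4_t make_buffer_resource(const void* base, int32_t size_in_bytes)
//     {
//         int32x4_t res;
//         // dwords 0-1: 64-bit base address; dword 2: number of records in bytes
//         // dword 3: per-architecture configuration bits
//         res[3] = CK_BUFFER_RESOURCE_3RD_DWORD;
//         /* ... */
//         return res;
//     }
//
// make_buffer_resource and int32x4_t are placeholder names used only for illustration.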

// FMA instruction
#ifndef __HIP_DEVICE_COMPILE__ // for host code, define nothing
#elif defined(__gfx803__) || defined(__gfx900__) // for GPU code
#define CK_USE_AMD_V_MAC_F32
#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) // for GPU code
#define CK_USE_AMD_V_FMAC_F32
#define CK_USE_AMD_V_DOT2_F32_F16
#define CK_USE_AMD_V_DOT4_I32_I8
#elif defined(__gfx11__) || defined(__gfx12__)
#define CK_USE_AMD_V_FMAC_F32
#define CK_USE_AMD_V_DOT2_F32_F16
#define CK_USE_AMD_V_DOT4_I32_I8_GFX11
#endif

// MFMA instruction
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_USE_AMD_MFMA
#elif defined(__gfx9__) // for GPU code
#define CK_USE_AMD_MFMA
#endif

#if(defined(__gfx90a__) || defined(__gfx94__))
#define CK_USE_AMD_MFMA_BF16_1K_OP
#endif

#if defined(__gfx94__)
#define CK_USE_AMD_MFMA_GFX940
#endif

// buffer load
#define CK_USE_AMD_BUFFER_LOAD 1

// buffer store
#define CK_USE_AMD_BUFFER_STORE 1

// buffer atomic add: integer
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_INTEGER 1

// buffer atomic add: floating point
#ifndef __HIP_DEVICE_COMPILE__ // for host code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
#elif defined(__gfx9__) || defined(__gfx12__) // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 1
#else // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_ADD_FLOAT 0
#endif

#if(defined(__gfx90a__) || defined(__gfx94__)) // for GPU code
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 1
#else
#define CK_USE_AMD_BUFFER_ATOMIC_MAX_FLOAT64 0
#endif

// inline asm
#define CK_USE_AMD_INLINE_ASM 1

// inner product (V_MAC/V_FMAC)
#define CK_USE_AMD_V_MAC_INLINE_ASM 1

// V_DOT inline instructions, less efficient since they require adding
// `s_nop`s to avoid hazards
#define CK_USE_AMD_V_DOT_INLINE_ASM 0

// inner product using V_DOT with DPP8 modifiers
#define CK_USE_AMD_V_DOT_DPP8_INLINE_ASM 1

// LDS direct loads using inline assembly
#define CK_USE_AMD_LDS_DIRECT_LOAD_INLINE_ASM 0

// set rounding to nearest even as default for bf16 conversions
#define CK_USE_RNE_BF16_CONVERSION 1

// set rounding to nearest even as default for f8 conversions
#define CK_USE_SR_F8_CONVERSION 0

// set rounding to nearest even as default for f6 conversions
#define CK_USE_SR_F6_CONVERSION 0

// set rounding to nearest even as default for f4 conversions
#define CK_USE_SR_F4_CONVERSION 0

// shuffle pk_i4 values during conversion to optimize number of binary
// operations
#define CK_USE_PK4_LAYOUT_SHUFFLE 1

// block synchronization only waits on lgkmcnt(0) (via s_waitcnt), not vmcnt(0)
#define CK_EXPERIMENTAL_BLOCK_SYNC_LDS_WITHOUT_SYNC_VMEM 1

// experimental feature: multi index implemented as array
#define CK_EXPERIMENTAL_USE_DYNAMICALLY_INDEXED_MULTI_INDEX 0

// experimental feature: static tensor descriptor
#define CK_EXPERIMENTAL_STATIC_TENSOR_DESCRIPTOR 0

// experimental feature: buffer load/store/atomic-add/atomic-max OOB trick
// This (ifndef) is a hack to use customized behavior for buffer load rather than using the default
// setting. Don't use this hack unless absolutely necessary!
// FIXME: make the behavior of buffer load a configurable (template) parameter for each usage
#ifndef CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK
#define CK_EXPERIMENTAL_USE_BUFFER_LOAD_OOB_CHECK_OFFSET_TRICK 0
#endif
#define CK_EXPERIMENTAL_USE_BUFFER_STORE_OOB_CHECK_OFFSET_TRICK 1
#define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_ADD_OOB_CHECK_OFFSET_TRICK 1
#define CK_EXPERIMENTAL_USE_BUFFER_ATOMIC_MAX_OOB_CHECK_OFFSET_TRICK 1

// experimental feature: in-register sub-dword transpose
#define CK_EXPERIMENTAL_USE_IN_REGISTER_SUB_DWORD_TRANSPOSE 1

// experimental feature: merge transformation uses magic number division
#define CK_EXPERIMENTAL_MERGE_USE_MAGIC_DIVISION 1

// experimental feature: use __builtin_memcpy instead of pointer cast to access a vector from
// a pointer to scalar
#define CK_EXPERIMENTAL_USE_MEMCPY_FOR_VECTOR_ACCESS 0

// experimental feature: use __builtin_memcpy instead of union to do bit_cast
#define CK_EXPERIMENTAL_USE_MEMCPY_FOR_BIT_CAST 1

// experimental feature: optimize for inter-wave scheduling policy
#define CK_EXPERIMENTAL_INTER_WAVE_SCHEDULING 1
#define CK_EXPERIMENTAL_INTER_WAVE_SCHEDULING_MAC_CLUSTERS 1
// this makes make_default_loop_scheduler() return the interwave scheduling flag by default
#define CK_EXPERIMENTAL_DEFAULT_TO_INTER_WAVE_SCHEDULING 0
// experimental feature: add instances using interwave scheduling
#define CK_EXPERIMENTAL_INTER_WAVE_INSTANCES 1
// experimental feature: add instances using pipeline v2
#define CK_EXPERIMENTAL_PIPELINE_V2_INSTANCES 1
// experimental feature: optimize pipeline v2 by IGLP strategy (value=ID of strategy)
#ifndef CK_EXPERIMENTAL_PIPELINE_V2_IGLP_OPT
#define CK_EXPERIMENTAL_PIPELINE_V2_IGLP_OPT 0
#endif

// hack: has an underlying assumption that needs to be satisfied, otherwise it's a bug
// hack for forcing idx_diff_low_const to be kept in an SGPR. idx_diff_low_const must be
// thread-invariant, otherwise it's a bug
// TODO: separate index calculation into "compile-time", "global", "block", "wave", "thread"
#define CK_HACK_MERGE_CALCULATE_IDX_DIFF_LOW_CONST_USE_AMD_GCN_READ_FIRST_LANE 0

// workaround: compiler crash when compiling recursive lambda
#define CK_WORKAROUND_SWDEV_275126 1

// workaround: compiler crash when using buffer load/store for i8
#define CK_WORKAROUND_SWDEV_XXXXXX_INT8_BUFFER_LOAD_STORE_ISSUE 1

// workaround: compiler generating inefficient ds_write instructions
#define CK_WORKAROUND_SWDEV_XXXXXX_INT8_DS_WRITE_ISSUE 1

// workaround: verification failure, due to compiler regression, for conv bwd-data fp16 using some
// tuning parameters
#define CK_WORKAROUND_SWDEV_325164 0

// workaround: compiler not emitting reciprocal instruction from __frcp_rn()
#define CK_WORKAROUND_SWDEV_383542 1

// workaround: compiler issue on gfx908
#define CK_WORKAROUND_SWDEV_388832 1

// workaround: compiler issue on gfx950
#define CK_WORKAROUND_FP16_TO_FP8_CONVERSION 1

// workaround: compiler issue on gfx950
#define CK_WORKAROUND_BF16_TO_FP8_CONVERSION 1

// denorm test fix, necessary for gfx90a
#ifndef CK_GFX90A_DENORM_WORKAROUND
#define CK_GFX90A_DENORM_WORKAROUND 0
#endif // CK_GFX90A_DENORM_WORKAROUND
// Enable only for gfx90a
#if defined(__gfx90a__)
#if CK_GFX90A_DENORM_WORKAROUND
#define CK_GFX90A_DENORM_WORKAROUND 1
#endif // CK_GFX90A_DENORM_WORKAROUND is set to 1
#else
#define CK_GFX90A_DENORM_WORKAROUND 0
#endif // gfx90a

// set flag to 1 to build deprecated instances
#define CK_BUILD_DEPRECATED 1

namespace ck {

#if defined(__GFX9__) || !defined(__HIP_DEVICE_COMPILE__)
__device__ static constexpr int WarpSize = 64;
#else
__device__ static constexpr int WarpSize = 32;
#endif

enum struct InMemoryDataOperationEnum
{
    Set,
    AtomicAdd,
    AtomicMax,
    Add
};

// FIXME: use regular Sequence and remove this
template <InMemoryDataOperationEnum... Is>
struct InMemoryDataOperationEnumSequence
{
    static constexpr int mSize = sizeof...(Is);

    __host__ __device__ static constexpr InMemoryDataOperationEnum At(int I)
    {
        // the last dummy element is to prevent the compiler from complaining about an empty
        // array, when mSize = 0
        constexpr InMemoryDataOperationEnum mData[mSize + 1] = {Is..., InMemoryDataOperationEnum::Set};
        return mData[I];
    }
};
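// Illustrative sketch (not part of the original header): the sequence above provides a
// compile-time list of memory operations that can be queried by index, e.g.
//
//     using MemOps = InMemoryDataOperationEnumSequence<InMemoryDataOperationEnum::Set,
//                                                      InMemoryDataOperationEnum::AtomicAdd>;
//     static_assert(MemOps::mSize == 2, "");
//     constexpr auto op = MemOps::At(1); // == InMemoryDataOperationEnum::AtomicAdd
//
// MemOps and op are placeholder names used only for illustration.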

// index type
using index_t      = int32_t;
using long_index_t = int64_t;

} // namespace ck