// fattn-wmma-f16.cuh

/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.cuh"
#include "fattn-common.cuh"

#ifdef FP16_MMA_AVAILABLE
#include <mma.h>
#endif // FP16_MMA_AVAILABLE
// D == head size, VKQ_stride == num VKQ rows calculated in parallel:
template<int D, int ncols, int nwarps, int VKQ_stride, int parallel_blocks, typename KQ_acc_t, bool use_logit_softcap>
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(nwarps*WARP_SIZE, 1)
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
static __global__ void flash_attn_ext_f16(
        const char * __restrict__ Q,
        const char * __restrict__ K,
        const char * __restrict__ V,
        const char * __restrict__ mask,
        float      * __restrict__ dst,
        float2     * __restrict__ dst_meta,
        const float scale,
        const float max_bias,
        const float m0,
        const float m1,
        const uint32_t n_head_log2,
        const float logit_softcap,
        const int ne00,
        const int ne01,
        const int ne02,
        const int ne03,
        const int ne10,
        const int ne11,
        const int ne12,
        const int ne13,
        const int ne31,
        const int nb31,
        const int nb01,
        const int nb02,
        const int nb03,
        const int nb11,
        const int nb12,
        const int nb13,
        const int nb21,
        const int nb22,
        const int nb23,
        const int ne0,
        const int ne1,
        const int ne2,
        const int ne3) {
#ifdef FP16_MMA_AVAILABLE
    // Skip unused kernel variants for faster compilation:
    if (use_logit_softcap && !(D == 128 || D == 256)) {
        NO_DEVICE_CODE;
        return;
    }

    // In this kernel Q, K, V are matrices while i, j, k are matrix indices.

    const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on.
    const int ip  =        blockIdx.x % parallel_blocks;  // Index in group of blocks running for the same column in parallel.

    static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE.");
    static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16.");
    constexpr int frag_m = ncols == 8 ? 32 : 16;
    constexpr int frag_n = ncols == 8 ?  8 : 16;
    static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0.");
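    // The fragment shapes below correspond to the WMMA tile configurations available for half
    // precision, m16n16k16 and m32n8k16; for ncols == 8 the n dimension of a single fragment already
    // covers all 8 Q columns, which is presumably why the larger 32x8x16 shape is selected there.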
    typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a,    frag_m, frag_n, 16, half, nvcuda::wmma::row_major> frag_a_K;
    typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a,    frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_a_V;
    typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_b,    frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_b;
    typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, KQ_acc_t>                      frag_c_KQ;
    typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, half>                          frag_c_VKQ;

    constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel.
    constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy.
    static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps.");
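    // Worked example: with nwarps = 4 and frag_m = 16, KQ_stride_tc = 64 KQ rows are computed per step.
    // For D = 64, get_VKQ_stride() below gives VKQ_stride = 64, so VKQ_ratio = 1 and each warp owns a
    // distinct block of VKQ rows. For D = 80, VKQ_stride = 16, so VKQ_ratio = 4 and all 4 warps build
    // partial VKQ accumulators for the same rows, which are summed after the mma loop further below.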
    // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts:
    constexpr int D_padded = D + 8;
    constexpr int kqs_padded = FATTN_KQ_STRIDE + 8;
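    // E.g. for D = 128 a row of the KQ/VKQ buffers spans 136 half values instead of 128, so that
    // consecutive rows do not all start at the same shared memory bank.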
    constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half);

    const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
    const float * Q_f   = (const float *) (Q + nb02* blockIdx.y              + nb01*ic0);
    const half  * K_h   = (const half  *) (K + nb12*(blockIdx.y / gqa_ratio));
    const half  * V_h   = (const half  *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
    const half  * maskh = (const half  *)  mask + (nb31/sizeof(half))* ic0;
    const half2 * mask2 = (const half2 *)  mask + (nb31/sizeof(half))*(ic0/2);

    const int stride_Q  = nb01 / sizeof(float);
    const int stride_KV = nb11 / sizeof(half);
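    // E.g. with ne02 = 32 Q heads and ne12 = 8 K/V heads, gqa_ratio = 4 and Q heads 0-3 read K/V
    // head 0, Q heads 4-7 read K/V head 1, and so on (blockIdx.y indexes the Q head).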
    const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
    const half  slopeh = __float2half(slopef);
    const half2 slope2 = make_half2(slopef, slopef);
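    // ALiBi: get_alibi_slope() (defined in common.cuh) presumably returns 1.0f when max_bias <= 0 and
    // otherwise the standard ALiBi slope m0^(h+1) for head h < n_head_log2, or m1^(2*(h - n_head_log2) + 1)
    // for the remaining heads; the slope multiplies the mask values added to KQ below.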
    const half2 logit_softcap_2 = make_half2(logit_softcap, logit_softcap);

    frag_b Q_b[D/16][ncols/frag_n];

    // A single buffer for temporarily holding tiles of KQ and VKQ parts:
    constexpr int mem_KQ = ncols*kqs_padded*kqar;
    constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded;
    __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts];
    float * KQ_f = (float *) KQ;
    half2 * KQ2 = (half2 *) KQ;

    float    KQ_rowsum_f[ncols/nwarps] = {0.0f};
    float       KQ_max_f[ncols/nwarps];
    float KQ_max_scale_f[ncols/nwarps] = {0.0f};

#pragma unroll
    for (int j = 0; j < ncols/nwarps; ++j) {
        KQ_max_f[j] = -FLT_MAX/2.0f;
    }

    half2    KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}};
    half2       KQ_max_h2[ncols/nwarps];
    half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}};

#pragma unroll
    for (int j = 0; j < ncols/nwarps; ++j) {
        KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF);
    }

    __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice.
    half2 * VKQ2 = (half2 *) VKQ;
#pragma unroll
    for (int j0 = 0; j0 < ncols; j0 += nwarps) {
        const int j = j0 + threadIdx.y;
#pragma unroll
        for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
            const int i = i0 + threadIdx.x;
            if (i0 + WARP_SIZE > D/2 && i >= D/2) {
                break;
            }
            VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f);
        }
    }

    // Convert Q to half and apply scale, temporarily store in KQ:
#pragma unroll
    for (int j0 = 0; j0 < ncols; j0 += nwarps) {
        const int j = j0 + threadIdx.y;
#pragma unroll
        for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
            const int i = i0 + threadIdx.x;
            if (i0 + WARP_SIZE > D && i >= D) {
                break;
            }
            KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f;
        }
    }

    __syncthreads();

    // Load Q into tensor core fragments/registers since it will be used frequently:
#pragma unroll
    for (int i0 = 0; i0 < D; i0 += 16) {
#pragma unroll
        for (int j0 = 0; j0 < ncols; j0 += frag_n) {
            nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded);
        }
    }

    __syncthreads();

    // Iterate over ne11 == previous tokens:
    for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) {
        // Calculate tile of KQ:
#pragma unroll
        for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) {
            frag_c_KQ KQ_c[ncols/frag_n];
#pragma unroll
            for (int j = 0; j < ncols/frag_n; ++j) {
                nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f);
            }
#pragma unroll
            for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) {
                frag_a_K K_a;
                nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV);
#pragma unroll
                for (int j = 0; j < ncols/frag_n; ++j) {
                    nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]);
                }
            }
#pragma unroll
            for (int j0 = 0; j0 < ncols; j0 += frag_n) {
                nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major);
            }
        }

        __syncthreads();

        // Calculate softmax for each KQ column using the current max. value.
        // The divisor is stored in KQ_rowsum and will be applied at the end.
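        // Online softmax update: if m_old is the running row maximum, s_old the running row sum and
        // m_new = max(m_old, max_k KQ[j][k]) the maximum including the current tile, then
        //     s_new = exp(m_old - m_new)*s_old + sum_k exp(KQ[j][k] - m_new),
        // and the same factor exp(m_old - m_new) (KQ_max_scale) later rescales the accumulated VKQ rows.
        // Differences below SOFTMAX_FTZ_THRESHOLD are flushed to zero.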
#pragma unroll
        for (int j0 = 0; j0 < ncols; j0 += nwarps) {
            const int j = j0 + threadIdx.y;

            if (std::is_same<KQ_acc_t, float>::value) {
                float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE];
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k];

                    if (use_logit_softcap) {
                        KQ_f_tmp[k0/WARP_SIZE] = logit_softcap*tanhf(KQ_f_tmp[k0/WARP_SIZE]);
                    }
                }

                float KQ_max_new = KQ_max_f[j0/nwarps];
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
                    KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]);
                }
                KQ_max_new = warp_reduce_max(KQ_max_new);

                const float diff = KQ_max_f[j0/nwarps] - KQ_max_new;
                KQ_max_scale_f[j0/nwarps] = expf(diff);
                if (diff <= SOFTMAX_FTZ_THRESHOLD) {
                    KQ_max_scale_f[j0/nwarps] = 0.0f;
                }
                KQ_max_f[j0/nwarps] = KQ_max_new;

                float KQ_rowsum_add = 0.0f;
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps];
                    KQ_f_tmp[k0/WARP_SIZE] = expf(diff);
                    if (diff <= SOFTMAX_FTZ_THRESHOLD) {
                        KQ_f_tmp[k0/WARP_SIZE] = 0.0f;
                    }
                    KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE];
                    KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE];
                }
                KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);

                // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
                KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add;
            } else {
                half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)];
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k];

                    if (use_logit_softcap) {
                        // There is no dedicated hyperbolic tangent (tanh) function for half2.
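                        // Instead, use the identity tanh(x) = (exp(2x) - 1) / (exp(2x) + 1), evaluated
                        // with h2exp below, and scale the result by logit_softcap afterwards.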
                        KQ2_tmp[k0/WARP_SIZE] = h2exp(KQ2_tmp[k0/WARP_SIZE]*make_half2(2.0f, 2.0f));
                        KQ2_tmp[k0/WARP_SIZE] = (KQ2_tmp[k0/WARP_SIZE] - make_half2(1.0f, 1.0f))
                                               /(KQ2_tmp[k0/WARP_SIZE] + make_half2(1.0f, 1.0f));
                        KQ2_tmp[k0/WARP_SIZE] *= logit_softcap_2;
                    }
                }

                half2 KQ_max_new = KQ_max_h2[j0/nwarps];
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
                    KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
                }
                KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));

                const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
                KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
                const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
                *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask;
                KQ_max_h2[j0/nwarps] = KQ_max_new;

                half2 KQ_rowsum_add = make_half2(0.0f, 0.0f);
#pragma unroll
                for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
                    const int k = k0 + threadIdx.x;

                    const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps];
                    KQ2_tmp[k0/WARP_SIZE] = h2exp(diff);
                    const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
                    *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask;
                    KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE];
                    KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE];
                }
                KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);

                // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
                KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add;
            }
        }

        __syncthreads();

        frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n];
#pragma unroll
        for (int j0 = 0; j0 < ncols; j0 += frag_n) {
#pragma unroll
            for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
                const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
                nvcuda::wmma::load_matrix_sync(
                    KQ_b[k0/(VKQ_ratio*16)][j0/frag_n],
                    KQ + j0*(kqar*kqs_padded) + k,
                    kqar*kqs_padded);
            }
        }

        frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n];
#pragma unroll
        for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) {
#pragma unroll
            for (int j = 0; j < ncols/frag_n; ++j) {
                nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f);
            }

#pragma unroll
            for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
                const int k = k0 + (threadIdx.y % VKQ_ratio)*16;

                frag_a_V v_a;
                nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV);
#pragma unroll
                for (int j = 0; j < ncols/frag_n; ++j) {
                    nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]);
                }
            }
        }

        __syncthreads();

        const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded);
#pragma unroll
        for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) {
#pragma unroll
            for (int j0 = 0; j0 < ncols; j0 += frag_n) {
                nvcuda::wmma::store_matrix_sync(
                    KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio),
                    VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n],
                    D_padded, nvcuda::wmma::mem_col_major);
            }
        }

        __syncthreads();
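        // Each of the VKQ_ratio warp groups stored its own partial VKQ tile into the shared KQ buffer
        // above; sum those partials and fold them into the running VKQ accumulator, rescaling the old
        // accumulator by the softmax correction factor (KQ_max_scale) first.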
#pragma unroll
        for (int j0 = 0; j0 < ncols; j0 += nwarps) {
            const int j = j0 + threadIdx.y;

            half2 VKQ_scale;
            if (std::is_same<KQ_acc_t, float>::value) {
                VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]);
            } else {
                VKQ_scale = KQ_max_scale_h2[j0/nwarps];
            }

#pragma unroll
            for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
                const int i = i0 + threadIdx.x;
                if (i0 + WARP_SIZE > D/2 && i >= D/2) {
                    break;
                }

                half2 VKQ_add = make_half2(0.0f, 0.0f);
#pragma unroll
                for (int l = 0; l < VKQ_ratio; ++l) {
                    VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i];
                }
                VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add;
            }
        }

        __syncthreads();
    }
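    // Write back the results. For parallel_blocks == 1 the softmax divisor KQ_rowsum is applied here;
    // otherwise each block writes an unnormalized partial result plus its row maximum and row sum to
    // dst_meta, and the partial results are presumably merged by the separate combine step that
    // launch_fattn() (fattn-common.cuh) performs for parallel_blocks > 1.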
#pragma unroll
    for (int j0 = 0; j0 < ncols; j0 += nwarps) {
        const int j_VKQ = j0 + threadIdx.y;
        if (ic0 + j_VKQ >= ne01) {
            return;
        }
        const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;

        float KQ_rowsum_j;
        if (std::is_same<KQ_acc_t, float>::value) {
            KQ_rowsum_j = KQ_rowsum_f[j0/nwarps];
        } else {
            KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]);
        }

#pragma unroll
        for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
            const int i = i0 + threadIdx.x;
            if (i0 + WARP_SIZE > D && i >= D) {
                break;
            }
            float dst_val = VKQ[j_VKQ*D_padded + i];
            if (parallel_blocks == 1) {
                dst_val /= KQ_rowsum_j;
            }
            dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val;
        }

        if (parallel_blocks == 1 || threadIdx.x != 0) {
            continue;
        }

        float2 dst_meta_val;
        if (std::is_same<KQ_acc_t, float>::value) {
            dst_meta_val.x = KQ_max_f[j0/nwarps];
        } else {
            dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]);
        }
        dst_meta_val.y = KQ_rowsum_j;
        dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val;
    }
#else
    NO_DEVICE_CODE;
#endif // FP16_MMA_AVAILABLE
}

constexpr int get_max_power_of_2(int x) {
    return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1;
}

static_assert(get_max_power_of_2(1) == 1, "Test failed.");
static_assert(get_max_power_of_2(2) == 2, "Test failed.");
static_assert(get_max_power_of_2(4) == 4, "Test failed.");
static_assert(get_max_power_of_2(6) == 2, "Test failed.");

// Number of VKQ rows calculated in parallel:
constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) {
    return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m;
}

static_assert(get_VKQ_stride(128, 1, 32) ==  32, "Test failed.");
static_assert(get_VKQ_stride(128, 2, 32) ==  64, "Test failed.");
static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed.");
static_assert(get_VKQ_stride( 64, 1, 32) ==  32, "Test failed.");
static_assert(get_VKQ_stride( 64, 2, 32) ==  64, "Test failed.");
static_assert(get_VKQ_stride( 64, 4, 32) ==  64, "Test failed.");
static_assert(get_VKQ_stride( 80, 1, 16) ==  16, "Test failed.");
static_assert(get_VKQ_stride( 80, 2, 16) ==  16, "Test failed.");
static_assert(get_VKQ_stride( 80, 4, 16) ==  16, "Test failed.");
template <int D, int cols_per_block, typename KQ_acc_t>
void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * KQV = dst;
    const ggml_tensor * Q   = dst->src[0];

    constexpr int nwarps = 4;

    constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16;
    const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3];
    const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;

    float logit_softcap;
    memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float));
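    // Heuristic for how many blocks cooperate on one Q column: if even a 4-way split of the KV
    // dimension would yield fewer than 2 blocks per SM, use parallel_blocks = 4; if a 2-way split
    // still stays below that, use 2; otherwise run one block per column and normalize in-kernel.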
    if (4*blocks_num_pb1 < 2*nsm) {
        constexpr int parallel_blocks = 4;
        fattn_kernel_t fattn_kernel;
        if (logit_softcap == 0.0f) {
            constexpr bool use_logit_softcap = false;
            fattn_kernel = flash_attn_ext_f16<
                D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
        } else {
            constexpr bool use_logit_softcap = true;
            fattn_kernel = flash_attn_ext_f16<
                D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
        }
        launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
        return;
    }
    if (2*blocks_num_pb1 < 2*nsm) {
        constexpr int parallel_blocks = 2;
        fattn_kernel_t fattn_kernel;
        if (logit_softcap == 0.0f) {
            constexpr bool use_logit_softcap = false;
            fattn_kernel = flash_attn_ext_f16<
                D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
        } else {
            constexpr bool use_logit_softcap = true;
            fattn_kernel = flash_attn_ext_f16<
                D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
        }
        launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
        return;
    }
    constexpr int parallel_blocks = 1;
    fattn_kernel_t fattn_kernel;
    if (logit_softcap == 0.0f) {
        constexpr bool use_logit_softcap = false;
        fattn_kernel = flash_attn_ext_f16<
            D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
    } else {
        constexpr bool use_logit_softcap = true;
        fattn_kernel = flash_attn_ext_f16<
            D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>;
    }
    launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
}
#define DECL_FATTN_WMMA_F16_CASE(D, cols_per_block, KQ_acc_t)                         \
    template void ggml_cuda_flash_attn_ext_wmma_f16_case                              \
    <D, cols_per_block, KQ_acc_t>(ggml_backend_cuda_context & ctx, ggml_tensor * dst) \

extern DECL_FATTN_WMMA_F16_CASE( 64, 16, float);
extern DECL_FATTN_WMMA_F16_CASE( 80, 16, float);
extern DECL_FATTN_WMMA_F16_CASE( 96, 16, float);
extern DECL_FATTN_WMMA_F16_CASE(112, 16, float);
extern DECL_FATTN_WMMA_F16_CASE(128, 16, float);
extern DECL_FATTN_WMMA_F16_CASE(256, 16, float);

extern DECL_FATTN_WMMA_F16_CASE( 64, 32, float);
extern DECL_FATTN_WMMA_F16_CASE( 80, 32, float);
extern DECL_FATTN_WMMA_F16_CASE( 96, 32, float);
extern DECL_FATTN_WMMA_F16_CASE(112, 32, float);
extern DECL_FATTN_WMMA_F16_CASE(128, 32, float);
// extern DECL_FATTN_WMMA_F16_CASE(256, 16, float);

extern DECL_FATTN_WMMA_F16_CASE( 64,  8, half);
extern DECL_FATTN_WMMA_F16_CASE( 96,  8, half);
extern DECL_FATTN_WMMA_F16_CASE(128,  8, half);
extern DECL_FATTN_WMMA_F16_CASE(256,  8, half);

extern DECL_FATTN_WMMA_F16_CASE( 64, 16, half);
extern DECL_FATTN_WMMA_F16_CASE( 80, 16, half);
extern DECL_FATTN_WMMA_F16_CASE( 96, 16, half);
extern DECL_FATTN_WMMA_F16_CASE(112, 16, half);
extern DECL_FATTN_WMMA_F16_CASE(128, 16, half);
extern DECL_FATTN_WMMA_F16_CASE(256, 16, half);

extern DECL_FATTN_WMMA_F16_CASE( 64, 32, half);
extern DECL_FATTN_WMMA_F16_CASE( 80, 32, half);
extern DECL_FATTN_WMMA_F16_CASE( 96, 32, half);
extern DECL_FATTN_WMMA_F16_CASE(112, 32, half);
extern DECL_FATTN_WMMA_F16_CASE(128, 32, half);
extern DECL_FATTN_WMMA_F16_CASE(256, 16, half);