/**
 * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mmq.cuh"
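
// mul_mat_q: multiplies the quantized matrix src0 with src1, whose data has
// already been quantized to Q8_1 blocks (src1_ddq_i, see the QK8_1 assert below),
// and writes the f32 result to dst_dd_i for the row range [row_low, row_high)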
void ggml_cuda_op_mul_mat_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {

    const int64_t ne00 = src0->ne[0];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;
    const int64_t stride00 = ne00 / ggml_blck_size(src0->type);

    int id = ggml_cuda_get_device();
    const int compute_capability = ggml_cuda_info().devices[id].cc;

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;

    // The stream-k decomposition is only faster for recent NVIDIA GPUs.
    // Also its fixup needs to allocate a temporary buffer in the memory pool.
    // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer.
    const bool use_stream_k = compute_capability >= GGML_CUDA_CC_VOLTA && compute_capability < GGML_CUDA_CC_OFFSET_AMD && src1_ncols == ne11;

    const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst, use_stream_k};

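    // dispatch to the mul_mat_q kernel template instantiated for src0's quantized type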
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_q_case<GGML_TYPE_Q4_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_q_case<GGML_TYPE_Q5_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_q_case<GGML_TYPE_Q5_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_q_case<GGML_TYPE_Q3_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_q_case<GGML_TYPE_Q4_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_q_case<GGML_TYPE_Q5_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_XXS:
            mul_mat_q_case<GGML_TYPE_IQ2_XXS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_XS:
            mul_mat_q_case<GGML_TYPE_IQ2_XS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_S:
            mul_mat_q_case<GGML_TYPE_IQ2_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ3_XXS:
            mul_mat_q_case<GGML_TYPE_IQ3_XXS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ3_S:
            mul_mat_q_case<GGML_TYPE_IQ3_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ1_S:
            mul_mat_q_case<GGML_TYPE_IQ1_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_XS:
            mul_mat_q_case<GGML_TYPE_IQ4_XS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_NL:
            mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
            break;
        default:
            GGML_ABORT("fatal error");
            break;
    }

    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddf_i);
}
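
// decides whether the quantized matrix multiplication kernels (MMQ) should be
// used instead of the cuBLAS-based fallback, based on the quantized type, the
// device compute capability cc, and the batch size ne11 (columns of src1)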
bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
#ifdef GGML_CUDA_FORCE_CUBLAS
    return false;
#endif // GGML_CUDA_FORCE_CUBLAS

    // only quantized types with dedicated MMQ kernels qualify
    bool mmq_supported;

    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
            mmq_supported = true;
            break;
        default:
            mmq_supported = false;
            break;
    }

    if (!mmq_supported) {
        return false;
    }

    // if int8 tensor core (MMA) kernels are available, always use MMQ
    if (int8_mma_available(cc)) {
        return true;
    }

    // MMQ requires at least the DP4A instruction
    if (cc < GGML_CUDA_CC_DP4A) {
        return false;
    }

#ifdef GGML_CUDA_FORCE_MMQ
    return true;
#endif // GGML_CUDA_FORCE_MMQ

    // NVIDIA GPUs (AMD compute capabilities are offset by GGML_CUDA_CC_OFFSET_AMD):
    // always use MMQ before Volta, afterwards only for small batch sizes
    if (cc < GGML_CUDA_CC_OFFSET_AMD) {
        return cc < GGML_CUDA_CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
    }

    // AMD GPUs: use MMQ on pre-RDNA3 architectures other than CDNA and Vega 20,
    // or for small batch sizes on any architecture
    return (cc < GGML_CUDA_CC_RDNA3 && cc != GGML_CUDA_CC_CDNA && cc != GGML_CUDA_CC_VEGA20) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}