/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "mmq.cuh"
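
// Launch the MMQ (matrix multiplication for quantized types) kernel matching the
// quantization type of src0. src0_dd_i points to the quantized weights, src1_ddq_i to
// the activations already quantized to q8_1 blocks; the fp32 result for the row range
// [row_low, row_high) is written to dst_dd_i.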
void ggml_cuda_op_mul_mat_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {
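
    // ne00/ne10 is the shared dimension (row length) of src0 and src1, ne11 the number of
    // src1 columns; the row length must be a whole number of q8_1 blocks (QK8_1 elements)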
    const int64_t ne00 = src0->ne[0];

    const int64_t nb01 = src0->nb[1];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;
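    // nb01 is the src0 row stride in bytes; dividing by the type size (bytes per quantized
    // block) converts it to a stride in blocks, the unit the kernels index with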
    const int64_t stride00 = nb01 / ggml_type_size(src0->type);

    int id = ggml_cuda_get_device();
    const int compute_capability = ggml_cuda_info().devices[id].cc;

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;
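
    // kernel arguments shared by all of the type-specialized launches below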
    const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst};

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_q_case<GGML_TYPE_Q4_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_q_case<GGML_TYPE_Q5_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_q_case<GGML_TYPE_Q5_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_q_case<GGML_TYPE_Q3_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_q_case<GGML_TYPE_Q4_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_q_case<GGML_TYPE_Q5_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_XXS:
            mul_mat_q_case<GGML_TYPE_IQ2_XXS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_XS:
            mul_mat_q_case<GGML_TYPE_IQ2_XS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ2_S:
            mul_mat_q_case<GGML_TYPE_IQ2_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ3_XXS:
            mul_mat_q_case<GGML_TYPE_IQ3_XXS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ3_S:
            mul_mat_q_case<GGML_TYPE_IQ3_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ1_S:
            mul_mat_q_case<GGML_TYPE_IQ1_S>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_XS:
            mul_mat_q_case<GGML_TYPE_IQ4_XS>(ctx, args, stream);
            break;
        case GGML_TYPE_IQ4_NL:
            mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
            break;
        default:
            GGML_ABORT("fatal error");
            break;
    }
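
    // these parameters are part of the common mul_mat interface but are not used by MMQ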
    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddf_i);
}
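
// Decide whether MMQ should be used for the given quantization type, compute capability
// (cc) and number of src1 columns (ne11); when this returns false the caller takes the
// dequantization + cuBLAS/rocBLAS path instead.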
bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
#ifdef GGML_CUDA_FORCE_CUBLAS
    return false;
#endif // GGML_CUDA_FORCE_CUBLAS

    bool mmq_supported;

    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
        case GGML_TYPE_IQ2_XXS:
        case GGML_TYPE_IQ2_XS:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_IQ3_XXS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ1_S:
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ4_NL:
            mmq_supported = true;
            break;
        default:
            mmq_supported = false;
            break;
    }

    if (!mmq_supported) {
        return false;
    }
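
    // with int8 tensor core (mma) support the MMQ kernels are used unconditionally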
    if (int8_mma_available(cc)) {
        return true;
    }
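
    // the remaining kernels require the dp4a instruction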
    if (cc < MIN_CC_DP4A) {
        return false;
    }

#ifdef GGML_CUDA_FORCE_MMQ
    return true;
#endif //GGML_CUDA_FORCE_MMQ
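
    // without tensor cores, use the dp4a MMQ kernels unconditionally on older GPUs
    // (NVIDIA pre-Volta, AMD pre-RDNA3), and on newer ones only for small batch sizes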
    if (cc < CC_OFFSET_AMD) {
        return cc < CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
    }

    return cc < CC_RDNA3 || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}