/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "binbcast.cuh"
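
// Element-wise operators applied per destination element. Note that op_repeat
// simply returns its second argument: ggml_cuda_op_repeat() at the bottom of
// this file passes the tensor being repeated in the src1 slot (and a null
// src0 data pointer), so "repeat" reduces to a broadcasting copy of src1
// into dst.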
static __device__ __forceinline__ float op_repeat(const float a, const float b) {
    return b;
    GGML_UNUSED(a);
}

static __device__ __forceinline__ float op_add(const float a, const float b) {
    return a + b;
}

static __device__ __forceinline__ float op_sub(const float a, const float b) {
    return a - b;
}

static __device__ __forceinline__ float op_mul(const float a, const float b) {
    return a * b;
}

static __device__ __forceinline__ float op_div(const float a, const float b) {
    return a / b;
}
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
static __global__ void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
        int ne0, int ne1, int ne2, int ne3,
        int ne10, int ne11, int ne12, int ne13,
        /*int s0, */ int s1,  int s2,  int s3,
        /*int s00,*/ int s01, int s02, int s03,
        /*int s10,*/ int s11, int s12, int s13) {
    const int i0s = blockDim.x*blockIdx.x + threadIdx.x;
    const int i1  = (blockDim.y*blockIdx.y + threadIdx.y);
    const int i2  = (blockDim.z*blockIdx.z + threadIdx.z) / ne3;
    const int i3  = (blockDim.z*blockIdx.z + threadIdx.z) % ne3;

    if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
        return;
    }

    const int i11 = i1 % ne11;
    const int i12 = i2 % ne12;
    const int i13 = i3 % ne13;

    const size_t i_src0 =  i3*s03 +  i2*s02 +  i1*s01;
    const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
    const size_t i_dst  =  i3*s3  +  i2*s2  +  i1*s1;

    const src0_t * src0_row = src0 + i_src0;
    const src1_t * src1_row = src1 + i_src1;
    dst_t * dst_row = dst + i_dst;

    for (int i0 = i0s; i0 < ne0; i0 += blockDim.x*gridDim.x) {
        const int i10 = i0 % ne10;
        dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
    }
}
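
// Broadcasting in k_bin_bcast (and in k_bin_bcast_unravel below) works by
// wrapping each dst index into src1's possibly smaller extent with a modulo,
// e.g. i11 = i1 % ne11. As a concrete sketch: adding a per-row bias b of
// shape [ne10, 1, 1, 1] to a tensor a of shape [ne0, ne1, ne2, ne3] (with
// ne10 == ne0) makes every modulo collapse to 0 except i10 == i0, so each
// thread computes dst[i3,i2,i1,i0] = a[i3,i2,i1,i0] + b[i0].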
template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst,
        int ne0, int ne1, int ne2, int ne3,
        int ne10, int ne11, int ne12, int ne13,
        /*int s0, */ int s1,  int s2,  int s3,
        /*int s00,*/ int s01, int s02, int s03,
        /*int s10,*/ int s11, int s12, int s13) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;

    const int i3 = i/(ne2*ne1*ne0);
    const int i2 = (i/(ne1*ne0)) % ne2;
    const int i1 = (i/ne0) % ne1;
    const int i0 = i % ne0;

    if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
        return;
    }

    const int i11 = i1 % ne11;
    const int i12 = i2 % ne12;
    const int i13 = i3 % ne13;

    const size_t i_src0 =  i3*s03 +  i2*s02 +  i1*s01;
    const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
    const size_t i_dst  =  i3*s3  +  i2*s2  +  i1*s1;

    const src0_t * src0_row = src0 + i_src0;
    const src1_t * src1_row = src1 + i_src1;
    dst_t * dst_row = dst + i_dst;

    const int i10 = i0 % ne10;
    dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
}
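
// The unravel variant above is the 1D fallback for k_bin_bcast: it linearizes
// the whole [ne0, ne1, ne2, ne3] index space onto a flat grid and recovers the
// four coordinates with division and modulo. It exists because CUDA caps the
// grid's y and z dimensions at 65535 blocks, which the 3D launch of
// k_bin_bcast can exceed for tensors with very large ne2*ne3 (see the launch
// logic below).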
template<float (*bin_op)(const float, const float)>
struct bin_bcast_cuda {
    template<typename src0_t, typename src1_t, typename dst_t>
    void operator()(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst,
            const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd,
            cudaStream_t stream) {

        GGML_TENSOR_BINARY_OP_LOCALS

        int nr0 = ne10/ne0;
        int nr1 = ne11/ne1;
        int nr2 = ne12/ne2;
        int nr3 = ne13/ne3;

        int nr[4] = { nr0, nr1, nr2, nr3 };

        // collapse dimensions until first broadcast dimension
        int64_t cne[] = {ne0, ne1, ne2, ne3};
        int64_t cne0[] = {ne00, ne01, ne02, ne03};
        int64_t cne1[] = {ne10, ne11, ne12, ne13};

        size_t cnb[] = {nb0, nb1, nb2, nb3};
        size_t cnb0[] = {nb00, nb01, nb02, nb03};
        size_t cnb1[] = {nb10, nb11, nb12, nb13};

        auto collapse = [](int64_t cne[]) {
            cne[0] *= cne[1];
            cne[1] = cne[2];
            cne[2] = cne[3];
            cne[3] = 1;
        };

        auto collapse_nb = [](size_t cnb[], const int64_t cne[]) {
            cnb[1] *= cne[1];
            cnb[2] *= cne[2];
            cnb[3] *= cne[3];
        };

        if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && ggml_is_contiguous(dst)) {
            for (int i = 0; i < 4; i++) {
                if (nr[i] != 1) {
                    break;
                }
                if (i > 0) {
                    collapse_nb(cnb, cne);
                    collapse_nb(cnb0, cne0);
                    collapse_nb(cnb1, cne1);
                    collapse(cne);
                    collapse(cne0);
                    collapse(cne1);
                }
            }
        }
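
        // Collapsing sketch: for contiguous tensors, leading dimensions that
        // are not broadcast (nr[i] == 1) are fused together to expose longer
        // contiguous rows. E.g. dst/src0 of shape [4, 8, 2, 1] with src1 of
        // shape [4, 8, 1, 1] fuses dims 0 and 1 into a single dimension of
        // size 32, so the kernel's inner loop runs over 32 contiguous
        // elements instead of 4.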
        {
            int64_t ne0 = cne[0];
            int64_t ne1 = cne[1];
            int64_t ne2 = cne[2];
            int64_t ne3 = cne[3];

            //int64_t ne00 = cne0[0]; GGML_UNUSED(ne00);
            //int64_t ne01 = cne0[1]; GGML_UNUSED(ne01);
            //int64_t ne02 = cne0[2]; GGML_UNUSED(ne02);
            //int64_t ne03 = cne0[3]; GGML_UNUSED(ne03);

            int64_t ne10 = cne1[0];
            int64_t ne11 = cne1[1];
            int64_t ne12 = cne1[2];
            int64_t ne13 = cne1[3];

            size_t nb0 = cnb[0];
            size_t nb1 = cnb[1];
            size_t nb2 = cnb[2];
            size_t nb3 = cnb[3];

            size_t nb00 = cnb0[0];
            size_t nb01 = cnb0[1];
            size_t nb02 = cnb0[2];
            size_t nb03 = cnb0[3];

            size_t nb10 = cnb1[0];
            size_t nb11 = cnb1[1];
            size_t nb12 = cnb1[2];
            size_t nb13 = cnb1[3];

            size_t s0 = nb0 / sizeof(dst_t);
            size_t s1 = nb1 / sizeof(dst_t);
            size_t s2 = nb2 / sizeof(dst_t);
            size_t s3 = nb3 / sizeof(dst_t);

            size_t s10 = nb10 / sizeof(src1_t);
            size_t s11 = nb11 / sizeof(src1_t);
            size_t s12 = nb12 / sizeof(src1_t);
            size_t s13 = nb13 / sizeof(src1_t);

            size_t s00 = nb00 / sizeof(src0_t);
            size_t s01 = nb01 / sizeof(src0_t);
            size_t s02 = nb02 / sizeof(src0_t);
            size_t s03 = nb03 / sizeof(src0_t);

            GGML_ASSERT(nb0 % sizeof(dst_t) == 0);
            GGML_ASSERT(nb1 % sizeof(dst_t) == 0);
            GGML_ASSERT(nb2 % sizeof(dst_t) == 0);
            GGML_ASSERT(nb3 % sizeof(dst_t) == 0);
            GGML_ASSERT(nb00 % sizeof(src0_t) == 0);
            GGML_ASSERT(nb01 % sizeof(src0_t) == 0);
            GGML_ASSERT(nb02 % sizeof(src0_t) == 0);
            GGML_ASSERT(nb03 % sizeof(src0_t) == 0);
            GGML_ASSERT(nb10 % sizeof(src1_t) == 0);
            GGML_ASSERT(nb11 % sizeof(src1_t) == 0);
            GGML_ASSERT(nb12 % sizeof(src1_t) == 0);
            GGML_ASSERT(nb13 % sizeof(src1_t) == 0);

            GGML_ASSERT(s0 == 1);
            GGML_ASSERT(s00 == 1);
            GGML_ASSERT(s10 == 1);

            const int block_size = 128;

            int64_t hne0 = std::max(ne0/2LL, 1LL);

            dim3 block_dims;
            block_dims.x = std::min<unsigned int>(hne0, block_size);
            block_dims.y = std::min<unsigned int>(ne1, block_size / block_dims.x);
            block_dims.z = std::min(std::min<unsigned int>(ne2*ne3, block_size / block_dims.x / block_dims.y), 64U);

            dim3 block_nums(
                (hne0 + block_dims.x - 1) / block_dims.x,
                (ne1 + block_dims.y - 1) / block_dims.y,
                (ne2*ne3 + block_dims.z - 1) / block_dims.z
            );
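
            // The grid along x is sized for hne0 (roughly half of ne0), so
            // each thread of k_bin_bcast handles about two elements via its
            // stride loop over i0. ne2 and ne3 share the grid's z dimension,
            // which is what can overflow the 65535-block limit handled below.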
            if (block_nums.z > 65535) {
                // this is the maximum number of blocks in z dimension, fallback to 1D grid kernel
                int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size;
                k_bin_bcast_unravel<bin_op><<<block_num, block_size, 0, stream>>>(
                    src0_dd, src1_dd, dst_dd,
                    ne0, ne1, ne2, ne3,
                    ne10, ne11, ne12, ne13,
                    /* s0, */ s1, s2, s3,
                    /* s00, */ s01, s02, s03,
                    /* s10, */ s11, s12, s13);
            } else {
                k_bin_bcast<bin_op><<<block_nums, block_dims, 0, stream>>>(
                    src0_dd, src1_dd, dst_dd,
                    ne0, ne1, ne2, ne3,
                    ne10, ne11, ne12, ne13,
                    /* s0, */ s1, s2, s3,
                    /* s00, */ s01, s02, s03,
                    /* s10, */ s11, s12, s13);
            }
        }
    }
};
template<class op>
static void ggml_cuda_op_bin_bcast(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
    const void * src0_dd, const void * src1_dd, void * dst_dd, cudaStream_t stream) {

    GGML_ASSERT(src1->type == GGML_TYPE_F32);

    if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
        op()(src0, src1, dst, (const float *) src0_dd, (const float *) src1_dd, (float *) dst_dd, stream);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
        op()(src0, src1, dst, (const half *) src0_dd, (const float *) src1_dd, (half *) dst_dd, stream);
    } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
        op()(src0, src1, dst, (const half *) src0_dd, (const float *) src1_dd, (float *) dst_dd, stream);
    } else {
        fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__,
            ggml_type_name(dst->type), ggml_type_name(src0->type), ggml_type_name(src1->type));
        GGML_ABORT("fatal error");
    }
}
void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_repeat>>(dst, dst->src[0], dst, nullptr, dst->src[0]->data, dst->data, ctx.stream());
}
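
// Note the argument wiring for repeat: dst itself is passed in the src0 slot
// so that GGML_TENSOR_BINARY_OP_LOCALS picks up dst's (larger) shape, the
// tensor being repeated goes in the src1 slot, and the src0 data pointer is
// null, so the kernels substitute 0.0f for a (which op_repeat ignores anyway).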
void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_add>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
}

void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_sub>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
}

void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_mul>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
}

void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    ggml_cuda_op_bin_bcast<bin_bcast_cuda<op_div>>(dst->src[0], dst->src[1], dst, dst->src[0]->data, dst->src[1]->data, dst->data, ctx.stream());
}
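
// Usage sketch (illustrative only, not part of this file): in the CUDA
// backend these entry points are invoked once per graph node, roughly as in
// the hypothetical dispatch below; the actual surrounding plumbing lives in
// ggml-cuda.cu, not here.
//
//     switch (dst->op) {
//         case GGML_OP_REPEAT: ggml_cuda_op_repeat(ctx, dst); break;
//         case GGML_OP_ADD:    ggml_cuda_op_add(ctx, dst);    break;
//         case GGML_OP_SUB:    ggml_cuda_op_sub(ctx, dst);    break;
//         case GGML_OP_MUL:    ggml_cuda_op_mul(ctx, dst);    break;
//         case GGML_OP_DIV:    ggml_cuda_op_div(ctx, dst);    break;
//         default: break;
//     }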