conv-transpose-1d.cu

/**
 * llama.cpp - commit 8962422b1c6f9b8b15f5aeaea42600bcc2d44177 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "conv-transpose-1d.cuh"

static __global__ void conv_transpose_1d_kernel(
        const int s0, const int p0, const int d0, const int output_size,
        const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
        const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
        const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
        const float * src0, const float * src1, float * dst) {
    // One thread per output element. p0 and d0 are accepted but unused:
    // the host side currently passes p0 = 0 and d0 = 1.
    int global_index = threadIdx.x + blockIdx.x * blockDim.x;
    if (global_index >= output_size) {
        return;
    }

    int out_index = global_index / dst_ne0; // output channel
    int idx       = global_index % dst_ne0; // position within the output row

    float accumulator = 0;

    // Accumulate over input channels c and input positions i: input element i
    // contributes to the output positions [i*s0, i*s0 + src0_ne0).
    for (int c = 0; c < src0_ne2; c++) {
        int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0);
        int input_offset  = src1_ne0 * c;

        for (int i = 0; i < src1_ne0; i++) {
            if (!(idx >= i*s0 && idx < i*s0 + src0_ne0)) {
                continue;
            }
            int weight_idx = idx - i*s0;

            float kernel_weight = src0[kernel_offset + weight_idx];
            float input_value   = src1[input_offset + i];

            accumulator += kernel_weight * input_value;
        }
    }
    dst[global_index] = accumulator;
}
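
// In index form, each thread computes one element of the transposed
// convolution. With K = src0_ne0 (kernel width), L = src1_ne0 (input width),
// output channel o = out_index and output position j = idx:
//
//     dst[o][j] = sum over c in [0, src0_ne2) and i in [0, L) of
//                 src1[c][i] * src0[c][o][j - i*s0],  where 0 <= j - i*s0 < K
//
// e.g. with s0 = 2 and K = 3, output position j = 4 receives contributions
// from input position i = 1 (weight index 2) and i = 2 (weight index 0).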

static void conv_transpose_1d_f32_f32_cuda(
        const int s0, const int p0, const int d0, const int output_size,
        const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
        const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
        const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
        const float * src0, const float * src1, float * dst,
        cudaStream_t stream) {
    // Round up so that every output element is covered by a thread.
    const int num_blocks = (output_size + CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE;
    conv_transpose_1d_kernel<<<num_blocks, CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE, 0, stream>>>(
        s0, p0, d0, output_size,
        src0_ne0, src0_ne1, src0_ne2, src0_ne3,
        src1_ne0, src1_ne1, src1_ne2, src1_ne3,
        dst_ne0, dst_ne1, dst_ne2, dst_ne3,
        src0, src1, dst);
}
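
// Launch-size example, assuming CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE is 256 as
// defined in conv-transpose-1d.cuh: output_size = 1000 yields
// (1000 + 255) / 256 = 4 blocks of 256 threads; the bounds check at the top
// of the kernel masks off the 24 surplus threads.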

void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0]; // kernel weights
    const float * src0_d = (const float *)src0->data;

    const ggml_tensor * src1 = dst->src[1]; // input
    const float * src1_d = (const float *)src1->data;

    float * dst_d = (float *)dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));

    const int32_t * opts = (const int32_t *)dst->op_params;

    // Only the stride is read from op_params; padding and dilation are
    // currently fixed to 0 and 1 respectively.
    const int s0 = opts[0];
    const int p0 = 0;//opts[3];
    const int d0 = 1;//opts[4];

    const int64_t kernel_size = ggml_nelements(src0); // currently unused
    const int64_t input_size  = ggml_nelements(src1); // currently unused
    const int64_t output_size = ggml_nelements(dst);

    conv_transpose_1d_f32_f32_cuda(s0, p0, d0, output_size,
        src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
        src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
        dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
        src0_d, src1_d, dst_d, stream);
}
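
// A minimal host-side sketch of how this op is reached (illustrative; shapes
// below assume ggml's ne order, kernel [K, Cout, Cin] and input [L, Cin]):
//
//     struct ggml_tensor * k   = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 3, 2, 2);
//     struct ggml_tensor * x   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 8, 2);
//     struct ggml_tensor * out = ggml_conv_transpose_1d(ctx, k, x,
//                                                       /*s0=*/2, /*p0=*/0, /*d0=*/1);
//
// When the graph is evaluated on the CUDA backend, the resulting
// GGML_OP_CONV_TRANSPOSE_1D node dispatches to ggml_cuda_op_conv_transpose_1d.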