// pad.cu
/**
 * llama.cpp - commit 3f1ae2e32cde00c39b96be6d01c2997c29bae555 - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "pad.cuh"
  27. static __global__ void pad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02, const int ne03) {
  28. // blockIdx.z: idx of ne2*ne3, aka ne02*ne03
  29. // blockIdx.y: idx of ne1
  30. // blockIDx.x: idx of ne0 / BLOCK_SIZE
  31. int nidx = threadIdx.x + blockIdx.x * blockDim.x;
  32. if (nidx >= ne0) {
  33. return;
  34. }
  35. // operation
  36. int offset_dst =
  37. nidx +
  38. blockIdx.y * ne0 +
  39. blockIdx.z * ne0 * gridDim.y;
  40. if (nidx < ne00 && blockIdx.y < ne01 && blockIdx.z < ne02*ne03) {
  41. int offset_src =
  42. nidx +
  43. blockIdx.y * ne00 +
  44. blockIdx.z * ne00 * ne01;
  45. dst[offset_dst] = x[offset_src];
  46. } else {
  47. dst[offset_dst] = 0.0f;
  48. }
  49. }
  50. static void pad_f32_cuda(const float * x, float * dst,
  51. const int ne00, const int ne01, const int ne02, const int ne03,
  52. const int ne0, const int ne1, const int ne2, const int ne3, cudaStream_t stream) {
  53. int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE;
  54. dim3 gridDim(num_blocks, ne1, ne2*ne3);
  55. pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(x, dst, ne0, ne00, ne01, ne02, ne03);
  56. }
  57. void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
  58. const ggml_tensor * src0 = dst->src[0];
  59. const float * src0_d = (const float *)src0->data;
  60. float * dst_d = (float *)dst->data;
  61. cudaStream_t stream = ctx.stream();
  62. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  63. GGML_ASSERT(dst->type == GGML_TYPE_F32);
  64. GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
  65. pad_f32_cuda(src0_d, dst_d,
  66. src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
  67. dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream);
  68. }
  69. static __global__ void unpad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02, const int ne03) {
  70. // blockIdx.z: idx of ne2*ne3, aka ne02*ne03
  71. // blockIdx.y: idx of ne1
  72. // blockIDx.x: idx of ne0 / BLOCK_SIZE
  73. int nidx = threadIdx.x + blockIdx.x * blockDim.x;
  74. if (nidx >= ne0) {
  75. return;
  76. }
  77. // operation
  78. int offset_dst =
  79. nidx +
  80. blockIdx.y * ne0 +
  81. blockIdx.z * ne0 * gridDim.y;
  82. if (nidx < ne00 && blockIdx.y < ne01 && blockIdx.z < ne02*ne03) {
  83. int offset_src =
  84. nidx +
  85. blockIdx.y * ne00 +
  86. blockIdx.z * ne00 * ne01;
  87. dst[offset_dst] = x[offset_src];
  88. }
  89. }
  90. static void unpad_f32_cuda(const float * x, float * dst,
  91. const int ne00, const int ne01, const int ne02, const int ne03,
  92. const int ne0, const int ne1, const int ne2, const int ne3, cudaStream_t stream) {
  93. int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE;
  94. dim3 gridDim(num_blocks, ne1, ne2*ne3);
  95. unpad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(x, dst, ne0, ne00, ne01, ne02, ne03);
  96. }
  97. void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
  98. const ggml_tensor * src0 = dst->src[0];
  99. const float * src0_d = (const float *)src0->data;
  100. float * dst_d = (float *)dst->data;
  101. cudaStream_t stream = ctx.stream();
  102. GGML_ASSERT(src0->type == GGML_TYPE_F32);
  103. GGML_ASSERT(dst->type == GGML_TYPE_F32);
  104. GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors
  105. unpad_f32_cuda(src0_d, dst_d,
  106. src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
  107. dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream);
  108. }