amx.cpp 8.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246
  1. /**
  2. * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
  3. *
  4. * MIT License
  5. *
  6. * Copyright (c) 2023-2024 The ggml authors
  7. *
  8. * Permission is hereby granted, free of charge, to any person obtaining a copy
  9. * of this software and associated documentation files (the "Software"), to deal
  10. * in the Software without restriction, including without limitation the rights
  11. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  12. * copies of the Software, and to permit persons to whom the Software is
  13. * furnished to do so, subject to the following conditions:
  14. *
  15. * The above copyright notice and this permission notice shall be included in all
  16. * copies or substantial portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  21. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  22. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  23. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  24. * SOFTWARE.
  25. */
  26. #include "amx.h"
  27. #include "common.h"
  28. #include "mmq.h"
  29. #include "ggml-backend-impl.h"
  30. #include "ggml-backend.h"
  31. #include "ggml-impl.h"
  32. #include "ggml-cpu.h"
  33. #include "ggml-cpu-traits.h"
  34. #if defined(__gnu_linux__)
  35. #include <sys/syscall.h>
  36. #include <unistd.h>
  37. #endif
  38. #include <cstdlib>
  39. #include <cstring>
  40. #include <memory>
  41. #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
// AMX type_traits
  43. namespace ggml::cpu::amx {
  44. class tensor_traits : public ggml::cpu::tensor_traits {
  45. bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
  46. size = ggml_backend_amx_desired_wsize(op);
  47. return true;
  48. }
  49. bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
  50. if (op->op == GGML_OP_MUL_MAT) {
  51. ggml_backend_amx_mul_mat(params, op);
  52. return true;
  53. }
  54. return false;
  55. }
  56. };
  57. static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
  58. static tensor_traits traits;
  59. return &traits;
  60. }
  61. } // namespace ggml::cpu::amx
  62. // AMX buffer interface
  63. static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
  64. free(buffer->context);
  65. }
  66. static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
  67. return (void *) (buffer->context);
  68. }
  69. static void ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
  70. tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
  71. GGML_UNUSED(buffer);
  72. }
  73. static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
  74. uint8_t value, size_t offset, size_t size) {
  75. memset((char *) tensor->data + offset, value, size);
  76. GGML_UNUSED(buffer);
  77. }
  78. static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
  79. const void * data, size_t offset, size_t size) {
  80. if (qtype_has_amx_kernels(tensor->type)) {
  81. GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type));
  82. ggml_backend_amx_convert_weight(tensor, data, offset, size);
  83. } else {
  84. memcpy((char *) tensor->data + offset, data, size);
  85. }
  86. GGML_UNUSED(buffer);
  87. }
  88. /*
// need to figure out what we need to do with buffer->extra.
  90. static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
  91. GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
  92. memcpy(data, (const char *)tensor->data + offset, size);
  93. GGML_UNUSED(buffer);
  94. }
  95. static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
  96. if (ggml_backend_buffer_is_host(src->buffer)) {
  97. if (qtype_has_amx_kernels(src->type)) {
  98. ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst));
  99. } else {
  100. memcpy(dst->data, src->data, ggml_nbytes(src));
  101. }
  102. return true;
  103. }
  104. return false;
  105. GGML_UNUSED(buffer);
  106. }
  107. */
  108. static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
  109. memset(buffer->context, value, buffer->size);
  110. }
// v-table for AMX buffers; entries must stay in ggml_backend_buffer_i
// declaration order. get_tensor/cpy_tensor are nullptr until the interaction
// with the repacked weight layout is sorted out (see commented block above).
static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
    /* .free_buffer     = */ ggml_backend_amx_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_amx_buffer_get_base,
    /* .init_tensor     = */ ggml_backend_amx_buffer_init_tensor,
    /* .memset_tensor   = */ ggml_backend_amx_buffer_memset_tensor,
    /* .set_tensor      = */ ggml_backend_amx_buffer_set_tensor,
    /* .get_tensor      = */ nullptr,
    /* .cpy_tensor      = */ nullptr,
    /* .clear           = */ ggml_backend_amx_buffer_clear,
    /* .reset           = */ nullptr,
};
  122. static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
  123. return "AMX";
  124. GGML_UNUSED(buft);
  125. }
  126. static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
  127. void * data = ggml_aligned_malloc(size);
  128. if (data == NULL) {
  129. fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
  130. return NULL;
  131. }
  132. return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
  133. }
  134. static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
  135. return TENSOR_ALIGNMENT;
  136. GGML_UNUSED(buft);
  137. }
namespace ggml::cpu::amx {
// Buffer-type context: reports which ops the AMX path can service and hands
// out the tensor_traits attached to AMX-resident weights.
// NOTE(review): base is inherited privately (class default); the object is
// only round-tripped through the buft's void* context, which appears to be
// why this works — confirm against ggml-cpu-traits before changing.
class extra_buffer_type : ggml::cpu::extra_buffer_type {
    // True when `op` is a 2D MUL_MAT whose src0 lives in an AMX buffer with a
    // supported type, and whose src1 is an f32 host tensor.
    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
        // handle only 2d gemm for now
        auto is_contiguous_2d = [](const struct ggml_tensor * t) {
            return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
        };

        if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) &&  // src0 must be contiguous
            is_contiguous_2d(op->src[1]) &&                               // src1 must be contiguous
            op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() &&
            op->ne[0] % (TILE_N * 2) == 0 &&  // out_features is 32x
            (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) {
            // src1 must be host buffer
            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
                return false;
            }
            // src1 must be float32
            if (op->src[1]->type == GGML_TYPE_F32) {
                return true;
            }
        }
        return false;
    }

    // Return the traits stored in src0->extra by init_tensor when src0 is in
    // an AMX buffer; nullptr tells the caller to use the default CPU path.
    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
        if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer &&
            op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) {
            return (ggml::cpu::tensor_traits *) op->src[0]->extra;
        }
        return nullptr;
    }
};
}  // namespace ggml::cpu::amx
  170. static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
  171. return ggml_backend_amx_get_alloc_size(tensor);
  172. GGML_UNUSED(buft);
  173. }
  174. #define ARCH_GET_XCOMP_PERM 0x1022
  175. #define ARCH_REQ_XCOMP_PERM 0x1023
  176. #define XFEATURE_XTILECFG 17
  177. #define XFEATURE_XTILEDATA 18
  178. static bool ggml_amx_init() {
  179. #if defined(__gnu_linux__)
  180. if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
  181. fprintf(stderr, "AMX is not ready to be used!\n");
  182. return false;
  183. }
  184. return true;
  185. #elif defined(_WIN32)
  186. return true;
  187. #endif
  188. }
// Return the process-wide AMX buffer type, or nullptr when the OS refuses to
// enable AMX tile state. The `new`-ed extra_buffer_type context intentionally
// lives for the whole process.
ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
    static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
        /* .iface = */ {
        /* .get_name         = */ ggml_backend_amx_buffer_type_get_name,
        /* .alloc_buffer     = */ ggml_backend_amx_buffer_type_alloc_buffer,
        /* .get_alignment    = */ ggml_backend_amx_buffer_type_get_alignment,
        /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
        /* .get_alloc_size   = */ ggml_backend_amx_buffer_type_get_alloc_size,
        /* .is_host          = */ nullptr,
        },
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
    };

    // NOTE(review): ggml_amx_init() is re-run on every call; presumably the
    // syscall is cheap/idempotent — confirm before hoisting behind a static.
    if (!ggml_amx_init()) {
        return nullptr;
    }
    return &ggml_backend_buffer_type_amx;
}
  207. #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)