ggml-cpu.h

/**
 * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include "ggml.h"
#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

    // the compute plan that needs to be prepared for ggml_graph_compute()
    // since https://github.com/ggerganov/ggml/issues/287
    struct ggml_cplan {
        size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
        uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`

        int n_threads;
        struct ggml_threadpool * threadpool;

        // abort ggml_graph_compute when true
        ggml_abort_callback abort_callback;
        void *              abort_callback_data;
    };
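
    // Usage sketch (illustrative, not part of this header): a minimal abort callback that
    // stops graph computation once a caller-owned flag is set. The `my_abort` function and
    // the `stop_requested` flag are hypothetical names; only ggml_abort_callback (from
    // ggml.h) and the ggml_cplan fields above come from the library.
    //
    //     static bool my_abort(void * data) {
    //         return *(volatile bool *) data; // returning true aborts ggml_graph_compute()
    //     }
    //
    //     // when filling in a ggml_cplan:
    //     //     cplan.abort_callback      = my_abort;
    //     //     cplan.abort_callback_data = &stop_requested;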

    // numa strategies
    enum ggml_numa_strategy {
        GGML_NUMA_STRATEGY_DISABLED   = 0,
        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
        GGML_NUMA_STRATEGY_ISOLATE    = 2,
        GGML_NUMA_STRATEGY_NUMACTL    = 3,
        GGML_NUMA_STRATEGY_MIRROR     = 4,
        GGML_NUMA_STRATEGY_COUNT
    };

    GGML_BACKEND_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
    GGML_BACKEND_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node
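
    // Usage sketch (illustrative, not part of this header): enable NUMA-aware placement
    // once at startup, before any compute work is issued.
    //
    //     ggml_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);
    //     if (ggml_is_numa()) {
    //         // more than one NUMA node was detected
    //     }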

    GGML_BACKEND_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_BACKEND_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_BACKEND_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_BACKEND_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    GGML_BACKEND_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_BACKEND_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
    GGML_BACKEND_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_BACKEND_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

    GGML_BACKEND_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_BACKEND_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
    GGML_BACKEND_API float   ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_BACKEND_API void    ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
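
    // Usage sketch (illustrative, not part of this header): filling and reading a small
    // F32 tensor through the 1-d accessors. Assumes a context created with ggml_init()
    // from ggml.h; the buffer size is arbitrary.
    //
    //     struct ggml_init_params params = { /*mem_size =*/ 16*1024*1024, /*mem_buffer =*/ NULL, /*no_alloc =*/ false };
    //     struct ggml_context * ctx = ggml_init(params);
    //
    //     struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    //     ggml_set_f32(t, 0.0f);             // fill with a constant
    //     ggml_set_f32_1d(t, 3, 1.5f);       // write one element
    //     float v = ggml_get_f32_1d(t, 3);   // v == 1.5f
    //
    //     ggml_free(ctx);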

    GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new          (struct ggml_threadpool_params * params);
    GGML_BACKEND_API void                     ggml_threadpool_free         (struct ggml_threadpool * threadpool);
    GGML_BACKEND_API int                      ggml_threadpool_get_n_threads(struct ggml_threadpool * threadpool);
    GGML_BACKEND_API void                     ggml_threadpool_pause        (struct ggml_threadpool * threadpool);
    GGML_BACKEND_API void                     ggml_threadpool_resume       (struct ggml_threadpool * threadpool);
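
    // Usage sketch (illustrative, not part of this header): creating a threadpool and
    // reusing it across compute calls. ggml_threadpool_params_default() is assumed to be
    // provided by ggml.h; check your ggml version if it lives elsewhere.
    //
    //     struct ggml_threadpool_params tpp = ggml_threadpool_params_default(8);
    //     struct ggml_threadpool *       tp = ggml_threadpool_new(&tpp);
    //
    //     // pass `tp` to ggml_graph_plan() below, or to ggml_backend_cpu_set_threadpool()
    //
    //     ggml_threadpool_free(tp);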

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, caller must allocate memory for plan.work_data
    GGML_BACKEND_API struct ggml_cplan ggml_graph_plan(
                  const struct ggml_cgraph * cgraph,
                                       int   n_threads, /* = GGML_DEFAULT_N_THREADS */
                  struct ggml_threadpool   * threadpool /* = NULL */ );
    GGML_BACKEND_API enum ggml_status  ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);

    // same as ggml_graph_compute() but the work data is allocated as a part of the context
    // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
    GGML_BACKEND_API enum ggml_status  ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);
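
    // Usage sketch (illustrative, not part of this header): plan, allocate the work
    // buffer as required above, then run the graph. Assumes `graph` is a ggml_cgraph
    // that was already built by the caller.
    //
    //     struct ggml_cplan plan = ggml_graph_plan(graph, GGML_DEFAULT_N_THREADS, NULL);
    //
    //     uint8_t * work = NULL;
    //     if (plan.work_size > 0) {
    //         work           = malloc(plan.work_size); // caller owns this buffer
    //         plan.work_data = work;
    //     }
    //
    //     enum ggml_status status = ggml_graph_compute(graph, &plan);
    //     free(work);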

    //
    // system info
    //

    // x86
    GGML_BACKEND_API int ggml_cpu_has_sse3       (void);
    GGML_BACKEND_API int ggml_cpu_has_ssse3      (void);
    GGML_BACKEND_API int ggml_cpu_has_avx        (void);
    GGML_BACKEND_API int ggml_cpu_has_avx_vnni   (void);
    GGML_BACKEND_API int ggml_cpu_has_avx2       (void);
    GGML_BACKEND_API int ggml_cpu_has_f16c       (void);
    GGML_BACKEND_API int ggml_cpu_has_fma        (void);
    GGML_BACKEND_API int ggml_cpu_has_avx512     (void);
    GGML_BACKEND_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_BACKEND_API int ggml_cpu_has_avx512_vnni(void);
    GGML_BACKEND_API int ggml_cpu_has_avx512_bf16(void);
    GGML_BACKEND_API int ggml_cpu_has_amx_int8   (void);

    // ARM
    GGML_BACKEND_API int ggml_cpu_has_neon       (void);
    GGML_BACKEND_API int ggml_cpu_has_arm_fma    (void);
    GGML_BACKEND_API int ggml_cpu_has_fp16_va    (void);
    GGML_BACKEND_API int ggml_cpu_has_dotprod    (void);
    GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void);
    GGML_BACKEND_API int ggml_cpu_has_sve        (void);
    GGML_BACKEND_API int ggml_cpu_get_sve_cnt    (void); // sve vector length in bytes

    // other
    GGML_BACKEND_API int ggml_cpu_has_riscv_v    (void);
    GGML_BACKEND_API int ggml_cpu_has_vsx        (void);
    GGML_BACKEND_API int ggml_cpu_has_wasm_simd  (void);
    GGML_BACKEND_API int ggml_cpu_has_llamafile  (void);
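
    // Usage sketch (illustrative, not part of this header): each ggml_cpu_has_* function
    // returns a non-zero value when the feature is available, so they can be logged directly.
    //
    //     printf("AVX2 = %d, NEON = %d, SVE bytes = %d\n",
    //            ggml_cpu_has_avx2(), ggml_cpu_has_neon(), ggml_cpu_get_sve_cnt());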

    // Internal types and functions exposed for tests and benchmarks

    typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
                                    const void * GGML_RESTRICT y, size_t by, int nrc);

    struct ggml_type_traits_cpu {
        ggml_from_float_t from_float;
        ggml_vec_dot_t    vec_dot;
        enum ggml_type    vec_dot_type;
        int64_t           nrows; // number of rows to process simultaneously
    };

    GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type);

    GGML_BACKEND_API void ggml_cpu_init(void);
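
    // Usage sketch (illustrative, not part of this header): looking up the CPU traits for
    // a quantized type, e.g. to find which type the second operand of its dot-product
    // kernel is expected to be in.
    //
    //     ggml_cpu_init(); // initialize CPU-specific tables before using the kernels
    //
    //     const struct ggml_type_traits_cpu * traits = ggml_get_type_traits_cpu(GGML_TYPE_Q4_0);
    //     enum ggml_type vdt = traits->vec_dot_type;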

    //
    // CPU backend
    //

    GGML_BACKEND_API ggml_backend_t ggml_backend_cpu_init(void);

    GGML_BACKEND_API bool ggml_backend_is_cpu                (ggml_backend_t backend);
    GGML_BACKEND_API void ggml_backend_cpu_set_n_threads     (ggml_backend_t backend_cpu, int n_threads);
    GGML_BACKEND_API void ggml_backend_cpu_set_threadpool    (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
    GGML_BACKEND_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);

    GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void);
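
    // Usage sketch (illustrative, not part of this header): creating the CPU backend and
    // configuring its thread count; graphs are then run through the ggml-backend API.
    //
    //     ggml_backend_t backend = ggml_backend_cpu_init();
    //     if (ggml_backend_is_cpu(backend)) {
    //         ggml_backend_cpu_set_n_threads(backend, 8);
    //     }
    //     // ... build a graph and run it via ggml_backend_graph_compute(backend, graph) ...
    //     ggml_backend_free(backend);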

#ifdef __cplusplus
}
#endif