
/**
 * llama.cpp - git 059031b8c40e1f4ba60586842c5b1ed3ddf61842
 *
 * MIT License
 *
 * Copyright (c) 2023-2024 The ggml authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

//
// GGML Tensor Library
//
// This documentation is still a work in progress.
// If you wish some specific topics to be covered, feel free to drop a comment:
//
//   https://github.com/ggerganov/whisper.cpp/issues/40
//
// ## Overview
//
// This library implements:
//
//  - a set of tensor operations
//  - automatic differentiation
//  - basic optimization algorithms
//
// The aim of this library is to provide a minimalistic approach for various machine learning tasks. This includes,
// but is not limited to, the following:
//
//  - linear regression
//  - support vector machines
//  - neural networks
//
// The library allows the user to define a certain function using the available tensor operations. This function
// definition is represented internally via a computation graph. Each tensor operation in the function definition
// corresponds to a node in the graph. Having the computation graph defined, the user can choose to compute the
// function's value and/or its gradient with respect to the input variables. Optionally, the function can be optimized
// using one of the available optimization algorithms.
//
// For example, here we define the function: f(x) = a*x^2 + b
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//       };
//
//       // memory allocation happens here
//       struct ggml_context * ctx = ggml_init(params);
//
//       struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//
//       ggml_set_param(ctx, x); // x is an input variable
//
//       struct ggml_tensor * a  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * b  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
//       struct ggml_tensor * x2 = ggml_mul(ctx, x, x);
//       struct ggml_tensor * f  = ggml_add(ctx, ggml_mul(ctx, a, x2), b);
//
//       ...
//   }
//
// Notice that the function definition above does not involve any actual computation. The computation is performed only
// when the user explicitly requests it. For example, to compute the function's value at x = 2.0:
//
//   {
//       ...
//
//       struct ggml_cgraph * gf = ggml_new_graph(ctx);
//       ggml_build_forward_expand(gf, f);
//
//       // set the input variable and parameter values
//       ggml_set_f32(x, 2.0f);
//       ggml_set_f32(a, 3.0f);
//       ggml_set_f32(b, 4.0f);
//
//       ggml_graph_compute_with_ctx(ctx, gf, n_threads);
//
//       printf("f = %f\n", ggml_get_f32_1d(f, 0));
//
//       ...
//   }
//
// The actual computation is performed in the ggml_graph_compute() function.
//
// The ggml_new_tensor_...() functions create new tensors. They are allocated in the memory buffer provided to the
// ggml_init() function. You have to be careful not to exceed the memory buffer size. Therefore, you have to know
// in advance how much memory you need for your computation. Alternatively, you can allocate a large enough memory
// buffer and, after defining the computation graph, call the ggml_used_mem() function to find out how much memory
// was actually needed.
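//
// For example (an illustrative sketch; the sizes are arbitrary):
//
//   {
//       struct ggml_init_params params = {
//           .mem_size   = 16*1024*1024,
//           .mem_buffer = NULL,
//           .no_alloc   = false,
//       };
//
//       struct ggml_context * ctx = ggml_init(params);
//
//       // ... define tensors and build the graph ...
//
//       printf("memory actually needed: %zu bytes\n", ggml_used_mem(ctx));
//
//       ggml_free(ctx);
//   }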
//
// The ggml_set_param() function marks a tensor as an input variable. This is used by the automatic
// differentiation and optimization algorithms.
//
// The described approach makes it possible to define the function graph once and then compute its forward or backward
// graphs multiple times. All computations will use the same memory buffer allocated in the ggml_init() function. This
// way the user can avoid the memory allocation overhead at runtime.
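//
// For example, reusing the graph built above to evaluate f at a new input (a sketch; gf, x
// and f are the variables from the earlier snippet):
//
//   {
//       ggml_set_f32(x, 5.0f);
//
//       ggml_graph_compute_with_ctx(ctx, gf, n_threads);
//
//       printf("f(5) = %f\n", ggml_get_f32_1d(f, 0));
//   }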
//
// The library supports multi-dimensional tensors - up to 4 dimensions. The FP16 and FP32 data types are first class
// citizens, but in theory the library can be extended to support FP8 and integer data types.
//
// Each tensor operation produces a new tensor. Initially the library was envisioned to support only the use of unary
// and binary operations. Most of the available operations fall into one of these two categories. With time, it became
// clear that the library needs to support more complex operations. The way to support these operations is not clear
// yet, but a few examples are demonstrated in the following operations:
//
//  - ggml_permute()
//  - ggml_conv_1d_1s()
//  - ggml_conv_1d_2s()
//
// For each tensor operator, the library implements a forward and backward computation function. The forward function
// computes the output tensor value given the input tensor values. The backward function computes the adjoint of the
// input tensors given the adjoint of the output tensor. For a detailed explanation of what this means, take a
// calculus class, or watch the following video:
//
//   What is Automatic Differentiation?
//   https://www.youtube.com/watch?v=wG_nF1awSSY
//
//
// ## Tensor data (struct ggml_tensor)
//
// The tensors are stored in memory via the ggml_tensor struct. The structure provides information about the size of
// the tensor, the data type, and the memory buffer where the tensor data is stored. Additionally, it contains
// pointers to the "source" tensors - i.e. the tensors that were used to compute the current tensor. For example:
//
//   {
//       struct ggml_tensor * c = ggml_add(ctx, a, b);
//
//       assert(c->src[0] == a);
//       assert(c->src[1] == b);
//   }
//
// The multi-dimensional tensors are stored in row-major order. The ggml_tensor struct contains fields for the
// number of elements in each dimension ("ne") as well as the number of bytes ("nb", a.k.a. stride). This makes it
// possible to store tensors that are not contiguous in memory, which is useful for operations such as transposition
// and permutation. All tensor operations have to take the stride into account and not assume that the tensor is
// contiguous in memory.
//
// The data of the tensor is accessed via the "data" pointer. For example:
//
//   {
//       const int nx = 2;
//       const int ny = 3;
//
//       struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, ny);
//
//       for (int y = 0; y < ny; y++) {
//           for (int x = 0; x < nx; x++) {
//               *(float *) ((char *) a->data + y*a->nb[1] + x*a->nb[0]) = x + y;
//           }
//       }
//
//       ...
//   }
//
// Alternatively, there are helper functions, such as ggml_get_f32_1d() and ggml_set_f32_1d(), that can be used.
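//
// For example, the loop above can also be written with the 1d helpers and the row-major flat
// index y*nx + x (an illustrative sketch):
//
//   {
//       ggml_set_f32_1d(a, 2*nx + 1, 3.0f);           // a[y = 2][x = 1] = 3.0
//       const float v = ggml_get_f32_1d(a, 2*nx + 1); // v == 3.0f
//   }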
//
// ## The matrix multiplication operator (ggml_mul_mat)
//
// TODO
//
//
// ## Multi-threading
//
// TODO
//
//
// ## Overview of ggml.c
//
// TODO
//
//
// ## SIMD optimizations
//
// TODO
//
//
// ## Debugging ggml
//
// TODO
//
//
#ifdef GGML_SHARED
#    if defined(_WIN32) && !defined(__MINGW32__)
#        ifdef GGML_BUILD
#            define GGML_API __declspec(dllexport)
#        else
#            define GGML_API __declspec(dllimport)
#        endif
#    else
#        define GGML_API __attribute__ ((visibility ("default")))
#    endif
#else
#    define GGML_API
#endif

#ifdef GGML_MULTIPLATFORM
#    if defined(_WIN32)
#        define GGML_CALL
#    else
#        define GGML_CALL __attribute__((__ms_abi__))
#    endif
#else
#    define GGML_CALL
#endif

// TODO: support for clang
#ifdef __GNUC__
#    define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
#    define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
#    define GGML_DEPRECATED(func, hint) func
#endif

#ifndef __GNUC__
#    define GGML_ATTRIBUTE_FORMAT(...)
#elif defined(__MINGW32__)
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GGML_FILE_MAGIC   0x67676d6c // "ggml"
#define GGML_FILE_VERSION 1

#define GGML_QNT_VERSION        2    // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this

#define GGML_MAX_DIMS           4
#define GGML_MAX_PARAMS         2048
#define GGML_MAX_CONTEXTS       64
#define GGML_MAX_SRC            10
#ifndef GGML_MAX_NAME
#define GGML_MAX_NAME           64
#endif
#define GGML_MAX_OP_PARAMS      64
#define GGML_DEFAULT_N_THREADS  4
#define GGML_DEFAULT_GRAPH_SIZE 2048

#if UINTPTR_MAX == 0xFFFFFFFF
    #define GGML_MEM_ALIGN 4
#else
    #define GGML_MEM_ALIGN 16
#endif

#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1

#define GGUF_MAGIC "GGUF"

#define GGUF_VERSION 3

#define GGUF_DEFAULT_ALIGNMENT 32

#define GGML_UNUSED(x) (void)(x)

#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
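// e.g. GGML_PAD(13, 16) == 16 and GGML_PAD(17, 16) == 32 - rounds x up to the next multiple
// of n (note: assumes n is a power of two)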
#define GGML_ASSERT(x) \
    do { \
        if (!(x)) { \
            fflush(stdout); \
            fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
            ggml_print_backtrace(); \
            abort(); \
        } \
    } while (0)

#ifndef NDEBUG
#define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached")
#elif defined(__GNUC__)
#define GGML_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
#define GGML_UNREACHABLE() __assume(0)
#else
#define GGML_UNREACHABLE() ((void) 0)
#endif
// used to copy the number of elements and stride in bytes of tensors into local variables.
// main purpose is to reduce code duplication and improve readability.
//
// example:
//
//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
//    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb);
//
#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
    const type prefix##0 = (pointer)->array[0]; \
    GGML_UNUSED(prefix##0);
#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
    const type prefix##1 = (pointer)->array[1]; \
    GGML_UNUSED(prefix##1);
#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
    const type prefix##2 = (pointer)->array[2]; \
    GGML_UNUSED(prefix##2);
#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
    const type prefix##3 = (pointer)->array[3]; \
    GGML_UNUSED(prefix##3);

#define GGML_TENSOR_UNARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)

#define GGML_TENSOR_BINARY_OP_LOCALS \
    GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb0, src0, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb) \
    GGML_TENSOR_LOCALS(int64_t, ne,  dst,  ne) \
    GGML_TENSOR_LOCALS(size_t,  nb,  dst,  nb)
#ifdef __cplusplus
extern "C" {
#endif

    enum ggml_status {
        GGML_STATUS_ALLOC_FAILED = -2,
        GGML_STATUS_FAILED       = -1,
        GGML_STATUS_SUCCESS      = 0,
        GGML_STATUS_ABORTED      = 1,
    };

    // get ggml_status name string
    GGML_API GGML_CALL const char * ggml_status_to_string(enum ggml_status status);

    // ieee 754-2008 half-precision float16
    // todo: make this not an integral type
    typedef uint16_t ggml_fp16_t;
    GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t);
    GGML_API ggml_fp16_t ggml_fp32_to_fp16(float);
    GGML_API void        ggml_fp16_to_fp32_row(const ggml_fp16_t *, float *, int64_t);
    GGML_API void        ggml_fp32_to_fp16_row(const float *, ggml_fp16_t *, int64_t);
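    // for example, round-tripping a constant through half precision (an illustrative sketch):
    //
    //   ggml_fp16_t h = ggml_fp32_to_fp16(3.14159f);
    //   float       f = ggml_fp16_to_fp32(h); // ~3.1406 - fp16 keeps roughly 3 decimal digits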

    // google brain half-precision bfloat16
    typedef struct { uint16_t bits; } ggml_bf16_t;
    GGML_API ggml_bf16_t ggml_fp32_to_bf16(float);
    GGML_API float       ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16
    GGML_API void        ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t);
    GGML_API void        ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t);
    struct ggml_object;
    struct ggml_context;

    // NOTE: always add types at the end of the enum to keep backward compatibility
    enum ggml_type {
        GGML_TYPE_F32     = 0,
        GGML_TYPE_F16     = 1,
        GGML_TYPE_Q4_0    = 2,
        GGML_TYPE_Q4_1    = 3,
        // GGML_TYPE_Q4_2 = 4, support has been removed
        // GGML_TYPE_Q4_3 = 5, support has been removed
        GGML_TYPE_Q5_0    = 6,
        GGML_TYPE_Q5_1    = 7,
        GGML_TYPE_Q8_0    = 8,
        GGML_TYPE_Q8_1    = 9,
        GGML_TYPE_Q2_K    = 10,
        GGML_TYPE_Q3_K    = 11,
        GGML_TYPE_Q4_K    = 12,
        GGML_TYPE_Q5_K    = 13,
        GGML_TYPE_Q6_K    = 14,
        GGML_TYPE_Q8_K    = 15,
        GGML_TYPE_IQ2_XXS = 16,
        GGML_TYPE_IQ2_XS  = 17,
        GGML_TYPE_IQ3_XXS = 18,
        GGML_TYPE_IQ1_S   = 19,
        GGML_TYPE_IQ4_NL  = 20,
        GGML_TYPE_IQ3_S   = 21,
        GGML_TYPE_IQ2_S   = 22,
        GGML_TYPE_IQ4_XS  = 23,
        GGML_TYPE_I8      = 24,
        GGML_TYPE_I16     = 25,
        GGML_TYPE_I32     = 26,
        GGML_TYPE_I64     = 27,
        GGML_TYPE_F64     = 28,
        GGML_TYPE_IQ1_M   = 29,
        GGML_TYPE_BF16    = 30,
        GGML_TYPE_COUNT,
    };
    // precision
    enum ggml_prec {
        GGML_PREC_DEFAULT,
        GGML_PREC_F32,
    };

    enum ggml_backend_type {
        GGML_BACKEND_TYPE_CPU       = 0,
        GGML_BACKEND_TYPE_GPU       = 10,
        GGML_BACKEND_TYPE_GPU_SPLIT = 20,
    };

    // model file types
    enum ggml_ftype {
        GGML_FTYPE_UNKNOWN              = -1,
        GGML_FTYPE_ALL_F32              = 0,
        GGML_FTYPE_MOSTLY_F16           = 1,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_0          = 2,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1          = 3,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4,  // tok_embeddings.weight and output.weight are F16
        GGML_FTYPE_MOSTLY_Q8_0          = 7,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_0          = 8,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_1          = 9,  // except 1d tensors
        GGML_FTYPE_MOSTLY_Q2_K          = 10, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q3_K          = 11, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q4_K          = 12, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q5_K          = 13, // except 1d tensors
        GGML_FTYPE_MOSTLY_Q6_K          = 14, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ2_XXS       = 15, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ2_XS        = 16, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ3_XXS       = 17, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ1_S         = 18, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ4_NL        = 19, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ3_S         = 20, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ2_S         = 21, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ4_XS        = 22, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ1_M         = 23, // except 1d tensors
        GGML_FTYPE_MOSTLY_BF16          = 24, // except 1d tensors
    };
    // available tensor operations:
    enum ggml_op {
        GGML_OP_NONE = 0,

        GGML_OP_DUP,
        GGML_OP_ADD,
        GGML_OP_ADD1,
        GGML_OP_ACC,
        GGML_OP_SUB,
        GGML_OP_MUL,
        GGML_OP_DIV,
        GGML_OP_SQR,
        GGML_OP_SQRT,
        GGML_OP_LOG,
        GGML_OP_SUM,
        GGML_OP_SUM_ROWS,
        GGML_OP_MEAN,
        GGML_OP_ARGMAX,
        GGML_OP_REPEAT,
        GGML_OP_REPEAT_BACK,
        GGML_OP_CONCAT,
        GGML_OP_SILU_BACK,
        GGML_OP_NORM, // normalize
        GGML_OP_RMS_NORM,
        GGML_OP_RMS_NORM_BACK,
        GGML_OP_GROUP_NORM,

        GGML_OP_MUL_MAT,
        GGML_OP_MUL_MAT_ID,
        GGML_OP_OUT_PROD,

        GGML_OP_SCALE,
        GGML_OP_SET,
        GGML_OP_CPY,
        GGML_OP_CONT,
        GGML_OP_RESHAPE,
        GGML_OP_VIEW,
        GGML_OP_PERMUTE,
        GGML_OP_TRANSPOSE,
        GGML_OP_GET_ROWS,
        GGML_OP_GET_ROWS_BACK,
        GGML_OP_DIAG,
        GGML_OP_DIAG_MASK_INF,
        GGML_OP_DIAG_MASK_ZERO,
        GGML_OP_SOFT_MAX,
        GGML_OP_SOFT_MAX_BACK,
        GGML_OP_ROPE,
        GGML_OP_ROPE_BACK,
        GGML_OP_CLAMP,
        GGML_OP_CONV_TRANSPOSE_1D,
        GGML_OP_IM2COL,
        GGML_OP_CONV_TRANSPOSE_2D,
        GGML_OP_POOL_1D,
        GGML_OP_POOL_2D,
        GGML_OP_UPSCALE, // nearest interpolate
        GGML_OP_PAD,
        GGML_OP_ARANGE,
        GGML_OP_TIMESTEP_EMBEDDING,
        GGML_OP_ARGSORT,
        GGML_OP_LEAKY_RELU,

        GGML_OP_FLASH_ATTN,
        GGML_OP_FLASH_ATTN_EXT,
        GGML_OP_FLASH_FF,
        GGML_OP_FLASH_ATTN_BACK,
        GGML_OP_SSM_CONV,
        GGML_OP_SSM_SCAN,
        GGML_OP_WIN_PART,
        GGML_OP_WIN_UNPART,
        GGML_OP_GET_REL_POS,
        GGML_OP_ADD_REL_POS,

        GGML_OP_UNARY,

        GGML_OP_MAP_UNARY,
        GGML_OP_MAP_BINARY,

        GGML_OP_MAP_CUSTOM1_F32,
        GGML_OP_MAP_CUSTOM2_F32,
        GGML_OP_MAP_CUSTOM3_F32,

        GGML_OP_MAP_CUSTOM1,
        GGML_OP_MAP_CUSTOM2,
        GGML_OP_MAP_CUSTOM3,

        GGML_OP_CROSS_ENTROPY_LOSS,
        GGML_OP_CROSS_ENTROPY_LOSS_BACK,

        GGML_OP_COUNT,
    };
    enum ggml_unary_op {
        GGML_UNARY_OP_ABS,
        GGML_UNARY_OP_SGN,
        GGML_UNARY_OP_NEG,
        GGML_UNARY_OP_STEP,
        GGML_UNARY_OP_TANH,
        GGML_UNARY_OP_ELU,
        GGML_UNARY_OP_RELU,
        GGML_UNARY_OP_SIGMOID,
        GGML_UNARY_OP_GELU,
        GGML_UNARY_OP_GELU_QUICK,
        GGML_UNARY_OP_SILU,
        GGML_UNARY_OP_HARDSWISH,
        GGML_UNARY_OP_HARDSIGMOID,

        GGML_UNARY_OP_COUNT,
    };

    enum ggml_object_type {
        GGML_OBJECT_TYPE_TENSOR,
        GGML_OBJECT_TYPE_GRAPH,
        GGML_OBJECT_TYPE_WORK_BUFFER
    };

    enum ggml_log_level {
        GGML_LOG_LEVEL_ERROR = 2,
        GGML_LOG_LEVEL_WARN  = 3,
        GGML_LOG_LEVEL_INFO  = 4,
        GGML_LOG_LEVEL_DEBUG = 5
    };

    enum ggml_tensor_flag {
        GGML_TENSOR_FLAG_INPUT  = 1,
        GGML_TENSOR_FLAG_OUTPUT = 2,
        GGML_TENSOR_FLAG_PARAM  = 4,
    };
    // ggml object
    struct ggml_object {
        size_t offs;
        size_t size;

        struct ggml_object * next;

        enum ggml_object_type type;

        char padding[4];
    };

    static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);
    // n-dimensional tensor
    struct ggml_tensor {
        enum ggml_type type;

        GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");

        struct ggml_backend_buffer * buffer;

        int64_t ne[GGML_MAX_DIMS]; // number of elements
        size_t  nb[GGML_MAX_DIMS]; // stride in bytes:
                                   // nb[0] = ggml_type_size(type)
                                   // nb[1] = nb[0]   * (ne[0] / ggml_blck_size(type)) + padding
                                   // nb[i] = nb[i-1] * ne[i-1]
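                                   //
                                   // e.g. a contiguous F32 tensor with ne = [3, 2, 1, 1]
                                   // has nb = [4, 12, 24, 24]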
        // compute data
        enum ggml_op op;

        // op params - allocated as int32_t for alignment
        int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];

        int32_t flags;

        struct ggml_tensor * grad;
        struct ggml_tensor * src[GGML_MAX_SRC];

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;

        struct ggml_tensor * view_src;
        size_t               view_offs;

        void * data;

        char name[GGML_MAX_NAME];

        void * extra; // extra things e.g. for ggml-cuda.cu

        char padding[8];
    };

    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
    // Abort callback
    // If not NULL, called before ggml computation
    // If it returns true, the computation is aborted
    typedef bool (*ggml_abort_callback)(void * data);

    // the compute plan that needs to be prepared for ggml_graph_compute()
    // since https://github.com/ggerganov/ggml/issues/287
    struct ggml_cplan {
        size_t    work_size; // size of work buffer, calculated by `ggml_graph_plan()`
        uint8_t * work_data; // work buffer, to be allocated by caller before calling to `ggml_graph_compute()`

        int n_threads;

        // abort ggml_graph_compute when true
        ggml_abort_callback abort_callback;
        void *              abort_callback_data;
    };
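
    // typical usage (an illustrative sketch; ggml_graph_plan() and ggml_graph_compute() are
    // declared further below, error handling is omitted):
    //
    //   struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
    //   if (plan.work_size > 0) {
    //       plan.work_data = malloc(plan.work_size);
    //   }
    //   ggml_graph_compute(graph, &plan);
    //   free(plan.work_data);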
    enum ggml_cgraph_eval_order {
        GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
        GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
        GGML_CGRAPH_EVAL_ORDER_COUNT
    };

    struct ggml_hash_set {
        size_t size;
        struct ggml_tensor ** keys;
    };

    // computation graph
    struct ggml_cgraph {
        int size;
        int n_nodes;
        int n_leafs;

        struct ggml_tensor ** nodes;
        struct ggml_tensor ** grads;
        struct ggml_tensor ** leafs;

        struct ggml_hash_set visited_hash_table;

        enum ggml_cgraph_eval_order order;

        // performance
        int     perf_runs;
        int64_t perf_cycles;
        int64_t perf_time_us;
    };

    // scratch buffer
    struct ggml_scratch {
        size_t offs;
        size_t size;
        void * data;
    };

    struct ggml_init_params {
        // memory pool
        size_t mem_size;   // bytes
        void * mem_buffer; // if NULL, memory will be allocated internally
        bool   no_alloc;   // don't allocate memory for the tensor data
    };

    // compute types

    // NOTE: the INIT or FINALIZE pass is not scheduled unless explicitly enabled.
    // This behavior was changed since https://github.com/ggerganov/llama.cpp/pull/1995.
    enum ggml_task_type {
        GGML_TASK_TYPE_INIT = 0,
        GGML_TASK_TYPE_COMPUTE,
        GGML_TASK_TYPE_FINALIZE,
    };

    struct ggml_compute_params {
        enum ggml_task_type type;

        // ith = thread index, nth = number of threads
        int ith, nth;

        // work buffer for all threads
        size_t wsize;
        void * wdata;
    };

    // numa strategies
    enum ggml_numa_strategy {
        GGML_NUMA_STRATEGY_DISABLED   = 0,
        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
        GGML_NUMA_STRATEGY_ISOLATE    = 2,
        GGML_NUMA_STRATEGY_NUMACTL    = 3,
        GGML_NUMA_STRATEGY_MIRROR     = 4,
        GGML_NUMA_STRATEGY_COUNT
    };

    //
    // GUID
    //

    // GUID types
    typedef uint8_t ggml_guid[16];
    typedef ggml_guid * ggml_guid_t;
    GGML_API bool ggml_guid_matches(ggml_guid_t guid_a, ggml_guid_t guid_b);

    // misc

    GGML_API void    ggml_time_init(void); // call this once at the beginning of the program
    GGML_API int64_t ggml_time_ms(void);
    GGML_API int64_t ggml_time_us(void);
    GGML_API int64_t ggml_cycles(void);
    GGML_API int64_t ggml_cycles_per_ms(void);

    GGML_API void    ggml_print_backtrace(void);

    // accepts a UTF-8 path, even on Windows
    GGML_API FILE *  ggml_fopen(const char * fname, const char * mode);

    GGML_API void    ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
    GGML_API bool    ggml_is_numa(void); // true if init detected that system has >1 NUMA node

    GGML_API void    ggml_print_object (const struct ggml_object * obj);
    GGML_API void    ggml_print_objects(const struct ggml_context * ctx);

    GGML_API GGML_CALL int64_t ggml_nelements (const struct ggml_tensor * tensor);
    GGML_API GGML_CALL int64_t ggml_nrows     (const struct ggml_tensor * tensor);
    GGML_API GGML_CALL size_t  ggml_nbytes    (const struct ggml_tensor * tensor);
    GGML_API           size_t  ggml_nbytes_pad(const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN

    GGML_API GGML_CALL int    ggml_blck_size(enum ggml_type type);
    GGML_API GGML_CALL size_t ggml_type_size(enum ggml_type type);             // size in bytes for all elements in a block
    GGML_API GGML_CALL size_t ggml_row_size (enum ggml_type type, int64_t ne); // size in bytes for all elements in a row

    GGML_DEPRECATED(
    GGML_API double ggml_type_sizef(enum ggml_type type), // ggml_type_size()/ggml_blck_size() as float
    "use ggml_row_size() instead");

    GGML_API GGML_CALL const char * ggml_type_name(enum ggml_type type);
    GGML_API GGML_CALL const char * ggml_op_name  (enum ggml_op   op);
    GGML_API           const char * ggml_op_symbol(enum ggml_op   op);

    GGML_API           const char * ggml_unary_op_name(enum ggml_unary_op op);
    GGML_API GGML_CALL const char * ggml_op_desc(const struct ggml_tensor * t); // unary or op name

    GGML_API GGML_CALL size_t ggml_element_size(const struct ggml_tensor * tensor);

    GGML_API GGML_CALL bool   ggml_is_quantized(enum ggml_type type);

    // TODO: temporary until model loading of ggml examples is refactored
    GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);

    GGML_API GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor);
    GGML_API GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor);
    GGML_API GGML_CALL bool ggml_is_permuted  (const struct ggml_tensor * tensor);
    GGML_API GGML_CALL bool ggml_is_empty     (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_scalar    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_vector    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_matrix    (const struct ggml_tensor * tensor);
    GGML_API           bool ggml_is_3d        (const struct ggml_tensor * tensor);
    GGML_API           int  ggml_n_dims       (const struct ggml_tensor * tensor); // returns 1 for scalars

    GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1);
    GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1);

    // use this to compute the memory overhead of a tensor
    GGML_API size_t ggml_tensor_overhead(void);

    GGML_API bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes);
    // main

    GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);
    GGML_API void                  ggml_free(struct ggml_context * ctx);

    GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);

    GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
    GGML_API bool   ggml_get_no_alloc(struct ggml_context * ctx);
    GGML_API void   ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);

    GGML_API void * ggml_get_mem_buffer     (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_mem_size       (const struct ggml_context * ctx);
    GGML_API size_t ggml_get_max_tensor_size(const struct ggml_context * ctx);

    GGML_API struct ggml_tensor * ggml_new_tensor(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int                   n_dims,
            const int64_t       * ne);

    GGML_API struct ggml_tensor * ggml_new_tensor_1d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_new_tensor_2d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_new_tensor_3d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_new_tensor_4d(
            struct ggml_context * ctx,
            enum   ggml_type      type,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    GGML_API struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value);
    GGML_API struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value);

    GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
    GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src);

    // Context tensor enumeration and lookup
    GGML_API struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx);
    GGML_API struct ggml_tensor * ggml_get_next_tensor (const struct ggml_context * ctx, struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);

    GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    // Converts a flat index into coordinates
    GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
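    // e.g. with ne = [3, 2, 1, 1], the flat index i = 4 unravels to (i0, i1, i2, i3) = (1, 1, 0, 0)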
    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API float   ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);

    GGML_API GGML_CALL enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);

    GGML_API const char *         ggml_get_name   (const struct ggml_tensor * tensor);
    GGML_API struct ggml_tensor * ggml_set_name   (      struct ggml_tensor * tensor, const char * name);
    GGML_ATTRIBUTE_FORMAT(2, 3)
    GGML_API struct ggml_tensor * ggml_format_name(      struct ggml_tensor * tensor, const char * fmt, ...);
    //
    // operations on tensors with backpropagation
    //

    GGML_API struct ggml_tensor * ggml_dup(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_dup_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_add(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add_cast(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            enum   ggml_type      type);

    GGML_API struct ggml_tensor * ggml_add1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_add1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // dst = a
    // view(dst, nb1, nb2, nb3, offset) += b
    // return dst
    GGML_API struct ggml_tensor * ggml_acc(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_acc_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_sub(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sub_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_mul_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_div_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_sqr(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqr_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sqrt_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_log_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);
    // return scalar
    GGML_API struct ggml_tensor * ggml_sum(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // sums along rows; with input shape [a,b,c,d], the return shape is [1,b,c,d]
    GGML_API struct ggml_tensor * ggml_sum_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // mean along rows
    GGML_API struct ggml_tensor * ggml_mean(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // argmax along rows
    GGML_API struct ggml_tensor * ggml_argmax(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // if a is the same shape as b, and a is not a parameter, return a
    // otherwise, return a new tensor: repeat(a) to fit in b
    GGML_API struct ggml_tensor * ggml_repeat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // sums repetitions in a into shape of b
    GGML_API struct ggml_tensor * ggml_repeat_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
    // concat a and b on dim 2
    // used in stable-diffusion
    GGML_API struct ggml_tensor * ggml_concat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_abs(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_abs_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sgn_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_neg_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_step_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_tanh_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_elu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_leaky_relu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a, float negative_slope, bool inplace);

    GGML_API struct ggml_tensor * ggml_relu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sigmoid(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_sigmoid_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_gelu_quick_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    GGML_API struct ggml_tensor * ggml_silu_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_silu_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // hardswish(x) = x * relu6(x + 3) / 6
    GGML_API struct ggml_tensor * ggml_hardswish(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // hardsigmoid(x) = relu6(x + 3) / 6
    GGML_API struct ggml_tensor * ggml_hardsigmoid(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // normalize along rows
    GGML_API struct ggml_tensor * ggml_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 eps);

    // group normalize along ne0*ne1*n_groups
    // used in stable-diffusion
    // TODO: eps is hardcoded to 1e-6 for now
    GGML_API struct ggml_tensor * ggml_group_norm(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_groups);

    GGML_API struct ggml_tensor * ggml_group_norm_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_groups);

    // a - x
    // b - dy
    GGML_API struct ggml_tensor * ggml_rms_norm_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            float                 eps);
    // A: k columns, n rows => [ne03, ne02, n, k]
    // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k]
    // result is n columns, m rows => [ne03 * x, ne02 * y, m, n]
    GGML_API struct ggml_tensor * ggml_mul_mat(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
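    // for example, in ne[] terms (an illustrative sketch of the shape convention above):
    // a->ne = [2, 4, 1, 1] (k = 2, n = 4) and b->ne = [2, 3, 1, 1] (k = 2, m = 3)
    // produce a result with ne = [4, 3, 1, 1]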
    // change the precision of a matrix multiplication
    // set to GGML_PREC_F32 for higher precision (useful for phi-2)
    GGML_API void ggml_mul_mat_set_prec(
            struct ggml_tensor * a,
            enum   ggml_prec     prec);

    // indirect matrix multiplication
    GGML_API struct ggml_tensor * ggml_mul_mat_id(
            struct ggml_context * ctx,
            struct ggml_tensor  * as,
            struct ggml_tensor  * b,
            struct ggml_tensor  * ids);

    // A: m columns, n rows,
    // B: p columns, n rows,
    // result is m columns, p rows
    GGML_API struct ggml_tensor * ggml_out_prod(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    //
    // operations on tensors without backpropagation
    //

    GGML_API struct ggml_tensor * ggml_scale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 s);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_scale_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 s);
    // b -> view(a, offset, nb1, nb2, nb3), return modified a
    GGML_API struct ggml_tensor * ggml_set(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    // b -> view(a, offset, nb1, nb2, nb3), return view(a)
    GGML_API struct ggml_tensor * ggml_set_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                nb2,
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_set_1d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                offset);

    // b -> view(a, offset, nb1), return modified a
    GGML_API struct ggml_tensor * ggml_set_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);

    // b -> view(a, offset, nb1), return view(a)
    GGML_API struct ggml_tensor * ggml_set_2d_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            size_t                nb1,
            size_t                offset);
    // a -> b, return view(b)
    GGML_API struct ggml_tensor * ggml_cpy(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cast(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum   ggml_type      type);

    // make contiguous
    GGML_API struct ggml_tensor * ggml_cont(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // make contiguous, with new shape
    GGML_API struct ggml_tensor * ggml_cont_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_cont_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    GGML_API struct ggml_tensor * ggml_cont_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_cont_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);

    // return view(a), b specifies the new shape
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0);

    GGML_API struct ggml_tensor * ggml_reshape_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1);

    // return view(a)
    // TODO: when we start computing gradient, make a copy instead of view
    GGML_API struct ggml_tensor * ggml_reshape_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2);

    GGML_API struct ggml_tensor * ggml_reshape_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3);
    // offset in bytes
    GGML_API struct ggml_tensor * ggml_view_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            size_t                nb1, // row stride in bytes
            size_t                offset);
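    // e.g. a view of rows 2..4 of a contiguous matrix a (an illustrative sketch):
    //
    //   struct ggml_tensor * v = ggml_view_2d(ctx, a, a->ne[0], 3, a->nb[1], 2*a->nb[1]);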
    GGML_API struct ggml_tensor * ggml_view_3d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_view_4d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int64_t               ne0,
            int64_t               ne1,
            int64_t               ne2,
            int64_t               ne3,
            size_t                nb1, // row   stride in bytes
            size_t                nb2, // slice stride in bytes
            size_t                nb3,
            size_t                offset);

    GGML_API struct ggml_tensor * ggml_permute(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   axis0,
            int                   axis1,
            int                   axis2,
            int                   axis3);

    // alias for ggml_permute(ctx, a, 1, 0, 2, 3)
    GGML_API struct ggml_tensor * ggml_transpose(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // supports 3D: a->ne[2] == b->ne[1]
    GGML_API struct ggml_tensor * ggml_get_rows(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);
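
    // example (illustrative sketch, not part of the API): the common use is an
    // embedding lookup, where b holds int32 row indices into a
    //
    //   // tok_embd: [n_embd, n_vocab], inp_tokens: int32 [n_tokens] (hypothetical names)
    //   struct ggml_tensor * cur = ggml_get_rows(ctx, tok_embd, inp_tokens); // [n_embd, n_tokens]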

    GGML_API struct ggml_tensor * ggml_get_rows_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    GGML_API struct ggml_tensor * ggml_diag(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // set elements above the diagonal to -INF
    GGML_API struct ggml_tensor * ggml_diag_mask_inf(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_inf_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // set elements above the diagonal to 0
    GGML_API struct ggml_tensor * ggml_diag_mask_zero(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_diag_mask_zero_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   n_past);

    GGML_API struct ggml_tensor * ggml_soft_max(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a);

    // fused soft_max(a*scale + mask*(ALiBi slope))
    // mask is optional
    // max_bias = 0.0f for no ALiBi
    GGML_API struct ggml_tensor * ggml_soft_max_ext(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * mask,
            float                 scale,
            float                 max_bias);
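
    // example (illustrative sketch, not part of the API): scaled, masked
    // attention softmax without ALiBi; kq, kq_mask and n_embd_head are
    // hypothetical names
    //
    //   kq = ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf(n_embd_head), 0.0f);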

    GGML_API struct ggml_tensor * ggml_soft_max_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_soft_max_back_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // rotary position embedding
    // if mode & 1, skip n_past elements (DEPRECATED)
    // if mode & 2, GPT-NeoX style
    // if mode & 4, ChatGLM style
    //
    // b is an int32 vector of size a->ne[2]; it contains the positions
    GGML_API struct ggml_tensor * ggml_rope(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);
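
    // example (illustrative sketch, not part of the API): applying standard
    // (mode = 0) RoPE to a tensor of shape [n_embd_head, n_head, n_tokens];
    // Qcur, n_tokens and n_rot are hypothetical names
    //
    //   struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
    //   // ... fill pos with the absolute position of each token ...
    //   Qcur = ggml_rope(ctx, Qcur, pos, n_rot, 0, 0);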

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx);

    // custom RoPE
    GGML_API struct ggml_tensor * ggml_rope_custom(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow);

    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow);

    // compute correction dims for YaRN RoPE scaling
    GGML_CALL void ggml_rope_yarn_corr_dims(
            int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);

    // xPos RoPE, in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_rope_xpos_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            float                 base,
            bool                  down);

    // rotary position embedding backward, i.e. compute dx from dy
    // a - dy
    GGML_API struct ggml_tensor * ggml_rope_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   n_dims,
            int                   mode,
            int                   n_ctx,
            int                   n_orig_ctx,
            float                 freq_base,
            float                 freq_scale,
            float                 ext_factor,
            float                 attn_factor,
            float                 beta_fast,
            float                 beta_slow,
            float                 xpos_base,
            bool                  xpos_down);

    // clamp
    // in-place, returns view(a)
    GGML_API struct ggml_tensor * ggml_clamp(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            float                 min,
            float                 max);

    GGML_API struct ggml_tensor * ggml_im2col(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1,
            bool                  is_2D,
            enum ggml_type        dst_type);

    GGML_API struct ggml_tensor * ggml_conv_depthwise_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    GGML_API struct ggml_tensor * ggml_conv_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,  // stride
            int                   p0,  // padding
            int                   d0); // dilation

    // conv_1d with padding = half
    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
    GGML_API struct ggml_tensor * ggml_conv_1d_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s,
            int                   d);

    GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   p0,
            int                   d0);

    GGML_API struct ggml_tensor * ggml_conv_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   s0,
            int                   s1,
            int                   p0,
            int                   p1,
            int                   d0,
            int                   d1);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is equal to kernel size
    // padding is zero
    // example:
    // a:     16   16    3  768
    // b:   1024 1024    3    1
    // res:   64   64  768    1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    // kernel size is a->ne[0] x a->ne[1]
    // stride is 1
    // padding is half
    // example:
    // a:    3    3  256  256
    // b:   64   64  256    1
    // res: 64   64  256    1
    // used in sam
    GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            int                   stride);

    enum ggml_op_pool {
        GGML_OP_POOL_MAX,
        GGML_OP_POOL_AVG,
        GGML_OP_POOL_COUNT,
    };

    GGML_API struct ggml_tensor * ggml_pool_1d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,  // kernel size
            int                   s0,  // stride
            int                   p0); // padding

    // the result will have 2*p0 padding for the first dimension
    // and 2*p1 padding for the second dimension
    GGML_API struct ggml_tensor * ggml_pool_2d(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_op_pool     op,
            int                   k0,
            int                   k1,
            int                   s0,
            int                   s1,
            float                 p0,
            float                 p1);

    // nearest interpolate
    // multiplies ne0 and ne1 by scale factor
    // used in stable-diffusion
    GGML_API struct ggml_tensor * ggml_upscale(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   scale_factor);

    // nearest interpolate to specified dimensions
    // used in tortoise.cpp
    GGML_API struct ggml_tensor * ggml_upscale_ext(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   ne0,
            int                   ne1,
            int                   ne2,
            int                   ne3);

    // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0]
    GGML_API struct ggml_tensor * ggml_pad(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   p0,
            int                   p1,
            int                   p2,
            int                   p3);

    // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151
    // timesteps: [N,]
    // return: [N, dim]
    GGML_API struct ggml_tensor * ggml_timestep_embedding(
            struct ggml_context * ctx,
            struct ggml_tensor  * timesteps,
            int                   dim,
            int                   max_period);

    // sort rows
    enum ggml_sort_order {
        GGML_SORT_ORDER_ASC,
        GGML_SORT_ORDER_DESC,
    };

    GGML_API struct ggml_tensor * ggml_argsort(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_sort_order  order);

    GGML_API struct ggml_tensor * ggml_arange(
            struct ggml_context * ctx,
            float                 start,
            float                 stop,
            float                 step);

    // top k elements per row
    GGML_API struct ggml_tensor * ggml_top_k(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   k);

    GGML_API struct ggml_tensor * ggml_flash_attn(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            bool                  masked);

#define GGML_KQ_MASK_PAD 32

    // q:    [n_embd, n_batch,     n_head,    1]
    // k:    [n_embd, n_kv,        n_head_kv, 1]
    // v:    [n_embd, n_kv,        n_head_kv, 1] !! not transposed !!
    // mask: [n_kv,   n_batch_pad, 1,         1] !! n_batch_pad = GGML_PAD(n_batch, GGML_KQ_MASK_PAD) !!
    // res:  [n_embd, n_head,      n_batch,   1] !! permuted !!
    GGML_API struct ggml_tensor * ggml_flash_attn_ext(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * mask,
            float                 scale,
            float                 max_bias);
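
    // example (illustrative sketch, not part of the API): q, k, v and kq_mask
    // are hypothetical tensors with the shapes documented above; scale is the
    // usual 1/sqrt(head_dim), and max_bias = 0.0f disables ALiBi
    //
    //   cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, 1.0f/sqrtf(n_embd_head), 0.0f);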

    GGML_API void ggml_flash_attn_ext_set_prec(
            struct ggml_tensor * a,
            enum ggml_prec       prec);

    GGML_API struct ggml_tensor * ggml_flash_attn_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * q,
            struct ggml_tensor  * k,
            struct ggml_tensor  * v,
            struct ggml_tensor  * d,
            bool                  masked);

    GGML_API struct ggml_tensor * ggml_flash_ff(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b0,
            struct ggml_tensor  * b1,
            struct ggml_tensor  * c0,
            struct ggml_tensor  * c1);

    GGML_API struct ggml_tensor * ggml_ssm_conv(
            struct ggml_context * ctx,
            struct ggml_tensor  * s,
            struct ggml_tensor  * x,
            struct ggml_tensor  * c,
            struct ggml_tensor  * sq);

    GGML_API struct ggml_tensor * ggml_ssm_scan(
            struct ggml_context * ctx,
            struct ggml_tensor  * s,
            struct ggml_tensor  * x,
            struct ggml_tensor  * dt,
            struct ggml_tensor  * A,
            struct ggml_tensor  * B,
            struct ggml_tensor  * C,
            struct ggml_tensor  * sq);

    // partition into non-overlapping windows with padding if needed
    // example:
    // a:   768   64   64    1
    // w:    14
    // res: 768   14   14   25
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_part(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w);

    // reverse of ggml_win_part
    // used in sam
    GGML_API struct ggml_tensor * ggml_win_unpart(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   w0,
            int                   h0,
            int                   w);

    GGML_API struct ggml_tensor * ggml_unary(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    GGML_API struct ggml_tensor * ggml_unary_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            enum ggml_unary_op    op);

    // used in sam
    GGML_API struct ggml_tensor * ggml_get_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            int                   qh,
            int                   kh);

    // used in sam
    GGML_API struct ggml_tensor * ggml_add_rel_pos(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * pw,
            struct ggml_tensor  * ph);

    GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * pw,
            struct ggml_tensor  * ph);

    // custom operators

    typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
    typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);

    typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
    typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_unary_op_f32_t   fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_binary_op_f32_t  fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun),
        "use ggml_map_custom1 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_f32_t fun),
        "use ggml_map_custom1_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun),
        "use ggml_map_custom2 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_f32_t fun),
        "use ggml_map_custom2_inplace instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun),
        "use ggml_map_custom3 instead");

    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_f32_t fun),
        "use ggml_map_custom3_inplace instead");

    // custom operators v2

    typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
    typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
    typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

#define GGML_N_TASKS_MAX -1

    GGML_API struct ggml_tensor * ggml_map_custom1(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);
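
    // example (illustrative sketch, not part of the API): a custom op that
    // scales every element by 2; the work is split across nth threads, with
    // thread ith handling elements i = ith, ith+nth, ... (my_scale2 and x are
    // hypothetical names, and the sketch assumes contiguous F32 tensors)
    //
    //   static void my_scale2(struct ggml_tensor * dst, const struct ggml_tensor * a,
    //                         int ith, int nth, void * userdata) {
    //       const int64_t n = ggml_nelements(dst);
    //       const float * src_d = (const float *) a->data;
    //       float       * dst_d = (float *) dst->data;
    //       for (int64_t i = ith; i < n; i += nth) {
    //           dst_d[i] = 2.0f*src_d[i];
    //       }
    //   }
    //
    //   struct ggml_tensor * out = ggml_map_custom1(ctx, x, my_scale2, GGML_N_TASKS_MAX, NULL);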

    GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            ggml_custom1_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            ggml_custom2_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c,
            ggml_custom3_op_t     fun,
            int                   n_tasks,
            void                * userdata);

    // loss function

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b);

    GGML_API struct ggml_tensor * ggml_cross_entropy_loss_back(
            struct ggml_context * ctx,
            struct ggml_tensor  * a,
            struct ggml_tensor  * b,
            struct ggml_tensor  * c);

    //
    // automatic differentiation
    //

    GGML_API void ggml_set_param(
            struct ggml_context * ctx,
            struct ggml_tensor  * tensor);

    GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
    GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);
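
    // example (illustrative sketch, not part of the API): mark x as a trainable
    // parameter, build the forward graph for a loss f, then expand the backward
    // graph; x and f are hypothetical, and the graph functions used here are
    // declared in the next section below
    //
    //   ggml_set_param(ctx, x); // x will receive a gradient
    //   struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, true);
    //   ggml_build_forward_expand(gf, f);
    //   struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
    //   ggml_build_backward_expand(ctx, gf, gb, false);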

    // graph allocation in a context
    GGML_API struct ggml_cgraph * ggml_new_graph        (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
    GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);
    GGML_API struct ggml_cgraph * ggml_graph_dup        (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
    GGML_API struct ggml_cgraph   ggml_graph_view       (struct ggml_cgraph * cgraph, int i0, int i1);
    GGML_API void                 ggml_graph_cpy        (struct ggml_cgraph * src, struct ggml_cgraph * dst);
    GGML_API void                 ggml_graph_reset      (struct ggml_cgraph * cgraph); // zero grads
    GGML_API void                 ggml_graph_clear      (struct ggml_cgraph * cgraph);

    GGML_API size_t ggml_graph_overhead(void);
    GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);

    // ggml_graph_plan() has to be called before ggml_graph_compute()
    // when plan.work_size > 0, caller must allocate memory for plan.work_data
    GGML_API struct ggml_cplan ggml_graph_plan   (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
    GGML_API enum ggml_status  ggml_graph_compute(      struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
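
    // example (illustrative sketch, not part of the API): plan, allocate the
    // work buffer if needed, then compute; buf is a hypothetical name
    //
    //   struct ggml_cplan plan = ggml_graph_plan(gf, GGML_DEFAULT_N_THREADS);
    //   uint8_t * buf = NULL;
    //   if (plan.work_size > 0) {
    //       buf = malloc(plan.work_size);
    //       plan.work_data = buf;
    //   }
    //   ggml_graph_compute(gf, &plan);
    //   free(buf);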

    // same as ggml_graph_compute() but the work data is allocated as a part of the context
    // note: the drawback of this API is that the caller must ensure the context has enough memory for the work data
    GGML_API enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads);

    GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);

    GGML_API void                 ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
    GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);

    // print info and performance stats for the graph
    GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);

    // dump the graph into a file using the dot format
    GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);

    // build gradient checkpointing backward graph gb for gf using provided checkpoints
    // gb_tmp will contain the original backward graph with rewritten backward process nodes,
    // but without the second forward pass nodes.
    GGML_API void ggml_build_backward_gradient_checkpointing(
            struct ggml_context * ctx,
            struct ggml_cgraph  * gf,
            struct ggml_cgraph  * gb,
            struct ggml_cgraph  * gb_tmp,
            struct ggml_tensor ** checkpoints,
            int                   n_checkpoints);

    //
    // optimization
    //

    // optimization methods
    enum ggml_opt_type {
        GGML_OPT_TYPE_ADAM,
        GGML_OPT_TYPE_LBFGS,
    };

    // linesearch methods
    enum ggml_linesearch {
        GGML_LINESEARCH_DEFAULT = 1,

        GGML_LINESEARCH_BACKTRACKING_ARMIJO       = 0,
        GGML_LINESEARCH_BACKTRACKING_WOLFE        = 1,
        GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE = 2,
    };

    // optimization return values
    enum ggml_opt_result {
        GGML_OPT_RESULT_OK = 0,
        GGML_OPT_RESULT_DID_NOT_CONVERGE,
        GGML_OPT_RESULT_NO_CONTEXT,
        GGML_OPT_RESULT_INVALID_WOLFE,
        GGML_OPT_RESULT_FAIL,
        GGML_OPT_RESULT_CANCEL,

        GGML_LINESEARCH_FAIL = -128,
        GGML_LINESEARCH_MINIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_STEP,
        GGML_LINESEARCH_MAXIMUM_ITERATIONS,
        GGML_LINESEARCH_INVALID_PARAMETERS,
    };

    typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
    typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);

    // optimization parameters
    //
    //   see ggml.c (ggml_opt_default_params) for default values
    //
    struct ggml_opt_params {
        enum ggml_opt_type type;

        size_t graph_size;

        int n_threads;

        // delta-based convergence test
        //
        //   if past == 0 - disabled
        //   if past > 0:
        //     stop if |f(x) - f(x_past)| < delta * max(1, |f(x)|)
        //
        int past;
        float delta;

        // maximum number of iterations without improvement
        //
        //   if 0 - disabled
        //   if > 0:
        //     assume convergence if no cost improvement in this number of iterations
        //
        int max_no_improvement;

        bool print_forward_graph;
        bool print_backward_graph;

        int n_gradient_accumulation;

        // ADAM parameters
        struct {
            int n_iter;

            float sched; // schedule multiplier (fixed, decay or warmup)
            float decay; // weight decay for AdamW, use 0.0f to disable
            int   decay_min_ndim; // minimum number of tensor dimensions to apply weight decay
            float alpha; // learning rate
            float beta1;
            float beta2;
            float eps;   // epsilon for numerical stability
            float eps_f; // epsilon for convergence test
            float eps_g; // epsilon for convergence test
            float gclip; // gradient clipping
        } adam;

        // LBFGS parameters
        struct {
            int m; // number of corrections to approximate the inv. Hessian
            int n_iter;
            int max_linesearch;

            float eps;  // convergence tolerance
            float ftol; // line search tolerance
            float wolfe;
            float min_step;
            float max_step;

            enum ggml_linesearch linesearch;
        } lbfgs;
    };

    struct ggml_opt_context {
        struct ggml_context * ctx;
        struct ggml_opt_params params;

        int iter;
        int64_t nx; // number of parameter elements

        bool just_initialized;

        float loss_before;
        float loss_after;

        struct {
            struct ggml_tensor * g;  // current gradient
            struct ggml_tensor * m;  // first moment
            struct ggml_tensor * v;  // second moment
            struct ggml_tensor * pf; // past function values
            float fx_best;
            float fx_prev;
            int n_no_improvement;
        } adam;

        struct {
            struct ggml_tensor * x;    // current parameters
            struct ggml_tensor * xp;   // previous parameters
            struct ggml_tensor * g;    // current gradient
            struct ggml_tensor * gp;   // previous gradient
            struct ggml_tensor * d;    // search direction
            struct ggml_tensor * pf;   // past function values
            struct ggml_tensor * lmal; // the L-BFGS memory alpha
            struct ggml_tensor * lmys; // the L-BFGS memory ys
            struct ggml_tensor * lms;  // the L-BFGS memory s
            struct ggml_tensor * lmy;  // the L-BFGS memory y
            float fx_best;
            float step;
            int j;
            int k;
            int end;
            int n_no_improvement;
        } lbfgs;
    };

    GGML_API struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type);

    // optimize the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt(
            struct ggml_context * ctx,
            struct ggml_opt_params params,
            struct ggml_tensor * f);
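
    // example (illustrative sketch, not part of the API): minimize a scalar
    // loss tensor f with the ADAM defaults; f is a hypothetical graph output
    // whose parameters were marked with ggml_set_param
    //
    //   struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
    //   enum ggml_opt_result res = ggml_opt(ctx, params, f);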

    // initialize optimizer context
    GGML_API void ggml_opt_init(
            struct ggml_context     * ctx,
            struct ggml_opt_context * opt,
            struct ggml_opt_params    params,
            int64_t                   nx);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume(
            struct ggml_context     * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor      * f);

    // continue optimizing the function defined by the tensor f
    GGML_API enum ggml_opt_result ggml_opt_resume_g(
            struct ggml_context     * ctx,
            struct ggml_opt_context * opt,
            struct ggml_tensor      * f,
            struct ggml_cgraph      * gf,
            struct ggml_cgraph      * gb,
            ggml_opt_callback         callback,
            void                    * callback_data);

    //
    // tensor flags
    //
    GGML_API void ggml_set_input (struct ggml_tensor * tensor);
    GGML_API void ggml_set_output(struct ggml_tensor * tensor);

    //
    // quantization
    //

    // - ggml_quantize_init can be called multiple times with the same type
    //   it will only initialize the quantization tables for the first call or after ggml_quantize_free
    //   automatically called by ggml_quantize_chunk for convenience
    //
    // - ggml_quantize_free will free any memory allocated by ggml_quantize_init
    //   call this at the end of the program to avoid memory leaks
    //
    // note: these are thread-safe
    //
    GGML_API void ggml_quantize_init(enum ggml_type type);
    GGML_API void ggml_quantize_free(void);

    // some quantization types cannot be used without an importance matrix
    GGML_API bool ggml_quantize_requires_imatrix(enum ggml_type type);

    // calls ggml_quantize_init internally (i.e. can allocate memory)
    GGML_API size_t ggml_quantize_chunk(
            enum ggml_type   type,
            const float    * src,
            void           * dst,
            int64_t          start,
            int64_t          nrows,
            int64_t          n_per_row,
            const float    * imatrix);
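
    // example (illustrative sketch, not part of the API): quantize nrows rows
    // of F32 data to Q8_0, which does not require an importance matrix, so
    // imatrix may be NULL (src, dst, nrows and n_per_row are hypothetical)
    //
    //   size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q8_0, src, dst, 0, nrows, n_per_row, NULL);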

    //
    // gguf
    //

    enum gguf_type {
        GGUF_TYPE_UINT8   = 0,
        GGUF_TYPE_INT8    = 1,
        GGUF_TYPE_UINT16  = 2,
        GGUF_TYPE_INT16   = 3,
        GGUF_TYPE_UINT32  = 4,
        GGUF_TYPE_INT32   = 5,
        GGUF_TYPE_FLOAT32 = 6,
        GGUF_TYPE_BOOL    = 7,
        GGUF_TYPE_STRING  = 8,
        GGUF_TYPE_ARRAY   = 9,
        GGUF_TYPE_UINT64  = 10,
        GGUF_TYPE_INT64   = 11,
        GGUF_TYPE_FLOAT64 = 12,
        GGUF_TYPE_COUNT, // marks the end of the enum
    };

    struct gguf_context;

    struct gguf_init_params {
        bool no_alloc;

        // if not NULL, create a ggml_context and allocate the tensor data in it
        struct ggml_context ** ctx;
    };

    GGML_API struct gguf_context * gguf_init_empty(void);
    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
    //GGML_API struct gguf_context * gguf_init_from_buffer(..);

    GGML_API void gguf_free(struct gguf_context * ctx);

    GGML_API const char * gguf_type_name(enum gguf_type type);

    GGML_API int    gguf_get_version    (const struct gguf_context * ctx);
    GGML_API size_t gguf_get_alignment  (const struct gguf_context * ctx);
    GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx);
    GGML_API void * gguf_get_data       (const struct gguf_context * ctx);

    GGML_API int          gguf_get_n_kv(const struct gguf_context * ctx);
    GGML_API int          gguf_find_key(const struct gguf_context * ctx, const char * key);
    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);

    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);

    // will abort if the wrong type is used for the key
    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int key_id);
    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int key_id);
    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id);
    GGML_API int          gguf_get_arr_n   (const struct gguf_context * ctx, int key_id);
    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);

    GGML_API int            gguf_get_n_tensors    (const struct gguf_context * ctx);
    GGML_API int            gguf_find_tensor      (const struct gguf_context * ctx, const char * name);
    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int i);
    GGML_API char *         gguf_get_tensor_name  (const struct gguf_context * ctx, int i);
    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int i);
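
    // example (illustrative sketch, not part of the API): read the metadata
    // and tensor data of a gguf file into a fresh ggml_context, then list the
    // keys; fname and meta are hypothetical names
    //
    //   struct ggml_context * meta = NULL;
    //   struct gguf_init_params params = { /*.no_alloc =*/ false, /*.ctx =*/ &meta };
    //   struct gguf_context * gctx = gguf_init_from_file(fname, params);
    //   for (int i = 0; i < gguf_get_n_kv(gctx); i++) {
    //       printf("%s\n", gguf_get_key(gctx, i));
    //   }
    //   gguf_free(gctx);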

    // removes key if it exists
    GGML_API void gguf_remove_key(struct gguf_context * ctx, const char * key);

    // overrides existing values or adds a new one
    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t  val);
    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t   val);
    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t  val);
    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t  val);
    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float    val);
    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t  val);
    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double   val);
    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool     val);
    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);

    // set or add KV pairs from another context
    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);

    // manage tensor info
    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);

    // writing gguf files can be done in 2 ways:
    //
    // - write the entire gguf_context to a binary file in a single pass:
    //
    //   gguf_write_to_file(ctx, fname, false);
    //
    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
    //
    //   FILE * f = fopen(fname, "wb");
    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
    //   fwrite(..., f); // write the tensor data
    //   void * data = malloc(gguf_get_meta_size(ctx));
    //   gguf_get_meta_data(ctx, data);
    //   fseek(f, 0, SEEK_SET);
    //   fwrite(data, 1, gguf_get_meta_size(ctx), f);
    //   free(data);
    //   fclose(f);
    //
    // write the entire context to a binary file
    GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);

    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);

    //
    // system info
    //

    GGML_API int ggml_cpu_has_avx        (void);
    GGML_API int ggml_cpu_has_avx_vnni   (void);
    GGML_API int ggml_cpu_has_avx2       (void);
    GGML_API int ggml_cpu_has_avx512     (void);
    GGML_API int ggml_cpu_has_avx512_vbmi(void);
    GGML_API int ggml_cpu_has_avx512_vnni(void);
    GGML_API int ggml_cpu_has_fma        (void);
    GGML_API int ggml_cpu_has_neon       (void);
    GGML_API int ggml_cpu_has_arm_fma    (void);
    GGML_API int ggml_cpu_has_metal      (void);
    GGML_API int ggml_cpu_has_f16c       (void);
    GGML_API int ggml_cpu_has_fp16_va    (void);
    GGML_API int ggml_cpu_has_wasm_simd  (void);
    GGML_API int ggml_cpu_has_blas       (void);
    GGML_API int ggml_cpu_has_cuda       (void);
    GGML_API int ggml_cpu_has_clblast    (void);
    GGML_API int ggml_cpu_has_vulkan     (void);
    GGML_API int ggml_cpu_has_kompute    (void);
    GGML_API int ggml_cpu_has_gpublas    (void);
    GGML_API int ggml_cpu_has_sse3       (void);
    GGML_API int ggml_cpu_has_ssse3      (void);
    GGML_API int ggml_cpu_has_sycl       (void);
    GGML_API int ggml_cpu_has_vsx        (void);
    GGML_API int ggml_cpu_has_matmul_int8(void);

    //
    // Internal types and functions exposed for tests and benchmarks
    //

#ifdef __cplusplus
// restrict not standard in C++
#define GGML_RESTRICT
#else
#define GGML_RESTRICT restrict
#endif
    typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int64_t k);
    typedef void (*ggml_vec_dot_t)   (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
                                      const void * GGML_RESTRICT y, size_t by, int nrc);

    typedef struct {
        const char      * type_name;
        int               blck_size;
        size_t            type_size;
        bool              is_quantized;
        ggml_to_float_t   to_float;
        ggml_from_float_t from_float;
        ggml_from_float_t from_float_reference;
        ggml_vec_dot_t    vec_dot;
        enum ggml_type    vec_dot_type;
        int64_t           nrows; // number of rows to process simultaneously
    } ggml_type_traits_t;

    GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);

#ifdef __cplusplus
}
#endif