package llama

//go:generate make -j 8

/*
#cgo CFLAGS: -O2 -std=c11 -DGGML_BUILD=1 -DNDEBUG -DLOG_DISABLE_LOGS -DGGML_USE_LLAMAFILE
#cgo CXXFLAGS: -O2 -std=c++11 -DGGML_BUILD=1 -DNDEBUG -DLOG_DISABLE_LOGS -DGGML_USE_LLAMAFILE
#cgo amd64,avx CFLAGS: -mavx
#cgo amd64,avx CXXFLAGS: -mavx
#cgo amd64,avx2 CFLAGS: -mavx2 -mfma
#cgo amd64,avx2 CXXFLAGS: -mavx2 -mfma
#cgo amd64,f16c CFLAGS: -mf16c
#cgo amd64,f16c CXXFLAGS: -mf16c
#cgo amd64,fma CFLAGS: -mfma
#cgo amd64,fma CXXFLAGS: -mfma
#cgo avx CFLAGS: -mavx
#cgo avx CXXFLAGS: -mavx
#cgo avx2 CFLAGS: -mavx2 -mfma -mf16c
#cgo avx2 CXXFLAGS: -mavx2 -mfma -mf16c
#cgo cuda CFLAGS: -fPIE -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
#cgo cuda CXXFLAGS: -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
#cgo cuda_v11 LDFLAGS: -lggml_cuda_v11 -L/usr/local/cuda-11/lib64
#cgo cuda_v12 LDFLAGS: -lggml_cuda_v12 -L/usr/local/cuda-12/lib64
#cgo darwin,amd64 CFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
#cgo darwin,amd64 CXXFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
#cgo darwin,amd64 LDFLAGS: -framework Foundation
#cgo darwin,amd64,avx2 CFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
#cgo darwin,amd64,avx2 CXXFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
#cgo darwin,amd64,avx2 LDFLAGS: -framework Accelerate
#cgo darwin,arm64 CFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_BLAS
#cgo darwin,arm64 CXXFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_BLAS
#cgo darwin,arm64 LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework Accelerate
#cgo linux CFLAGS: -D_GNU_SOURCE
#cgo linux CXXFLAGS: -D_GNU_SOURCE
#cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/Linux/amd64
#cgo linux,arm64 CFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -D__ARM_FEATURE_MATMUL_INT8
#cgo linux,arm64 CXXFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -D__ARM_FEATURE_MATMUL_INT8
#cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/Linux/arm64
#cgo linux,arm64,sve CFLAGS: -march=armv8.6-a+sve
#cgo linux,arm64,sve CXXFLAGS: -march=armv8.6-a+sve
#cgo linux,cuda LDFLAGS: -lcuda -lcudart -lcublas -lcublasLt -lpthread -ldl -lrt -lresolv
#cgo linux,rocm LDFLAGS: -L/opt/rocm/lib -lpthread -ldl -lrt -lresolv
#cgo rocm CFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIPBLAS -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
#cgo rocm CXXFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIPBLAS -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
#cgo rocm LDFLAGS: -L${SRCDIR} -lggml_rocm -lhipblas -lamdhip64 -lrocblas
#cgo windows CFLAGS: -Wno-discarded-qualifiers -D_WIN32_WINNT=0x602
#cgo windows CXXFLAGS: -D_WIN32_WINNT=0x602
#cgo windows LDFLAGS: -lmsvcrt
#cgo windows LDFLAGS: -lmsvcrt -static-libstdc++ -static-libgcc -static
#cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/Windows/amd64
#cgo windows,arm64 CFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA
#cgo windows,arm64 CXXFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA
#cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/Windows/arm64
#cgo windows,cuda LDFLAGS: -lcuda -lcudart -lcublas -lcublasLt
#cgo windows,rocm LDFLAGS: -lggml_rocm -lhipblas -lamdhip64 -lrocblas

#include <stdlib.h>
#include "llama.h"
#include "clip.h"
#include "ggml.h"
#include "llava.h"
#include "mllama.h"
#include "sampling_ext.h"

bool llamaProgressCallback(float progress, void *user_data);

typedef enum { COMP_UNKNOWN, COMP_GCC, COMP_CLANG } COMPILER;

static COMPILER inline get_compiler() {
#if defined(__clang__)
	return COMP_CLANG;
#elif defined(__GNUC__)
	return COMP_GCC;
#else
	return COMP_UNKNOWN;
#endif
}
*/
import "C"

import (
	_ "embed"
	"errors"
	"fmt"
	"runtime"
	"runtime/cgo"
	"slices"
	"strings"
	"unsafe"
)

var CpuFeatures = ""

func BackendInit() {
	C.llama_backend_init()
}

func PrintSystemInfo() string {
	var compiler string
	switch C.get_compiler() {
	case C.COMP_UNKNOWN:
		compiler = "cgo(unknown_compiler)"
	case C.COMP_GCC:
		compiler = "cgo(gcc)"
	case C.COMP_CLANG:
		compiler = "cgo(clang)"
	}

	return C.GoString(C.llama_print_system_info()) + compiler
}

func GetModelArch(modelPath string) (string, error) {
	mp := C.CString(modelPath)
	defer C.free(unsafe.Pointer(mp))

	gguf_ctx := C.gguf_init_from_file(mp, C.struct_gguf_init_params{no_alloc: true, ctx: (**C.struct_ggml_context)(C.NULL)})
	if gguf_ctx == nil {
		return "", errors.New("unable to load model file")
	}
	defer C.gguf_free(gguf_ctx)

	key := C.CString("general.architecture")
	defer C.free(unsafe.Pointer(key))

	arch_index := C.gguf_find_key(gguf_ctx, key)
	if int(arch_index) < 0 {
		return "", errors.New("unknown model architecture")
	}

	arch := C.gguf_get_val_str(gguf_ctx, arch_index)
	return C.GoString(arch), nil
}

type ContextParams struct {
	c C.struct_llama_context_params
}

func NewContextParams(numCtx int, batchSize int, numSeqMax int, threads int, flashAttention bool) ContextParams {
	params := C.llama_context_default_params()
	params.n_ctx = C.uint(numCtx)
	params.n_batch = C.uint(batchSize)
	params.n_seq_max = C.uint(numSeqMax)
	params.n_threads = C.int(threads)
	params.n_threads_batch = params.n_threads
	params.embeddings = C.bool(true)
	params.flash_attn = C.bool(flashAttention)
	return ContextParams{c: params}
}

type Context struct {
	c          *C.struct_llama_context
	numThreads int
}

func (c *Context) KvCacheClear() {
	C.llama_kv_cache_clear(c.c)
}

func (c *Context) Decode(batch *Batch) error {
	// Positive return values do not signal a fatal error, only a warning:
	//   0  - success
	//   1  - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
	//   <0 - error
	code := int(C.llama_decode(c.c, batch.c))
	if code < 0 {
		return fmt.Errorf("llama_decode failed with code %d", code)
	}
	if code > 0 {
		return fmt.Errorf("could not find a KV slot for the batch - try reducing the size of the batch or increase the context. code: %d", code)
	}

	return nil
}

func (c *Context) Model() *Model {
	return &Model{c: C.llama_get_model(c.c)}
}

func (c *Context) KvCacheSeqAdd(seqId int, p0 int, p1 int, delta int) {
	C.llama_kv_cache_seq_add(c.c, C.int(seqId), C.int(p0), C.int(p1), C.int(delta))
}

func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool {
	return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1)))
}

func (c *Context) KvCacheSeqCp(srcSeqId int, dstSeqId int, p0 int, p1 int) {
	C.llama_kv_cache_seq_cp(c.c, C.int(srcSeqId), C.int(dstSeqId), C.int(p0), C.int(p1))
}

// GetEmbeddingsSeq returns the pooled embeddings for a sequence id
func (c *Context) GetEmbeddingsSeq(seqId int) []float32 {
	embeddings := unsafe.Pointer(C.llama_get_embeddings_seq(c.c, C.int(seqId)))
	if embeddings == nil {
		return nil
	}

	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
}

// GetEmbeddingsIth returns the embeddings for the i-th entry of the last decoded batch
func (c *Context) GetEmbeddingsIth(i int) []float32 {
	embeddings := unsafe.Pointer(C.llama_get_embeddings_ith(c.c, C.int32_t(i)))
	if embeddings == nil {
		return nil
	}

	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
}
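
// exampleSequenceEmbedding is a minimal usage sketch and is not part of the
// original API surface: it shows how a caller might retrieve an embedding after
// decoding with embeddings enabled in ContextParams, preferring the pooled
// sequence embedding and falling back to the per-entry embedding at index i.
// The function name and parameters are illustrative assumptions.
func exampleSequenceEmbedding(ctx *Context, seqId int, i int) ([]float32, error) {
	// pooled embedding for the whole sequence, if the pooling type provides one
	if e := ctx.GetEmbeddingsSeq(seqId); e != nil {
		return e, nil
	}

	// otherwise fall back to the embedding of the i-th entry of the last batch
	if e := ctx.GetEmbeddingsIth(i); e != nil {
		return e, nil
	}

	return nil, errors.New("no embeddings available")
}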

type ModelParams struct {
	NumGpuLayers int
	MainGpu      int
	UseMmap      bool
	UseMlock     bool
	TensorSplit  []float32
	Progress     func(float32)
	VocabOnly    bool
}

//export llamaProgressCallback
func llamaProgressCallback(progress C.float, userData unsafe.Pointer) C.bool {
	handle := *(*cgo.Handle)(userData)
	callback := handle.Value().(func(float32))
	callback(float32(progress))
	return true
}

func LoadModelFromFile(modelPath string, params ModelParams) (*Model, error) {
	cparams := C.llama_model_default_params()
	cparams.n_gpu_layers = C.int(params.NumGpuLayers)
	cparams.main_gpu = C.int32_t(params.MainGpu)
	cparams.use_mmap = C.bool(params.UseMmap)
	cparams.use_mlock = C.bool(params.UseMlock)
	cparams.vocab_only = C.bool(params.VocabOnly)

	if len(params.TensorSplit) > 0 {
		tensorSplitData := &params.TensorSplit[0]

		var tensorSplitPin runtime.Pinner
		tensorSplitPin.Pin(tensorSplitData)
		defer tensorSplitPin.Unpin()

		cparams.tensor_split = (*C.float)(unsafe.Pointer(tensorSplitData))
	}

	if params.Progress != nil {
		handle := cgo.NewHandle(params.Progress)
		defer handle.Delete()

		var handlePin runtime.Pinner
		handlePin.Pin(&handle)
		defer handlePin.Unpin()

		cparams.progress_callback = C.llama_progress_callback(C.llamaProgressCallback)
		cparams.progress_callback_user_data = unsafe.Pointer(&handle)
	}

	cPath := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cPath))

	m := Model{c: C.llama_load_model_from_file(cPath, cparams)}
	if m.c == nil {
		return nil, fmt.Errorf("unable to load model: %s", modelPath)
	}

	return &m, nil
}

func FreeModel(model *Model) {
	C.llama_free_model(model.c)
}

func NewContextWithModel(model *Model, params ContextParams) (*Context, error) {
	c := Context{
		c:          C.llama_new_context_with_model(model.c, params.c),
		numThreads: int(params.c.n_threads),
	}
	if c.c == nil {
		return nil, errors.New("unable to create llama context")
	}

	return &c, nil
}
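
// exampleLoadModel is a minimal usage sketch, not part of the original file:
// it wires BackendInit, LoadModelFromFile and NewContextWithModel together,
// including an optional load-progress callback. All parameter values
// (layer count, context size, batch size, thread count) are illustrative.
func exampleLoadModel(modelPath string) (*Model, *Context, error) {
	BackendInit()

	model, err := LoadModelFromFile(modelPath, ModelParams{
		NumGpuLayers: 0,    // CPU only in this sketch
		UseMmap:      true, // map the weights rather than reading them into memory
		Progress: func(p float32) {
			fmt.Printf("loading: %.0f%%\n", p*100)
		},
	})
	if err != nil {
		return nil, nil, err
	}

	// 2048-token context, batch size 512, a single sequence, 4 threads, no flash attention
	ctx, err := NewContextWithModel(model, NewContextParams(2048, 512, 1, 4, false))
	if err != nil {
		FreeModel(model)
		return nil, nil, err
	}

	return model, ctx, nil
}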

func (m *Model) NumVocab() int {
	return int(C.llama_n_vocab(m.c))
}

func (m *Model) TokenIsEog(token int) bool {
	return bool(C.llama_token_is_eog(m.c, C.llama_token(token)))
}

func (m *Model) AddBOSToken() bool {
	return bool(C.llama_add_bos_token(m.c))
}

func (m *Model) ApplyLoraFromFile(context *Context, loraPath string, scale float32, threads int) error {
	cLoraPath := C.CString(loraPath)
	defer C.free(unsafe.Pointer(cLoraPath))

	loraAdapter := C.llama_lora_adapter_init(m.c, cLoraPath)
	if loraAdapter == nil {
		return errors.New("unable to load lora")
	}

	if rc := C.llama_lora_adapter_set(context.c, loraAdapter, C.float(scale)); rc != 0 {
		return errors.New("error applying lora from file")
	}

	return nil
}

type Batch struct {
	c         C.struct_llama_batch
	batchSize int
	maxSeq    int
	embedSize int
}

// NewBatch creates a new batch for either word tokens or image embeddings (if embedSize is non-zero).
// Batches cannot contain both types at the same time. batchSize is the maximum number of entries
// that can be added per sequence.
func NewBatch(batchSize int, maxSeq int, embedSize int) (*Batch, error) {
	b := Batch{
		c:         C.llama_batch_init(C.int(batchSize*maxSeq), C.int(embedSize), C.int(maxSeq)),
		batchSize: batchSize,
		maxSeq:    maxSeq,
		embedSize: embedSize,
	}

	// Check to see if any of the allocations in llama_batch_init() failed
	nilPointer := (embedSize == 0 && b.c.token == nil) || (embedSize != 0 && b.c.embd == nil) ||
		b.c.pos == nil || b.c.n_seq_id == nil || b.c.seq_id == nil || b.c.logits == nil ||
		slices.Contains(unsafe.Slice(b.c.seq_id, b.allocSize()), nil)
	if nilPointer {
		C.llama_batch_free(b.c)
		return nil, fmt.Errorf("unable to allocate batch (batchSize=%v maxSeq=%v embedSize=%v)", batchSize, maxSeq, embedSize)
	}

	return &b, nil
}

func (b *Batch) Size() int {
	return b.batchSize
}

func (b *Batch) allocSize() int {
	return b.batchSize * b.maxSeq
}

func (b *Batch) NumTokens() int {
	return int(b.c.n_tokens)
}

func (b *Batch) IsEmbedding() bool {
	return b.embedSize != 0
}

// Add adds either a token or an image embedding to the batch, depending on the type
// the batch was initialized with; the other argument is ignored. The entry is added
// at the given position for the given sequence ids, and logits can optionally be
// requested for it.
func (b *Batch) Add(token int, embed []float32, pos int, logits bool, seqIds ...int) {
	if !b.IsEmbedding() {
		unsafe.Slice(b.c.token, b.allocSize())[b.c.n_tokens] = C.llama_token(token)
	} else {
		copy(unsafe.Slice((*float32)(b.c.embd), b.allocSize()*b.embedSize)[int(b.c.n_tokens)*b.embedSize:], embed)
	}
	unsafe.Slice(b.c.pos, b.allocSize())[b.c.n_tokens] = C.llama_pos(pos)
	unsafe.Slice(b.c.n_seq_id, b.allocSize())[b.c.n_tokens] = C.int(len(seqIds))

	for i, s := range seqIds {
		unsafe.Slice((unsafe.Slice(b.c.seq_id, b.allocSize())[b.c.n_tokens]), C.int(len(seqIds)))[i] = C.int32_t(s)
	}

	if logits {
		unsafe.Slice(b.c.logits, b.allocSize())[b.c.n_tokens] = 1
	}

	b.c.n_tokens += 1
}
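
// exampleDecodePrompt is an illustrative sketch, not part of the original API:
// it feeds already-tokenized prompt tokens through Decode in chunks of the batch
// size, requesting logits only for the final prompt token. The function name and
// parameters are assumptions made for the example.
func exampleDecodePrompt(ctx *Context, batch *Batch, tokens []int) error {
	pos := 0
	for pos < len(tokens) {
		batch.Clear()
		for batch.NumTokens() < batch.Size() && pos < len(tokens) {
			last := pos == len(tokens)-1
			// token batch: the embed argument is ignored; everything goes to sequence 0
			batch.Add(tokens[pos], nil, pos, last, 0)
			pos++
		}

		if err := ctx.Decode(batch); err != nil {
			return err
		}
	}

	return nil
}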

func (b *Batch) Clear() {
	b.c.n_tokens = 0
}

func (b *Batch) Free() {
	b.batchSize = 0
	C.llama_batch_free(b.c)
}

type Model struct {
	c *C.struct_llama_model
}

func (m *Model) TokenToPiece(token int) string {
	tokenLen := 12
	buf := make([]byte, tokenLen)
	tokenLen = int(C.llama_token_to_piece(
		m.c,
		C.int32_t(token),
		(*C.char)(unsafe.Pointer(&buf[0])),
		C.int32_t(tokenLen),
		C.int32_t(0),
		C.bool(true),
	))
	if tokenLen < 0 {
		tokenLen = -tokenLen

		buf = make([]byte, tokenLen)
		C.llama_token_to_piece(
			m.c,
			C.int32_t(token),
			(*C.char)(unsafe.Pointer(&buf[0])),
			C.int32_t(tokenLen),
			C.int32_t(0),
			C.bool(true),
		)
	}

	return strings.TrimRight(string(buf), "\x00")
}

func (m *Model) Tokenize(text string, addSpecial bool, parseSpecial bool) ([]int, error) {
	maxTokens := len(text) + 2
	cTokens := make([]C.llama_token, maxTokens)
	cText := C.CString(text)
	defer C.free(unsafe.Pointer(cText))

	result := C.llama_tokenize(
		m.c,
		cText,
		C.int32_t(len(text)),
		&cTokens[0],
		C.int32_t(maxTokens),
		C.bool(addSpecial),
		C.bool(parseSpecial),
	)

	// if the result is negative, reallocate and retry with the correct buffer size
	if result < 0 {
		maxTokens = int(-result)
		cTokens = make([]C.llama_token, maxTokens)
		result = C.llama_tokenize(
			m.c,
			cText,
			C.int32_t(len(text)),
			&cTokens[0],
			C.int32_t(maxTokens),
			C.bool(addSpecial),
			C.bool(parseSpecial),
		)
		if result < 0 {
			return nil, fmt.Errorf("tokenization failed, required %d tokens", -result)
		}
	}

	tokens := make([]int, result)
	for i := range result {
		tokens[i] = int(cTokens[i])
	}

	return tokens, nil
}
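
// exampleRoundTrip is a small, illustrative sketch (not part of the original
// file) pairing Tokenize with TokenToPiece to rebuild the input text from its
// tokens. The addSpecial/parseSpecial values are assumptions for the example.
func exampleRoundTrip(m *Model, text string) (string, error) {
	tokens, err := m.Tokenize(text, false, true)
	if err != nil {
		return "", err
	}

	var sb strings.Builder
	for _, t := range tokens {
		sb.WriteString(m.TokenToPiece(t))
	}

	return sb.String(), nil
}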

func (m *Model) NEmbd() int {
	return int(C.llama_n_embd(m.c))
}

func Quantize(infile, outfile string, ftype uint32) error {
	cinfile := C.CString(infile)
	defer C.free(unsafe.Pointer(cinfile))

	coutfile := C.CString(outfile)
	defer C.free(unsafe.Pointer(coutfile))

	params := C.llama_model_quantize_default_params()
	params.nthread = -1
	params.ftype = ftype

	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
		return fmt.Errorf("llama_model_quantize: %d", rc)
	}

	return nil
}

// vision processing
type ClipContext struct {
	c *C.struct_clip_ctx
}

func NewClipContext(llamaContext *Context, modelPath string) (*ClipContext, error) {
	mp := C.CString(modelPath)
	defer C.free(unsafe.Pointer(mp))
	c := C.clip_model_load(mp, 1)
	if c == nil {
		return nil, fmt.Errorf("unable to load clip model: %v", modelPath)
	}

	projEmbedSize := int(C.clip_n_mmproj_embd(c))
	modelEmbedSize := llamaContext.Model().NEmbd()
	if projEmbedSize != modelEmbedSize {
		return nil, fmt.Errorf("projector embedding size (%d) does not match model (%d)", projEmbedSize, modelEmbedSize)
	}

	return &ClipContext{c: c}, nil
}

func (c *ClipContext) Free() {
	C.clip_free(c.c)
}

func (c *ClipContext) NewEmbed(llamaContext *Context, data []byte) ([][]float32, error) {
	l := C.llava_image_embed_make_with_bytes(c.c, C.int(llamaContext.numThreads), (*C.uchar)(unsafe.Pointer(&data[0])), C.int(len(data)))
	if l == nil {
		return nil, errors.New("unable to make llava embedding from image")
	}

	numTokens := int(l.n_image_pos)
	numEmbed := llamaContext.Model().NEmbd()

	s := unsafe.Slice((*float32)(l.embed), numEmbed*numTokens)

	embed := make([][]float32, numTokens)
	rows := make([]float32, len(s))
	copy(rows, s)

	for i := range embed {
		embed[i] = rows[i*numEmbed : (i+1)*numEmbed]
	}

	C.llava_image_embed_free(l)

	return embed, nil
}
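
// exampleAddImage is a hedged sketch, not part of the original file: it shows
// one way the rows returned by ClipContext.NewEmbed might be placed into an
// embedding batch, one row per image position, and then decoded. The function
// name, parameters and startPos handling are illustrative assumptions.
func exampleAddImage(ctx *Context, clip *ClipContext, imageData []byte, startPos int) error {
	embed, err := clip.NewEmbed(ctx, imageData)
	if err != nil {
		return err
	}

	// embedding batch: embedSize must match the model's embedding width
	batch, err := NewBatch(len(embed), 1, ctx.Model().NEmbd())
	if err != nil {
		return err
	}
	defer batch.Free()

	for i, row := range embed {
		// embedding batch: the token argument is ignored; sequence 0, no logits
		batch.Add(0, row, startPos+i, false, 0)
	}

	return ctx.Decode(batch)
}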

type MllamaContext struct {
	c *C.struct_mllama_ctx
}

func NewMllamaContext(llamaContext *Context, modelPath string) (*MllamaContext, error) {
	mp := C.CString(modelPath)
	defer C.free(unsafe.Pointer(mp))
	c := C.mllama_model_load(mp, 1)
	if c == nil {
		return nil, fmt.Errorf("unable to load mllama model: %v", modelPath)
	}

	projEmbedSize := int(C.mllama_n_embd(c))
	modelEmbedSize := llamaContext.Model().NEmbd()
	if projEmbedSize != modelEmbedSize {
		return nil, fmt.Errorf("projector embedding size (%d) does not match model (%d)", projEmbedSize, modelEmbedSize)
	}

	return &MllamaContext{c: c}, nil
}

func (m *MllamaContext) Free() {
	C.mllama_free(m.c)
}

func (m *MllamaContext) NewEmbed(llamaContext *Context, data []byte, aspectRatioId int) ([][]float32, error) {
	img := C.mllama_image_init()
	defer C.mllama_image_free(img)

	ok := bool(C.mllama_image_load_from_data(unsafe.Pointer(&data[0]), C.int(len(data)), 560, 560, 3, 4, C.int(aspectRatioId), img))
	if !ok {
		return nil, errors.New("unable to load mllama image data")
	}

	rows := make([]float32, m.EmbedSize(llamaContext))
	ok = bool(C.mllama_image_encode(m.c, C.int(llamaContext.numThreads), img, (*C.float)(unsafe.Pointer(&rows[0]))))
	if !ok {
		return nil, errors.New("unable to make mllama embedding from image")
	}

	embed := make([][]float32, 1)
	embed[0] = rows

	return embed, nil
}

func (m *MllamaContext) EmbedSize(llamaContext *Context) int {
	numTokens := int(C.mllama_n_positions(m.c) * C.mllama_n_tiles(m.c))
	numEmbed := llamaContext.Model().NEmbd()

	return numTokens * numEmbed
}

func (c *Context) SetCrossAttention(state bool) {
	C.llama_set_cross_attention(c.c, C.bool(state))
}

// sampling
// TODO: this is a temporary wrapper to allow calling C++ code from CGo
type SamplingContext struct {
	c *C.struct_gpt_sampler
}

type SamplingParams struct {
	TopK           int
	TopP           float32
	MinP           float32
	TfsZ           float32
	TypicalP       float32
	Temp           float32
	RepeatLastN    int
	PenaltyRepeat  float32
	PenaltyFreq    float32
	PenaltyPresent float32
	Mirostat       int
	MirostatTau    float32
	MirostatEta    float32
	PenalizeNl     bool
	Seed           uint32
	Grammar        string
}

func NewSamplingContext(model *Model, params SamplingParams) (*SamplingContext, error) {
	var cparams C.struct_gpt_sampler_cparams
	cparams.top_k = C.int32_t(params.TopK)
	cparams.top_p = C.float(params.TopP)
	cparams.min_p = C.float(params.MinP)
	cparams.tfs_z = C.float(params.TfsZ)
	cparams.typical_p = C.float(params.TypicalP)
	cparams.temp = C.float(params.Temp)
	cparams.penalty_last_n = C.int32_t(params.RepeatLastN)
	cparams.penalty_repeat = C.float(params.PenaltyRepeat)
	cparams.penalty_freq = C.float(params.PenaltyFreq)
	cparams.penalty_present = C.float(params.PenaltyPresent)
	cparams.mirostat = C.int32_t(params.Mirostat)
	cparams.mirostat_tau = C.float(params.MirostatTau)
	cparams.mirostat_eta = C.float(params.MirostatEta)
	cparams.penalize_nl = C.bool(params.PenalizeNl)
	cparams.seed = C.uint32_t(params.Seed)

	grammar := C.CString(params.Grammar)
	defer C.free(unsafe.Pointer(grammar))

	cparams.grammar = grammar
	context := &SamplingContext{c: C.gpt_sampler_cinit(model.c, &cparams)}
	if context.c == nil {
		return nil, errors.New("unable to create sampling context")
	}

	runtime.SetFinalizer(context, func(s *SamplingContext) { C.gpt_sampler_cfree(s.c) })

	return context, nil
}

func (s *SamplingContext) Reset() {
	C.gpt_sampler_creset(s.c)
}

func (s *SamplingContext) Sample(llamaContext *Context, idx int) int {
	return int(C.gpt_sampler_csample(s.c, llamaContext.c, C.int(idx)))
}

func (s *SamplingContext) Accept(id int, applyGrammar bool) {
	C.gpt_sampler_caccept(s.c, C.llama_token(id), C.bool(applyGrammar))
}
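
// exampleGenerate is a minimal, illustrative generation loop and is not part of
// the original file: sample from the logits of the last decoded entry, accept
// the token into the sampler state, stop at an end-of-generation token, and
// decode the sampled token to produce logits for the next step. lastIdx is the
// index within the previously decoded batch that had logits enabled; the
// function name, parameters and stopping behavior are assumptions.
func exampleGenerate(ctx *Context, sampler *SamplingContext, lastIdx int, startPos int, maxTokens int) (string, error) {
	model := ctx.Model()

	// a single-entry token batch reused for each generated token
	batch, err := NewBatch(1, 1, 0)
	if err != nil {
		return "", err
	}
	defer batch.Free()

	var sb strings.Builder
	pos := startPos
	for range maxTokens {
		// sample from the logits produced at lastIdx by the previous Decode call
		token := sampler.Sample(ctx, lastIdx)
		sampler.Accept(token, true)

		if model.TokenIsEog(token) {
			break
		}
		sb.WriteString(model.TokenToPiece(token))

		// decode the sampled token so the next iteration has fresh logits
		batch.Clear()
		batch.Add(token, nil, pos, true, 0)
		pos++
		if err := ctx.Decode(batch); err != nil {
			return "", err
		}
		lastIdx = 0 // the only entry in this single-token batch
	}

	return sb.String(), nil
}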