llama.go

package llama

// #cgo CFLAGS: -std=c11 -DNDEBUG -DLOG_DISABLE_LOGS
// #cgo CXXFLAGS: -std=c++11 -DNDEBUG -DLOG_DISABLE_LOGS
// #cgo darwin,arm64 CFLAGS: -DGGML_USE_METAL -DGGML_METAL_EMBED_LIBRARY -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,arm64 CXXFLAGS: -DGGML_USE_METAL -DGGML_METAL_EMBED_LIBRARY -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
// #cgo darwin,arm64 LDFLAGS: -ld_classic ${SRCDIR}/ggml-metal.o -framework Foundation -framework Metal -framework MetalKit -framework Accelerate
// #cgo darwin,amd64 CFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
// #cgo darwin,amd64 CXXFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers
// #cgo darwin,amd64 LDFLAGS: -ld_classic -framework Foundation -framework Accelerate
// #cgo linux CFLAGS: -D_GNU_SOURCE
// #cgo linux CXXFLAGS: -D_GNU_SOURCE
// #cgo windows LDFLAGS: -lmsvcrt
// #cgo avx CFLAGS: -mavx
// #cgo avx CXXFLAGS: -mavx
// #cgo avx2 CFLAGS: -mavx2 -mfma
// #cgo avx2 CXXFLAGS: -mavx2 -mfma
// #cgo cuda CFLAGS: -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
// #cgo cuda CXXFLAGS: -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
// #cgo rocm CFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIPBLAS -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
// #cgo rocm CXXFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIPBLAS -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1
// #cgo rocm LDFLAGS: -L${SRCDIR} -lggml-hipblas -lhipblas -lamdhip64 -lrocblas
// #cgo windows,cuda LDFLAGS: -L. -L"C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.3/lib/x64" -lggml-cuda -lcuda -lcudart -lcublas -lcublasLt
// #cgo windows,rocm LDFLAGS: -L. -L"C:/Program Files/AMD/ROCm/5.7/lib"
// #cgo linux,cuda LDFLAGS: -L${SRCDIR} -L/usr/local/cuda/lib64 -lggml-cuda -lcuda -lcudart -lcublas -lcublasLt -lpthread -ldl -lrt
// #cgo linux,rocm LDFLAGS: -L/opt/rocm/lib
// #include <stdlib.h>
// #include "llama.h"
// #include "clip.h"
// #include "llava.h"
import "C"

import (
	"fmt"
	"runtime"
	"strings"
	"unsafe"

	"github.com/ollama/ollama/llm"
)

type Token int32
type Pos int32
type SeqId int32

// PrintSystemInfo is an unused example of calling llama.cpp functions using cgo.
func PrintSystemInfo() string {
	return C.GoString(C.llama_print_system_info())
}

func BackendInit() {
	C.llama_backend_init()
}
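
// exampleInit is a hedged usage sketch, not part of the original file. It
// assumes the backend should be initialized once per process before any model
// is loaded, and simply prints the llama.cpp system info afterwards.
func exampleInit() {
	BackendInit()
	fmt.Println(PrintSystemInfo())
}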

type ContextParams struct {
	c C.struct_llama_context_params
}

func NewContextParams() ContextParams {
	params := C.llama_context_default_params()
	params.seed = C.uint(1234)
	params.n_ctx = C.uint(2048)
	params.n_threads = C.uint(runtime.NumCPU())
	params.n_threads_batch = params.n_threads
	return ContextParams{c: params}
}

type ModelParams struct {
	c C.struct_llama_model_params
}

func NewModelParams() ModelParams {
	params := C.llama_model_default_params()
	params.n_gpu_layers = 999
	return ModelParams{c: params}
}

type Context struct {
	c *C.struct_llama_context
}

func (c *Context) Decode(batch Batch) error {
	// Positive return values do not mean a fatal error, but rather a warning:
	//   0 - success
	//   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
	// < 0 - error
	code := int(C.llama_decode(c.c, batch.c))
	if code < 0 {
		return fmt.Errorf("llama_decode failed with code %d", code)
	}

	if code > 0 {
		return fmt.Errorf("could not find a KV slot for the batch - try reducing the size of the batch or increase the context. code: %d", code)
	}

	return nil
}

func (c *Context) GetModel() *Model {
	return &Model{c: C.llama_get_model(c.c)}
}

func (c *Context) SampleTokenGreedy(batch Batch) Token {
	nv := c.GetModel().NumVocab()

	// TODO(jmorganca): split this up into different functions
	candidates := (*C.struct_llama_token_data)(C.malloc(C.size_t(nv) * C.size_t(unsafe.Sizeof(C.struct_llama_token_data{}))))
	defer C.free(unsafe.Pointer(candidates))

	// get most recent logits
	logits := C.llama_get_logits_ith(c.c, C.int(batch.NumTokens()-1))
	for i := 0; i < int(nv); i++ {
		ptr := (*C.struct_llama_token_data)(unsafe.Pointer(uintptr(unsafe.Pointer(candidates)) + uintptr(i)*unsafe.Sizeof(C.struct_llama_token_data{})))
		ptr.id = C.int(i)
		ptr.logit = unsafe.Slice(logits, nv)[i]
		ptr.p = 0.0
	}

	return Token(C.llama_sample_token_greedy(c.c, &C.llama_token_data_array{
		data:   candidates,
		size:   C.size_t(nv),
		sorted: C.bool(false),
	}))
}

func LoadModelFromFile(modelPath string, params ModelParams) *Model {
	cPath := C.CString(modelPath)
	defer C.free(unsafe.Pointer(cPath))
	return &Model{c: C.llama_load_model_from_file(cPath, params.c)}
}

func NewContextWithModel(model *Model, params ContextParams) *Context {
	return &Context{c: C.llama_new_context_with_model(model.c, params.c)}
}
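
// exampleLoadModel is a hedged sketch, not part of the original file, showing
// how the constructors above are expected to compose: initialize the backend,
// load a model with default params, then create a context on top of it. The
// model path is a placeholder.
func exampleLoadModel() (*Model, *Context) {
	BackendInit()
	model := LoadModelFromFile("/path/to/model.gguf", NewModelParams())
	lc := NewContextWithModel(model, NewContextParams())
	return model, lc
}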

func (m *Model) NumVocab() int {
	return int(C.llama_n_vocab(m.c))
}

func (m *Model) TokenIsEog(token Token) bool {
	return bool(C.llama_token_is_eog(m.c, C.llama_token(token)))
}

type Batch struct {
	c C.struct_llama_batch
}

func NewBatch(nTokens int, embd int, maxSeq int) Batch {
	return Batch{c: C.llama_batch_init(C.int(nTokens), C.int(embd), C.int(maxSeq))}
}

func (b *Batch) NumTokens() int {
	return int(b.c.n_tokens)
}

func (b *Batch) Add(token Token, pos Pos, seqIds []SeqId, logits bool) {
	unsafe.Slice(b.c.token, 512)[b.c.n_tokens] = C.llama_token(token)
	unsafe.Slice(b.c.pos, 512)[b.c.n_tokens] = C.llama_pos(pos)
	unsafe.Slice(b.c.n_seq_id, 512)[b.c.n_tokens] = C.int(len(seqIds))

	for i, s := range seqIds {
		unsafe.Slice((unsafe.Slice(b.c.seq_id, 512)[b.c.n_tokens]), C.int(len(seqIds)))[i] = C.int32_t(s)
	}

	if logits {
		unsafe.Slice(b.c.logits, 512)[b.c.n_tokens] = 1
	}

	b.c.n_tokens += 1
}

func (b *Batch) Clear() {
	b.c.n_tokens = 0
}
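
// exampleFillBatch is a hedged sketch, not part of the original file, showing
// how a batch is assumed to be filled token by token: Clear resets the count,
// Add appends one token at a given position for sequence 0, and only the last
// token requests logits. It assumes len(tokens) <= 512, mirroring the
// hard-coded slice length used in Add above.
func exampleFillBatch(tokens []Token) Batch {
	batch := NewBatch(512, 0, 1)
	batch.Clear()
	for i, t := range tokens {
		batch.Add(t, Pos(i), []SeqId{0}, i == len(tokens)-1)
	}
	return batch
}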

// LLAMA_API struct llama_batch llama_batch_get_one(
//
//	llama_token  * tokens,
//	int32_t        n_tokens,
//	llama_pos      pos_0,
//	llama_seq_id   seq_id);
func BatchGetOne(tokens []Token, pos0 Pos, seqId SeqId) Batch {
	return Batch{c: C.llama_batch_get_one((*C.int)(unsafe.Pointer(&tokens[0])), C.int32_t(len(tokens)), C.int(pos0), C.int(seqId))}
}
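
// exampleGenerateGreedy is a hedged sketch, not part of the original file,
// tying the pieces together: the prompt is tokenized, evaluated with
// BatchGetOne, then tokens are sampled greedily one at a time until an
// end-of-generation token appears or maxTokens is reached. The 512-token
// prompt limit, the non-empty prompt, and the single sequence id 0 are
// assumptions for illustration.
func exampleGenerateGreedy(lc *Context, prompt string, maxTokens int) (string, error) {
	model := lc.GetModel()
	tokens, err := model.Tokenize(prompt, 512, true, true)
	if err != nil {
		return "", err
	}

	var out strings.Builder
	pos := Pos(0)
	batch := BatchGetOne(tokens, pos, 0)
	for i := 0; i < maxTokens; i++ {
		if err := lc.Decode(batch); err != nil {
			return out.String(), err
		}
		pos += Pos(batch.NumTokens())

		next := lc.SampleTokenGreedy(batch)
		if model.TokenIsEog(next) {
			break
		}
		out.WriteString(model.TokenToPiece(next))

		batch = BatchGetOne([]Token{next}, pos, 0)
	}
	return out.String(), nil
}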

type Model struct {
	c *C.struct_llama_model
}

func (m *Model) TokenToPiece(token Token) string {
	buf := make([]byte, 12)
	C.llama_token_to_piece(
		m.c,
		C.int32_t(token),
		(*C.char)(unsafe.Pointer(&buf[0])),
		C.int32_t(len(buf)),
		C.bool(true),
	)
	return strings.TrimRight(string(buf), "\x00")
}

func (m *Model) Tokenize(text string, maxTokens int, addSpecial bool, parseSpecial bool) ([]Token, error) {
	cTokens := make([]C.llama_token, maxTokens)
	cText := C.CString(text)
	defer C.free(unsafe.Pointer(cText))

	result := C.llama_tokenize(
		m.c,
		cText,
		C.int32_t(len(text)),
		&cTokens[0],
		C.int32_t(maxTokens),
		C.bool(addSpecial),
		C.bool(parseSpecial),
	)

	if result < 0 {
		return nil, fmt.Errorf("tokenization failed, required %d tokens", -result)
	}

	tokens := make([]Token, result)
	for i := 0; i < int(result); i++ {
		tokens[i] = Token(cTokens[i])
	}

	return tokens, nil
}
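
// exampleRoundTrip is a hedged sketch, not part of the original file: it
// tokenizes text with special-token handling enabled and reassembles it piece
// by piece with TokenToPiece. The 2048-token limit is an arbitrary assumption.
func exampleRoundTrip(m *Model, text string) (string, error) {
	tokens, err := m.Tokenize(text, 2048, true, true)
	if err != nil {
		return "", err
	}

	var sb strings.Builder
	for _, t := range tokens {
		sb.WriteString(m.TokenToPiece(t))
	}
	return sb.String(), nil
}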

func Quantize(infile, outfile string, ftype llm.FileType) error {
	cinfile := C.CString(infile)
	defer C.free(unsafe.Pointer(cinfile))

	coutfile := C.CString(outfile)
	defer C.free(unsafe.Pointer(coutfile))

	params := C.llama_model_quantize_default_params()
	params.nthread = -1
	params.ftype = ftype.Value()

	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
		return fmt.Errorf("llama_model_quantize: %d", rc)
	}

	return nil
}
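
// exampleQuantize is a hedged sketch, not part of the original file, showing
// the expected call shape: an input model file, an output path, and a target
// file type taken from the llm package. Both paths are placeholders and the
// concrete FileType value is left to the caller.
func exampleQuantize(ftype llm.FileType) error {
	return Quantize("/path/to/input.gguf", "/path/to/output.gguf", ftype)
}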

type ClipContext struct {
	c *C.struct_clip_ctx
}

func NewClipContext(modelPath string) *ClipContext {
	mp := C.CString(modelPath)
	defer C.free(unsafe.Pointer(mp))
	cc := C.clip_model_load(mp, 1)
	return &ClipContext{c: cc}
}

type LlavaContext struct {
	c *C.struct_llava_context
}

type LlavaImageEmbed struct {
	c *C.struct_llava_image_embed
}

func NewLlavaImageEmbed(clipContext *ClipContext, data []byte) *LlavaImageEmbed {
	return &LlavaImageEmbed{c: C.llava_image_embed_make_with_bytes(clipContext.c, C.int(runtime.NumCPU()), (*C.uchar)(unsafe.Pointer(&data[0])), C.int(len(data)))}
}

func LlavaEvalImageEmbed(llamaContext *Context, embed *LlavaImageEmbed, nBatch int, nPast *int) {
	C.llava_eval_image_embed(llamaContext.c, embed.c, C.int(nBatch), (*C.int)(unsafe.Pointer(nPast)))
}
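
// exampleEvalImage is a hedged sketch, not part of the original file, of how
// the clip/llava helpers above are assumed to be used: load a clip model,
// turn raw image bytes into an embedding, and evaluate that embedding into
// the llama context before decoding the surrounding text. nPast tracks the
// number of positions consumed so far; the batch size of 512 is an assumption.
func exampleEvalImage(lc *Context, clipPath string, imageBytes []byte, nPast *int) {
	clipCtx := NewClipContext(clipPath)
	embed := NewLlavaImageEmbed(clipCtx, imageBytes)
	LlavaEvalImageEmbed(lc, embed, 512, nPast)
}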