llama.go 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217
  1. // MIT License
  2. // Copyright (c) 2023 go-skynet authors
  3. // Permission is hereby granted, free of charge, to any person obtaining a copy
  4. // of this software and associated documentation files (the "Software"), to deal
  5. // in the Software without restriction, including without limitation the rights
  6. // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  7. // copies of the Software, and to permit persons to whom the Software is
  8. // furnished to do so, subject to the following conditions:
  9. // The above copyright notice and this permission notice shall be included in all
  10. // copies or substantial portions of the Software.
  11. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  12. // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  13. // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  14. // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  15. // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  16. // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  17. // SOFTWARE.
  18. package llama
  19. // #cgo LDFLAGS: -Lbuild -lbinding -lllama -lm -lggml_static -lstdc++
  20. // #cgo CXXFLAGS: -std=c++11
  21. // #cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
  22. // #include "binding/binding.h"
  23. // #include <stdlib.h>
  24. import "C"
  25. import (
  26. "fmt"
  27. "strings"
  28. "sync"
  29. "unsafe"
  30. )
// LLama wraps the opaque C-side llama context together with the load-time
// options that Go callers later need to consult.
type LLama struct {
	ctx         unsafe.Pointer // opaque handle returned by C.load_model; freed via Free
	embeddings  bool           // whether the model was loaded with embeddings enabled
	contextSize int            // context window size requested at load time
}
  36. func New(model string, mo ModelOptions) (*LLama, error) {
  37. modelPath := C.CString(model)
  38. defer C.free(unsafe.Pointer(modelPath))
  39. ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
  40. if ctx == nil {
  41. return nil, fmt.Errorf("failed loading model")
  42. }
  43. ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
  44. return ll, nil
  45. }
// Free releases the C-side model and context held by l. The receiver must
// not be used after Free returns.
func (l *LLama) Free() {
	C.llama_binding_free_model(l.ctx)
}
  49. func (l *LLama) Eval(text string, opts ...PredictOption) error {
  50. po := NewPredictOptions(opts...)
  51. input := C.CString(text)
  52. if po.Tokens == 0 {
  53. po.Tokens = 99999999
  54. }
  55. defer C.free(unsafe.Pointer(input))
  56. reverseCount := len(po.StopPrompts)
  57. reversePrompt := make([]*C.char, reverseCount)
  58. var pass **C.char
  59. for i, s := range po.StopPrompts {
  60. cs := C.CString(s)
  61. reversePrompt[i] = cs
  62. pass = &reversePrompt[0]
  63. defer C.free(unsafe.Pointer(cs))
  64. }
  65. cLogitBias := C.CString(po.LogitBias)
  66. defer C.free(unsafe.Pointer(cLogitBias))
  67. cMainGPU := C.CString(po.MainGPU)
  68. defer C.free(unsafe.Pointer(cMainGPU))
  69. cTensorSplit := C.CString(po.TensorSplit)
  70. defer C.free(unsafe.Pointer(cTensorSplit))
  71. params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
  72. C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
  73. C.bool(po.IgnoreEOS), C.bool(po.F16KV),
  74. C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
  75. C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
  76. C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
  77. C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
  78. )
  79. defer C.llama_free_params(params)
  80. ret := C.eval(params, l.ctx, input)
  81. if ret != 0 {
  82. return fmt.Errorf("inference failed")
  83. }
  84. return nil
  85. }
  86. func (l *LLama) Predict(text string, po PredictOptions) (string, error) {
  87. if po.TokenCallback != nil {
  88. setCallback(l.ctx, po.TokenCallback)
  89. }
  90. input := C.CString(text)
  91. if po.Tokens == 0 {
  92. po.Tokens = 99999999
  93. }
  94. defer C.free(unsafe.Pointer(input))
  95. out := make([]byte, po.Tokens)
  96. reverseCount := len(po.StopPrompts)
  97. reversePrompt := make([]*C.char, reverseCount)
  98. var pass **C.char
  99. for i, s := range po.StopPrompts {
  100. cs := C.CString(s)
  101. reversePrompt[i] = cs
  102. pass = &reversePrompt[0]
  103. defer C.free(unsafe.Pointer(cs))
  104. }
  105. cLogitBias := C.CString(po.LogitBias)
  106. defer C.free(unsafe.Pointer(cLogitBias))
  107. cMainGPU := C.CString(po.MainGPU)
  108. defer C.free(unsafe.Pointer(cMainGPU))
  109. cTensorSplit := C.CString(po.TensorSplit)
  110. defer C.free(unsafe.Pointer(cTensorSplit))
  111. params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
  112. C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
  113. C.bool(po.IgnoreEOS), C.bool(po.F16KV),
  114. C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
  115. C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
  116. C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
  117. C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
  118. )
  119. defer C.llama_free_params(params)
  120. ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
  121. if ret != 0 {
  122. return "", fmt.Errorf("inference failed")
  123. }
  124. res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
  125. res = strings.TrimPrefix(res, " ")
  126. res = strings.TrimPrefix(res, text)
  127. res = strings.TrimPrefix(res, "\n")
  128. for _, s := range po.StopPrompts {
  129. res = strings.TrimRight(res, s)
  130. }
  131. if po.TokenCallback != nil {
  132. setCallback(l.ctx, nil)
  133. }
  134. return res, nil
  135. }
// CGo only allows us to use static calls from C to Go, we can't just dynamically pass in func's.
// This is the next best thing: we register the callbacks in a map keyed by the context pointer
// and call tokenCallback from the C code. We also attach a finalizer to LLama, so it will
// unregister the callback when the garbage collection frees it.

// SetTokenCallback registers a callback for the individual tokens created when running Predict. It
// will be called once for each token. The callback shall return true as long as the model should
// continue predicting the next token. When the callback returns false the predictor will return.
// The tokens are just converted into Go strings, they are not trimmed or otherwise changed. Also
// the tokens may not be valid UTF-8.
// Pass in nil to remove a callback.
//
// It is safe to call this method while a prediction is running.
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
	setCallback(l.ctx, callback)
}
var (
	m sync.Mutex // guards callbacks
	// callbacks maps a C context pointer (as uintptr) to the token callback
	// registered for that context; see tokenCallback and setCallback.
	callbacks = map[uintptr]func(string) bool{}
)
  155. //export tokenCallback
  156. func tokenCallback(statePtr unsafe.Pointer, token *C.char) bool {
  157. m.Lock()
  158. defer m.Unlock()
  159. if callback, ok := callbacks[uintptr(statePtr)]; ok {
  160. return callback(C.GoString(token))
  161. }
  162. return true
  163. }
  164. // setCallback can be used to register a token callback for LLama. Pass in a nil callback to
  165. // remove the callback.
  166. func setCallback(statePtr unsafe.Pointer, callback func(string) bool) {
  167. m.Lock()
  168. defer m.Unlock()
  169. if callback == nil {
  170. delete(callbacks, uintptr(statePtr))
  171. } else {
  172. callbacks[uintptr(statePtr)] = callback
  173. }
  174. }