llama.go

package llama

/*
#cgo CPPFLAGS: -O3 -DNDEBUG=1
#cgo CXXFLAGS: -std=c++11
#cgo darwin CPPFLAGS: -DGGML_USE_METAL=1 -DGGML_METAL_NDEBUG=1
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders

#include <stdlib.h>
#include "llama.h"
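
// llama_sample_options bundles the sampling parameters from api.Options so
// they can be passed to llama_sample as a single struct.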
struct llama_sample_options
{
	float repeat_penalty;
	float frequency_penalty;
	float presence_penalty;
	float temperature;
	int32_t top_k;
	float top_p;
	float tfs_z;
	float typical_p;
	int mirostat;
	float mirostat_tau;
	float mirostat_eta;
};
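
// llama_sample applies the repetition and frequency/presence penalties, then
// picks the next token: greedy when temperature <= 0, mirostat v1/v2 when
// requested, otherwise top-k, tail-free, typical, and top-p filtering
// followed by temperature sampling.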
llama_token llama_sample(
	struct llama_context *ctx,
	struct llama_token_data *candidates,
	size_t n_candidates,
	const llama_token *last_tokens,
	size_t n_last_tokens,
	struct llama_sample_options *opts)
{
	llama_token_data_array candidates_p = {
		candidates,
		n_candidates,
		false,
	};

	llama_sample_repetition_penalty(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->repeat_penalty);

	llama_sample_frequency_and_presence_penalties(
		ctx, &candidates_p,
		last_tokens, n_last_tokens,
		opts->frequency_penalty, opts->presence_penalty);

	if (opts->temperature <= 0) {
		return llama_sample_token_greedy(ctx, &candidates_p);
	}

	if (opts->mirostat == 1) {
		int mirostat_m = 100;
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			mirostat_m, &mirostat_mu);
	} else if (opts->mirostat == 2) {
		float mirostat_mu = 2.0f * opts->mirostat_tau;
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token_mirostat_v2(
			ctx, &candidates_p,
			opts->mirostat_tau, opts->mirostat_eta,
			&mirostat_mu);
	} else {
		llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
		llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
		llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
		llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
		llama_sample_temperature(ctx, &candidates_p, opts->temperature);
		return llama_sample_token(ctx, &candidates_p);
	}
}
*/
import "C"

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
	"time"
	"unicode/utf8"
	"unsafe"

	"github.com/jmorganca/ollama/api"
)
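
// llama wraps a loaded llama.cpp model and its inference context together
// with the generation options supplied by the caller.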
type llama struct {
	params *C.struct_llama_context_params
	model  *C.struct_llama_model
	ctx    *C.struct_llama_context

	api.Options
}
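
// New loads the model file at the given path, initializes a llama.cpp
// context from opts, and warms the model up by evaluating a single BOS token.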
func New(model string, opts api.Options) (*llama, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	llm := llama{Options: opts}

	C.llama_backend_init(C.bool(llm.UseNUMA))

	params := C.llama_context_default_params()
	params.seed = C.uint(llm.Seed)
	params.n_ctx = C.int(llm.NumCtx)
	params.n_batch = C.int(llm.NumBatch)
	params.n_gpu_layers = C.int(llm.NumGPU)
	params.main_gpu = C.int(llm.MainGPU)
	params.low_vram = C.bool(llm.LowVRAM)
	params.f16_kv = C.bool(llm.F16KV)
	params.logits_all = C.bool(llm.LogitsAll)
	params.vocab_only = C.bool(llm.VocabOnly)
	params.use_mmap = C.bool(llm.UseMMap)
	params.use_mlock = C.bool(llm.UseMLock)
	params.embedding = C.bool(llm.EmbeddingOnly)
	llm.params = &params

	cModel := C.CString(model)
	defer C.free(unsafe.Pointer(cModel))

	llm.model = C.llama_load_model_from_file(cModel, params)
	if llm.model == nil {
		return nil, errors.New("failed to load model")
	}

	llm.ctx = C.llama_new_context_with_model(llm.model, params)
	if llm.ctx == nil {
		return nil, errors.New("failed to create context")
	}

	// warm up the model
	bos := []C.llama_token{C.llama_token_bos()}
	C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
	C.llama_reset_timings(llm.ctx)

	return &llm, nil
}
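
// Close prints the accumulated timings, then frees the context and the model.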
func (llm *llama) Close() {
	defer C.llama_free_model(llm.model)
	defer C.llama_free(llm.ctx)

	C.llama_print_timings(llm.ctx)
}
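
// Predict tokenizes the prompt, prepends the tokens from the previous
// context, and streams generated responses through fn.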
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
	if input := llm.tokenize(prompt); input != nil {
		embd := make([]C.llama_token, len(ctx))
		for i := range ctx {
			embd[i] = C.llama_token(ctx[i])
		}

		return llm.generate(append(embd, input...), fn)
	}

	return errors.New("llama: tokenize")
}
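
// tokenize converts the prompt into at most NumCtx llama tokens, prepending a
// BOS token. It returns nil if tokenization fails.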
func (llm *llama) tokenize(prompt string) []C.llama_token {
	cPrompt := C.CString(prompt)
	defer C.free(unsafe.Pointer(cPrompt))

	tokens := make([]C.llama_token, llm.NumCtx)
	if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
		return tokens[:n]
	}

	return nil
}
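
// detokenize concatenates the string pieces for the given tokens.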
func (llm *llama) detokenize(tokens ...C.llama_token) string {
	var sb strings.Builder
	for _, token := range tokens {
		sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
	}

	return sb.String()
}
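
// generate runs the eval/sample loop until the KV cache fills or an EOS token
// is sampled, calling fn for each complete UTF-8 chunk and once more with the
// final context and timing statistics. The deque helper used below is assumed
// to be a generic double-ended queue defined elsewhere in this package.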
func (llm *llama) generate(input []C.llama_token, fn func(api.GenerateResponse)) error {
	var opts C.struct_llama_sample_options
	opts.repeat_penalty = C.float(llm.RepeatPenalty)
	opts.frequency_penalty = C.float(llm.FrequencyPenalty)
	opts.presence_penalty = C.float(llm.PresencePenalty)
	opts.temperature = C.float(llm.Temperature)
	opts.top_k = C.int(llm.TopK)
	opts.top_p = C.float(llm.TopP)
	opts.tfs_z = C.float(llm.TFSZ)
	opts.typical_p = C.float(llm.TypicalP)
	opts.mirostat = C.int(llm.Mirostat)
	opts.mirostat_tau = C.float(llm.MirostatTau)
	opts.mirostat_eta = C.float(llm.MirostatEta)

	output := deque[C.llama_token]{capacity: llm.NumCtx}

	context := deque[int]{capacity: llm.NumCtx / 2}
	for _, in := range input {
		context.PushLeft(int(in))
	}

	var b bytes.Buffer
	for C.llama_get_kv_cache_token_count(llm.ctx) < C.int(llm.NumCtx) {
		if retval := C.llama_eval(llm.ctx, unsafe.SliceData(input), C.int(len(input)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
			return errors.New("llama: eval")
		}

		token, err := llm.sample(output, &opts)
		if errors.Is(err, io.EOF) {
			break
		} else if err != nil {
			return err
		}

		b.WriteString(llm.detokenize(token))
		if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
			// call the callback
			fn(api.GenerateResponse{
				Response: b.String(),
			})

			output.PushLeft(token)
			context.PushLeft(int(token))
			b.Reset()
		}

		input = []C.llama_token{token}
	}

	dur := func(ms float64) time.Duration {
		d, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
		if err != nil {
			panic(err)
		}

		return d
	}

	timings := C.llama_get_timings(llm.ctx)
	fn(api.GenerateResponse{
		Done:               true,
		Context:            context.Data(),
		PromptEvalCount:    int(timings.n_p_eval),
		PromptEvalDuration: dur(float64(timings.t_p_eval_ms)),
		EvalCount:          int(timings.n_eval),
		EvalDuration:       dur(float64(timings.t_eval_ms)),
	})

	return nil
}
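
// sample builds a candidate list from the current logits and asks llama_sample
// for the next token, translating end-of-sequence into io.EOF.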
func (llm *llama) sample(output deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
	numVocab := int(C.llama_n_vocab(llm.ctx))
	logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)

	candidates := deque[C.struct_llama_token_data]{capacity: numVocab}
	for i := 0; i < candidates.Cap(); i++ {
		candidates.PushLeft(C.struct_llama_token_data{
			id:    C.int(i),
			logit: logits[i],
			p:     0,
		})
	}

	token := C.llama_sample(
		llm.ctx,
		unsafe.SliceData(candidates.Data()), C.size_t(candidates.Len()),
		unsafe.SliceData(output.Data()), C.size_t(output.Len()),
		opts)
	if token != C.llama_token_eos() {
		return token, nil
	}

	return 0, io.EOF
}