llama.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
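// For example, a request with format "json" (see Predict below, which sets
// "grammar": jsonGrammar) can only produce a single well-formed JSON object
// such as {"city": "Paris", "population": 2102650}; the grammar's root rule is
// `object`, so bare strings, numbers, or other free-form text are rejected.
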
//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool
}
func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		if runtime.GOARCH == "arm64" {
			runners = []ModelRunner{{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")}}
		} else {
			runners = []ModelRunner{{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")}}
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist, filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file, filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}
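// As a sketch of the expected layout (runnerType values are supplied by the
// caller and are not defined in this file): for a hypothetical runnerType of
// "gguf" on Linux, the CUDA runner is looked up inside the embedded filesystem
// at llama.cpp/gguf/build/cuda/bin/ollama-runner, which matches the go:embed
// pattern llama.cpp/*/build/*/bin/* above, and is extracted to the same
// relative path under workDir.
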
type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}
type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}
type Running struct {
	Port   int
	Cmd    *exec.Cmd
	Cancel context.CancelFunc

	exitOnce sync.Once
	exitCh   chan error // channel to receive the exit status of the subprocess

	*StatusWriter // captures error messages from the llama runner process
}

type llama struct {
	api.Options
	Running
}

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)
// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}
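// Rough sketch of the input this parses (values are illustrative): with the
// flags used above, nvidia-smi prints one line per GPU containing the free
// memory in MiB, e.g.
//
//	11019
//	11019
//
// so on a two-GPU machine CheckVRAM sums both lines (22038 MiB) and reports
// the combined free VRAM across all visible GPUs, converted to bytes.
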
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNvidiaSMI) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		/*
			Calculate bytes per layer; this is roughly the size of the model file divided by the number of layers.
			Both the model weights and the kv cache have to fit in VRAM, so only a fraction of the
			theoretical maximum number of layers is offloaded below.
		*/
		bytesPerLayer := fileSizeBytes / numLayer

		// 75% of the absolute max number of layers we can fit in available VRAM, off-loading too many layers to the GPU can cause OOM errors
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}

	// default to enable metal on macOS
	return 1
}
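// Worked example with hypothetical numbers: a ~3.8 GiB model file with
// numLayer = 32 gives bytesPerLayer of roughly 122 MiB. With 8 GiB of free
// VRAM, freeBytes/bytesPerLayer is about 67, and 75% of that is 50, so up to
// 50 layers would be offloaded via --n-gpu-layers. Requesting more layers than
// the model actually has is harmless; the runner simply offloads every layer.
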
// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}
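// Behavior sketch (hypothetical log line): if the runner writes
// "error: failed to load model" to stderr, bytes.Cut above splits on "error:",
// so LastErrMsg becomes "failed to load model" and the same message is sent on
// ErrCh; the raw line is still forwarded to os.Stderr either way.
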
func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if opts.RopeFrequencyBase > 0 {
		params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
	}

	if opts.RopeFrequencyScale > 0 {
		params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
			libraryPaths = append(libraryPaths, libraryPath)
		}
		libraryPaths = append(libraryPaths, filepath.Dir(runner.Path))

		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", strings.Join(libraryPaths, ":")))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error), StatusWriter: statusWriter}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// report the exit status of the command process, which will probably just say "exit status 1"
			errMsg := "llama runner exited"
			if err != nil {
				errMsg = err.Error()
			}
			// try to set a better error message if the llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once, use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}
func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}
func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}
const maxBufferSize = 512 * format.KiloByte

func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, format string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	// Remove leading spaces from prevConvo if present
	prevConvo = strings.TrimPrefix(prevConvo, " ")

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	request := map[string]any{
		"prompt":            nextContext.String(),
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"main_gpu":          llm.MainGPU,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
	}

	if format == "json" {
		request["grammar"] = jsonGrammar
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			// this means the llama runner subprocess crashed
			llm.Close()
			if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
				return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
			}
			return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
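// Wire-format sketch (assumed shape of the llama.cpp streaming response, shown
// for illustration): each chunk consumed by the scanner above is an SSE-style
// line such as
//
//	data: {"content":" Hello","stop":false}
//
// and the final chunk carries "stop":true plus the timing fields
// (prompt_n, prompt_ms, predicted_n, predicted_ms) that are copied into the
// final GenerateResponse.
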
type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}
// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	resp, err := http.Head(fmt.Sprintf("http://127.0.0.1:%d", llm.Port))
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}