llama.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"log"
	"math/rand"
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
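
// Illustrative note (not from the original source): when a predict request sets
// format to "json", this grammar is sent to the llama.cpp server as its "grammar"
// field (see Predict below), constraining sampling so the completion is a single
// JSON object, e.g. {"answer": "yes", "confidence": 0.9}.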

//go:embed llama.cpp/*/build/*/bin/*
var llamaCppEmbed embed.FS

type ModelRunner struct {
	Path        string // path to the model runner executable
	Accelerated bool
}

func chooseRunners(workDir, runnerType string) []ModelRunner {
	buildPath := path.Join("llama.cpp", runnerType, "build")
	var runners []ModelRunner

	// set the runners based on the OS
	// IMPORTANT: the order of the runners in the array is the priority order
	switch runtime.GOOS {
	case "darwin":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "linux":
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cuda", "bin", "ollama-runner"), Accelerated: true},
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	case "windows":
		// TODO: select windows GPU runner here when available
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
		}
	default:
		log.Printf("unknown OS, running on CPU: %s", runtime.GOOS)
		runners = []ModelRunner{
			{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
		}
	}

	runnerAvailable := false // if no runner files are found in the embed, this flag will cause a fast fail
	for _, r := range runners {
		// find all the files in the runner's bin directory
		files, err := fs.Glob(llamaCppEmbed, path.Join(path.Dir(r.Path), "*"))
		if err != nil {
			// this is expected, ollama may be compiled without all runners packed in
			log.Printf("%s runner not found: %v", r.Path, err)
			continue
		}

		for _, f := range files {
			runnerAvailable = true

			srcFile, err := llamaCppEmbed.Open(f)
			if err != nil {
				log.Fatalf("read llama runner %s: %v", f, err)
			}
			defer srcFile.Close()

			// create the directory in case it does not exist, filepath.Dir() converts the file path to the OS's format
			destPath := filepath.Join(workDir, filepath.Dir(f))
			if err := os.MkdirAll(destPath, 0o755); err != nil {
				log.Fatalf("create runner temp dir %s: %v", filepath.Dir(f), err)
			}

			// create the path to the destination file, filepath.Base() converts the file path to the OS's format
			destFile := filepath.Join(destPath, filepath.Base(f))

			_, err = os.Stat(destFile)
			switch {
			case errors.Is(err, os.ErrNotExist):
				destFile, err := os.OpenFile(destFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
				if err != nil {
					log.Fatalf("write llama runner %s: %v", f, err)
				}
				defer destFile.Close()

				if _, err := io.Copy(destFile, srcFile); err != nil {
					log.Fatalf("copy llama runner %s: %v", f, err)
				}
			case err != nil:
				log.Fatalf("stat llama runner %s: %v", f, err)
			}
		}
	}
	if !runnerAvailable {
		log.Fatalf("%s runner not found", runnerType)
	}

	// return the runners to try in priority order
	localRunnersByPriority := []ModelRunner{}
	for _, r := range runners {
		// clean the ModelRunner paths so that they match the OS we are running on
		localRunnersByPriority = append(localRunnersByPriority, ModelRunner{
			Path:        filepath.Clean(path.Join(workDir, r.Path)),
			Accelerated: r.Accelerated,
		})
	}

	return localRunnersByPriority
}
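
// Illustrative example (hypothetical workDir and runnerType): on Linux with
// workDir "/tmp/ollama123" and runnerType "ggml", chooseRunners would return,
// in priority order:
//   /tmp/ollama123/llama.cpp/ggml/build/cuda/bin/ollama-runner  (Accelerated: true)
//   /tmp/ollama123/llama.cpp/ggml/build/cpu/bin/ollama-runner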

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port     int
	Cmd      *exec.Cmd
	Cancel   context.CancelFunc
	exitOnce sync.Once
	exitCh   chan error // channel to receive the exit status of the subprocess

	*StatusWriter // captures error messages from the llama runner process
}

type llama struct {
	api.Options
	Running
}

var (
	errNvidiaSMI     = errors.New("nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled; check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}
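
// Illustrative example (hypothetical nvidia-smi output): with two GPUs reporting
//   8000
//   4000
// free MiB, CheckVRAM sums them to 12000 MiB and returns 12000*1024*1024 bytes;
// anything under 2 GB total returns errAvailableVRAM instead.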

func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	if opts.NumGPU != -1 {
		return opts.NumGPU
	}
	if runtime.GOOS == "linux" {
		freeBytes, err := CheckVRAM()
		if err != nil {
			if !errors.Is(err, errNvidiaSMI) {
				log.Print(err.Error())
			}
			// nvidia driver not installed or no nvidia GPU found
			return 0
		}

		/*
			Calculate bytes per layer: roughly the size of the model file divided by the number of layers.
			Both the model weights and the kv cache have to fit in VRAM.
		*/
		bytesPerLayer := fileSizeBytes / numLayer

		// 75% of the absolute max number of layers we can fit in available VRAM, off-loading too many layers to the GPU can cause OOM errors
		layers := int(freeBytes/bytesPerLayer) * 3 / 4
		log.Printf("%d MB VRAM available, loading up to %d GPU layers", freeBytes/(1024*1024), layers)

		return layers
	}

	// default to enable metal on macOS
	return 1
}
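
// Illustrative arithmetic (hypothetical, rounded numbers): a 40-layer model file of
// ~7 GB gives bytesPerLayer of roughly 175 MB; with 8 GB of free VRAM that is at most
// 8000/175 = 45 layers, and the 75% margin loads up to 45*3/4 = 33 GPU layers.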

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}
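
// Illustrative example (hypothetical log line): if the runner writes
//   "CUDA error 2 at ggml-cuda.cu:123: out of memory"
// to stderr, Write records everything after "CUDA error" as LastErrMsg and pushes a
// wrapped error onto ErrCh, while still forwarding the original line to os.Stderr.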

func newLlama(model string, adapters []string, runners []ModelRunner, numLayers int64, opts api.Options) (*llama, error) {
	fileInfo, err := os.Stat(model)
	if err != nil {
		return nil, err
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	numGPU := NumGPU(numLayers, fileInfo.Size(), opts)
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--n-gpu-layers", fmt.Sprintf("%d", numGPU),
		"--embedding",
	}

	if opts.RopeFrequencyBase > 0 {
		params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
	}

	if opts.RopeFrequencyScale > 0 {
		params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
	}

	if opts.NumGQA > 0 {
		params = append(params, "--gqa", fmt.Sprintf("%d", opts.NumGQA))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}
	if opts.UseMLock {
		params = append(params, "--mlock")
	}
	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}
	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	var runnerErr error

	// start the llama.cpp server with a retry in case the port is already in use
	for _, runner := range runners {
		if runner.Accelerated && numGPU == 0 {
			log.Printf("skipping accelerated runner because num_gpu=0")
			continue
		}

		if _, err := os.Stat(runner.Path); err != nil {
			log.Printf("llama runner not found: %v", err)
			continue
		}

		port := rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		ctx, cancel := context.WithCancel(context.Background())
		cmd := exec.CommandContext(
			ctx,
			runner.Path,
			append(params, "--port", strconv.Itoa(port))...,
		)

		var libraryPaths []string
		if libraryPath, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok {
			libraryPaths = append(libraryPaths, libraryPath)
		}
		libraryPaths = append(libraryPaths, filepath.Dir(runner.Path))

		cmd.Env = append(os.Environ(), fmt.Sprintf("LD_LIBRARY_PATH=%s", strings.Join(libraryPaths, ":")))
		cmd.Stdout = os.Stderr
		statusWriter := NewStatusWriter()
		cmd.Stderr = statusWriter

		llm := &llama{Options: opts, Running: Running{Port: port, Cmd: cmd, Cancel: cancel, exitCh: make(chan error)}}

		log.Print("starting llama runner")
		if err := llm.Cmd.Start(); err != nil {
			log.Printf("error starting the external llama runner: %v", err)
			continue
		}

		// monitor the llama runner process and signal when it exits
		go func() {
			err := llm.Cmd.Wait()
			// default to printing the exit message of the command process, it will probably just say 'exit status 1'
			errMsg := err.Error()
			// try to set a better error message if llama runner logs captured an error
			if statusWriter.LastErrMsg != "" {
				errMsg = statusWriter.LastErrMsg
			}
			log.Println(errMsg)
			// llm.Cmd.Wait() can only be called once, use this exit channel to signal that the process has exited
			llm.exitOnce.Do(func() {
				close(llm.exitCh)
			})
		}()

		if err := waitForServer(llm); err != nil {
			log.Printf("error starting llama runner: %v", err)
			llm.Close()

			// default the runnerErr to the error returned by the most recent llama runner process
			runnerErr = err

			// capture the error directly from the runner process, if any
			select {
			case runnerErr = <-statusWriter.ErrCh:
			default:
				// the runner process probably timed out
			}

			// try again
			continue
		}

		// server started successfully
		return llm, nil
	}

	if runnerErr != nil {
		// this is the error returned from the llama runner process that failed most recently
		return nil, runnerErr
	}

	return nil, fmt.Errorf("failed to start a llama runner")
}

func waitForServer(llm *llama) error {
	start := time.Now()
	expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(200 * time.Millisecond)
	defer ticker.Stop()

	log.Print("waiting for llama runner to start responding")
	for {
		select {
		case <-llm.exitCh:
			// failed to start subprocess
			return fmt.Errorf("llama runner process has terminated")
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				return fmt.Errorf("timed out waiting for llama runner to start")
			}

			if err := llm.Ping(context.Background()); err == nil {
				// success
				log.Printf("llama runner started in %f seconds", time.Since(start).Seconds())
				return nil
			}
		}
	}
}

func (llm *llama) Close() {
	// signal the sub-process to terminate
	llm.Cancel()

	// wait for the command to exit to prevent race conditions with the next run
	<-llm.exitCh

	if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
		log.Printf("llama runner stopped with error: %v", llm.StatusWriter.LastErrMsg)
	} else {
		log.Print("llama runner stopped successfully")
	}
}

func (llm *llama) SetOptions(opts api.Options) {
	llm.Options = opts
}

type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

const maxBufferSize = 512 * format.KiloByte
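
// Illustrative example (hypothetical stream contents): the llama.cpp server streams
// server-sent-event style lines such as
//   data: {"content":" Hello","stop":false}
// followed by a final line with "stop":true carrying the timing fields; Predict below
// parses each "data: " payload into the prediction struct and forwards the content to fn.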
func (llm *llama) Predict(ctx context.Context, prevContext []int, prompt string, format string, fn func(api.GenerateResponse)) error {
	prevConvo, err := llm.Decode(ctx, prevContext)
	if err != nil {
		return err
	}

	// Remove leading spaces from prevConvo if present
	prevConvo = strings.TrimPrefix(prevConvo, " ")

	var nextContext strings.Builder
	nextContext.WriteString(prevConvo)
	nextContext.WriteString(prompt)

	request := map[string]any{
		"prompt":            nextContext.String(),
		"stream":            true,
		"n_predict":         llm.NumPredict,
		"n_keep":            llm.NumKeep,
		"temperature":       llm.Temperature,
		"top_k":             llm.TopK,
		"top_p":             llm.TopP,
		"tfs_z":             llm.TFSZ,
		"typical_p":         llm.TypicalP,
		"repeat_last_n":     llm.RepeatLastN,
		"repeat_penalty":    llm.RepeatPenalty,
		"presence_penalty":  llm.PresencePenalty,
		"frequency_penalty": llm.FrequencyPenalty,
		"mirostat":          llm.Mirostat,
		"mirostat_tau":      llm.MirostatTau,
		"mirostat_eta":      llm.MirostatEta,
		"penalize_nl":       llm.PenalizeNewline,
		"seed":              llm.Seed,
		"stop":              llm.Stop,
	}

	if format == "json" {
		request["grammar"] = jsonGrammar
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", llm.Port)
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(resp.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(resp.Body)
	// increase the buffer size to avoid running out of space
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)
	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			if evt, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
				var p prediction
				if err := json.Unmarshal(evt, &p); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				if p.Content != "" {
					fn(api.GenerateResponse{Response: p.Content})
					nextContext.WriteString(p.Content)
				}

				if p.Stop {
					embd, err := llm.Encode(ctx, nextContext.String())
					if err != nil {
						return fmt.Errorf("encoding context: %v", err)
					}

					fn(api.GenerateResponse{
						Done:               true,
						Context:            embd,
						PromptEvalCount:    p.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
						EvalCount:          p.Timings.PredictedN,
						EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
					})

					return nil
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			// this means the llama runner subprocess crashed
			llm.Close()
			if llm.StatusWriter != nil && llm.StatusWriter.LastErrMsg != "" {
				return fmt.Errorf("llama runner exited: %v", llm.StatusWriter.LastErrMsg)
			}

			return fmt.Errorf("llama runner exited, you may not have enough available memory to run this model")
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (llm *llama) Encode(ctx context.Context, prompt string) ([]int, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port)
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
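
// Illustrative example (hypothetical token values): POSTing {"content":"hello world"}
// to the runner's /tokenize endpoint might return {"tokens":[15043,3186]}; Decode
// below performs the inverse through /detokenize.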

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (llm *llama) Decode(ctx context.Context, tokens []int) (string, error) {
	if len(tokens) == 0 {
		return "", nil
	}
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/detokenize", llm.Port)
	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (llm *llama) Embedding(ctx context.Context, input string) ([]float64, error) {
	endpoint := fmt.Sprintf("http://127.0.0.1:%d/embedding", llm.Port)
	data, err := json.Marshal(EmbeddingRequest{Content: input})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("POST embedding: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

// Ping checks that the server subprocess is still running and responding to requests
func (llm *llama) Ping(ctx context.Context) error {
	resp, err := http.Head(fmt.Sprintf("http://127.0.0.1:%d", llm.Port))
	if err != nil {
		return fmt.Errorf("ping resp: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected ping status: %s", resp.Status)
	}
	return nil
}