server.go

package llm

import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "log"
    "log/slog"
    "math/rand"
    "net"
    "net/http"
    "os"
    "os/exec"
    "path/filepath"
    "runtime"
    "slices"
    "strconv"
    "strings"
    "time"

    "github.com/ollama/ollama/api"
    "github.com/ollama/ollama/format"
    "github.com/ollama/ollama/gpu"
)

// LlamaServer is an instance of the llama.cpp server
type LlamaServer struct {
    port    int
    cmd     *exec.Cmd
    done    chan error // Channel to signal when the process exits
    status  *StatusWriter
    options *api.Options
}

var cpuOnlyFamilies = []string{
    "mamba",
}

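// NewLlamaServer decodes the model's GGUF metadata, estimates how many layers can be
// offloaded to the GPU, and starts the bundled llama.cpp server binary on a free local
// port, falling back across the available runner libraries on failure.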
func NewLlamaServer(model string, adapters, projectors []string, opts *api.Options) (*LlamaServer, error) {
    if _, err := os.Stat(model); err != nil {
        return nil, err
    }

    f, err := os.Open(model)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    ggml, _, err := DecodeGGML(f)
    if err != nil {
        return nil, err
    }

    if opts.NumCtx > int(ggml.KV().ContextLength()) {
        slog.Warn("requested context length is greater than model max context length", "requested", opts.NumCtx, "model", ggml.KV().ContextLength())
        opts.NumCtx = int(ggml.KV().ContextLength())
    }

    if opts.NumCtx < 4 {
        opts.NumCtx = 4
    }

    availableMemory, _ := gpu.CheckVRAM()
    info := gpu.GetGPUInfo()

    usedMemory := info.MinimumMemory
    for _, projector := range projectors {
        usedMemory += projectorMemoryRequirements(projector)

        // multimodal models require at least 2048 context
        opts.NumCtx = max(opts.NumCtx, 2048)
    }

    // fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
    kv := 2 * 2 * int64(opts.NumCtx) * int64(ggml.KV().BlockCount()) * int64(ggml.KV().EmbeddingLength()) / int64(ggml.KV().HeadCount()) * int64(ggml.KV().HeadCountKV())

    // this amount is the overhead + tensors in memory
    // TODO: get this from llama.cpp's graph calculations instead of
    // estimating it as 1/6 * kv_cache_size * num_gqa
    graph := int64(ggml.KV().GQA()) * kv / 6
    usedMemory += graph

    if usedMemory > availableMemory || slices.Contains(cpuOnlyFamilies, ggml.KV().Architecture()) {
        info.Library = "cpu"
    }

    requiredMemory := usedMemory

    var layers int
    for i := 0; i < int(ggml.KV().BlockCount()); i++ {
        layerMemory := ggml.LayerSize(fmt.Sprintf("blk.%d.", i)) + kv/int64(ggml.KV().BlockCount())
        requiredMemory += layerMemory

        if availableMemory > usedMemory+layerMemory && (opts.NumGPU < 0 || layers < opts.NumGPU) {
            usedMemory += layerMemory
            layers++
        }
    }

    memOutputLayer := ggml.LayerSize("output.")
    requiredMemory += memOutputLayer

    // only offload output layer if all repeating layers are offloaded
    if layers >= int(ggml.KV().BlockCount()) && availableMemory > usedMemory+memOutputLayer {
        usedMemory += memOutputLayer
        layers++
    }

    slog.Info(
        "offload to gpu",
        "layers", layers,
        "required", format.HumanBytes2(requiredMemory),
        "used", format.HumanBytes2(usedMemory),
        "available", format.HumanBytes2(availableMemory),
        "kv", format.HumanBytes2(kv),
        "graph", format.HumanBytes2(graph),
    )

    if opts.NumGPU < 0 && info.Library != "cpu" {
        opts.NumGPU = layers
    }

    if len(adapters) > 1 {
        return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
    }
    availableServers := availableServers()
    servers := serversForGpu(info)

    demandLib := os.Getenv("OLLAMA_LLM_LIBRARY")
    if demandLib != "" {
        serverPath := availableServers[demandLib]
        if serverPath == "" {
            slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
        } else {
            slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
            servers = []string{demandLib}
        }
    }

    if len(servers) == 0 {
        return nil, fmt.Errorf("no servers found for %v", info)
    }

    params := []string{
        "--model", model,
        "--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
        "--batch-size", fmt.Sprintf("%d", opts.NumBatch),
        "--embedding",
    }
    if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
        params = append(params, "--log-format", "json")
    } else {
        params = append(params, "--log-disable")
    }

    if opts.NumGPU > 0 {
        params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
    }

    if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
        params = append(params, "--verbose")
    }

    if opts.MainGPU > 0 {
        params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
    }

    if opts.RopeFrequencyBase > 0 {
        params = append(params, "--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase))
    }

    if opts.RopeFrequencyScale > 0 {
        params = append(params, "--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale))
    }

    if len(adapters) > 0 {
        // TODO: applying multiple adapters is not supported by the llama.cpp server yet
        params = append(params, "--lora", adapters[0])
    }

    if len(projectors) > 0 {
        // TODO: applying multiple projectors is not supported by the llama.cpp server yet
        params = append(params, "--mmproj", projectors[0])
    }

    if opts.NumThread > 0 {
        params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
    }

    if !opts.F16KV {
        params = append(params, "--memory-f32")
    }

    if opts.UseMLock {
        params = append(params, "--mlock")
    }

    if !opts.UseMMap {
        params = append(params, "--no-mmap")
    }

    if opts.UseNUMA {
        params = append(params, "--numa")
    }
    // Loop through potential servers
    var finalErr error
    for i := 0; i < len(servers); i++ {
        dir := availableServers[servers[i]]

        // Find an available port, retry on each iteration in case the failure was a port conflict race
        port := 0
        if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
            var l *net.TCPListener
            if l, err = net.ListenTCP("tcp", a); err == nil {
                port = l.Addr().(*net.TCPAddr).Port
                l.Close()
            }
        }
        if port == 0 {
            slog.Debug("ResolveTCPAddr failed", "error", err)
            port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
        }

        finalParams := append(params, "--port", strconv.Itoa(port))

        pathEnv := "LD_LIBRARY_PATH"
        if runtime.GOOS == "windows" {
            pathEnv = "PATH"
        }
        // append the server directory to LD_LIBRARY_PATH/PATH
        libraryPaths := []string{dir}
        if libraryPath, ok := os.LookupEnv(pathEnv); ok {
            // Append our runner directory to the path
            // This will favor system libraries over our bundled library dependencies
            libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...)
        }

        server := filepath.Join(dir, "ollama_llama_server")
        if runtime.GOOS == "windows" {
            server = server + ".exe"
        }

        s := &LlamaServer{
            port:    port,
            cmd:     exec.Command(server, finalParams...),
            status:  NewStatusWriter(os.Stderr),
            options: opts,
        }
        libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator)))
        slog.Debug(libEnv)
        s.cmd.Env = append(os.Environ(), libEnv)
        s.cmd.Stdout = os.Stdout
        s.cmd.Stderr = s.status

        slog.Info("starting llama server", "cmd", s.cmd.String())

        if err = s.cmd.Start(); err != nil {
            msg := ""
            if s.status != nil && s.status.LastErrMsg != "" {
                msg = s.status.LastErrMsg
            }
            err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
            finalErr = err
            continue
        }

        // reap subprocess when it exits
        go func() {
            // Exit status managed via getServerStatus
            _ = s.cmd.Wait()
        }()

        if err = s.waitUntilRunning(); err != nil {
            slog.Error("error starting llama server", "server", servers[i], "error", err)
            s.Close()
            finalErr = err
            continue
        }
        return s, nil
    }

    slog.Error("unable to load any llama server", "error", finalErr)
    return nil, finalErr
}

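// projectorMemoryRequirements estimates the memory needed for a multimodal projector
// file by summing the sizes of its tensor groups.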
func projectorMemoryRequirements(filename string) int64 {
    file, err := os.Open(filename)
    if err != nil {
        return 0
    }
    defer file.Close()

    ggml, _, err := DecodeGGML(file)
    if err != nil {
        return 0
    }

    prefixes := make(map[string]struct{})
    for _, layer := range ggml.Tensors() {
        parts := strings.Split(layer.Name, ".")
        prefixes[strings.Join(parts[:2], ".")] = struct{}{}
    }

    var ask int64
    for prefix := range prefixes {
        ask += ggml.LayerSize(prefix)
    }

    return ask
}

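// ServerStatus represents the coarse health states of the llama.cpp server as
// reported by getServerStatus.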
type ServerStatus int

const ( // iota is reset to 0
    ServerStatusReady ServerStatus = iota
    ServerStatusNoSlotsAvaialble
    ServerStatusLoadingModel
    ServerStatusNotResponding
    ServerStatusError
)

type ServerStatusResp struct {
    Status          string `json:"status"`
    SlotsIdle       int    `json:"slots_idle"`
    SlotsProcessing int    `json:"slots_processing"`
    Error           string `json:"error"`
}

func (s *LlamaServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
    // Fail fast if it's exited
    if s.cmd.ProcessState != nil {
        msg := ""
        if s.status != nil && s.status.LastErrMsg != "" {
            msg = s.status.LastErrMsg
        }
        return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
    if err != nil {
        return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        if errors.Is(err, context.DeadlineExceeded) {
            return ServerStatusNotResponding, fmt.Errorf("server not responding")
        }
        return ServerStatusError, fmt.Errorf("health resp: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return ServerStatusError, fmt.Errorf("read health response: %w", err)
    }

    var status ServerStatusResp
    if err := json.Unmarshal(body, &status); err != nil {
        return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
    }

    switch status.Status {
    case "ok":
        return ServerStatusReady, nil
    case "no slot available":
        return ServerStatusNoSlotsAvaialble, nil
    case "loading model":
        return ServerStatusLoadingModel, nil
    default:
        return ServerStatusError, fmt.Errorf("server error: %+v", status)
    }
}

func (s *LlamaServer) Ping(ctx context.Context) error {
    _, err := s.getServerStatus(ctx)
    if err != nil {
        slog.Debug("server unhealthy", "error", err)
        return err
    }
    return nil
}

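// waitUntilRunning polls the health endpoint until the runner reports ready, the
// subprocess exits, or the 3 minute timeout elapses.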
func (s *LlamaServer) waitUntilRunning() error {
    start := time.Now()
    expiresAt := time.Now().Add(3 * time.Minute) // be generous with timeout, large models can take a while to load
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()

    slog.Info("waiting for llama runner to start responding")
    var lastStatus ServerStatus = -1
    for {
        select {
        case err := <-s.done:
            msg := ""
            if s.status != nil && s.status.LastErrMsg != "" {
                msg = s.status.LastErrMsg
            }
            return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
        case <-ticker.C:
            if time.Now().After(expiresAt) {
                // timeout
                msg := ""
                if s.status != nil && s.status.LastErrMsg != "" {
                    msg = s.status.LastErrMsg
                }
                return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
            }
            if s.cmd.ProcessState != nil {
                msg := ""
                if s.status != nil && s.status.LastErrMsg != "" {
                    msg = s.status.LastErrMsg
                }
                return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
            }

            ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
            defer cancel()
            status, err := s.getServerStatus(ctx)
            if err != nil && lastStatus != status {
                slog.Debug("server not yet available", "error", err)
                lastStatus = status
                continue
            }

            switch status {
            case ServerStatusLoadingModel:
                // TODO - this state never seems to happen with the current server.cpp code (bug?)
                // it doesn't respond to the health endpoint until after the model is loaded
                slog.Debug("loading model")
            case ServerStatusReady:
                slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
                return nil
            }
        }
    }
}

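// jsonGrammar constrains generation to valid JSON output; it is passed to the
// llama.cpp server as the request grammar when the requested format is "json".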
const jsonGrammar = `
root ::= object
value ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3

type ImageData struct {
    Data []byte `json:"data"`
    ID   int    `json:"id"`
}

type completion struct {
    Content string `json:"content"`
    Model   string `json:"model"`
    Prompt  string `json:"prompt"`
    Stop    bool   `json:"stop"`

    Timings struct {
        PredictedN  int     `json:"predicted_n"`
        PredictedMS float64 `json:"predicted_ms"`
        PromptN     int     `json:"prompt_n"`
        PromptMS    float64 `json:"prompt_ms"`
    }
}

type CompletionRequest struct {
    Prompt  string
    Format  string
    Images  []ImageData
    Options api.Options
}

type CompletionResponse struct {
    Content            string
    Done               bool
    PromptEvalCount    int
    PromptEvalDuration time.Duration
    EvalCount          int
    EvalDuration       time.Duration
}

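// Completion streams a prediction from the llama.cpp server's /completion endpoint,
// invoking fn for each generated chunk and once more with Done set when generation stops.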
func (s *LlamaServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
    request := map[string]any{
        "prompt":            req.Prompt,
        "stream":            true,
        "n_predict":         req.Options.NumPredict,
        "n_keep":            req.Options.NumKeep,
        "main_gpu":          req.Options.MainGPU,
        "temperature":       req.Options.Temperature,
        "top_k":             req.Options.TopK,
        "top_p":             req.Options.TopP,
        "tfs_z":             req.Options.TFSZ,
        "typical_p":         req.Options.TypicalP,
        "repeat_last_n":     req.Options.RepeatLastN,
        "repeat_penalty":    req.Options.RepeatPenalty,
        "presence_penalty":  req.Options.PresencePenalty,
        "frequency_penalty": req.Options.FrequencyPenalty,
        "mirostat":          req.Options.Mirostat,
        "mirostat_tau":      req.Options.MirostatTau,
        "mirostat_eta":      req.Options.MirostatEta,
        "penalize_nl":       req.Options.PenalizeNewline,
        "seed":              req.Options.Seed,
        "stop":              req.Options.Stop,
        "image_data":        req.Images,
        "cache_prompt":      true,
    }

    // Make sure the server is ready
    status, err := s.getServerStatus(ctx)
    if err != nil {
        return err
    } else if status != ServerStatusReady {
        return fmt.Errorf("unexpected server status: %d", status)
    }

    if req.Format == "json" {
        request["grammar"] = jsonGrammar
        if !strings.Contains(strings.ToLower(req.Prompt), "json") {
            slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
        }
    }

    retryDelay := 100 * time.Microsecond
    for retries := 0; retries < maxRetries; retries++ {
        if retries > 0 {
            time.Sleep(retryDelay) // wait before retrying
            retryDelay *= 2        // exponential backoff
        }

        // Handling JSON marshaling with special characters unescaped.
        buffer := &bytes.Buffer{}
        enc := json.NewEncoder(buffer)
        enc.SetEscapeHTML(false)

        if err := enc.Encode(request); err != nil {
            return fmt.Errorf("failed to marshal data: %v", err)
        }

        endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
        req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
        if err != nil {
            return fmt.Errorf("error creating POST request: %v", err)
        }
        req.Header.Set("Content-Type", "application/json")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return fmt.Errorf("POST predict: %v", err)
        }
        defer resp.Body.Close()

        if resp.StatusCode >= 400 {
            bodyBytes, err := io.ReadAll(resp.Body)
            if err != nil {
                return fmt.Errorf("failed reading llm error response: %w", err)
            }
            log.Printf("llm predict error: %s", bodyBytes)
            return fmt.Errorf("%s", bodyBytes)
        }

        scanner := bufio.NewScanner(resp.Body)
        buf := make([]byte, 0, maxBufferSize)
        scanner.Buffer(buf, maxBufferSize)

        retryNeeded := false
        // keep track of the last token generated, this is used to abort if the model starts looping
        var lastToken string
        var tokenRepeat int

        for scanner.Scan() {
            select {
            case <-ctx.Done():
                // This handles the request cancellation
                return ctx.Err()
            default:
                line := scanner.Bytes()
                if len(line) == 0 {
                    continue
                }

                // try again on slot unavailable
                if bytes.Contains(line, []byte("slot unavailable")) {
                    retryNeeded = true
                    break
                }

                evt, ok := bytes.CutPrefix(line, []byte("data: "))
                if !ok {
                    return fmt.Errorf("error parsing llm response stream: %s", line)
                }

                var c completion
                if err := json.Unmarshal(evt, &c); err != nil {
                    return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
                }

                switch {
                case strings.TrimSpace(c.Content) == lastToken:
                    tokenRepeat++
                default:
                    lastToken = strings.TrimSpace(c.Content)
                    tokenRepeat = 0
                }

                // 30 picked as an arbitrary max token repeat limit, modify as needed
                if tokenRepeat > 30 {
                    slog.Debug("prediction aborted, token repeat limit reached")
                    return ctx.Err()
                }

                if c.Content != "" {
                    fn(CompletionResponse{
                        Content: c.Content,
                    })
                }

                if c.Stop {
                    fn(CompletionResponse{
                        Done:               true,
                        PromptEvalCount:    c.Timings.PromptN,
                        PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
                        EvalCount:          c.Timings.PredictedN,
                        EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
                    })
                    return nil
                }
            }
        }

        if err := scanner.Err(); err != nil {
            if strings.Contains(err.Error(), "unexpected EOF") {
                s.Close()
                msg := ""
                if s.status != nil && s.status.LastErrMsg != "" {
                    msg = s.status.LastErrMsg
                }
                return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
            }
            return fmt.Errorf("error reading llm response: %v", err)
        }

        if !retryNeeded {
            return nil // success
        }
    }

    // should never reach here ideally
    return fmt.Errorf("max retries exceeded")
}

type EmbeddingRequest struct {
    Content string `json:"content"`
}

type EmbeddingResponse struct {
    Embedding []float64 `json:"embedding"`
}

func (s *LlamaServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
    // Make sure the server is ready
    status, err := s.getServerStatus(ctx)
    if err != nil {
        return nil, err
    } else if status != ServerStatusReady {
        return nil, fmt.Errorf("unexpected server status: %d", status)
    }

    data, err := json.Marshal(EmbeddingRequest{Content: prompt})
    if err != nil {
        return nil, fmt.Errorf("error marshaling embed data: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
    if err != nil {
        return nil, fmt.Errorf("error creating embed request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("do embedding request: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("error reading embed response: %w", err)
    }

    if resp.StatusCode >= 400 {
        log.Printf("llm embedding error: %s", body)
        return nil, fmt.Errorf("%s", body)
    }

    var embedding EmbeddingResponse
    if err := json.Unmarshal(body, &embedding); err != nil {
        return nil, fmt.Errorf("unmarshal embedding response: %w", err)
    }

    return embedding.Embedding, nil
}

type TokenizeRequest struct {
    Content string `json:"content"`
}

type TokenizeResponse struct {
    Tokens []int `json:"tokens"`
}

func (s *LlamaServer) Tokenize(ctx context.Context, content string) ([]int, error) {
    // Make sure the server is ready
    status, err := s.getServerStatus(ctx)
    if err != nil {
        return nil, err
    } else if status != ServerStatusReady {
        return nil, fmt.Errorf("unexpected server status: %d", status)
    }

    data, err := json.Marshal(TokenizeRequest{Content: content})
    if err != nil {
        return nil, fmt.Errorf("marshaling encode data: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
    if err != nil {
        return nil, fmt.Errorf("encode request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, fmt.Errorf("do encode request: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("read encode request: %w", err)
    }

    if resp.StatusCode >= 400 {
        log.Printf("llm encode error: %s", body)
        return nil, fmt.Errorf("%s", body)
    }

    var encoded TokenizeResponse
    if err := json.Unmarshal(body, &encoded); err != nil {
        return nil, fmt.Errorf("unmarshal encode response: %w", err)
    }

    return encoded.Tokens, nil
}

type DetokenizeRequest struct {
    Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
    Content string `json:"content"`
}

func (s *LlamaServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
    // Make sure the server is ready
    status, err := s.getServerStatus(ctx)
    if err != nil {
        return "", err
    } else if status != ServerStatusReady {
        return "", fmt.Errorf("unexpected server status: %d", status)
    }

    data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
    if err != nil {
        return "", fmt.Errorf("marshaling decode data: %w", err)
    }

    req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
    if err != nil {
        return "", fmt.Errorf("decode request: %w", err)
    }
    req.Header.Set("Content-Type", "application/json")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return "", fmt.Errorf("do decode request: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return "", fmt.Errorf("read decode request: %w", err)
    }

    if resp.StatusCode >= 400 {
        log.Printf("llm decode error: %s", body)
        return "", fmt.Errorf("%s", body)
    }

    var decoded DetokenizeResponse
    if err := json.Unmarshal(body, &decoded); err != nil {
        return "", fmt.Errorf("unmarshal decode response: %w", err)
    }

    return decoded.Content, nil
}

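// Close kills the llama.cpp server subprocess, if one was started.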
func (s *LlamaServer) Close() error {
    if s.cmd != nil {
        slog.Debug("stopping llama server")
        return s.cmd.Process.Kill()
    }
    return nil
}

func parseDurationMs(ms float64) time.Duration {
    dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
    if err != nil {
        panic(err)
    }

    return dur
}