server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

// LlamaServer is an instance of the llama.cpp server
type LlamaServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options
}
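
// cpuOnlyFamilies lists model architectures that are forced to run on the CPU.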
var cpuOnlyFamilies = []string{
	"mamba",
}
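
// NewLlamaServer estimates how many layers of the model fit in available GPU
// memory, selects a compatible runner library, and launches the bundled
// llama.cpp server as a subprocess listening on a local port.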
func NewLlamaServer(model string, adapters, projectors []string, opts api.Options) (*LlamaServer, error) {
	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	if err != nil {
		return nil, err
	}

	if opts.NumCtx > int(ggml.KV().ContextLength()) {
		slog.Warn("requested context length is greater than model max context length", "requested", opts.NumCtx, "model", ggml.KV().ContextLength())
		opts.NumCtx = int(ggml.KV().ContextLength())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

	memoryAvailable, _ := gpu.CheckVRAM()
	info := gpu.GetGPUInfo()

	memoryMinimum := info.MinimumMemory
	for _, projector := range projectors {
		memoryMinimum += projectorMemoryRequirements(projector)

		// multimodal models require at least 2048 context
		opts.NumCtx = max(opts.NumCtx, 2048)
	}

	// fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
	var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV()
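
	// graphPartialOffload and graphFullOffload estimate the compute graph
	// (scratch) memory needed when some versus all layers live on the GPU.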
	graphPartialOffload, graphFullOffload := ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
	if graphPartialOffload == 0 {
		graphPartialOffload = ggml.KV().GQA() * kv / 6
	}

	if graphFullOffload == 0 {
		graphFullOffload = graphPartialOffload
	}

	// memoryRequiredTotal represents the memory required for full GPU offloading (all layers)
	memoryRequiredTotal := memoryMinimum + graphFullOffload

	// memoryRequiredPartial represents the memory required for partial GPU offloading (n > 0, n < layers)
	memoryRequiredPartial := memoryMinimum + graphPartialOffload

	if info.Library != "metal" {
		if memoryRequiredPartial > memoryAvailable || slices.Contains(cpuOnlyFamilies, ggml.KV().Architecture()) {
			info.Library = "cpu"
		}
	}
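
	// Walk the repeating blocks, greedily assigning each layer to the GPU
	// while it still fits within the remaining available memory.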
	var layerCount int
	layers := ggml.Tensors().Layers()
	for i := 0; i < int(ggml.KV().BlockCount()); i++ {
		memoryLayer := layers[fmt.Sprintf("%d", i)].size()

		// KV is proportional to the number of layers
		memoryLayer += kv / ggml.KV().BlockCount()

		memoryRequiredTotal += memoryLayer
		if memoryAvailable > memoryRequiredPartial+memoryLayer {
			memoryRequiredPartial += memoryLayer
			layerCount++
		}
	}
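
	// The output layer is only counted as offloaded when the entire model,
	// including that layer, fits in available memory.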
	memoryLayerOutput := layers["output"].size()
	memoryRequiredTotal += memoryLayerOutput
	if memoryAvailable > memoryRequiredTotal {
		layerCount = int(ggml.KV().BlockCount()) + 1
		memoryRequiredPartial = memoryRequiredTotal
	}

	if opts.NumGPU < 0 {
		opts.NumGPU = layerCount
	}

	slog.Info(
		"offload to gpu",
		"reallayers", opts.NumGPU,
		"layers", layerCount,
		"required", format.HumanBytes2(memoryRequiredTotal),
		"used", format.HumanBytes2(memoryRequiredPartial),
		"available", format.HumanBytes2(memoryAvailable),
		"kv", format.HumanBytes2(kv),
		"fulloffload", format.HumanBytes2(graphFullOffload),
		"partialoffload", format.HumanBytes2(graphPartialOffload),
	)

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	servers := serversForGpu(info)

	demandLib := os.Getenv("OLLAMA_LLM_LIBRARY")
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", info)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}
	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--log-format", "json")
	} else {
		params = append(params, "--log-disable")
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	// Loop through potential servers
	var finalErr error
	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// append the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		s := &LlamaServer{
			port:    port,
			cmd:     exec.Command(server, finalParams...),
			status:  NewStatusWriter(os.Stderr),
			options: opts,
		}
		libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator)))
		slog.Debug(libEnv)
		s.cmd.Env = append(os.Environ(), libEnv)
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		slog.Info("starting llama server", "cmd", s.cmd.String())

		if err = s.cmd.Start(); err != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			// Exit status managed via getServerStatus
			_ = s.cmd.Wait()
		}()

		if err = s.waitUntilRunning(); err != nil {
			slog.Error("error starting llama server", "server", servers[i], "error", err)
			s.Close()
			finalErr = err
			continue
		}
		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}
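
// projectorMemoryRequirements returns the total tensor size of a multimodal
// projector file, which is added to the minimum GPU memory requirement.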
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}
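
// ServerStatus represents the health of the llama.cpp server as reported by,
// or inferred from, its /health endpoint.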
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvaialble
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}
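
// getServerStatus queries the runner's /health endpoint and maps the reply to
// a ServerStatus, failing fast if the subprocess has already exited.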
func (s *LlamaServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvaialble, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}
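
// Ping checks that the llama.cpp server is responding and healthy, returning
// an error if it is not.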
func (s *LlamaServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}
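
// waitUntilRunning polls the health endpoint until the runner reports it is
// ready, the subprocess exits, or the load timeout expires.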
func (s *LlamaServer) waitUntilRunning() error {
	start := time.Now()
	// TODO we need to wire up a better way to detect hangs during model load and startup of the server
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
			}
			if s.cmd.ProcessState != nil {
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
			}

			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
			defer cancel()
			status, err := s.getServerStatus(ctx)
			if err != nil && lastStatus != status {
				slog.Debug("server not yet available", "error", err)
				lastStatus = status
				continue
			}

			switch status {
			case ServerStatusLoadingModel:
				// TODO - this state never seems to happen with the current server.cpp code (bug?)
				// it doesn't respond to the health endpoint until after the model is loaded
				slog.Debug("loading model")
			case ServerStatusReady:
				slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
				return nil
			}
		}
	}
}
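
// jsonGrammar is a GBNF grammar passed to the llama.cpp server to constrain
// sampling to valid JSON when the request format is "json".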
const jsonGrammar = `
root ::= object
value ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
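
// Completion streams a prediction from the runner's /completion endpoint,
// calling fn with each chunk of generated content and once more with Done set
// when generation stops.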
func (s *LlamaServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %d", status)
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	retryDelay := 100 * time.Microsecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}

		// Handling JSON marshaling with special characters unescaped.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %v", err)
		}

		endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
		if err != nil {
			return fmt.Errorf("error creating POST request: %v", err)
		}
		req.Header.Set("Content-Type", "application/json")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return fmt.Errorf("POST predict: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 400 {
			bodyBytes, err := io.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("failed reading llm error response: %w", err)
			}
			log.Printf("llm predict error: %s", bodyBytes)
			return fmt.Errorf("%s", bodyBytes)
		}

		scanner := bufio.NewScanner(resp.Body)
		buf := make([]byte, 0, maxBufferSize)
		scanner.Buffer(buf, maxBufferSize)

		retryNeeded := false
		// keep track of the last token generated, this is used to abort if the model starts looping
		var lastToken string
		var tokenRepeat int

		for scanner.Scan() {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				return ctx.Err()
			default:
				line := scanner.Bytes()
				if len(line) == 0 {
					continue
				}

				// try again on slot unavailable
				if bytes.Contains(line, []byte("slot unavailable")) {
					retryNeeded = true
					break
				}

				evt, ok := bytes.CutPrefix(line, []byte("data: "))
				if !ok {
					return fmt.Errorf("error parsing llm response stream: %s", line)
				}

				var c completion
				if err := json.Unmarshal(evt, &c); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				switch {
				case strings.TrimSpace(c.Content) == lastToken:
					tokenRepeat++
				default:
					lastToken = strings.TrimSpace(c.Content)
					tokenRepeat = 0
				}

				// 30 picked as an arbitrary max token repeat limit, modify as needed
				if tokenRepeat > 30 {
					slog.Debug("prediction aborted, token repeat limit reached")
					return ctx.Err()
				}

				if c.Content != "" {
					fn(CompletionResponse{
						Content: c.Content,
					})
				}

				if c.Stop {
					fn(CompletionResponse{
						Done:               true,
						PromptEvalCount:    c.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
						EvalCount:          c.Timings.PredictedN,
						EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
					})
					return nil
				}
			}
		}

		if err := scanner.Err(); err != nil {
			if strings.Contains(err.Error(), "unexpected EOF") {
				s.Close()
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
			}
			return fmt.Errorf("error reading llm response: %v", err)
		}

		if !retryNeeded {
			return nil // success
		}
	}

	// should never reach here ideally
	return fmt.Errorf("max retries exceeded")
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}
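
// Embedding requests an embedding vector for the given prompt from the
// runner's /embedding endpoint.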
func (s *LlamaServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}
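
// Tokenize converts the given content into model token IDs using the runner's
// /tokenize endpoint.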
func (s *LlamaServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}
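
// Detokenize converts token IDs back into text using the runner's /detokenize
// endpoint.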
func (s *LlamaServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady {
		return "", fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
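
// Close stops the llama.cpp server by killing its subprocess, if one is still
// running.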
func (s *LlamaServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		return s.cmd.Process.Kill()
	}

	return nil
}
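
// parseDurationMs converts a millisecond count reported by the llama.cpp
// timings into a time.Duration.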
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}