server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)
// LlamaServer is an instance of the llama.cpp server
type LlamaServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options
}
func NewLlamaServer(model string, adapters, projectors []string, opts api.Options) (*LlamaServer, error) {
	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	if err != nil {
		return nil, err
	}

	if opts.NumCtx > int(ggml.KV().ContextLength()) {
		slog.Warn("requested context length is greater than model max context length", "requested", opts.NumCtx, "model", ggml.KV().ContextLength())
		opts.NumCtx = int(ggml.KV().ContextLength())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

	memoryAvailable, _ := gpu.CheckVRAM()
	info := gpu.GetGPUInfo()

	memoryMinimum := info.MinimumMemory
	for _, projector := range projectors {
		memoryMinimum += projectorMemoryRequirements(projector)

		// multimodal models require at least 2048 context
		opts.NumCtx = max(opts.NumCtx, 2048)
	}
	// fp16 k,v cache = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * (n_embd / n_head) * n_head_kv
	var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV()
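	// For scale (hypothetical 7B-class values, not read from any model):
	// n_ctx=2048, n_layer=32, n_embd=4096, n_head=n_head_kv=32 gives
	// 2 * 2 * 2048 * 32 * (4096/32) * 32 bytes = 1 GiB of fp16 KV cache.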
	graphPartialOffload, graphFullOffload := ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
	if graphPartialOffload == 0 {
		graphPartialOffload = ggml.KV().GQA() * kv / 6
	}

	if graphFullOffload == 0 {
		graphFullOffload = graphPartialOffload
	}

	// memoryRequiredTotal represents the memory required for full GPU offloading (all layers)
	memoryRequiredTotal := memoryMinimum + graphFullOffload

	// memoryRequiredPartial represents the memory required for partial GPU offloading (n > 0, n < layers)
	memoryRequiredPartial := memoryMinimum + graphPartialOffload

	if info.Library != "metal" {
		if memoryRequiredPartial > memoryAvailable {
			info.Library = "cpu"
		}
	}

	var layerCount int
	layers := ggml.Tensors().Layers()
	for i := 0; i < int(ggml.KV().BlockCount()); i++ {
		memoryLayer := layers[fmt.Sprintf("%d", i)].size()

		// KV is proportional to the number of layers
		memoryLayer += kv / ggml.KV().BlockCount()

		memoryRequiredTotal += memoryLayer
		if memoryAvailable > memoryRequiredPartial+memoryLayer {
			memoryRequiredPartial += memoryLayer
			layerCount++
		}
	}

	memoryLayerOutput := layers["output"].size()
	memoryRequiredTotal += memoryLayerOutput
	if info.Library == "metal" && memoryRequiredTotal > info.TotalMemory {
		// disable partial offloading when model is greater than total system memory
		opts.NumGPU = 0
	} else if memoryAvailable > memoryRequiredTotal {
		layerCount = int(ggml.KV().BlockCount()) + 1
		memoryRequiredPartial = memoryRequiredTotal
	}

	if opts.NumGPU < 0 {
		opts.NumGPU = layerCount
	}

	slog.Info(
		"offload to gpu",
		"reallayers", opts.NumGPU,
		"layers", layerCount,
		"required", format.HumanBytes2(memoryRequiredTotal),
		"used", format.HumanBytes2(memoryRequiredPartial),
		"available", format.HumanBytes2(memoryAvailable),
		"kv", format.HumanBytes2(kv),
		"fulloffload", format.HumanBytes2(graphFullOffload),
		"partialoffload", format.HumanBytes2(graphPartialOffload),
	)
	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	servers := serversForGpu(info)

	demandLib := os.Getenv("OLLAMA_LLM_LIBRARY")
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", info)
	}
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--log-format", "json")
	} else {
		params = append(params, "--log-disable")
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}
	// Loop through potential servers
	var finalErr error
	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("failed to acquire a port from the kernel, picking a random ephemeral port")
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}

		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// append the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		s := &LlamaServer{
			port:    port,
			cmd:     exec.Command(server, finalParams...),
			status:  NewStatusWriter(os.Stderr),
			options: opts,
			done:    make(chan error, 1),
		}
		libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator)))
		slog.Debug(libEnv)
		s.cmd.Env = append(os.Environ(), libEnv)
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		slog.Info("starting llama server", "cmd", s.cmd.String())

		if err = s.cmd.Start(); err != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap the subprocess when it exits and surface its exit status on done,
		// so WaitUntilRunning can observe process termination
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}
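// Illustrative lifecycle of the API above (a sketch, not code from this file;
// modelPath, opts, and ctx are assumed to be in scope):
//
//	s, err := NewLlamaServer(modelPath, nil, nil, opts)
//	if err == nil {
//		defer s.Close()
//		err = s.WaitUntilRunning()
//	}
//	if err == nil {
//		err = s.Completion(ctx, CompletionRequest{Prompt: "hi"}, func(r CompletionResponse) {
//			fmt.Print(r.Content)
//		})
//	}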
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)
type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}
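// Illustrative /health payload this struct decodes (field values are examples):
//
//	{"status": "ok", "slots_idle": 1, "slots_processing": 0}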
func (s *LlamaServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if the process has already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}
func (s *LlamaServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}
func (s *LlamaServer) WaitUntilRunning() error {
	start := time.Now()
	// TODO we need to wire up a better way to detect hangs during model load and startup of the server
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
			}
			if s.cmd.ProcessState != nil {
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
			}

			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
			status, err := s.getServerStatus(ctx)
			cancel() // release the timeout's resources now; deferring in this loop would leak them until return
			if err != nil && lastStatus != status {
				slog.Debug("server not yet available", "error", err)
				lastStatus = status
				continue
			}

			switch status {
			case ServerStatusLoadingModel:
				// TODO - this state never seems to happen with the current server.cpp code (bug?)
				// it doesn't respond to the health endpoint until after the model is loaded
				slog.Debug("loading model")
			case ServerStatusReady:
				slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
				return nil
			}
		}
	}
}
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
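// With root ::= object, sampling is constrained to a single JSON object:
// for illustration, {} and {"name": "value"} are derivable under this grammar,
// while a bare array like [1, 2] or a bare string is not.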
const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}
type completion struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
func (s *LlamaServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}
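	// The keys above are the llama.cpp server's native /completion parameter
	// names; api.Options fields are flattened into them before the request is sent.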
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %d", status)
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("prompt does not mention JSON, but JSON output format was requested; for best results, state that JSON is expected in the system prompt")
		}
	}
	retryDelay := 100 * time.Millisecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}

		// Handling JSON marshaling with special characters unescaped.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %v", err)
		}

		endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
		if err != nil {
			return fmt.Errorf("error creating POST request: %v", err)
		}
		req.Header.Set("Content-Type", "application/json")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return fmt.Errorf("POST predict: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 400 {
			bodyBytes, err := io.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("failed reading llm error response: %w", err)
			}
			log.Printf("llm predict error: %s", bodyBytes)
			return fmt.Errorf("%s", bodyBytes)
		}

		scanner := bufio.NewScanner(resp.Body)
		buf := make([]byte, 0, maxBufferSize)
		scanner.Buffer(buf, maxBufferSize)

		retryNeeded := false
		// keep track of the last token generated, this is used to abort if the model starts looping
		var lastToken string
		var tokenRepeat int

	scan:
		for scanner.Scan() {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				return ctx.Err()
			default:
				line := scanner.Bytes()
				if len(line) == 0 {
					continue
				}

				// try again on slot unavailable
				if bytes.Contains(line, []byte("slot unavailable")) {
					retryNeeded = true
					// a plain break would only exit the select, so break the scan loop by label
					break scan
				}

				evt, ok := bytes.CutPrefix(line, []byte("data: "))
				if !ok {
					return fmt.Errorf("error parsing llm response stream: %s", line)
				}

				var c completion
				if err := json.Unmarshal(evt, &c); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				switch {
				case strings.TrimSpace(c.Content) == lastToken:
					tokenRepeat++
				default:
					lastToken = strings.TrimSpace(c.Content)
					tokenRepeat = 0
				}

				// 30 picked as an arbitrary max token repeat limit, modify as needed
				if tokenRepeat > 30 {
					slog.Debug("prediction aborted, token repeat limit reached")
					return ctx.Err()
				}

				if c.Content != "" {
					fn(CompletionResponse{
						Content: c.Content,
					})
				}

				if c.Stop {
					fn(CompletionResponse{
						Done:               true,
						PromptEvalCount:    c.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
						EvalCount:          c.Timings.PredictedN,
						EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
					})
					return nil
				}
			}
		}

		if err := scanner.Err(); err != nil {
			if strings.Contains(err.Error(), "unexpected EOF") {
				s.Close()
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
			}
			return fmt.Errorf("error reading llm response: %v", err)
		}

		if !retryNeeded {
			return nil // success
		}
	}

	// should never reach here ideally
	return fmt.Errorf("max retries exceeded")
}
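// The /completion stream consumed above is SSE-like: each non-empty line has
// the form
//
//	data: {"content":"Hello","stop":false,...}
//
// (shape per the completion struct above; the elided fields are illustrative).
// The final event sets stop=true and carries the timing counters.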
type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *LlamaServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}
type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *LlamaServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *LlamaServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady {
		return "", fmt.Errorf("unexpected server status: %d", status)
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
func (s *LlamaServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		return s.cmd.Process.Kill()
	}
	return nil
}
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}
	return dur
}
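// For example, parseDurationMs(1500) yields 1.5s. A simpler equivalent, as a
// sketch: time.Duration(ms * float64(time.Millisecond)), which avoids the
// string round-trip; float rounding may differ marginally.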