server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
}

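// A minimal usage sketch from a consuming package (illustrative only; the
// gpus, modelPath, ggml, opts, and ctx values are assumed to come from the
// caller, and error handling is abbreviated):
//
//	server, err := llm.NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts)
//	if err != nil {
//		return err
//	}
//	defer server.Close()
//	if err := server.WaitUntilRunning(ctx); err != nil {
//		return err
//	}
//	err = server.Completion(ctx, llm.CompletionRequest{Prompt: "Hello", Options: opts},
//		func(r llm.CompletionResponse) { fmt.Print(r.Content) })
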
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM uint64 // Estimated usage of VRAM by the loaded model

	sem *semaphore.Weighted
}

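// LoadModel verifies that the model file exists and decodes it with
// DecodeGGML, returning the parsed GGML description used elsewhere for
// context-length checks and GPU layer estimation.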
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	if opts.NumCtx > int(ggml.KV().ContextLength()) {
		slog.Warn("requested context length is greater than the model's training context window size", "requested", opts.NumCtx, "training size", ggml.KV().ContextLength())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

	cpuRunner := ""
	var estimatedVRAM uint64
	var systemMemory uint64
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}

		var layers int
		layers, estimatedVRAM = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}

	// Loop through potential servers
	finalErr := fmt.Errorf("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := strings.Trim(os.Getenv("OLLAMA_LLM_LIBRARY"), "\"' ")
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}
	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--log-format", "json")
	} else {
		params = append(params, "--log-disable")
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	// "--cont-batching", // TODO - doesn't seem to have any noticeable perf change for multiple requests

	numParallel := 1
	if onp := os.Getenv("OLLAMA_NUM_PARALLEL"); onp != "" {
		numParallel, err = strconv.Atoi(onp)
		if err != nil || numParallel <= 0 {
			err = fmt.Errorf("invalid OLLAMA_NUM_PARALLEL=%s must be greater than zero - %w", onp, err)
			slog.Error("misconfiguration", "error", err)
			return nil, err
		}
	}
	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))
		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// append the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:          port,
			cmd:           exec.Command(server, finalParams...),
			status:        NewStatusWriter(os.Stderr),
			options:       opts,
			estimatedVRAM: estimatedVRAM,
			sem:           semaphore.NewWeighted(int64(numParallel)),
		}

		libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator)))
		s.cmd.Env = append(os.Environ(), libEnv)
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		// TODO - multiple GPU selection logic...
		key, val := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
		if key != "" {
			s.cmd.Env = append(s.cmd.Env, key+"="+val)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		// Log at debug as the environment is inherited and might contain sensitive information
		slog.Debug("subprocess", "environment", s.cmd.Env)

		if err = s.cmd.Start(); err != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// TODO - make sure this is all wired up correctly
		// if err = s.WaitUntilRunning(); err != nil {
		// 	slog.Error("error starting llama server", "server", servers[i], "error", err)
		// 	s.Close()
		// 	finalErr = err
		// 	continue
		// }

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

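// projectorMemoryRequirements estimates the memory needed for a multimodal
// projector (--mmproj) by summing the sizes of its tensor layers; on any open
// or decoding error it returns 0 rather than failing the load.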
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvaialble
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvaialble:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}

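// getServerStatus polls the runner's /health endpoint and maps the JSON
// "status" field onto a ServerStatus value, failing fast if the subprocess
// has already exited.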
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvaialble, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	// TODO we need to wire up a better way to detect hangs during model load and startup of the server
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1

	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
			}
			if s.cmd.ProcessState != nil {
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
			}

			c, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
			status, err := s.getServerStatus(c)
			cancel() // release the per-probe context now; a defer here would accumulate until the function returns
			if err != nil && lastStatus != status {
				slog.Debug("server not yet available", "error", err)
				lastStatus = status
				continue
			}

			switch status {
			case ServerStatusLoadingModel:
				// TODO - this state never seems to happen with the current server.cpp code (bug?)
				// it doesn't respond to the health endpoint until after the model is loaded
				slog.Debug("loading model")
			case ServerStatusReady:
				slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
				return nil
			}
		}
	}
}

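// jsonGrammar is a llama.cpp-style (GBNF) grammar describing JSON output.
// When a caller requests Format "json", Completion attaches it to the request
// as the "grammar" field so the runner can constrain sampling to valid JSON.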
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

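// Completion streams a prediction from the runner, invoking fn for each chunk
// of generated content and once more with Done set when generation stops.
// A minimal caller sketch (illustrative only; the server variable, prompt,
// and options are placeholders):
//
//	var sb strings.Builder
//	err := server.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?", Options: opts},
//		func(r CompletionResponse) {
//			sb.WriteString(r.Content)
//			if r.Done {
//				slog.Debug("eval stats", "tokens", r.EvalCount, "duration", r.EvalDuration)
//			}
//		})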
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results, specify that JSON is expected in the system prompt.")
		}
	}

	retryDelay := 100 * time.Microsecond
	for retries := 0; retries < maxRetries; retries++ {
		if retries > 0 {
			time.Sleep(retryDelay) // wait before retrying
			retryDelay *= 2        // exponential backoff
		}

		// Handling JSON marshaling with special characters unescaped.
		buffer := &bytes.Buffer{}
		enc := json.NewEncoder(buffer)
		enc.SetEscapeHTML(false)

		if err := enc.Encode(request); err != nil {
			return fmt.Errorf("failed to marshal data: %v", err)
		}

		endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
		if err != nil {
			return fmt.Errorf("error creating POST request: %v", err)
		}
		req.Header.Set("Content-Type", "application/json")

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return fmt.Errorf("POST predict: %v", err)
		}
		defer resp.Body.Close()

		if resp.StatusCode >= 400 {
			bodyBytes, err := io.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("failed reading llm error response: %w", err)
			}
			log.Printf("llm predict error: %s", bodyBytes)
			return fmt.Errorf("%s", bodyBytes)
		}

		scanner := bufio.NewScanner(resp.Body)
		buf := make([]byte, 0, maxBufferSize)
		scanner.Buffer(buf, maxBufferSize)

		retryNeeded := false
		// keep track of the last token generated, this is used to abort if the model starts looping
		var lastToken string
		var tokenRepeat int

		for scanner.Scan() {
			select {
			case <-ctx.Done():
				// This handles the request cancellation
				return ctx.Err()
			default:
				line := scanner.Bytes()
				if len(line) == 0 {
					continue
				}

				// try again on slot unavailable
				if bytes.Contains(line, []byte("slot unavailable")) {
					retryNeeded = true
					break
				}

				evt, ok := bytes.CutPrefix(line, []byte("data: "))
				if !ok {
					return fmt.Errorf("error parsing llm response stream: %s", line)
				}

				var c completion
				if err := json.Unmarshal(evt, &c); err != nil {
					return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
				}

				switch {
				case strings.TrimSpace(c.Content) == lastToken:
					tokenRepeat++
				default:
					lastToken = strings.TrimSpace(c.Content)
					tokenRepeat = 0
				}

				// 30 picked as an arbitrary max token repeat limit, modify as needed
				if tokenRepeat > 30 {
					slog.Debug("prediction aborted, token repeat limit reached")
					return ctx.Err()
				}

				if c.Content != "" {
					fn(CompletionResponse{
						Content: c.Content,
					})
				}

				if c.Stop {
					fn(CompletionResponse{
						Done:               true,
						PromptEvalCount:    c.Timings.PromptN,
						PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
						EvalCount:          c.Timings.PredictedN,
						EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
					})
					return nil
				}
			}
		}

		if err := scanner.Err(); err != nil {
			if strings.Contains(err.Error(), "unexpected EOF") {
				s.Close()
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
			}
			return fmt.Errorf("error reading llm response: %v", err)
		}

		if !retryNeeded {
			return nil // success
		}
	}

	// should never reach here ideally
	return fmt.Errorf("max retries exceeded")
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

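// Tokenize asks the runner to encode content into token ids via its /tokenize
// endpoint. A round trip with Detokenize (illustrative only; the server
// variable is a placeholder) looks like:
//
//	tokens, err := server.Tokenize(ctx, "hello world")
//	if err == nil {
//		text, _ := server.Detokenize(ctx, tokens)
//		_ = text // approximately reconstructs the original content
//	}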
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvaialble {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvaialble {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		_ = s.cmd.Wait()

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

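// parseDurationMs converts the runner's millisecond timing values into a
// time.Duration, e.g. parseDurationMs(1500) yields 1500 * time.Millisecond.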
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}