server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/server/envconfig"
)

// LlamaServer is a client for a running llama.cpp-based runner subprocess.
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
}
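
// A caller would typically drive a LlamaServer through the lifecycle below.
// This is an illustrative sketch, not part of the original file; gpus,
// modelPath, ggml, and opts are placeholders for values the caller already has.
//
//	srv, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts)
//	if err != nil {
//		return err
//	}
//	defer srv.Close()
//	if err := srv.WaitUntilRunning(ctx); err != nil {
//		return err
//	}
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?"}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})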

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM uint64 // Estimated usage of VRAM by the loaded model

	sem *semaphore.Weighted
}

// LoadModel reads a model file from disk and decodes its GGML metadata.
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}
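
// Illustrative sketch (not in the original file): LoadModel lets callers
// inspect model metadata before deciding how to load it; the path below is a
// placeholder.
//
//	ggml, err := LoadModel("/path/to/model.gguf")
//	if err != nil {
//		return err
//	}
//	slog.Debug("model metadata", "training ctx", ggml.KV().ContextLength())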

// NewLlamaServer will run a server for the given GPUs.
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	if opts.NumCtx > int(ggml.KV().ContextLength()) {
		slog.Warn("requested context length is greater than the model's training context window size", "requested", opts.NumCtx, "training size", ggml.KV().ContextLength())
	}

	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}

	cpuRunner := ""
	var estimatedVRAM uint64
	var systemMemory uint64
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}

		var layers int
		layers, estimatedVRAM = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when the model is greater than total system memory
			// as this can lead to locking up the system
			opts.NumGPU = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	finalErr := errors.New("no suitable llama servers found")

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}

	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	if envconfig.Debug {
		params = append(params, "--log-format", "json")
	} else {
		params = append(params, "--log-disable")
	}

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	// Loop through potential servers
	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		// Find an available port, retrying on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// Prepend the server directory to LD_LIBRARY_PATH/PATH so our bundled
		// library dependencies are found before system libraries
		libraryPaths := []string{dir}
		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// The GPU dependency path always goes first, since this is the exact
		// version we verified for AMD GPUs
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:          port,
			cmd:           exec.Command(server, finalParams...),
			status:        NewStatusWriter(os.Stderr),
			options:       opts,
			estimatedVRAM: estimatedVRAM,
			sem:           semaphore.NewWeighted(int64(numParallel)),
			done:          make(chan error, 1),
		}
		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variables with our adjusted versions
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		// Log at debug as the environment is inherited and might contain sensitive information
		slog.Debug("subprocess", "environment", s.cmd.Env)

		if err = s.cmd.Start(); err != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// Reap the subprocess when it exits so ProcessState and the done
		// channel (both consulted by getServerStatus and WaitUntilRunning)
		// are populated.
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

// projectorMemoryRequirements returns the total size of a projector's
// tensors, or 0 if the file cannot be read or decoded.
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const (
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("unmarshal health response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

// Ping reports an error if the server is not healthy.
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

// WaitUntilRunning polls the runner until it reports ready, the context is
// canceled, or the subprocess exits.
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	// TODO we need to wire up a better way to detect hangs during model load and startup of the server
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load

	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		case <-ticker.C:
			if time.Now().After(expiresAt) {
				// timeout
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
			}
			if s.cmd.ProcessState != nil {
				msg := ""
				if s.status != nil && s.status.LastErrMsg != "" {
					msg = s.status.LastErrMsg
				}
				return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
			}

			c, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
			status, err := s.getServerStatus(c)
			cancel() // cancel immediately rather than defer, which would pile up in this loop
			if err != nil && lastStatus != status {
				slog.Debug("server not yet available", "error", err)
				lastStatus = status
				continue
			}

			switch status {
			case ServerStatusLoadingModel:
				// TODO - this state never seems to happen with the current server.cpp code (bug?)
				// it doesn't respond to the health endpoint until after the model is loaded
				slog.Debug("loading model")
			case ServerStatusReady:
				slog.Debug(fmt.Sprintf("llama runner started in %f seconds", time.Since(start).Seconds()))
				return nil
			}
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
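
// For reference: because root ::= object, the grammar above constrains
// sampling to complete JSON objects. It accepts, for example,
//
//	{"name": "llama", "layers": [1, 2, 3], "quantized": true}
//
// but rejects bare scalars such as 42 or "text" at the top level.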

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
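
// Illustrative sketch (not in the original file): fn receives one
// CompletionResponse per generated chunk, then a final response with Done set
// and the timing counters populated. Accumulating the full output might look
// like:
//
//	var sb strings.Builder
//	err := srv.Completion(ctx, CompletionRequest{Prompt: prompt}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//		if r.Done {
//			slog.Debug("eval stats", "tokens", r.EvalCount, "duration", r.EvalDuration)
//		}
//	})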

func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("prompt does not specify that the LLM should respond in JSON, but JSON format is expected; for best results specify that JSON is expected in the system prompt")
		}
	}

	// Handle JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				fn(CompletionResponse{
					Done:               true,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model: %s", msg)
		}
		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}
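
// Illustrative sketch (not in the original file): the raw float64 vector
// leaves scoring to the caller; cosine similarity between two prompts,
// assuming equal-length embeddings from the same model, could be computed as:
//
//	a, _ := srv.Embedding(ctx, "the quick brown fox")
//	b, _ := srv.Embedding(ctx, "a fast auburn fox")
//	var dot, na, nb float64
//	for i := range a {
//		dot += a[i] * b[i]
//		na += a[i] * a[i]
//		nb += b[i] * b[i]
//	}
//	score := dot / (math.Sqrt(na) * math.Sqrt(nb))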

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
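
// Illustrative sketch (not in the original file): Tokenize and Detokenize are
// approximate inverses, so a round trip is a cheap sanity check:
//
//	tokens, err := srv.Tokenize(ctx, "hello world")
//	if err != nil {
//		return err
//	}
//	text, err := srv.Detokenize(ctx, tokens)
//	// text should round-trip to (approximately) "hello world"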

// Close kills the runner subprocess and waits for it to exit.
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		_ = s.cmd.Wait()
		slog.Debug("llama server stopped")
	}

	return nil
}

// EstimatedVRAM reports the estimated VRAM usage of the loaded model.
func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}
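
// Note: modulo sub-nanosecond rounding, the Sprintf/ParseDuration round trip
// above is equivalent to direct arithmetic on the float:
//
//	func parseDurationMs(ms float64) time.Duration {
//		return time.Duration(ms * float64(time.Millisecond))
//	}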