server.go

package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/server/envconfig"
)

type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM  uint64 // Estimated usage of VRAM by the loaded model
	estimatedTotal uint64 // Total size of model
	totalLayers    uint64
	gpuCount       int
	loadDuration   time.Duration // Record how long it took the model to load

	sem *semaphore.Weighted
}

func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}

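// A minimal sketch of the typical load-and-serve flow, for orientation only;
// gpus, opts, ctx, and the model path below are illustrative and not defined
// in this file:
//
//	ggml, err := LoadModel("/path/to/model.gguf")
//	if err != nil {
//		return err
//	}
//	srv, err := NewLlamaServer(gpus, "/path/to/model.gguf", ggml, nil, nil, opts)
//	if err != nil {
//		return err
//	}
//	defer srv.Close()
//	if err := srv.WaitUntilRunning(ctx); err != nil {
//		return err
//	}
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "hello", Options: opts}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})
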
// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimatedVRAM uint64
	var estimatedTotal uint64
	var systemMemory uint64
	gpuCount := len(gpus)
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
		gpuCount = 0
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		var layers int
		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		} else if gpus[0].Library != "metal" && layers == 0 {
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpuCount = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}
	// Loop through potential servers
	finalErr := fmt.Errorf("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}
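
	// Translate the request options into llama.cpp server command-line flags;
	// every candidate runner below is launched with the same parameter list
	// (plus a per-attempt --port).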
	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))
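
	// Try each candidate runner in order; on a start failure the error is
	// recorded in finalErr and the next candidate is attempted. The first
	// server process that starts successfully is returned.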
	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
			gpuCount = 0
		}
		// Find an available port; retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed ", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}
		s := &llmServer{
			port:           port,
			cmd:            exec.Command(server, finalParams...),
			status:         NewStatusWriter(os.Stderr),
			options:        opts,
			estimatedVRAM:  estimatedVRAM,
			estimatedTotal: estimatedTotal,
			sem:            semaphore.NewWeighted(int64(numParallel)),
			totalLayers:    ggml.KV().BlockCount() + 1,
			gpuCount:       gpuCount,
			done:           make(chan error, 1),
		}

		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		if v := strings.Join(libraryPaths, string(filepath.ListSeparator)); v != "" {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+v)
		}
		if k, v := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv(); k != "" {
			s.cmd.Env = append(s.cmd.Env, k+"="+v)
		}
		for _, ev := range os.Environ() {
			if strings.HasPrefix(ev, "CUDA_") ||
				strings.HasPrefix(ev, "ROCM_") ||
				strings.HasPrefix(ev, "HIP_") ||
				strings.HasPrefix(ev, "HSA_") ||
				strings.HasPrefix(ev, "GGML_") {
				s.cmd.Env = append(s.cmd.Env, ev)
			}
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		// Log at debug as the environment is inherited and might contain sensitive information
		slog.Debug("subprocess", "environment", s.cmd.Env)

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}

func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}

type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if it's exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}

func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(expiresAt) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}

		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}

const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

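// jsonGrammar above is sent to the runner as the "grammar" field of the
// completion request when CompletionRequest.Format is "json" (see Completion
// below), constraining sampling to valid JSON output.
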
const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}

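// A minimal sketch of consuming the streaming Completion callback, assuming a
// ready server s, a context ctx, and request options opts (all illustrative):
//
//	var sb strings.Builder
//	err := s.Completion(ctx, CompletionRequest{Prompt: "Why is the sky blue?", Options: opts}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//		if r.Done {
//			slog.Debug("completion finished", "eval_count", r.EvalCount, "eval_duration", r.EvalDuration)
//		}
//	})
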
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}
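
	// The /embedding endpoint accepts the same {"content": ...} payload shape
	// as /tokenize, which is why TokenizeRequest is reused to build the request
	// body here.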
	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal detokenize response: %w", err)
	}

	return decoded.Content, nil
}

func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}