package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
	"github.com/ollama/ollama/server/envconfig"
)
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64
	EstimatedTotal() uint64
}
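// A typical lifecycle, sketched for illustration only (the real call sites
// live in the scheduler, not in this file):
//
//	srv, err := NewLlamaServer(gpus, modelPath, ggml, nil, nil, opts)
//	if err != nil { /* handle */ }
//	if err := srv.WaitUntilRunning(ctx); err != nil { /* handle */ }
//	err = srv.Completion(ctx, CompletionRequest{Prompt: "hello", Options: opts}, func(r CompletionResponse) {
//		fmt.Print(r.Content)
//	})
//	_ = srv.Close()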
// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port    int
	cmd     *exec.Cmd
	done    chan error // Channel to signal when the process exits
	status  *StatusWriter
	options api.Options

	// TODO - this should be broken down by GPU
	estimatedVRAM  uint64 // Estimated usage of VRAM by the loaded model
	estimatedTotal uint64 // Total size of model
	totalLayers    uint64
	gpuCount       int
	loadDuration   time.Duration // Record how long it took the model to load

	sem *semaphore.Weighted
}
func LoadModel(model string) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f)
	return ggml, err
}
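// Illustrative only (hypothetical path): the decoded GGML metadata is what the
// layer estimator consumes before a runner is launched.
//
//	ggml, err := LoadModel("/models/example.gguf")
//	if err != nil { /* handle */ }
//	layers, vram, total := EstimateGPULayers(gpus, ggml, nil, opts)
//	_ = layers; _ = vram; _ = total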
// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimatedVRAM uint64
	var estimatedTotal uint64
	var systemMemory uint64
	gpuCount := len(gpus)
	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
		cpuRunner = serverForCpu()
		gpuCount = 0
		_, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		if gpus[0].Library == "metal" {
			memInfo, err := gpu.GetCPUMem()
			if err != nil {
				slog.Error("failed to lookup system memory", "error", err)
			} else {
				systemMemory = memInfo.TotalMemory
				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
			}
		}
		var layers int
		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		} else if gpus[0].Library != "metal" && layers == 0 {
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpuCount = 0
		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
			opts.NumGPU = layers
		}
	}

	// Loop through potential servers
	finalErr := fmt.Errorf("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if !opts.UseMMap {
		params = append(params, "--no-mmap")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))
	for i := 0; i < len(servers); i++ {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
			gpuCount = 0
		}

		// Find an available port, retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}

		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH
		libraryPaths := []string{dir}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server = server + ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}
		s := &llmServer{
			port:           port,
			cmd:            exec.Command(server, finalParams...),
			status:         NewStatusWriter(os.Stderr),
			options:        opts,
			estimatedVRAM:  estimatedVRAM,
			estimatedTotal: estimatedTotal,
			sem:            semaphore.NewWeighted(int64(numParallel)),
			totalLayers:    ggml.KV().BlockCount() + 1,
			gpuCount:       gpuCount,
			done:           make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message with a hint about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string `json:"status"`
	SlotsIdle       int    `json:"slots_idle"`
	SlotsProcessing int    `json:"slots_processing"`
	Error           string `json:"error"`
}
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if the process has already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, fmt.Errorf("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health response: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}
// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	for {
		select {
		case <-ctx.Done():
			slog.Info("context expired before server started")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(expiresAt) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}

		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
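// When a CompletionRequest arrives with Format set to "json", jsonGrammar is
// attached to the llama.cpp request as the "grammar" field (see Completion
// below), constraining sampling so the generated text parses as JSON.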
const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
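// Illustrative only: a caller receives streamed chunks through the fn callback
// and can accumulate them itself, e.g.
//
//	var sb strings.Builder
//	err := srv.Completion(ctx, CompletionRequest{Prompt: prompt, Options: opts}, func(r CompletionResponse) {
//		sb.WriteString(r.Content)
//		if r.Done {
//			slog.Debug("eval stats", "eval_count", r.EvalCount, "eval_duration", r.EvalDuration)
//		}
//	})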
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handle JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}
type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}

func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm embedding error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal embedding response: %w", err)
	}

	return embedding.Embedding, nil
}
type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}
type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal decode response: %w", err)
	}

	return decoded.Content, nil
}
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}

		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimatedVRAM
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimatedTotal
}

func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}