package llm

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"math/rand"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"golang.org/x/sync/semaphore"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/envconfig"
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/gpu"
)
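
// LlamaServer is the interface the rest of the package uses to drive a running
// llama.cpp runner subprocess: health checks, completions, embeddings,
// tokenization, and memory estimates.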
type LlamaServer interface {
	Ping(ctx context.Context) error
	WaitUntilRunning(ctx context.Context) error
	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
	Embedding(ctx context.Context, prompt string) ([]float64, error)
	Tokenize(ctx context.Context, content string) ([]int, error)
	Detokenize(ctx context.Context, tokens []int) (string, error)
	Close() error
	EstimatedVRAM() uint64 // Total VRAM across all GPUs
	EstimatedTotal() uint64
	EstimatedVRAMByGPU(gpuID string) uint64
}

// llmServer is an instance of the llama.cpp server
type llmServer struct {
	port        int
	cmd         *exec.Cmd
	done        chan error // Channel to signal when the process exits
	status      *StatusWriter
	options     api.Options
	estimate    MemoryEstimate
	totalLayers uint64
	// gpuCount int
	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
	loadDuration time.Duration   // Record how long it took the model to load
	loadProgress float32
	sem          *semaphore.Weighted
}

// LoadModel will load a model from disk. The model must be in the GGML format.
//
// It collects array values for arrays with a size less than or equal to
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
// the maxArraySize is negative, all arrays are collected.
func LoadModel(model string, maxArraySize int) (*GGML, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, _, err := DecodeGGML(f, maxArraySize)
	return ggml, err
}

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
	var err error
	var cpuRunner string
	var estimate MemoryEstimate
	var systemTotalMemory uint64
	var systemFreeMemory uint64

	systemMemInfo, err := gpu.GetCPUMem()
	if err != nil {
		slog.Error("failed to lookup system memory", "error", err)
	} else {
		systemTotalMemory = systemMemInfo.TotalMemory
		systemFreeMemory = systemMemInfo.FreeMemory
		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory)
	}

	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
	if opts.NumGPU == 0 {
		gpus = gpu.GetCPUInfo()
	}
	if len(gpus) == 1 && gpus[0].Library == "cpu" {
		cpuRunner = serverForCpu()
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
	} else {
		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)

		switch {
		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
			// disable partial offloading when model is greater than total system memory as this
			// can lead to locking up the system
			opts.NumGPU = 0
		case gpus[0].Library != "metal" && estimate.Layers == 0:
			// Don't bother loading into the GPU if no layers can fit
			cpuRunner = serverForCpu()
			gpus = gpu.GetCPUInfo()
		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
			opts.NumGPU = estimate.Layers
		}
	}

	estimate.log()

	// Loop through potential servers
	finalErr := errors.New("no suitable llama servers found")

	if len(adapters) > 1 {
		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
	}

	availableServers := availableServers()
	var servers []string
	if cpuRunner != "" {
		servers = []string{cpuRunner}
	} else {
		servers = serversForGpu(gpus[0]) // All GPUs in the list are matching Library and Variant
	}
	demandLib := envconfig.LLMLibrary
	if demandLib != "" {
		serverPath := availableServers[demandLib]
		if serverPath == "" {
			slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib))
		} else {
			slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath)
			servers = []string{demandLib}
			if strings.HasPrefix(demandLib, "cpu") {
				// Omit the GPU flag to silence the warning
				opts.NumGPU = -1
			}
		}
	}

	if len(servers) == 0 {
		return nil, fmt.Errorf("no servers found for %v", gpus)
	}

	params := []string{
		"--model", model,
		"--ctx-size", fmt.Sprintf("%d", opts.NumCtx),
		"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
		"--embedding",
	}

	params = append(params, "--log-disable")
	params = append(params, "--timeout", fmt.Sprintf("%d", 600))

	if opts.NumGPU >= 0 {
		params = append(params, "--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU))
	}

	if envconfig.Debug {
		params = append(params, "--verbose")
	}

	if opts.MainGPU > 0 {
		params = append(params, "--main-gpu", fmt.Sprintf("%d", opts.MainGPU))
	}

	if len(adapters) > 0 {
		// TODO: applying multiple adapters is not supported by the llama.cpp server yet
		params = append(params, "--lora", adapters[0])
	}

	if len(projectors) > 0 {
		// TODO: applying multiple projectors is not supported by the llama.cpp server yet
		params = append(params, "--mmproj", projectors[0])
	}

	if opts.NumThread > 0 {
		params = append(params, "--threads", fmt.Sprintf("%d", opts.NumThread))
	}

	if !opts.F16KV {
		params = append(params, "--memory-f32")
	}

	flashAttnEnabled := envconfig.FlashAttention

	for _, g := range gpus {
		// only cuda (compute capability 7+) and metal support flash attention
		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
			flashAttnEnabled = false
		}

		// mmap has issues with partial offloading on metal
		if g.Library == "metal" &&
			uint64(opts.NumGPU) > 0 &&
			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
			opts.UseMMap = api.TriStateFalse
		}
	}

	if flashAttnEnabled {
		params = append(params, "--flash-attn")
	}

	// Windows CUDA should not use mmap for best performance
	// Linux with a model larger than free space, mmap leads to thrashing
	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
		opts.UseMMap == api.TriStateFalse {
		params = append(params, "--no-mmap")
	}

	if opts.UseMLock {
		params = append(params, "--mlock")
	}

	if opts.UseNUMA {
		params = append(params, "--numa")
	}

	numParallel := envconfig.NumParallel

	// TODO (jmorganca): multimodal models don't support parallel yet
	// see https://github.com/ollama/ollama/issues/4165
	if len(projectors) > 0 {
		numParallel = 1
		slog.Warn("multimodal models don't support parallel requests yet")
	}

	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

	if estimate.TensorSplit != "" {
		params = append(params, "--tensor-split", estimate.TensorSplit)
	}

	for i := range len(servers) {
		dir := availableServers[servers[i]]
		if dir == "" {
			// Shouldn't happen
			finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers)
			slog.Error("server list inconsistent", "error", finalErr)
			continue
		}

		if strings.HasPrefix(servers[i], "cpu") {
			gpus = gpu.GetCPUInfo()
		}

		// Find an available port, retry on each iteration in case the failure was a port conflict race
		port := 0
		if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
			var l *net.TCPListener
			if l, err = net.ListenTCP("tcp", a); err == nil {
				port = l.Addr().(*net.TCPAddr).Port
				l.Close()
			}
		}
		if port == 0 {
			slog.Debug("ResolveTCPAddr failed", "error", err)
			port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range
		}
		finalParams := append(params, "--port", strconv.Itoa(port))

		pathEnv := "LD_LIBRARY_PATH"
		if runtime.GOOS == "windows" {
			pathEnv = "PATH"
		}
		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
		libraryPaths := []string{dir, filepath.Dir(dir)}

		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
			// Append our runner directory to the path
			// This will favor system libraries over our bundled library dependencies
			libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...)
		}

		// Note: we always put the dependency path first
		// since this was the exact version we verified for AMD GPUs
		// and we favor what the user had in their path
		if gpus[0].DependencyPath != "" {
			// TODO refine for multi-gpu support
			libraryPaths = append([]string{gpus[0].DependencyPath}, libraryPaths...)
		}

		server := filepath.Join(dir, "ollama_llama_server")
		if runtime.GOOS == "windows" {
			server += ".exe"
		}

		// Detect tmp cleaners wiping out the file
		_, err := os.Stat(server)
		if errors.Is(err, os.ErrNotExist) {
			slog.Warn("llama server disappeared, reinitializing payloads", "path", server, "error", err)
			err = Init()
			if err != nil {
				slog.Warn("failed to reinitialize payloads", "error", err)
				return nil, err
			}
		}

		s := &llmServer{
			port:        port,
			cmd:         exec.Command(server, finalParams...),
			status:      NewStatusWriter(os.Stderr),
			options:     opts,
			estimate:    estimate,
			sem:         semaphore.NewWeighted(int64(numParallel)),
			totalLayers: ggml.KV().BlockCount() + 1,
			gpus:        gpus,
			done:        make(chan error, 1),
		}

		s.cmd.Env = os.Environ()
		s.cmd.Stdout = os.Stdout
		s.cmd.Stderr = s.status

		envWorkarounds := [][2]string{}
		for _, gpu := range gpus {
			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
		}
		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

		// Update or add the path and visible devices variable with our adjusted version
		pathNeeded := true
		devicesNeeded := visibleDevicesEnv != ""
		for i := range s.cmd.Env {
			cmp := strings.SplitN(s.cmd.Env[i], "=", 2)
			if strings.EqualFold(cmp[0], pathEnv) {
				s.cmd.Env[i] = pathEnv + "=" + pathEnvVal
				pathNeeded = false
			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
				devicesNeeded = false
			} else if len(envWorkarounds) != 0 {
				for _, kv := range envWorkarounds {
					if strings.EqualFold(cmp[0], kv[0]) {
						s.cmd.Env[i] = kv[0] + "=" + kv[1]
					}
				}
			}
		}
		if pathNeeded {
			s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal)
		}
		if devicesNeeded {
			s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal)
		}

		slog.Info("starting llama server", "cmd", s.cmd.String())
		if envconfig.Debug {
			filteredEnv := []string{}
			for _, ev := range s.cmd.Env {
				if strings.HasPrefix(ev, "CUDA_") ||
					strings.HasPrefix(ev, "ROCM_") ||
					strings.HasPrefix(ev, "HIP_") ||
					strings.HasPrefix(ev, "HSA_") ||
					strings.HasPrefix(ev, "GGML_") ||
					strings.HasPrefix(ev, "PATH=") ||
					strings.HasPrefix(ev, "LD_LIBRARY_PATH=") {
					filteredEnv = append(filteredEnv, ev)
				}
			}
			// Log at debug as the environment is inherited and might contain sensitive information
			slog.Debug("subprocess", "environment", filteredEnv)
		}

		if err = s.cmd.Start(); err != nil {
			// Detect permission denied and augment the message about noexec
			if errors.Is(err, os.ErrPermission) {
				finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, dir)
				continue
			}
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			err = fmt.Errorf("error starting the external llama server: %v %s", err, msg)
			finalErr = err
			continue
		}

		// reap subprocess when it exits
		go func() {
			s.done <- s.cmd.Wait()
		}()

		return s, nil
	}

	slog.Error("unable to load any llama server", "error", finalErr)
	return nil, finalErr
}
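
// projectorMemoryRequirements sums the tensor sizes in a multimodal projector
// file to approximate the memory it will need once loaded; it returns 0 if the
// file cannot be read or decoded.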
func projectorMemoryRequirements(filename string) uint64 {
	file, err := os.Open(filename)
	if err != nil {
		return 0
	}
	defer file.Close()

	ggml, _, err := DecodeGGML(file, 0)
	if err != nil {
		return 0
	}

	var mem uint64
	for _, layer := range ggml.Tensors().Layers() {
		mem += layer.size()
	}

	return mem
}
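
// ServerStatus describes the runner states surfaced by the llama.cpp health
// endpoint, plus locally derived not-responding and error states.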
type ServerStatus int

const ( // iota is reset to 0
	ServerStatusReady ServerStatus = iota
	ServerStatusNoSlotsAvailable
	ServerStatusLoadingModel
	ServerStatusNotResponding
	ServerStatusError
)

func (s ServerStatus) ToString() string {
	switch s {
	case ServerStatusReady:
		return "llm server ready"
	case ServerStatusNoSlotsAvailable:
		return "llm busy - no slots available"
	case ServerStatusLoadingModel:
		return "llm server loading model"
	case ServerStatusNotResponding:
		return "llm server not responding"
	default:
		return "llm server error"
	}
}

type ServerStatusResp struct {
	Status          string  `json:"status"`
	SlotsIdle       int     `json:"slots_idle"`
	SlotsProcessing int     `json:"slots_processing"`
	Error           string  `json:"error"`
	Progress        float32 `json:"progress"`
}
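
// getServerStatus queries the runner's /health endpoint and maps the reply
// onto a ServerStatus, failing fast if the subprocess has already exited.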
func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
	// Fail fast if the process has already exited
	if s.cmd.ProcessState != nil {
		msg := ""
		if s.status != nil && s.status.LastErrMsg != "" {
			msg = s.status.LastErrMsg
		}
		if s.cmd.ProcessState.ExitCode() == -1 {
			// Most likely a signal killed it, log some more details to try to help troubleshoot
			slog.Warn("llama runner process no longer running", "sys", s.cmd.ProcessState.Sys(), "string", s.cmd.ProcessState.String())
		}
		return ServerStatusError, fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/health", s.port), nil)
	if err != nil {
		return ServerStatusError, fmt.Errorf("error creating GET request: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return ServerStatusNotResponding, errors.New("server not responding")
		}
		return ServerStatusError, fmt.Errorf("health resp: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ServerStatusError, fmt.Errorf("read health request: %w", err)
	}

	var status ServerStatusResp
	if err := json.Unmarshal(body, &status); err != nil {
		return ServerStatusError, fmt.Errorf("health unmarshal encode response: %w", err)
	}

	switch status.Status {
	case "ok":
		return ServerStatusReady, nil
	case "no slot available":
		return ServerStatusNoSlotsAvailable, nil
	case "loading model":
		s.loadProgress = status.Progress
		return ServerStatusLoadingModel, nil
	default:
		return ServerStatusError, fmt.Errorf("server error: %+v", status)
	}
}

// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received
func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) {
	var retries int
	for {
		status, err := s.getServerStatus(ctx)
		if err != nil {
			return status, err
		}

		if status == ServerStatusNoSlotsAvailable {
			if retries >= 10 {
				return status, fmt.Errorf("no slots available after %d retries", retries)
			}

			time.Sleep(5 * time.Millisecond)
			retries++
			continue
		}

		return status, nil
	}
}
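
// Ping reports whether the runner's health endpoint is currently reachable,
// logging the underlying error at debug level.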
func (s *llmServer) Ping(ctx context.Context) error {
	_, err := s.getServerStatus(ctx)
	if err != nil {
		slog.Debug("server unhealthy", "error", err)
		return err
	}
	return nil
}
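
// WaitUntilRunning blocks until the runner reports ready, the context is
// cancelled, or the load stalls; the stall timer is reset whenever the
// reported load progress advances.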
func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
	start := time.Now()
	stallDuration := 5 * time.Minute            // If no progress happens
	finalLoadDuration := 5 * time.Minute        // After we hit 100%, give the runner more time to come online
	stallTimer := time.Now().Add(stallDuration) // give up if we stall

	slog.Info("waiting for llama runner to start responding")
	var lastStatus ServerStatus = -1
	fullyLoaded := false

	for {
		select {
		case <-ctx.Done():
			slog.Warn("client connection closed before server finished loading, aborting load")
			return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
		case err := <-s.done:
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
		default:
		}
		if time.Now().After(stallTimer) {
			// timeout
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
		}
		if s.cmd.ProcessState != nil {
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("llama runner process no longer running: %d %s", s.cmd.ProcessState.ExitCode(), msg)
		}
		ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
		defer cancel()
		priorProgress := s.loadProgress
		status, _ := s.getServerStatus(ctx)
		if lastStatus != status && status != ServerStatusReady {
			// Only log on status changes
			slog.Info("waiting for server to become available", "status", status.ToString())
		}
		switch status {
		case ServerStatusReady:
			s.loadDuration = time.Since(start)
			slog.Info(fmt.Sprintf("llama runner started in %0.2f seconds", s.loadDuration.Seconds()))
			return nil
		default:
			lastStatus = status
			// Reset the timer as long as we're making forward progress on the load
			if priorProgress != s.loadProgress {
				slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
				stallTimer = time.Now().Add(stallDuration)
			} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
				slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
				stallTimer = time.Now().Add(finalLoadDuration)
				fullyLoaded = true
			}
			time.Sleep(time.Millisecond * 250)
			continue
		}
	}
}
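
// jsonGrammar is a GBNF grammar handed to the llama.cpp server to constrain
// sampling to valid JSON when a request sets Format to "json".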
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
    string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array ::=
  "[" ws (
    value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\\x7F\x00-\x1F] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`

const maxBufferSize = 512 * format.KiloByte

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

type completion struct {
	Content      string `json:"content"`
	Model        string `json:"model"`
	Prompt       string `json:"prompt"`
	Stop         bool   `json:"stop"`
	StoppedLimit bool   `json:"stopped_limit"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}

type CompletionRequest struct {
	Prompt  string
	Format  string
	Images  []ImageData
	Options api.Options
}

type CompletionResponse struct {
	Content            string
	DoneReason         string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
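
// Completion streams a prediction from the runner's /completion endpoint,
// calling fn for each content chunk and once more with Done set when
// generation stops.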
func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return err
	}
	defer s.sem.Release(1)

	// only allow maximum 10 "context shifts" to avoid infinite generation
	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
		req.Options.NumPredict = 10 * s.options.NumCtx
		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
	}

	request := map[string]any{
		"prompt":            req.Prompt,
		"stream":            true,
		"n_predict":         req.Options.NumPredict,
		"n_keep":            req.Options.NumKeep,
		"main_gpu":          req.Options.MainGPU,
		"temperature":       req.Options.Temperature,
		"top_k":             req.Options.TopK,
		"top_p":             req.Options.TopP,
		"tfs_z":             req.Options.TFSZ,
		"typical_p":         req.Options.TypicalP,
		"repeat_last_n":     req.Options.RepeatLastN,
		"repeat_penalty":    req.Options.RepeatPenalty,
		"presence_penalty":  req.Options.PresencePenalty,
		"frequency_penalty": req.Options.FrequencyPenalty,
		"mirostat":          req.Options.Mirostat,
		"mirostat_tau":      req.Options.MirostatTau,
		"mirostat_eta":      req.Options.MirostatEta,
		"penalize_nl":       req.Options.PenalizeNewline,
		"seed":              req.Options.Seed,
		"stop":              req.Options.Stop,
		"image_data":        req.Images,
		"cache_prompt":      true,
	}

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return err
	} else if status != ServerStatusReady {
		return fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	if req.Format == "json" {
		request["grammar"] = jsonGrammar
		if !strings.Contains(strings.ToLower(req.Prompt), "json") {
			slog.Warn("Prompt does not specify that the LLM should respond in JSON, but JSON format is expected. For best results specify that JSON is expected in the system prompt.")
		}
	}

	// Handling JSON marshaling with special characters unescaped.
	buffer := &bytes.Buffer{}
	enc := json.NewEncoder(buffer)
	enc.SetEscapeHTML(false)

	if err := enc.Encode(request); err != nil {
		return fmt.Errorf("failed to marshal data: %v", err)
	}

	endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port)
	serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer)
	if err != nil {
		return fmt.Errorf("error creating POST request: %v", err)
	}
	serverReq.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(serverReq)
	if err != nil {
		return fmt.Errorf("POST predict: %v", err)
	}
	defer res.Body.Close()

	if res.StatusCode >= 400 {
		bodyBytes, err := io.ReadAll(res.Body)
		if err != nil {
			return fmt.Errorf("failed reading llm error response: %w", err)
		}
		log.Printf("llm predict error: %s", bodyBytes)
		return fmt.Errorf("%s", bodyBytes)
	}

	scanner := bufio.NewScanner(res.Body)
	buf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(buf, maxBufferSize)

	// keep track of the last token generated, this is used to abort if the model starts looping
	var lastToken string
	var tokenRepeat int

	for scanner.Scan() {
		select {
		case <-ctx.Done():
			// This handles the request cancellation
			return ctx.Err()
		default:
			line := scanner.Bytes()
			if len(line) == 0 {
				continue
			}

			evt, ok := bytes.CutPrefix(line, []byte("data: "))
			if !ok {
				return fmt.Errorf("error parsing llm response stream: %s", line)
			}

			var c completion
			if err := json.Unmarshal(evt, &c); err != nil {
				return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
			}

			switch {
			case strings.TrimSpace(c.Content) == lastToken:
				tokenRepeat++
			default:
				lastToken = strings.TrimSpace(c.Content)
				tokenRepeat = 0
			}

			// 30 picked as an arbitrary max token repeat limit, modify as needed
			if tokenRepeat > 30 {
				slog.Debug("prediction aborted, token repeat limit reached")
				return ctx.Err()
			}

			if c.Content != "" {
				fn(CompletionResponse{
					Content: c.Content,
				})
			}

			if c.Stop {
				doneReason := "stop"
				if c.StoppedLimit {
					doneReason = "length"
				}

				fn(CompletionResponse{
					Done:               true,
					DoneReason:         doneReason,
					PromptEvalCount:    c.Timings.PromptN,
					PromptEvalDuration: parseDurationMs(c.Timings.PromptMS),
					EvalCount:          c.Timings.PredictedN,
					EvalDuration:       parseDurationMs(c.Timings.PredictedMS),
				})
				return nil
			}
		}
	}

	if err := scanner.Err(); err != nil {
		if strings.Contains(err.Error(), "unexpected EOF") {
			s.Close()
			msg := ""
			if s.status != nil && s.status.LastErrMsg != "" {
				msg = s.status.LastErrMsg
			}
			return fmt.Errorf("an unknown error was encountered while running the model %s", msg)
		}

		return fmt.Errorf("error reading llm response: %v", err)
	}

	return nil
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}
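
// Embedding requests an embedding vector for prompt from the runner's
// /embedding endpoint.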
func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
	if err := s.sem.Acquire(ctx, 1); err != nil {
		slog.Error("Failed to acquire semaphore", "error", err)
		return nil, err
	}
	defer s.sem.Release(1)

	// Make sure the server is ready
	status, err := s.getServerStatusRetry(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: prompt})
	if err != nil {
		return nil, fmt.Errorf("error marshaling embed data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("error creating embed request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do embedding request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading embed response: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var embedding EmbeddingResponse
	if err := json.Unmarshal(body, &embedding); err != nil {
		return nil, fmt.Errorf("unmarshal tokenize response: %w", err)
	}

	return embedding.Embedding, nil
}

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}
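
// Tokenize converts content into model token IDs via the runner's /tokenize
// endpoint.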
func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return nil, err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(TokenizeRequest{Content: content})
	if err != nil {
		return nil, fmt.Errorf("marshaling encode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("encode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("do encode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read encode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm encode error: %s", body)
		return nil, fmt.Errorf("%s", body)
	}

	var encoded TokenizeResponse
	if err := json.Unmarshal(body, &encoded); err != nil {
		return nil, fmt.Errorf("unmarshal encode response: %w", err)
	}

	return encoded.Tokens, nil
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}
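
// Detokenize converts token IDs back into text via the runner's /detokenize
// endpoint.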
func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
	// Make sure the server is ready
	status, err := s.getServerStatus(ctx)
	if err != nil {
		return "", err
	} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
		return "", fmt.Errorf("unexpected server status: %s", status.ToString())
	}

	data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
	if err != nil {
		return "", fmt.Errorf("marshaling decode data: %w", err)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
	if err != nil {
		return "", fmt.Errorf("decode request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("do decode request: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("read decode request: %w", err)
	}

	if resp.StatusCode >= 400 {
		log.Printf("llm decode error: %s", body)
		return "", fmt.Errorf("%s", body)
	}

	var decoded DetokenizeResponse
	if err := json.Unmarshal(body, &decoded); err != nil {
		return "", fmt.Errorf("unmarshal encode response: %w", err)
	}

	return decoded.Content, nil
}
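
// Close kills the runner subprocess, if any, and waits for it to exit.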
func (s *llmServer) Close() error {
	if s.cmd != nil {
		slog.Debug("stopping llama server")
		if err := s.cmd.Process.Kill(); err != nil {
			return err
		}
		// if ProcessState is already populated, Wait already completed, no need to wait again
		if s.cmd.ProcessState == nil {
			slog.Debug("waiting for llama server to exit")
			<-s.done
		}

		slog.Debug("llama server stopped")
	}

	return nil
}

func (s *llmServer) EstimatedVRAM() uint64 {
	return s.estimate.VRAMSize
}

func (s *llmServer) EstimatedTotal() uint64 {
	return s.estimate.TotalSize
}

func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
	for i, gpu := range s.gpus {
		if gpu.ID == gpuID {
			return s.estimate.GPUSizes[i]
		}
	}
	return 0
}
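
// parseDurationMs converts a millisecond value from the runner's timings into
// a time.Duration.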
func parseDurationMs(ms float64) time.Duration {
	dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
	if err != nil {
		panic(err)
	}

	return dur
}